Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2023 Intel Corporation
4 */
5
6#include <drm/drm_managed.h>
7
8#include "regs/xe_guc_regs.h"
9#include "regs/xe_irq_regs.h"
10
11#include "xe_assert.h"
12#include "xe_bo.h"
13#include "xe_device.h"
14#include "xe_device_types.h"
15#include "xe_gt.h"
16#include "xe_guc.h"
17#include "xe_hw_engine.h"
18#include "xe_memirq.h"
19#include "xe_tile_printk.h"
20
/* All asserts and messages are attributed to the tile that owns this memirq. */
#define memirq_assert(m, condition) xe_tile_assert(memirq_to_tile(m), condition)
#define memirq_printk(m, _level, _fmt, ...) \
	xe_tile_##_level(memirq_to_tile(m), "MEMIRQ: " _fmt, ##__VA_ARGS__)

#ifdef CONFIG_DRM_XE_DEBUG_MEMIRQ
#define memirq_debug(m, _fmt, ...) memirq_printk(m, dbg, _fmt, ##__VA_ARGS__)
#else
/* debug messages compile out entirely when CONFIG_DRM_XE_DEBUG_MEMIRQ is off */
#define memirq_debug(...)
#endif

#define memirq_err(m, _fmt, ...) memirq_printk(m, err, _fmt, ##__VA_ARGS__)
#define memirq_err_ratelimited(m, _fmt, ...) \
	memirq_printk(m, err_ratelimited, _fmt, ##__VA_ARGS__)
34
/* Resolve the owning tile: &xe_memirq is embedded in &xe_tile as "memirq". */
static struct xe_tile *memirq_to_tile(struct xe_memirq *memirq)
{
	return container_of(memirq, struct xe_tile, memirq);
}
39
/* Resolve the owning device via the owning tile. */
static struct xe_device *memirq_to_xe(struct xe_memirq *memirq)
{
	return tile_to_xe(memirq_to_tile(memirq));
}
44
/* Human-readable GuC identifier for use in log messages. */
static const char *guc_name(struct xe_guc *guc)
{
	if (xe_gt_is_media_type(guc_to_gt(guc)))
		return "media GuC";

	return "GuC";
}
49
50/**
51 * DOC: Memory Based Interrupts
52 *
53 * MMIO register based interrupts infrastructure used for non-virtualized mode
54 * or SRIOV-8 (which supports 8 Virtual Functions) does not scale efficiently
55 * to allow delivering interrupts to a large number of Virtual machines or
56 * containers. Memory based interrupt status reporting provides an efficient
57 * and scalable infrastructure.
58 *
 * For memory based interrupt status reporting, the hardware sequence is:
60 * * Engine writes the interrupt event to memory
61 * (Pointer to memory location is provided by SW. This memory surface must
62 * be mapped to system memory and must be marked as un-cacheable (UC) on
63 * Graphics IP Caches)
64 * * Engine triggers an interrupt to host.
65 */
66
67/**
68 * DOC: Memory Based Interrupts Page Layout
69 *
70 * `Memory Based Interrupts`_ requires three different objects, which are
71 * called "page" in the specs, even if they aren't page-sized or aligned.
72 *
73 * To simplify the code we allocate a single page size object and then use
74 * offsets to embedded "pages". The address of those "pages" are then
75 * programmed in the HW via LRI and LRM in the context image.
76 *
77 * - _`Interrupt Status Report Page`: this page contains the interrupt
78 * status vectors for each unit. Each bit in the interrupt vectors is
79 * converted to a byte, with the byte being set to 0xFF when an
80 * interrupt is triggered; interrupt vectors are 16b big so each unit
81 * gets 16B. One space is reserved for each bit in one of the
82 * GT_INTR_DWx registers, so this object needs a total of 1024B.
83 * This object needs to be 4KiB aligned.
84 *
85 * - _`Interrupt Source Report Page`: this is the equivalent of the
86 * GT_INTR_DWx registers, with each bit in those registers being
87 * mapped to a byte here. The offsets are the same, just bytes instead
88 * of bits. This object needs to be cacheline aligned.
89 *
90 * - Interrupt Mask: the HW needs a location to fetch the interrupt
91 * mask vector to be used by the LRM in the context, so we just use
92 * the next available space in the interrupt page.
93 *
94 * ::
95 *
96 * 0x0000 +===========+ <== Interrupt Status Report Page
97 * | |
98 * | | ____ +----+----------------+
99 * | | / | 0 | USER INTERRUPT |
100 * +-----------+ __/ | 1 | |
101 * | HWE(n) | __ | | CTX SWITCH |
102 * +-----------+ \ | | WAIT SEMAPHORE |
103 * | | \____ | 15 | |
104 * | | +----+----------------+
105 * | |
106 * 0x0400 +===========+ <== Interrupt Source Report Page
107 * | HWE(0) |
108 * | HWE(1) |
109 * | |
110 * | HWE(x) |
111 * 0x0440 +===========+ <== Interrupt Enable Mask
112 * | |
113 * | |
114 * +-----------+
115 *
116 *
117 * MSI-X use case
118 *
119 * When using MSI-X, hw engines report interrupt status and source to engine
120 * instance 0. For this scenario, in order to differentiate between the
121 * engines, we need to pass different status/source pointers in the LRC.
122 *
123 * The requirements on those pointers are:
124 * - Interrupt status should be 4KiB aligned
125 * - Interrupt source should be 64 bytes aligned
126 *
127 * To accommodate this, we duplicate the memirq page layout above -
128 * allocating a page for each engine instance and pass this page in the LRC.
129 * Note that the same page can be reused for different engine types.
130 * For example, an LRC executing on CCS #x will have pointers to page #x,
131 * and an LRC executing on BCS #x will have the same pointers.
132 *
133 * ::
134 *
135 * 0x0000 +==============================+ <== page for instance 0 (BCS0, CCS0, etc.)
136 * | Interrupt Status Report Page |
137 * 0x0400 +==============================+
138 * | Interrupt Source Report Page |
139 * 0x0440 +==============================+
140 * | Interrupt Enable Mask |
141 * +==============================+
142 * | Not used |
143 * 0x1000 +==============================+ <== page for instance 1 (BCS1, CCS1, etc.)
144 * | Interrupt Status Report Page |
145 * 0x1400 +==============================+
146 * | Interrupt Source Report Page |
147 * 0x1440 +==============================+
148 * | Not used |
149 * 0x2000 +==============================+ <== page for instance 2 (BCS2, CCS2, etc.)
150 * | ... |
151 * +==============================+
152 *
153 */
154
/* True if all HW engines report interrupts to engine instance 0's pages (MSI-X). */
static inline bool hw_reports_to_instance_zero(struct xe_memirq *memirq)
{
	/*
	 * When the HW engines are configured to use MSI-X,
	 * they report interrupt status and source to the offset of
	 * engine instance 0.
	 */
	return xe_device_has_msix(memirq_to_xe(memirq));
}
164
/*
 * Allocate the backing BO holding all memirq "pages" and initialize the
 * source/status/mask iosys_map views into it.
 *
 * Per the `Memory Based Interrupts`_ DOC above, the surface must live in
 * system memory and be mapped uncached (hence XE_BO_FLAG_SYSTEM and
 * XE_BO_FLAG_NEEDS_UC). When the HW reports everything to instance 0
 * (MSI-X), one 4KiB page is allocated per possible engine instance;
 * otherwise a single shared 4KiB page suffices.
 *
 * Return: 0 on success or a negative error code on failure.
 */
static int memirq_alloc_pages(struct xe_memirq *memirq)
{
	struct xe_device *xe = memirq_to_xe(memirq);
	struct xe_tile *tile = memirq_to_tile(memirq);
	size_t bo_size = hw_reports_to_instance_zero(memirq) ?
		XE_HW_ENGINE_MAX_INSTANCE * SZ_4K : SZ_4K;
	struct xe_bo *bo;
	int err;

	/* layout requirements: source 64B aligned, status 4KiB aligned */
	BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_SOURCE_OFFSET(0), SZ_64));
	BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_STATUS_OFFSET(0), SZ_4K));

	bo = xe_managed_bo_create_pin_map(xe, tile, bo_size,
					  XE_BO_FLAG_SYSTEM |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_GGTT_INVALIDATE |
					  XE_BO_FLAG_NEEDS_UC |
					  XE_BO_FLAG_NEEDS_CPU_ACCESS);
	if (IS_ERR(bo)) {
		err = PTR_ERR(bo);
		goto out;
	}

	memirq_assert(memirq, !xe_bo_is_vram(bo));
	memirq_assert(memirq, !memirq->bo);

	/* start with all report bytes cleared */
	iosys_map_memset(&bo->vmap, 0, 0, bo_size);

	memirq->bo = bo;
	memirq->source = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_SOURCE_OFFSET(0));
	memirq->status = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_STATUS_OFFSET(0));
	memirq->mask = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_ENABLE_OFFSET);

	/* system-memory BO, so the CPU mappings must not be iomem */
	memirq_assert(memirq, !memirq->source.is_iomem);
	memirq_assert(memirq, !memirq->status.is_iomem);
	memirq_assert(memirq, !memirq->mask.is_iomem);

	memirq_debug(memirq, "page offsets: bo %#x bo_size %zu source %#x status %#x\n",
		     xe_bo_ggtt_addr(bo), bo_size, XE_MEMIRQ_SOURCE_OFFSET(0),
		     XE_MEMIRQ_STATUS_OFFSET(0));

	return 0;

out:
	memirq_err(memirq, "Failed to allocate memirq page (%pe)\n", ERR_PTR(err));
	return err;
}
212
213static void memirq_set_enable(struct xe_memirq *memirq, bool enable)
214{
215 iosys_map_wr(&memirq->mask, 0, u32, enable ? GENMASK(15, 0) : 0);
216
217 memirq->enabled = enable;
218}
219
220/**
221 * xe_memirq_init - Initialize data used by `Memory Based Interrupts`_.
222 * @memirq: the &xe_memirq to initialize
223 *
224 * Allocate `Interrupt Source Report Page`_ and `Interrupt Status Report Page`_
225 * used by `Memory Based Interrupts`_.
226 *
227 * These allocations are managed and will be implicitly released on unload.
228 *
229 * If this function fails then the driver won't be able to operate correctly.
230 * If `Memory Based Interrupts`_ are not used this function will return 0.
231 *
232 * Return: 0 on success or a negative error code on failure.
233 */
234int xe_memirq_init(struct xe_memirq *memirq)
235{
236 struct xe_device *xe = memirq_to_xe(memirq);
237 int err;
238
239 if (!xe_device_uses_memirq(xe))
240 return 0;
241
242 err = memirq_alloc_pages(memirq);
243 if (unlikely(err))
244 return err;
245
246 /* we need to start with all irqs enabled */
247 memirq_set_enable(memirq, true);
248
249 return 0;
250}
251
252static u32 __memirq_source_page(struct xe_memirq *memirq, u16 instance)
253{
254 memirq_assert(memirq, instance <= XE_HW_ENGINE_MAX_INSTANCE);
255 memirq_assert(memirq, memirq->bo);
256
257 instance = hw_reports_to_instance_zero(memirq) ? instance : 0;
258 return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_SOURCE_OFFSET(instance);
259}
260
/**
 * xe_memirq_source_ptr - Get GGTT's offset of the `Interrupt Source Report Page`_.
 * @memirq: the &xe_memirq to query
 * @hwe: the hw engine for which we want the report page
 *
 * Shall be called when `Memory Based Interrupts`_ are used
 * and xe_memirq_init() didn't fail.
 *
 * Return: GGTT's offset of the `Interrupt Source Report Page`_.
 */
u32 xe_memirq_source_ptr(struct xe_memirq *memirq, struct xe_hw_engine *hwe)
{
	memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq)));

	/* per-instance page with MSI-X, shared page otherwise */
	return __memirq_source_page(memirq, hwe->instance);
}
277
278static u32 __memirq_status_page(struct xe_memirq *memirq, u16 instance)
279{
280 memirq_assert(memirq, instance <= XE_HW_ENGINE_MAX_INSTANCE);
281 memirq_assert(memirq, memirq->bo);
282
283 instance = hw_reports_to_instance_zero(memirq) ? instance : 0;
284 return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_STATUS_OFFSET(instance);
285}
286
/**
 * xe_memirq_status_ptr - Get GGTT's offset of the `Interrupt Status Report Page`_.
 * @memirq: the &xe_memirq to query
 * @hwe: the hw engine for which we want the report page
 *
 * Shall be called when `Memory Based Interrupts`_ are used
 * and xe_memirq_init() didn't fail.
 *
 * Return: GGTT's offset of the `Interrupt Status Report Page`_.
 */
u32 xe_memirq_status_ptr(struct xe_memirq *memirq, struct xe_hw_engine *hwe)
{
	memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq)));

	/* per-instance page with MSI-X, shared page otherwise */
	return __memirq_status_page(memirq, hwe->instance);
}
303
/**
 * xe_memirq_enable_ptr - Get GGTT's offset of the Interrupt Enable Mask.
 * @memirq: the &xe_memirq to query
 *
 * Shall be called when `Memory Based Interrupts`_ are used
 * and xe_memirq_init() didn't fail.
 *
 * Return: GGTT's offset of the Interrupt Enable Mask.
 */
u32 xe_memirq_enable_ptr(struct xe_memirq *memirq)
{
	memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq)));
	memirq_assert(memirq, memirq->bo);

	/* the enable mask always lives in the first (shared) page */
	return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_ENABLE_OFFSET;
}
320
321/**
322 * xe_memirq_init_guc - Prepare GuC for `Memory Based Interrupts`_.
323 * @memirq: the &xe_memirq
324 * @guc: the &xe_guc to setup
325 *
326 * Register `Interrupt Source Report Page`_ and `Interrupt Status Report Page`_
327 * to be used by the GuC when `Memory Based Interrupts`_ are required.
328 *
329 * Shall be called when `Memory Based Interrupts`_ are used
330 * and xe_memirq_init() didn't fail.
331 *
332 * Return: 0 on success or a negative error code on failure.
333 */
334int xe_memirq_init_guc(struct xe_memirq *memirq, struct xe_guc *guc)
335{
336 bool is_media = xe_gt_is_media_type(guc_to_gt(guc));
337 u32 offset = is_media ? ilog2(INTR_MGUC) : ilog2(INTR_GUC);
338 u32 source, status;
339 int err;
340
341 memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq)));
342
343 source = __memirq_source_page(memirq, 0) + offset;
344 status = __memirq_status_page(memirq, 0) + offset * SZ_16;
345
346 err = xe_guc_self_cfg64(guc, GUC_KLV_SELF_CFG_MEMIRQ_SOURCE_ADDR_KEY,
347 source);
348 if (unlikely(err))
349 goto failed;
350
351 err = xe_guc_self_cfg64(guc, GUC_KLV_SELF_CFG_MEMIRQ_STATUS_ADDR_KEY,
352 status);
353 if (unlikely(err))
354 goto failed;
355
356 return 0;
357
358failed:
359 memirq_err(memirq, "Failed to setup report pages in %s (%pe)\n",
360 guc_name(guc), ERR_PTR(err));
361 return err;
362}
363
364/**
365 * xe_memirq_reset - Disable processing of `Memory Based Interrupts`_.
366 * @memirq: struct xe_memirq
367 *
368 * This is part of the driver IRQ setup flow.
369 *
370 * This function shall only be used on platforms that use
371 * `Memory Based Interrupts`_.
372 */
373void xe_memirq_reset(struct xe_memirq *memirq)
374{
375 memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq)));
376
377 if (memirq->bo)
378 memirq_set_enable(memirq, false);
379}
380
381/**
382 * xe_memirq_postinstall - Enable processing of `Memory Based Interrupts`_.
383 * @memirq: the &xe_memirq
384 *
385 * This is part of the driver IRQ setup flow.
386 *
387 * This function shall only be used on platforms that use
388 * `Memory Based Interrupts`_.
389 */
390void xe_memirq_postinstall(struct xe_memirq *memirq)
391{
392 memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq)));
393
394 if (memirq->bo)
395 memirq_set_enable(memirq, true);
396}
397
398static bool __memirq_received(struct xe_memirq *memirq,
399 struct iosys_map *vector, u16 offset,
400 const char *name, bool clear)
401{
402 u8 value;
403
404 value = iosys_map_rd(vector, offset, u8);
405 if (value) {
406 if (value != 0xff)
407 memirq_err_ratelimited(memirq,
408 "Unexpected memirq value %#x from %s at %u\n",
409 value, name, offset);
410 if (clear)
411 iosys_map_wr(vector, offset, u8, 0x00);
412 }
413
414 return value;
415}
416
/* Check a report byte without consuming it (leaves the byte set). */
static bool memirq_received_noclear(struct xe_memirq *memirq,
				    struct iosys_map *vector,
				    u16 offset, const char *name)
{
	return __memirq_received(memirq, vector, offset, name, false);
}
423
/* Check a report byte and consume it (clears the byte if it was set). */
static bool memirq_received(struct xe_memirq *memirq, struct iosys_map *vector,
			    u16 offset, const char *name)
{
	return __memirq_received(memirq, vector, offset, name, true);
}
429
/* Forward pending engine interrupts from @status (currently only MI_USER_INTERRUPT). */
static void memirq_dispatch_engine(struct xe_memirq *memirq, struct iosys_map *status,
				   struct xe_hw_engine *hwe)
{
	memirq_debug(memirq, "STATUS %s %*ph\n", hwe->name, 16, status->vaddr);

	/* each bit of the 16b interrupt vector is expanded to one byte in @status */
	if (memirq_received(memirq, status, ilog2(GT_MI_USER_INTERRUPT), hwe->name))
		xe_hw_engine_handle_irq(hwe, GT_MI_USER_INTERRUPT);
}
438
/* Forward pending GuC interrupts (G2H and SW_INT_0) from @status to @guc. */
static void memirq_dispatch_guc(struct xe_memirq *memirq, struct iosys_map *status,
				struct xe_guc *guc)
{
	const char *name = guc_name(guc);

	memirq_debug(memirq, "STATUS %s %*ph\n", name, 16, status->vaddr);

	if (memirq_received(memirq, status, ilog2(GUC_INTR_GUC2HOST), name))
		xe_guc_irq_handler(guc, GUC_INTR_GUC2HOST);

	/*
	 * This is a software interrupt that must be cleared after it's consumed
	 * (i.e. only after the handler has run, unlike the read-and-clear path
	 * above) to avoid race conditions where
	 * xe_gt_sriov_vf_recovery_pending() returns false.
	 */
	if (memirq_received_noclear(memirq, status, ilog2(GUC_INTR_SW_INT_0),
				    name)) {
		xe_guc_irq_handler(guc, GUC_INTR_SW_INT_0);
		iosys_map_wr(status, ilog2(GUC_INTR_SW_INT_0), u8, 0x00);
	}
}
460
/**
 * xe_memirq_hwe_handler - Check and process interrupts for a specific HW engine.
 * @memirq: the &xe_memirq
 * @hwe: the hw engine to process
 *
 * This function reads and dispatches `Memory Based Interrupts` for the provided HW engine.
 *
 * NOTE(review): @memirq->bo is dereferenced unconditionally here — callers
 * apparently must only invoke this after a successful xe_memirq_init();
 * confirm against all call sites.
 */
void xe_memirq_hwe_handler(struct xe_memirq *memirq, struct xe_hw_engine *hwe)
{
	u16 offset = hwe->irq_offset;
	/* with MSI-X the HW reports everything through instance 0's pages */
	u16 instance = hw_reports_to_instance_zero(memirq) ? hwe->instance : 0;
	struct iosys_map src_offset = IOSYS_MAP_INIT_OFFSET(&memirq->bo->vmap,
							    XE_MEMIRQ_SOURCE_OFFSET(instance));

	if (memirq_received(memirq, &src_offset, offset, "SRC")) {
		/* each source bit has a 16B status slot at the matching index */
		struct iosys_map status_offset =
			IOSYS_MAP_INIT_OFFSET(&memirq->bo->vmap,
					      XE_MEMIRQ_STATUS_OFFSET(instance) + offset * SZ_16);
		memirq_dispatch_engine(memirq, &status_offset, hwe);
	}
}
482
/**
 * xe_memirq_guc_sw_int_0_irq_pending() - SW_INT_0 IRQ is pending
 * @memirq: the &xe_memirq
 * @guc: the &xe_guc to check for IRQ
 *
 * Peeks at the (media) GuC's status slot without clearing the report byte.
 *
 * Return: True if SW_INT_0 IRQ is pending on @guc, False otherwise
 */
bool xe_memirq_guc_sw_int_0_irq_pending(struct xe_memirq *memirq, struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 offset = xe_gt_is_media_type(gt) ? ilog2(INTR_MGUC) : ilog2(INTR_GUC);
	/* status slots are 16B wide, indexed by the GuC's source bit */
	struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&memirq->status, offset * SZ_16);

	return memirq_received_noclear(memirq, &map, ilog2(GUC_INTR_SW_INT_0),
				       guc_name(guc));
}
499
/**
 * xe_memirq_handler - The `Memory Based Interrupts`_ Handler.
 * @memirq: the &xe_memirq
 *
 * This function reads and dispatches `Memory Based Interrupts`.
 */
void xe_memirq_handler(struct xe_memirq *memirq)
{
	struct xe_device *xe = memirq_to_xe(memirq);
	struct xe_tile *tile = memirq_to_tile(memirq);
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	struct iosys_map map;
	unsigned int gtid;
	struct xe_gt *gt;

	/* nothing to do if xe_memirq_init() was skipped or failed */
	if (!memirq->bo)
		return;

	memirq_assert(memirq, !memirq->source.is_iomem);
	memirq_debug(memirq, "SOURCE %*ph\n", 32, memirq->source.vaddr);
	memirq_debug(memirq, "SOURCE %*ph\n", 32, memirq->source.vaddr + 32);

	/* only dispatch to engines that belong to this memirq's tile */
	for_each_gt(gt, xe, gtid) {
		if (gt->tile != tile)
			continue;

		for_each_hw_engine(hwe, gt, id)
			xe_memirq_hwe_handler(memirq, hwe);
	}

	/* GuC and media GuC (if present) must be checked separately */

	if (memirq_received(memirq, &memirq->source, ilog2(INTR_GUC), "SRC")) {
		/* 16B status slot indexed by the GuC's source bit */
		map = IOSYS_MAP_INIT_OFFSET(&memirq->status, ilog2(INTR_GUC) * SZ_16);
		memirq_dispatch_guc(memirq, &map, &tile->primary_gt->uc.guc);
	}

	if (!tile->media_gt)
		return;

	if (memirq_received(memirq, &memirq->source, ilog2(INTR_MGUC), "SRC")) {
		map = IOSYS_MAP_INIT_OFFSET(&memirq->status, ilog2(INTR_MGUC) * SZ_16);
		memirq_dispatch_guc(memirq, &map, &tile->media_gt->uc.guc);
	}
}
545}