// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_irq.h"

#include <linux/sched/clock.h>

#include <drm/drm_managed.h>

#include "display/xe_display.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_device.h"
#include "xe_drv.h"
#include "xe_gsc_proxy.h"
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_hw_engine.h"
#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_sriov.h"

/*
 * Interrupt registers for a unit are always consecutive and ordered
 * ISR, IMR, IIR, IER.
 */
#define IMR(offset)	XE_REG(offset + 0x4)
#define IIR(offset)	XE_REG(offset + 0x8)
#define IER(offset)	XE_REG(offset + 0xc)
static void assert_iir_is_zero(struct xe_gt *mmio, struct xe_reg reg)
{
	u32 val = xe_mmio_read32(mmio, reg);

	if (val == 0)
		return;

	drm_WARN(&gt_to_xe(mmio)->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 reg.addr, val);
	/* IIR can theoretically queue up two events, so clear and flush twice. */
	xe_mmio_write32(mmio, reg, 0xffffffff);
	xe_mmio_read32(mmio, reg);
	xe_mmio_write32(mmio, reg, 0xffffffff);
	xe_mmio_read32(mmio, reg);
}

/*
 * Unmask and enable the specified interrupts. Does not check current state,
 * so any bits not specified here will become masked and disabled.
 */
static void unmask_and_enable(struct xe_tile *tile, u32 irqregs, u32 bits)
{
	struct xe_gt *mmio = tile->primary_gt;

	/*
	 * If we're just enabling an interrupt now, it shouldn't already
	 * be raised in the IIR.
	 */
	assert_iir_is_zero(mmio, IIR(irqregs));

	xe_mmio_write32(mmio, IER(irqregs), bits);
	xe_mmio_write32(mmio, IMR(irqregs), ~bits);

	/* Posting read */
	xe_mmio_read32(mmio, IMR(irqregs));
}

/* Mask and disable all interrupts. */
static void mask_and_disable(struct xe_tile *tile, u32 irqregs)
{
	struct xe_gt *mmio = tile->primary_gt;

	xe_mmio_write32(mmio, IMR(irqregs), ~0);
	/* Posting read */
	xe_mmio_read32(mmio, IMR(irqregs));

	xe_mmio_write32(mmio, IER(irqregs), 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	xe_mmio_write32(mmio, IIR(irqregs), ~0);
	xe_mmio_read32(mmio, IIR(irqregs));
	xe_mmio_write32(mmio, IIR(irqregs), ~0);
	xe_mmio_read32(mmio, IIR(irqregs));
}

static u32 xelp_intr_disable(struct xe_device *xe)
{
	struct xe_gt *mmio = xe_root_mmio_gt(xe);

	xe_mmio_write32(mmio, GFX_MSTR_IRQ, 0);

	/*
	 * Now that the master is disabled, take a sample of the level
	 * indications for this interrupt. Indications will be cleared by the
	 * related acks. New indications can and will light up during
	 * processing, and will generate a new interrupt once the master is
	 * re-enabled.
	 */
	return xe_mmio_read32(mmio, GFX_MSTR_IRQ);
}

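/*
 * Ack a pending GU MISC interrupt, if any, and return the raw IIR bits so
 * the caller can forward them (e.g. GSE events) to the display code once
 * interrupt processing is done.
 */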
static u32
gu_misc_irq_ack(struct xe_device *xe, const u32 master_ctl)
{
	struct xe_gt *mmio = xe_root_mmio_gt(xe);
	u32 iir;

	if (!(master_ctl & GU_MISC_IRQ))
		return 0;

	iir = xe_mmio_read32(mmio, IIR(GU_MISC_IRQ_OFFSET));
	if (likely(iir))
		xe_mmio_write32(mmio, IIR(GU_MISC_IRQ_OFFSET), iir);

	return iir;
}

static inline void xelp_intr_enable(struct xe_device *xe, bool stall)
{
	struct xe_gt *mmio = xe_root_mmio_gt(xe);

	xe_mmio_write32(mmio, GFX_MSTR_IRQ, MASTER_IRQ);
	if (stall)
		xe_mmio_read32(mmio, GFX_MSTR_IRQ);
}

/* Enable/unmask the HWE interrupts for a specific GT's engines. */
void xe_irq_enable_hwe(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 ccs_mask, bcs_mask;
	u32 irqs, dmask, smask;
	u32 gsc_mask = 0;
	u32 heci_mask = 0;

	if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe))
		return;

	if (xe_device_uc_enabled(xe)) {
		irqs = GT_RENDER_USER_INTERRUPT |
			GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
	} else {
		irqs = GT_RENDER_USER_INTERRUPT |
		       GT_CS_MASTER_ERROR_INTERRUPT |
		       GT_CONTEXT_SWITCH_INTERRUPT |
		       GT_WAIT_SEMAPHORE_INTERRUPT;
	}

	ccs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COMPUTE);
	bcs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COPY);

	dmask = irqs << 16 | irqs;
	smask = irqs << 16;

	if (!xe_gt_is_media_type(gt)) {
		/* Enable interrupts for each engine class */
		xe_mmio_write32(gt, RENDER_COPY_INTR_ENABLE, dmask);
		if (ccs_mask)
			xe_mmio_write32(gt, CCS_RSVD_INTR_ENABLE, smask);

		/* Unmask interrupts for each engine instance */
		xe_mmio_write32(gt, RCS0_RSVD_INTR_MASK, ~smask);
		xe_mmio_write32(gt, BCS_RSVD_INTR_MASK, ~smask);
		if (bcs_mask & (BIT(1) | BIT(2)))
			xe_mmio_write32(gt, XEHPC_BCS1_BCS2_INTR_MASK, ~dmask);
		if (bcs_mask & (BIT(3) | BIT(4)))
			xe_mmio_write32(gt, XEHPC_BCS3_BCS4_INTR_MASK, ~dmask);
		if (bcs_mask & (BIT(5) | BIT(6)))
			xe_mmio_write32(gt, XEHPC_BCS5_BCS6_INTR_MASK, ~dmask);
		if (bcs_mask & (BIT(7) | BIT(8)))
			xe_mmio_write32(gt, XEHPC_BCS7_BCS8_INTR_MASK, ~dmask);
		if (ccs_mask & (BIT(0) | BIT(1)))
			xe_mmio_write32(gt, CCS0_CCS1_INTR_MASK, ~dmask);
		if (ccs_mask & (BIT(2) | BIT(3)))
			xe_mmio_write32(gt, CCS2_CCS3_INTR_MASK, ~dmask);
	}

	if (xe_gt_is_media_type(gt) || MEDIA_VER(xe) < 13) {
		/* Enable interrupts for each engine class */
		xe_mmio_write32(gt, VCS_VECS_INTR_ENABLE, dmask);

		/* Unmask interrupts for each engine instance */
		xe_mmio_write32(gt, VCS0_VCS1_INTR_MASK, ~dmask);
		xe_mmio_write32(gt, VCS2_VCS3_INTR_MASK, ~dmask);
		xe_mmio_write32(gt, VECS0_VECS1_INTR_MASK, ~dmask);

		/*
		 * The HECI2 interrupt is enabled via the same register as the
		 * GSCCS interrupts, but it has its own mask register.
		 */
		if (xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_OTHER)) {
			gsc_mask = irqs | GSC_ER_COMPLETE;
			heci_mask = GSC_IRQ_INTF(1);
		} else if (HAS_HECI_GSCFI(xe)) {
			gsc_mask = GSC_IRQ_INTF(1);
		}

		if (gsc_mask) {
			xe_mmio_write32(gt, GUNIT_GSC_INTR_ENABLE, gsc_mask | heci_mask);
			xe_mmio_write32(gt, GUNIT_GSC_INTR_MASK, ~gsc_mask);
		}
		if (heci_mask)
			xe_mmio_write32(gt, HECI2_RSVD_INTR_MASK, ~(heci_mask << 16));
	}
}

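/*
 * Select one pending interrupt bit in the given bank and read back the
 * corresponding identity register to learn which engine class/instance
 * raised it. Returns 0 if the hardware never marks the data as valid.
 */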
static u32
gt_engine_identity(struct xe_device *xe,
		   struct xe_gt *mmio,
		   const unsigned int bank,
		   const unsigned int bit)
{
	u32 timeout_ts;
	u32 ident;

	lockdep_assert_held(&xe->irq.lock);

	xe_mmio_write32(mmio, IIR_REG_SELECTOR(bank), BIT(bit));

	/*
	 * NB: Specs do not specify how long to spin wait,
	 * so we do ~100us as an educated guess.
	 */
	timeout_ts = (local_clock() >> 10) + 100;
	do {
		ident = xe_mmio_read32(mmio, INTR_IDENTITY_REG(bank));
	} while (!(ident & INTR_DATA_VALID) &&
		 !time_after32(local_clock() >> 10, timeout_ts));

	if (unlikely(!(ident & INTR_DATA_VALID))) {
		drm_err(&xe->drm, "INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
			bank, bit, ident);
		return 0;
	}

	xe_mmio_write32(mmio, INTR_IDENTITY_REG(bank), ident);

	return ident;
}

#define OTHER_MEDIA_GUC_INSTANCE	16

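/* Dispatch "OTHER" class interrupts to the GuC or GSC handler that owns them. */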
static void
gt_other_irq_handler(struct xe_gt *gt, const u8 instance, const u16 iir)
{
	if (instance == OTHER_GUC_INSTANCE && !xe_gt_is_media_type(gt))
		return xe_guc_irq_handler(&gt->uc.guc, iir);
	if (instance == OTHER_MEDIA_GUC_INSTANCE && xe_gt_is_media_type(gt))
		return xe_guc_irq_handler(&gt->uc.guc, iir);
	if (instance == OTHER_GSC_HECI2_INSTANCE && xe_gt_is_media_type(gt))
		return xe_gsc_proxy_irq_handler(&gt->uc.gsc, iir);

	if (instance != OTHER_GUC_INSTANCE &&
	    instance != OTHER_MEDIA_GUC_INSTANCE) {
		WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
			  instance, iir);
	}
}

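/*
 * Map an engine class/instance pair to the GT that owns it. On platforms
 * with a standalone media GT (MEDIA_VER >= 13), media engines and the
 * media GuC/GSC report through the primary GT's interrupt registers but
 * must be handled by the media GT.
 */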
static struct xe_gt *pick_engine_gt(struct xe_tile *tile,
				    enum xe_engine_class class,
				    unsigned int instance)
{
	struct xe_device *xe = tile_to_xe(tile);

	if (MEDIA_VER(xe) < 13)
		return tile->primary_gt;

	switch (class) {
	case XE_ENGINE_CLASS_VIDEO_DECODE:
	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
		return tile->media_gt;
	case XE_ENGINE_CLASS_OTHER:
		switch (instance) {
		case OTHER_MEDIA_GUC_INSTANCE:
		case OTHER_GSC_INSTANCE:
		case OTHER_GSC_HECI2_INSTANCE:
			return tile->media_gt;
		default:
			break;
		}
		fallthrough;
	default:
		return tile->primary_gt;
	}
}

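/*
 * Walk both GT interrupt banks: latch each pending bit's identity first,
 * ack the bank, then dispatch every captured identity to the HW engine,
 * GuC, GSC or HECI handler it belongs to.
 */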
static void gt_irq_handler(struct xe_tile *tile,
			   u32 master_ctl, unsigned long *intr_dw,
			   u32 *identity)
{
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_gt *mmio = tile->primary_gt;
	unsigned int bank, bit;
	u16 instance, intr_vec;
	enum xe_engine_class class;
	struct xe_hw_engine *hwe;

	spin_lock(&xe->irq.lock);

	for (bank = 0; bank < 2; bank++) {
		if (!(master_ctl & GT_DW_IRQ(bank)))
			continue;

		intr_dw[bank] = xe_mmio_read32(mmio, GT_INTR_DW(bank));
		for_each_set_bit(bit, intr_dw + bank, 32)
			identity[bit] = gt_engine_identity(xe, mmio, bank, bit);
		xe_mmio_write32(mmio, GT_INTR_DW(bank), intr_dw[bank]);

		for_each_set_bit(bit, intr_dw + bank, 32) {
			struct xe_gt *engine_gt;

			class = INTR_ENGINE_CLASS(identity[bit]);
			instance = INTR_ENGINE_INSTANCE(identity[bit]);
			intr_vec = INTR_ENGINE_INTR(identity[bit]);

			engine_gt = pick_engine_gt(tile, class, instance);

			hwe = xe_gt_hw_engine(engine_gt, class, instance, false);
			if (hwe) {
				xe_hw_engine_handle_irq(hwe, intr_vec);
				continue;
			}

			if (class == XE_ENGINE_CLASS_OTHER) {
				/* HECI GSCFI interrupts come from outside of GT */
				if (HAS_HECI_GSCFI(xe) && instance == OTHER_GSC_INSTANCE)
					xe_heci_gsc_irq_handler(xe, intr_vec);
				else
					gt_other_irq_handler(engine_gt, instance, intr_vec);
			}
		}
	}

	spin_unlock(&xe->irq.lock);
}

/*
 * Top-level interrupt handler for Xe_LP platforms (which did not have
 * a "master tile" interrupt register).
 */
static irqreturn_t xelp_irq_handler(int irq, void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *tile = xe_device_get_root_tile(xe);
	u32 master_ctl, gu_misc_iir;
	unsigned long intr_dw[2];
	u32 identity[32];

	spin_lock(&xe->irq.lock);
	if (!xe->irq.enabled) {
		spin_unlock(&xe->irq.lock);
		return IRQ_NONE;
	}
	spin_unlock(&xe->irq.lock);

	master_ctl = xelp_intr_disable(xe);
	if (!master_ctl) {
		xelp_intr_enable(xe, false);
		return IRQ_NONE;
	}

	gt_irq_handler(tile, master_ctl, intr_dw, identity);

	xe_display_irq_handler(xe, master_ctl);

	gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);

	xelp_intr_enable(xe, false);

	xe_display_irq_enable(xe, gu_misc_iir);

	return IRQ_HANDLED;
}

static u32 dg1_intr_disable(struct xe_device *xe)
{
	struct xe_gt *mmio = xe_root_mmio_gt(xe);
	u32 val;

	/* First disable interrupts */
	xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, 0);

	/* Get the indication levels and ack the master unit */
	val = xe_mmio_read32(mmio, DG1_MSTR_TILE_INTR);
	if (unlikely(!val))
		return 0;

	xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, val);

	return val;
}

static void dg1_intr_enable(struct xe_device *xe, bool stall)
{
	struct xe_gt *mmio = xe_root_mmio_gt(xe);

	xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
	if (stall)
		xe_mmio_read32(mmio, DG1_MSTR_TILE_INTR);
}

/*
 * Top-level interrupt handler for Xe_LP+ and beyond. These platforms have
 * a "master tile" interrupt register which must be consulted before the
 * "graphics master" interrupt register.
 */
static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *tile;
	u32 master_tile_ctl, master_ctl = 0, gu_misc_iir = 0;
	unsigned long intr_dw[2];
	u32 identity[32];
	u8 id;

	/* TODO: This really shouldn't be copied+pasted */

	spin_lock(&xe->irq.lock);
	if (!xe->irq.enabled) {
		spin_unlock(&xe->irq.lock);
		return IRQ_NONE;
	}
	spin_unlock(&xe->irq.lock);

	master_tile_ctl = dg1_intr_disable(xe);
	if (!master_tile_ctl) {
		dg1_intr_enable(xe, false);
		return IRQ_NONE;
	}

	for_each_tile(tile, xe, id) {
		struct xe_gt *mmio = tile->primary_gt;

		if ((master_tile_ctl & DG1_MSTR_TILE(tile->id)) == 0)
			continue;

		master_ctl = xe_mmio_read32(mmio, GFX_MSTR_IRQ);

		/*
		 * We might be in the irq handler just as PCIe DPC is
		 * initiated, in which case all MMIO reads return all 1's.
		 * Ignore this irq: the device is inaccessible.
		 */
		if (master_ctl == REG_GENMASK(31, 0)) {
			drm_dbg(&tile_to_xe(tile)->drm,
				"Ignore this IRQ as device might be in DPC containment.\n");
			return IRQ_HANDLED;
		}

		xe_mmio_write32(mmio, GFX_MSTR_IRQ, master_ctl);

		gt_irq_handler(tile, master_ctl, intr_dw, identity);

		/*
		 * Display interrupts (including display backlight operations
		 * that get reported as Gunit GSE) would only be hooked up to
		 * the primary tile.
		 */
		if (id == 0) {
			xe_display_irq_handler(xe, master_ctl);
			gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);
		}
	}

	dg1_intr_enable(xe, false);
	xe_display_irq_enable(xe, gu_misc_iir);

	return IRQ_HANDLED;
}

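/* Disable and mask all GT engine and unit interrupts on a tile. */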
static void gt_irq_reset(struct xe_tile *tile)
{
	struct xe_gt *mmio = tile->primary_gt;

	u32 ccs_mask = xe_hw_engine_mask_per_class(tile->primary_gt,
						   XE_ENGINE_CLASS_COMPUTE);
	u32 bcs_mask = xe_hw_engine_mask_per_class(tile->primary_gt,
						   XE_ENGINE_CLASS_COPY);

	/* Disable RCS, BCS, VCS and VECS class engines. */
	xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, 0);
	xe_mmio_write32(mmio, VCS_VECS_INTR_ENABLE, 0);
	if (ccs_mask)
		xe_mmio_write32(mmio, CCS_RSVD_INTR_ENABLE, 0);

	/* Restore the interrupt masks on RCS, BCS, VCS and VECS engines. */
	xe_mmio_write32(mmio, RCS0_RSVD_INTR_MASK, ~0);
	xe_mmio_write32(mmio, BCS_RSVD_INTR_MASK, ~0);
	if (bcs_mask & (BIT(1) | BIT(2)))
		xe_mmio_write32(mmio, XEHPC_BCS1_BCS2_INTR_MASK, ~0);
	if (bcs_mask & (BIT(3) | BIT(4)))
		xe_mmio_write32(mmio, XEHPC_BCS3_BCS4_INTR_MASK, ~0);
	if (bcs_mask & (BIT(5) | BIT(6)))
		xe_mmio_write32(mmio, XEHPC_BCS5_BCS6_INTR_MASK, ~0);
	if (bcs_mask & (BIT(7) | BIT(8)))
		xe_mmio_write32(mmio, XEHPC_BCS7_BCS8_INTR_MASK, ~0);
	xe_mmio_write32(mmio, VCS0_VCS1_INTR_MASK, ~0);
	xe_mmio_write32(mmio, VCS2_VCS3_INTR_MASK, ~0);
	xe_mmio_write32(mmio, VECS0_VECS1_INTR_MASK, ~0);
	if (ccs_mask & (BIT(0) | BIT(1)))
		xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~0);
	if (ccs_mask & (BIT(2) | BIT(3)))
		xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~0);

	if ((tile->media_gt &&
	     xe_hw_engine_mask_per_class(tile->media_gt, XE_ENGINE_CLASS_OTHER)) ||
	    HAS_HECI_GSCFI(tile_to_xe(tile))) {
		xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, 0);
		xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~0);
		xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~0);
	}

	xe_mmio_write32(mmio, GPM_WGBOXPERF_INTR_ENABLE, 0);
	xe_mmio_write32(mmio, GPM_WGBOXPERF_INTR_MASK, ~0);
	xe_mmio_write32(mmio, GUC_SG_INTR_ENABLE, 0);
	xe_mmio_write32(mmio, GUC_SG_INTR_MASK, ~0);
}

static void xelp_irq_reset(struct xe_tile *tile)
{
	xelp_intr_disable(tile_to_xe(tile));

	gt_irq_reset(tile);

	if (IS_SRIOV_VF(tile_to_xe(tile)))
		return;

	mask_and_disable(tile, PCU_IRQ_OFFSET);
}

static void dg1_irq_reset(struct xe_tile *tile)
{
	if (tile->id == 0)
		dg1_intr_disable(tile_to_xe(tile));

	gt_irq_reset(tile);

	if (IS_SRIOV_VF(tile_to_xe(tile)))
		return;

	mask_and_disable(tile, PCU_IRQ_OFFSET);
}

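/*
 * Clear the tile's top-level interrupt status last; see the note in
 * xe_irq_reset() about bit re-latching.
 */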
static void dg1_irq_reset_mstr(struct xe_tile *tile)
{
	struct xe_gt *mmio = tile->primary_gt;

	xe_mmio_write32(mmio, GFX_MSTR_IRQ, ~0);
}

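/*
 * VFs either use memory-based interrupts or, on older platforms, the same
 * GT interrupt registers as the PF, so reset whichever mechanism is in use.
 */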
static void vf_irq_reset(struct xe_device *xe)
{
	struct xe_tile *tile;
	unsigned int id;

	xe_assert(xe, IS_SRIOV_VF(xe));

	if (GRAPHICS_VERx100(xe) < 1210)
		xelp_intr_disable(xe);
	else
		xe_assert(xe, xe_device_has_memirq(xe));

	for_each_tile(tile, xe, id) {
		if (xe_device_has_memirq(xe))
			xe_memirq_reset(&tile->sriov.vf.memirq);
		else
			gt_irq_reset(tile);
	}
}

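/* Reset interrupt delivery for the whole device, all tiles included. */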
static void xe_irq_reset(struct xe_device *xe)
{
	struct xe_tile *tile;
	u8 id;

	if (IS_SRIOV_VF(xe))
		return vf_irq_reset(xe);

	for_each_tile(tile, xe, id) {
		if (GRAPHICS_VERx100(xe) >= 1210)
			dg1_irq_reset(tile);
		else
			xelp_irq_reset(tile);
	}

	tile = xe_device_get_root_tile(xe);
	mask_and_disable(tile, GU_MISC_IRQ_OFFSET);
	xe_display_irq_reset(xe);

	/*
	 * The tile's top-level status register should be the last one
	 * to be reset to avoid possible bit re-latching from lower
	 * level interrupts.
	 */
	if (GRAPHICS_VERx100(xe) >= 1210) {
		for_each_tile(tile, xe, id)
			dg1_irq_reset_mstr(tile);
	}
}

static void vf_irq_postinstall(struct xe_device *xe)
{
	struct xe_tile *tile;
	unsigned int id;

	for_each_tile(tile, xe, id)
		if (xe_device_has_memirq(xe))
			xe_memirq_postinstall(&tile->sriov.vf.memirq);

	if (GRAPHICS_VERx100(xe) < 1210)
		xelp_intr_enable(xe, true);
	else
		xe_assert(xe, xe_device_has_memirq(xe));
}

static void xe_irq_postinstall(struct xe_device *xe)
{
	if (IS_SRIOV_VF(xe))
		return vf_irq_postinstall(xe);

	xe_display_irq_postinstall(xe, xe_root_mmio_gt(xe));

	/*
	 * ASLE backlight operations are reported via GUnit GSE interrupts
	 * on the root tile.
	 */
	unmask_and_enable(xe_device_get_root_tile(xe),
			  GU_MISC_IRQ_OFFSET, GU_MISC_GSE);

	/* Enable top-level interrupts */
	if (GRAPHICS_VERx100(xe) >= 1210)
		dg1_intr_enable(xe, true);
	else
		xelp_intr_enable(xe, true);
}

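/* Top-level handler for VFs using memory-based interrupts. */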
static irqreturn_t vf_mem_irq_handler(int irq, void *arg)
{
	struct xe_device *xe = arg;
	struct xe_tile *tile;
	unsigned int id;

	spin_lock(&xe->irq.lock);
	if (!xe->irq.enabled) {
		spin_unlock(&xe->irq.lock);
		return IRQ_NONE;
	}
	spin_unlock(&xe->irq.lock);

	for_each_tile(tile, xe, id)
		xe_memirq_handler(&tile->sriov.vf.memirq);

	return IRQ_HANDLED;
}

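/*
 * Pick the top-level handler for this device: memory-based interrupts for
 * memirq-capable VFs, the DG1-style handler on platforms with a master
 * tile register (GRAPHICS_VERx100 >= 1210), and the Xe_LP handler
 * otherwise.
 */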
static irq_handler_t xe_irq_handler(struct xe_device *xe)
{
	if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe))
		return vf_mem_irq_handler;

	if (GRAPHICS_VERx100(xe) >= 1210)
		return dg1_irq_handler;
	else
		return xelp_irq_handler;
}

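/* devm action: disable interrupt delivery and release the IRQ line. */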
static void irq_uninstall(void *arg)
{
	struct xe_device *xe = arg;
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	int irq;

	if (!xe->irq.enabled)
		return;

	xe->irq.enabled = false;
	xe_irq_reset(xe);

	irq = pci_irq_vector(pdev, 0);
	free_irq(irq, xe);
}

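/**
 * xe_irq_install - allocate MSI/MSI-X vectors and install the IRQ handler
 * @xe: xe device instance
 *
 * Resets the interrupt hardware, enables MSI-X (falling back to MSI where
 * MSI-X is unsupported), requests the IRQ and enables top-level interrupt
 * delivery. Cleanup is registered as a devm action.
 *
 * Return: 0 on success, negative error code on failure.
 */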
int xe_irq_install(struct xe_device *xe)
{
	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
	unsigned int irq_flags = PCI_IRQ_MSIX;
	irq_handler_t irq_handler;
	int err, irq, nvec;

	irq_handler = xe_irq_handler(xe);
	if (!irq_handler) {
		drm_err(&xe->drm, "No supported interrupt handler");
		return -EINVAL;
	}

	xe_irq_reset(xe);

	nvec = pci_msix_vec_count(pdev);
	if (nvec <= 0) {
		if (nvec == -EINVAL) {
			/* MSI-X is not supported on this device, fall back to MSI */
			irq_flags = PCI_IRQ_MSI;
			nvec = 1;
		} else {
			drm_err(&xe->drm, "MSI-X: Failed getting count\n");
			return nvec;
		}
	}

	err = pci_alloc_irq_vectors(pdev, nvec, nvec, irq_flags);
	if (err < 0) {
		drm_err(&xe->drm, "MSI/MSI-X: Failed to enable support %d\n", err);
		return err;
	}

	irq = pci_irq_vector(pdev, 0);
	err = request_irq(irq, irq_handler, IRQF_SHARED, DRIVER_NAME, xe);
	if (err < 0) {
		drm_err(&xe->drm, "Failed to request MSI/MSI-X IRQ %d\n", err);
		return err;
	}

	xe->irq.enabled = true;

	xe_irq_postinstall(xe);

	err = devm_add_action_or_reset(xe->drm.dev, irq_uninstall, xe);
	if (err)
		goto free_irq_handler;

	return 0;

free_irq_handler:
	free_irq(irq, xe);

	return err;
}

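/**
 * xe_irq_suspend - disable interrupt delivery for suspend
 * @xe: xe device instance
 *
 * Marks interrupts as disabled, waits for any running handler to finish,
 * then masks everything at the hardware level.
 */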
void xe_irq_suspend(struct xe_device *xe)
{
	int irq = to_pci_dev(xe->drm.dev)->irq;

	spin_lock_irq(&xe->irq.lock);
	xe->irq.enabled = false; /* no new irqs */
	spin_unlock_irq(&xe->irq.lock);

	synchronize_irq(irq); /* flush irqs */
	xe_irq_reset(xe); /* turn irqs off */
}

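/**
 * xe_irq_resume - re-enable interrupt delivery on resume
 * @xe: xe device instance
 *
 * Resets the interrupt hardware to a known state, re-runs postinstall and
 * re-enables the HW engine interrupts on every GT.
 */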
void xe_irq_resume(struct xe_device *xe)
{
	struct xe_gt *gt;
	int id;

	/*
	 * Lock not needed:
	 * 1. no irq will arrive before the postinstall
	 * 2. display is not yet resumed
	 */
	xe->irq.enabled = true;
	xe_irq_reset(xe);
	xe_irq_postinstall(xe); /* turn irqs on */

	for_each_gt(gt, xe, id)
		xe_irq_enable_hwe(gt);
}