Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/xe/irq: Convert register access to use xe_mmio

Stop using GT pointers for register access. This misuse has been
especially confusing in interrupt code because even though some of the
interrupts are related to GTs (or engines within GTs), the interrupt
registers themselves live outside the GT, in the sgunit.

Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240910234719.3335472-55-matthew.d.roper@intel.com

+32 -31
+32 -31
drivers/gpu/drm/xe/xe_irq.c
··· 30 30 #define IIR(offset) XE_REG(offset + 0x8) 31 31 #define IER(offset) XE_REG(offset + 0xc) 32 32 33 - static void assert_iir_is_zero(struct xe_gt *mmio, struct xe_reg reg) 33 + static void assert_iir_is_zero(struct xe_mmio *mmio, struct xe_reg reg) 34 34 { 35 35 u32 val = xe_mmio_read32(mmio, reg); 36 36 37 37 if (val == 0) 38 38 return; 39 39 40 - drm_WARN(&gt_to_xe(mmio)->drm, 1, 40 + drm_WARN(&mmio->tile->xe->drm, 1, 41 41 "Interrupt register 0x%x is not zero: 0x%08x\n", 42 42 reg.addr, val); 43 43 xe_mmio_write32(mmio, reg, 0xffffffff); ··· 52 52 */ 53 53 static void unmask_and_enable(struct xe_tile *tile, u32 irqregs, u32 bits) 54 54 { 55 - struct xe_gt *mmio = tile->primary_gt; 55 + struct xe_mmio *mmio = &tile->mmio; 56 56 57 57 /* 58 58 * If we're just enabling an interrupt now, it shouldn't already ··· 70 70 /* Mask and disable all interrupts. */ 71 71 static void mask_and_disable(struct xe_tile *tile, u32 irqregs) 72 72 { 73 - struct xe_gt *mmio = tile->primary_gt; 73 + struct xe_mmio *mmio = &tile->mmio; 74 74 75 75 xe_mmio_write32(mmio, IMR(irqregs), ~0); 76 76 /* Posting read */ ··· 87 87 88 88 static u32 xelp_intr_disable(struct xe_device *xe) 89 89 { 90 - struct xe_gt *mmio = xe_root_mmio_gt(xe); 90 + struct xe_mmio *mmio = xe_root_tile_mmio(xe); 91 91 92 92 xe_mmio_write32(mmio, GFX_MSTR_IRQ, 0); 93 93 ··· 103 103 static u32 104 104 gu_misc_irq_ack(struct xe_device *xe, const u32 master_ctl) 105 105 { 106 - struct xe_gt *mmio = xe_root_mmio_gt(xe); 106 + struct xe_mmio *mmio = xe_root_tile_mmio(xe); 107 107 u32 iir; 108 108 109 109 if (!(master_ctl & GU_MISC_IRQ)) ··· 118 118 119 119 static inline void xelp_intr_enable(struct xe_device *xe, bool stall) 120 120 { 121 - struct xe_gt *mmio = xe_root_mmio_gt(xe); 121 + struct xe_mmio *mmio = xe_root_tile_mmio(xe); 122 122 123 123 xe_mmio_write32(mmio, GFX_MSTR_IRQ, MASTER_IRQ); 124 124 if (stall) ··· 129 129 void xe_irq_enable_hwe(struct xe_gt *gt) 130 130 { 131 131 struct xe_device *xe = 
gt_to_xe(gt); 132 + struct xe_mmio *mmio = &gt->mmio; 132 133 u32 ccs_mask, bcs_mask; 133 134 u32 irqs, dmask, smask; 134 135 u32 gsc_mask = 0; ··· 156 155 157 156 if (!xe_gt_is_media_type(gt)) { 158 157 /* Enable interrupts for each engine class */ 159 - xe_mmio_write32(gt, RENDER_COPY_INTR_ENABLE, dmask); 158 + xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, dmask); 160 159 if (ccs_mask) 161 - xe_mmio_write32(gt, CCS_RSVD_INTR_ENABLE, smask); 160 + xe_mmio_write32(mmio, CCS_RSVD_INTR_ENABLE, smask); 162 161 163 162 /* Unmask interrupts for each engine instance */ 164 - xe_mmio_write32(gt, RCS0_RSVD_INTR_MASK, ~smask); 165 - xe_mmio_write32(gt, BCS_RSVD_INTR_MASK, ~smask); 163 + xe_mmio_write32(mmio, RCS0_RSVD_INTR_MASK, ~smask); 164 + xe_mmio_write32(mmio, BCS_RSVD_INTR_MASK, ~smask); 166 165 if (bcs_mask & (BIT(1)|BIT(2))) 167 - xe_mmio_write32(gt, XEHPC_BCS1_BCS2_INTR_MASK, ~dmask); 166 + xe_mmio_write32(mmio, XEHPC_BCS1_BCS2_INTR_MASK, ~dmask); 168 167 if (bcs_mask & (BIT(3)|BIT(4))) 169 - xe_mmio_write32(gt, XEHPC_BCS3_BCS4_INTR_MASK, ~dmask); 168 + xe_mmio_write32(mmio, XEHPC_BCS3_BCS4_INTR_MASK, ~dmask); 170 169 if (bcs_mask & (BIT(5)|BIT(6))) 171 - xe_mmio_write32(gt, XEHPC_BCS5_BCS6_INTR_MASK, ~dmask); 170 + xe_mmio_write32(mmio, XEHPC_BCS5_BCS6_INTR_MASK, ~dmask); 172 171 if (bcs_mask & (BIT(7)|BIT(8))) 173 - xe_mmio_write32(gt, XEHPC_BCS7_BCS8_INTR_MASK, ~dmask); 172 + xe_mmio_write32(mmio, XEHPC_BCS7_BCS8_INTR_MASK, ~dmask); 174 173 if (ccs_mask & (BIT(0)|BIT(1))) 175 - xe_mmio_write32(gt, CCS0_CCS1_INTR_MASK, ~dmask); 174 + xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~dmask); 176 175 if (ccs_mask & (BIT(2)|BIT(3))) 177 - xe_mmio_write32(gt, CCS2_CCS3_INTR_MASK, ~dmask); 176 + xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~dmask); 178 177 } 179 178 180 179 if (xe_gt_is_media_type(gt) || MEDIA_VER(xe) < 13) { 181 180 /* Enable interrupts for each engine class */ 182 - xe_mmio_write32(gt, VCS_VECS_INTR_ENABLE, dmask); 181 + xe_mmio_write32(mmio, 
VCS_VECS_INTR_ENABLE, dmask); 183 182 184 183 /* Unmask interrupts for each engine instance */ 185 - xe_mmio_write32(gt, VCS0_VCS1_INTR_MASK, ~dmask); 186 - xe_mmio_write32(gt, VCS2_VCS3_INTR_MASK, ~dmask); 187 - xe_mmio_write32(gt, VECS0_VECS1_INTR_MASK, ~dmask); 184 + xe_mmio_write32(mmio, VCS0_VCS1_INTR_MASK, ~dmask); 185 + xe_mmio_write32(mmio, VCS2_VCS3_INTR_MASK, ~dmask); 186 + xe_mmio_write32(mmio, VECS0_VECS1_INTR_MASK, ~dmask); 188 187 189 188 /* 190 189 * the heci2 interrupt is enabled via the same register as the ··· 198 197 } 199 198 200 199 if (gsc_mask) { 201 - xe_mmio_write32(gt, GUNIT_GSC_INTR_ENABLE, gsc_mask | heci_mask); 202 - xe_mmio_write32(gt, GUNIT_GSC_INTR_MASK, ~gsc_mask); 200 + xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, gsc_mask | heci_mask); 201 + xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~gsc_mask); 203 202 } 204 203 if (heci_mask) 205 - xe_mmio_write32(gt, HECI2_RSVD_INTR_MASK, ~(heci_mask << 16)); 204 + xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~(heci_mask << 16)); 206 205 } 207 206 } 208 207 209 208 static u32 210 209 gt_engine_identity(struct xe_device *xe, 211 - struct xe_gt *mmio, 210 + struct xe_mmio *mmio, 212 211 const unsigned int bank, 213 212 const unsigned int bit) 214 213 { ··· 292 291 u32 *identity) 293 292 { 294 293 struct xe_device *xe = tile_to_xe(tile); 295 - struct xe_gt *mmio = tile->primary_gt; 294 + struct xe_mmio *mmio = &tile->mmio; 296 295 unsigned int bank, bit; 297 296 u16 instance, intr_vec; 298 297 enum xe_engine_class class; ··· 377 376 378 377 static u32 dg1_intr_disable(struct xe_device *xe) 379 378 { 380 - struct xe_gt *mmio = xe_root_mmio_gt(xe); 379 + struct xe_mmio *mmio = xe_root_tile_mmio(xe); 381 380 u32 val; 382 381 383 382 /* First disable interrupts */ ··· 395 394 396 395 static void dg1_intr_enable(struct xe_device *xe, bool stall) 397 396 { 398 - struct xe_gt *mmio = xe_root_mmio_gt(xe); 397 + struct xe_mmio *mmio = xe_root_tile_mmio(xe); 399 398 400 399 xe_mmio_write32(mmio, 
DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ); 401 400 if (stall) ··· 432 431 } 433 432 434 433 for_each_tile(tile, xe, id) { 435 - struct xe_gt *mmio = tile->primary_gt; 434 + struct xe_mmio *mmio = &tile->mmio; 436 435 437 436 if ((master_tile_ctl & DG1_MSTR_TILE(tile->id)) == 0) 438 437 continue; ··· 475 474 476 475 static void gt_irq_reset(struct xe_tile *tile) 477 476 { 478 - struct xe_gt *mmio = tile->primary_gt; 477 + struct xe_mmio *mmio = &tile->mmio; 479 478 480 479 u32 ccs_mask = xe_hw_engine_mask_per_class(tile->primary_gt, 481 480 XE_ENGINE_CLASS_COMPUTE); ··· 548 547 549 548 static void dg1_irq_reset_mstr(struct xe_tile *tile) 550 549 { 551 - struct xe_gt *mmio = tile->primary_gt; 550 + struct xe_mmio *mmio = &tile->mmio; 552 551 553 552 xe_mmio_write32(mmio, GFX_MSTR_IRQ, ~0); 554 553 }