Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/i915/sbi: clean up SBI register macro definitions and usage

Use REG_BIT() and friends for defining the register macros. Switch GVT
to use the same macros, and drop its own copies.

Reviewed-by: Luca Coelho <luciano.coelho@intel.com>
Link: https://lore.kernel.org/r/e148e8621c6055d0441fdf6d651d4ad24be53d09.1748343520.git.jani.nikula@intel.com
Signed-off-by: Jani Nikula <jani.nikula@intel.com>

Jani Nikula e583c27a 0b6d7dbf

+51 -51
+5 -5
drivers/gpu/drm/i915/display/intel_sbi.c
··· 21 21 22 22 lockdep_assert_held(&display->sbi.lock); 23 23 24 - if (intel_de_wait_fw(display, SBI_CTL_STAT, SBI_BUSY, 0, 100, NULL)) { 24 + if (intel_de_wait_fw(display, SBI_CTL_STAT, SBI_STATUS_MASK, SBI_STATUS_READY, 100, NULL)) { 25 25 drm_err(display->drm, "timeout waiting for SBI to become ready\n"); 26 26 return -EBUSY; 27 27 } 28 28 29 - intel_de_write_fw(display, SBI_ADDR, (u32)reg << 16); 29 + intel_de_write_fw(display, SBI_ADDR, SBI_ADDR_VALUE(reg)); 30 30 intel_de_write_fw(display, SBI_DATA, is_read ? 0 : *val); 31 31 32 32 if (destination == SBI_ICLK) ··· 34 34 else 35 35 cmd = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD; 36 36 if (!is_read) 37 - cmd |= BIT(8); 38 - intel_de_write_fw(display, SBI_CTL_STAT, cmd | SBI_BUSY); 37 + cmd |= SBI_CTL_OP_WR; 38 + intel_de_write_fw(display, SBI_CTL_STAT, cmd | SBI_STATUS_BUSY); 39 39 40 - if (intel_de_wait_fw(display, SBI_CTL_STAT, SBI_BUSY, 0, 100, &cmd)) { 40 + if (intel_de_wait_fw(display, SBI_CTL_STAT, SBI_STATUS_MASK, SBI_STATUS_READY, 100, &cmd)) { 41 41 drm_err(display->drm, "timeout waiting for SBI to complete read\n"); 42 42 return -ETIMEDOUT; 43 43 }
+32 -18
drivers/gpu/drm/i915/display/intel_sbi_regs.h
··· 11 11 * contains the register offset; and SBI_DATA, which contains the payload. 12 12 */ 13 13 #define SBI_ADDR _MMIO(0xC6000) 14 + #define SBI_ADDR_MASK REG_GENMASK(31, 16) 15 + #define SBI_ADDR_VALUE(addr) REG_FIELD_PREP(SBI_ADDR_MASK, (addr)) 16 + 14 17 #define SBI_DATA _MMIO(0xC6004) 18 + 15 19 #define SBI_CTL_STAT _MMIO(0xC6008) 16 - #define SBI_CTL_DEST_ICLK (0x0 << 16) 17 - #define SBI_CTL_DEST_MPHY (0x1 << 16) 18 - #define SBI_CTL_OP_IORD (0x2 << 8) 19 - #define SBI_CTL_OP_IOWR (0x3 << 8) 20 - #define SBI_CTL_OP_CRRD (0x6 << 8) 21 - #define SBI_CTL_OP_CRWR (0x7 << 8) 22 - #define SBI_RESPONSE_FAIL (0x1 << 1) 23 - #define SBI_RESPONSE_SUCCESS (0x0 << 1) 24 - #define SBI_BUSY (0x1 << 0) 25 - #define SBI_READY (0x0 << 0) 20 + #define SBI_CTL_DEST_MASK REG_GENMASK(16, 16) 21 + #define SBI_CTL_DEST_ICLK REG_FIELD_PREP(SBI_CTL_DEST_MASK, 0) 22 + #define SBI_CTL_DEST_MPHY REG_FIELD_PREP(SBI_CTL_DEST_MASK, 1) 23 + #define SBI_CTL_OP_MASK REG_GENMASK(15, 8) 24 + #define SBI_CTL_OP_IORD REG_FIELD_PREP(SBI_CTL_OP_MASK, 2) 25 + #define SBI_CTL_OP_IOWR REG_FIELD_PREP(SBI_CTL_OP_MASK, 3) 26 + #define SBI_CTL_OP_CRRD REG_FIELD_PREP(SBI_CTL_OP_MASK, 6) 27 + #define SBI_CTL_OP_CRWR REG_FIELD_PREP(SBI_CTL_OP_MASK, 7) 28 + #define SBI_CTL_OP_WR REG_BIT(8) 29 + #define SBI_RESPONSE_MASK REG_GENMASK(2, 1) 30 + #define SBI_RESPONSE_FAIL REG_FIELD_PREP(SBI_RESPONSE_MASK, 1) 31 + #define SBI_RESPONSE_SUCCESS REG_FIELD_PREP(SBI_RESPONSE_MASK, 0) 32 + #define SBI_STATUS_MASK REG_GENMASK(0, 0) 33 + #define SBI_STATUS_BUSY REG_FIELD_PREP(SBI_STATUS_MASK, 1) 34 + #define SBI_STATUS_READY REG_FIELD_PREP(SBI_STATUS_MASK, 0) 26 35 27 36 /* SBI offsets */ 28 - #define SBI_SSCDIVINTPHASE 0x0200 29 - #define SBI_SSCDIVINTPHASE6 0x0600 37 + #define SBI_SSCDIVINTPHASE 0x0200 38 + 39 + #define SBI_SSCDIVINTPHASE6 0x0600 30 40 #define SBI_SSCDIVINTPHASE_DIVSEL_SHIFT 1 31 41 #define SBI_SSCDIVINTPHASE_DIVSEL_MASK (0x7f << 1) 32 42 #define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x) << 1)
··· 45 35 #define SBI_SSCDIVINTPHASE_INCVAL(x) ((x) << 8) 46 36 #define SBI_SSCDIVINTPHASE_DIR(x) ((x) << 15) 47 37 #define SBI_SSCDIVINTPHASE_PROPAGATE (1 << 0) 48 - #define SBI_SSCDITHPHASE 0x0204 49 - #define SBI_SSCCTL 0x020c 50 - #define SBI_SSCCTL6 0x060C 38 + 39 + #define SBI_SSCDITHPHASE 0x0204 40 + #define SBI_SSCCTL 0x020c 41 + #define SBI_SSCCTL6 0x060C 51 42 #define SBI_SSCCTL_PATHALT (1 << 3) 52 43 #define SBI_SSCCTL_DISABLE (1 << 0) 53 - #define SBI_SSCAUXDIV6 0x0610 44 + 45 + #define SBI_SSCAUXDIV6 0x0610 54 46 #define SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT 4 55 47 #define SBI_SSCAUXDIV_FINALDIV2SEL_MASK (1 << 4) 56 48 #define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x) << 4) 57 - #define SBI_DBUFF0 0x2a00 58 - #define SBI_GEN0 0x1f00 49 + 50 + #define SBI_DBUFF0 0x2a00 51 + 52 + #define SBI_GEN0 0x1f00 59 53 #define SBI_GEN0_CFG_BUFFENABLE_DISABLE (1 << 0) 60 54 61 55 #endif /* __INTEL_SBI_REGS_H__ */
-13
drivers/gpu/drm/i915/gvt/display.h
··· 63 63 64 64 #define AUX_BURST_SIZE 20 65 65 66 - #define SBI_RESPONSE_MASK 0x3 67 - #define SBI_RESPONSE_SHIFT 0x1 68 - #define SBI_STAT_MASK 0x1 69 - #define SBI_STAT_SHIFT 0x0 70 - #define SBI_OPCODE_SHIFT 8 71 - #define SBI_OPCODE_MASK (0xff << SBI_OPCODE_SHIFT) 72 - #define SBI_CMD_IORD 2 73 - #define SBI_CMD_IOWR 3 74 - #define SBI_CMD_CRRD 6 75 - #define SBI_CMD_CRWR 7 76 - #define SBI_ADDR_OFFSET_SHIFT 16 77 - #define SBI_ADDR_OFFSET_MASK (0xffff << SBI_ADDR_OFFSET_SHIFT) 78 - 79 66 struct intel_vgpu_sbi_register { 80 67 unsigned int offset; 81 68 u32 value;
+14 -15
drivers/gpu/drm/i915/gvt/handlers.c
··· 1414 1414 static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, 1415 1415 void *p_data, unsigned int bytes) 1416 1416 { 1417 - if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >> 1418 - SBI_OPCODE_SHIFT) == SBI_CMD_CRRD) { 1419 - unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) & 1420 - SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT; 1421 - vgpu_vreg(vgpu, offset) = read_virtual_sbi_register(vgpu, 1422 - sbi_offset); 1417 + if ((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_CTL_OP_MASK) == SBI_CTL_OP_CRRD) { 1418 + unsigned int sbi_offset; 1419 + 1420 + sbi_offset = REG_FIELD_GET(SBI_ADDR_MASK, vgpu_vreg_t(vgpu, SBI_ADDR)); 1421 + 1422 + vgpu_vreg(vgpu, offset) = read_virtual_sbi_register(vgpu, sbi_offset); 1423 1423 } 1424 1424 read_vreg(vgpu, offset, p_data, bytes); 1425 1425 return 0; ··· 1433 1433 write_vreg(vgpu, offset, p_data, bytes); 1434 1434 data = vgpu_vreg(vgpu, offset); 1435 1435 1436 - data &= ~(SBI_STAT_MASK << SBI_STAT_SHIFT); 1437 - data |= SBI_READY; 1436 + data &= ~SBI_STATUS_MASK; 1437 + data |= SBI_STATUS_READY; 1438 1438 1439 - data &= ~(SBI_RESPONSE_MASK << SBI_RESPONSE_SHIFT); 1439 + data &= ~SBI_RESPONSE_MASK; 1440 1440 data |= SBI_RESPONSE_SUCCESS; 1441 1441 1442 1442 vgpu_vreg(vgpu, offset) = data; 1443 1443 1444 - if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >> 1445 - SBI_OPCODE_SHIFT) == SBI_CMD_CRWR) { 1446 - unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) & 1447 - SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT; 1444 + if ((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_CTL_OP_MASK) == SBI_CTL_OP_CRWR) { 1445 + unsigned int sbi_offset; 1448 1446 1449 - write_virtual_sbi_register(vgpu, sbi_offset, 1450 - vgpu_vreg_t(vgpu, SBI_DATA)); 1447 + sbi_offset = REG_FIELD_GET(SBI_ADDR_MASK, vgpu_vreg_t(vgpu, SBI_ADDR)); 1448 + 1449 + write_virtual_sbi_register(vgpu, sbi_offset, vgpu_vreg_t(vgpu, SBI_DATA)); 1451 1450 } 1452 1451 return 0; 1453 1452 }