Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/xe/compat-i915: Convert register access to use xe_mmio

Stop using GT pointers for register access.

v2:
- Don't remove _Generic wrappers for 8/16-bit yet; save that for the
last patch of the series. (Rodrigo)

Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240910234719.3335472-59-matthew.d.roper@intel.com

+18 -18
drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
···
10  10    #include "xe_device_types.h"
11  11    #include "xe_mmio.h"
12  12
13      - static inline struct xe_gt *__compat_uncore_to_gt(struct intel_uncore *uncore)
    13  + static inline struct xe_mmio *__compat_uncore_to_mmio(struct intel_uncore *uncore)
14  14    {
15  15    struct xe_device *xe = container_of(uncore, struct xe_device, uncore);
16  16
17      - return xe_root_mmio_gt(xe);
    17  + return xe_root_tile_mmio(xe);
18  18    }
19  19
20  20    static inline struct xe_tile *__compat_uncore_to_tile(struct intel_uncore *uncore)
···
29  29    {
30  30    struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
31  31
32      - return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
    32  + return xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
33  33    }
34  34
35  35    static inline u8 intel_uncore_read8(struct intel_uncore *uncore,
···
37  37    {
38  38    struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
39  39
40      - return xe_mmio_read8(__compat_uncore_to_gt(uncore), reg);
    40  + return xe_mmio_read8(__compat_uncore_to_mmio(uncore), reg);
41  41    }
42  42
43  43    static inline u16 intel_uncore_read16(struct intel_uncore *uncore,
···
45  45    {
46  46    struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
47  47
48      - return xe_mmio_read16(__compat_uncore_to_gt(uncore), reg);
    48  + return xe_mmio_read16(__compat_uncore_to_mmio(uncore), reg);
49  49    }
50  50
51  51    static inline u64
···
57  57    u32 upper, lower, old_upper;
58  58    int loop = 0;
59  59
60      - upper = xe_mmio_read32(__compat_uncore_to_gt(uncore), upper_reg);
    60  + upper = xe_mmio_read32(__compat_uncore_to_mmio(uncore), upper_reg);
61  61    do {
62  62    old_upper = upper;
63      - lower = xe_mmio_read32(__compat_uncore_to_gt(uncore), lower_reg);
64      - upper = xe_mmio_read32(__compat_uncore_to_gt(uncore), upper_reg);
    63  + lower = xe_mmio_read32(__compat_uncore_to_mmio(uncore), lower_reg);
    64  + upper = xe_mmio_read32(__compat_uncore_to_mmio(uncore), upper_reg);
65  65    } while (upper != old_upper && loop++ < 2);
66  66
67  67    return (u64)upper << 32 | lower;
···
72  72    {
73  73    struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
74  74
75      - xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
    75  + xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
76  76    }
77  77
78  78    static inline void intel_uncore_write(struct intel_uncore *uncore,
···
80  80    {
81  81    struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
82  82
83      - xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
    83  + xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val);
84  84    }
85  85
86  86    static inline u32 intel_uncore_rmw(struct intel_uncore *uncore,
···
88  88    {
89  89    struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
90  90
91      - return xe_mmio_rmw32(__compat_uncore_to_gt(uncore), reg, clear, set);
    91  + return xe_mmio_rmw32(__compat_uncore_to_mmio(uncore), reg, clear, set);
92  92    }
93  93
94  94    static inline int intel_wait_for_register(struct intel_uncore *uncore,
···
97  97    {
98  98    struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
99  99
100     - return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value,
    100 + return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
101 101   timeout * USEC_PER_MSEC, NULL, false);
102 102   }
103 103
···
107 107   {
108 108   struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
109 109
110     - return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value,
    110 + return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
111 111   timeout * USEC_PER_MSEC, NULL, false);
112 112   }
113 113
···
118 118   {
119 119   struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
120 120
121     - return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value,
    121 + return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
122 122   fast_timeout_us + 1000 * slow_timeout_ms,
123 123   out_value, false);
124 124   }
···
128 128   {
129 129   struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
130 130
131     - return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
    131 + return xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
132 132   }
133 133
134 134   static inline void intel_uncore_write_fw(struct intel_uncore *uncore,
···
136 136   {
137 137   struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
138 138
139     - xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
    139 + xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val);
140 140   }
141 141
142 142   static inline u32 intel_uncore_read_notrace(struct intel_uncore *uncore,
···
144 144   {
145 145   struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
146 146
147     - return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
    147 + return xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
148 148   }
149 149
150 150   static inline void intel_uncore_write_notrace(struct intel_uncore *uncore,
···
152 152   {
153 153   struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
154 154
155     - xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
    155 + xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val);
156 156   }
157 157
158 158   static inline void __iomem *intel_uncore_regs(struct intel_uncore *uncore)