Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu: add IH ring to ih_get_wptr/ih_set_rptr v2

Let's start to support multiple rings.

v2: decode IV is needed as well

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Christian König and committed by Alex Deucher.
8bb9eb48 73c97fa4

+128 -110
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
··· 137 137 if (!ih->enabled || adev->shutdown) 138 138 return IRQ_NONE; 139 139 140 - wptr = amdgpu_ih_get_wptr(adev); 140 + wptr = amdgpu_ih_get_wptr(adev, ih); 141 141 142 142 restart_ih: 143 143 /* is somebody else already processing irqs? */ ··· 154 154 ih->rptr &= ih->ptr_mask; 155 155 } 156 156 157 - amdgpu_ih_set_rptr(adev); 157 + amdgpu_ih_set_rptr(adev, ih); 158 158 atomic_set(&ih->lock, 0); 159 159 160 160 /* make sure wptr hasn't changed while processing */ 161 - wptr = amdgpu_ih_get_wptr(adev); 161 + wptr = amdgpu_ih_get_wptr(adev, ih); 162 162 if (wptr != ih->rptr) 163 163 goto restart_ih; 164 164
+7 -6
drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
··· 50 50 /* provided by the ih block */ 51 51 struct amdgpu_ih_funcs { 52 52 /* ring read/write ptr handling, called from interrupt context */ 53 - u32 (*get_wptr)(struct amdgpu_device *adev); 54 - void (*decode_iv)(struct amdgpu_device *adev, 53 + u32 (*get_wptr)(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih); 54 + void (*decode_iv)(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih, 55 55 struct amdgpu_iv_entry *entry); 56 - void (*set_rptr)(struct amdgpu_device *adev); 56 + void (*set_rptr)(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih); 57 57 }; 58 58 59 - #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev)) 60 - #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv)) 61 - #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev)) 59 + #define amdgpu_ih_get_wptr(adev, ih) (adev)->irq.ih_funcs->get_wptr((adev), (ih)) 60 + #define amdgpu_ih_decode_iv(adev, iv) \ 61 + (adev)->irq.ih_funcs->decode_iv((adev), (ih), (iv)) 62 + #define amdgpu_ih_set_rptr(adev, ih) (adev)->irq.ih_funcs->set_rptr((adev), (ih)) 62 63 63 64 int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih, 64 65 unsigned ring_size, bool use_bus_addr);
+16 -13
drivers/gpu/drm/amd/amdgpu/cik_ih.c
··· 183 183 * Used by cik_irq_process(). 184 184 * Returns the value of the wptr. 185 185 */ 186 - static u32 cik_ih_get_wptr(struct amdgpu_device *adev) 186 + static u32 cik_ih_get_wptr(struct amdgpu_device *adev, 187 + struct amdgpu_ih_ring *ih) 187 188 { 188 189 u32 wptr, tmp; 189 190 190 - wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]); 191 + wptr = le32_to_cpu(adev->wb.wb[ih->wptr_offs]); 191 192 192 193 if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) { 193 194 wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK; ··· 197 196 * this should allow us to catchup. 198 197 */ 199 198 dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", 200 - wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask); 201 - adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask; 199 + wptr, ih->rptr, (wptr + 16) & ih->ptr_mask); 200 + ih->rptr = (wptr + 16) & ih->ptr_mask; 202 201 tmp = RREG32(mmIH_RB_CNTL); 203 202 tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK; 204 203 WREG32(mmIH_RB_CNTL, tmp); 205 204 } 206 - return (wptr & adev->irq.ih.ptr_mask); 205 + return (wptr & ih->ptr_mask); 207 206 } 208 207 209 208 /* CIK IV Ring ··· 238 237 * position and also advance the position. 239 238 */ 240 239 static void cik_ih_decode_iv(struct amdgpu_device *adev, 240 + struct amdgpu_ih_ring *ih, 241 241 struct amdgpu_iv_entry *entry) 242 242 { 243 243 /* wptr/rptr are in bytes! 
*/ 244 - u32 ring_index = adev->irq.ih.rptr >> 2; 244 + u32 ring_index = ih->rptr >> 2; 245 245 uint32_t dw[4]; 246 246 247 - dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]); 248 - dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]); 249 - dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]); 250 - dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]); 247 + dw[0] = le32_to_cpu(ih->ring[ring_index + 0]); 248 + dw[1] = le32_to_cpu(ih->ring[ring_index + 1]); 249 + dw[2] = le32_to_cpu(ih->ring[ring_index + 2]); 250 + dw[3] = le32_to_cpu(ih->ring[ring_index + 3]); 251 251 252 252 entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY; 253 253 entry->src_id = dw[0] & 0xff; ··· 258 256 entry->pasid = (dw[2] >> 16) & 0xffff; 259 257 260 258 /* wptr/rptr are in bytes! */ 261 - adev->irq.ih.rptr += 16; 259 + ih->rptr += 16; 262 260 } 263 261 264 262 /** ··· 268 266 * 269 267 * Set the IH ring buffer rptr. 270 268 */ 271 - static void cik_ih_set_rptr(struct amdgpu_device *adev) 269 + static void cik_ih_set_rptr(struct amdgpu_device *adev, 270 + struct amdgpu_ih_ring *ih) 272 271 { 273 - WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr); 272 + WREG32(mmIH_RB_RPTR, ih->rptr); 274 273 } 275 274 276 275 static int cik_ih_early_init(void *handle)
+17 -14
drivers/gpu/drm/amd/amdgpu/cz_ih.c
··· 185 185 * Used by cz_irq_process(VI). 186 186 * Returns the value of the wptr. 187 187 */ 188 - static u32 cz_ih_get_wptr(struct amdgpu_device *adev) 188 + static u32 cz_ih_get_wptr(struct amdgpu_device *adev, 189 + struct amdgpu_ih_ring *ih) 189 190 { 190 191 u32 wptr, tmp; 191 192 192 - wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]); 193 + wptr = le32_to_cpu(adev->wb.wb[ih->wptr_offs]); 193 194 194 195 if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) { 195 196 wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); ··· 199 198 * this should allow us to catchup. 200 199 */ 201 200 dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", 202 - wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask); 203 - adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask; 201 + wptr, ih->rptr, (wptr + 16) & ih->ptr_mask); 202 + ih->rptr = (wptr + 16) & ih->ptr_mask; 204 203 tmp = RREG32(mmIH_RB_CNTL); 205 204 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); 206 205 WREG32(mmIH_RB_CNTL, tmp); 207 206 } 208 - return (wptr & adev->irq.ih.ptr_mask); 207 + return (wptr & ih->ptr_mask); 209 208 } 210 209 211 210 /** ··· 217 216 * position and also advance the position. 218 217 */ 219 218 static void cz_ih_decode_iv(struct amdgpu_device *adev, 220 - struct amdgpu_iv_entry *entry) 219 + struct amdgpu_ih_ring *ih, 220 + struct amdgpu_iv_entry *entry) 221 221 { 222 222 /* wptr/rptr are in bytes! 
*/ 223 - u32 ring_index = adev->irq.ih.rptr >> 2; 223 + u32 ring_index = ih->rptr >> 2; 224 224 uint32_t dw[4]; 225 225 226 - dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]); 227 - dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]); 228 - dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]); 229 - dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]); 226 + dw[0] = le32_to_cpu(ih->ring[ring_index + 0]); 227 + dw[1] = le32_to_cpu(ih->ring[ring_index + 1]); 228 + dw[2] = le32_to_cpu(ih->ring[ring_index + 2]); 229 + dw[3] = le32_to_cpu(ih->ring[ring_index + 3]); 230 230 231 231 entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY; 232 232 entry->src_id = dw[0] & 0xff; ··· 237 235 entry->pasid = (dw[2] >> 16) & 0xffff; 238 236 239 237 /* wptr/rptr are in bytes! */ 240 - adev->irq.ih.rptr += 16; 238 + ih->rptr += 16; 241 239 } 242 240 243 241 /** ··· 247 245 * 248 246 * Set the IH ring buffer rptr. 249 247 */ 250 - static void cz_ih_set_rptr(struct amdgpu_device *adev) 248 + static void cz_ih_set_rptr(struct amdgpu_device *adev, 249 + struct amdgpu_ih_ring *ih) 251 250 { 252 - WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr); 251 + WREG32(mmIH_RB_RPTR, ih->rptr); 253 252 } 254 253 255 254 static int cz_ih_early_init(void *handle)
+16 -13
drivers/gpu/drm/amd/amdgpu/iceland_ih.c
··· 185 185 * Used by cz_irq_process(VI). 186 186 * Returns the value of the wptr. 187 187 */ 188 - static u32 iceland_ih_get_wptr(struct amdgpu_device *adev) 188 + static u32 iceland_ih_get_wptr(struct amdgpu_device *adev, 189 + struct amdgpu_ih_ring *ih) 189 190 { 190 191 u32 wptr, tmp; 191 192 192 - wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]); 193 + wptr = le32_to_cpu(adev->wb.wb[ih->wptr_offs]); 193 194 194 195 if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) { 195 196 wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); ··· 199 198 * this should allow us to catchup. 200 199 */ 201 200 dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", 202 - wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask); 203 - adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask; 201 + wptr, ih->rptr, (wptr + 16) & ih->ptr_mask); 202 + ih->rptr = (wptr + 16) & ih->ptr_mask; 204 203 tmp = RREG32(mmIH_RB_CNTL); 205 204 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); 206 205 WREG32(mmIH_RB_CNTL, tmp); 207 206 } 208 - return (wptr & adev->irq.ih.ptr_mask); 207 + return (wptr & ih->ptr_mask); 209 208 } 210 209 211 210 /** ··· 217 216 * position and also advance the position. 218 217 */ 219 218 static void iceland_ih_decode_iv(struct amdgpu_device *adev, 219 + struct amdgpu_ih_ring *ih, 220 220 struct amdgpu_iv_entry *entry) 221 221 { 222 222 /* wptr/rptr are in bytes! 
*/ 223 - u32 ring_index = adev->irq.ih.rptr >> 2; 223 + u32 ring_index = ih->rptr >> 2; 224 224 uint32_t dw[4]; 225 225 226 - dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]); 227 - dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]); 228 - dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]); 229 - dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]); 226 + dw[0] = le32_to_cpu(ih->ring[ring_index + 0]); 227 + dw[1] = le32_to_cpu(ih->ring[ring_index + 1]); 228 + dw[2] = le32_to_cpu(ih->ring[ring_index + 2]); 229 + dw[3] = le32_to_cpu(ih->ring[ring_index + 3]); 230 230 231 231 entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY; 232 232 entry->src_id = dw[0] & 0xff; ··· 237 235 entry->pasid = (dw[2] >> 16) & 0xffff; 238 236 239 237 /* wptr/rptr are in bytes! */ 240 - adev->irq.ih.rptr += 16; 238 + ih->rptr += 16; 241 239 } 242 240 243 241 /** ··· 247 245 * 248 246 * Set the IH ring buffer rptr. 249 247 */ 250 - static void iceland_ih_set_rptr(struct amdgpu_device *adev) 248 + static void iceland_ih_set_rptr(struct amdgpu_device *adev, 249 + struct amdgpu_ih_ring *ih) 251 250 { 252 - WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr); 251 + WREG32(mmIH_RB_RPTR, ih->rptr); 253 252 } 254 253 255 254 static int iceland_ih_early_init(void *handle)
+17 -14
drivers/gpu/drm/amd/amdgpu/si_ih.c
··· 100 100 mdelay(1); 101 101 } 102 102 103 - static u32 si_ih_get_wptr(struct amdgpu_device *adev) 103 + static u32 si_ih_get_wptr(struct amdgpu_device *adev, 104 + struct amdgpu_ih_ring *ih) 104 105 { 105 106 u32 wptr, tmp; 106 107 107 - wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]); 108 + wptr = le32_to_cpu(adev->wb.wb[ih->wptr_offs]); 108 109 109 110 if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) { 110 111 wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK; 111 112 dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", 112 - wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask); 113 - adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask; 113 + wptr, ih->rptr, (wptr + 16) & ih->ptr_mask); 114 + ih->rptr = (wptr + 16) & ih->ptr_mask; 114 115 tmp = RREG32(IH_RB_CNTL); 115 116 tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK; 116 117 WREG32(IH_RB_CNTL, tmp); 117 118 } 118 - return (wptr & adev->irq.ih.ptr_mask); 119 + return (wptr & ih->ptr_mask); 119 120 } 120 121 121 122 static void si_ih_decode_iv(struct amdgpu_device *adev, 122 - struct amdgpu_iv_entry *entry) 123 + struct amdgpu_ih_ring *ih, 124 + struct amdgpu_iv_entry *entry) 123 125 { 124 - u32 ring_index = adev->irq.ih.rptr >> 2; 126 + u32 ring_index = ih->rptr >> 2; 125 127 uint32_t dw[4]; 126 128 127 - dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]); 128 - dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]); 129 - dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]); 130 - dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]); 129 + dw[0] = le32_to_cpu(ih->ring[ring_index + 0]); 130 + dw[1] = le32_to_cpu(ih->ring[ring_index + 1]); 131 + dw[2] = le32_to_cpu(ih->ring[ring_index + 2]); 132 + dw[3] = le32_to_cpu(ih->ring[ring_index + 3]); 131 133 132 134 entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY; 133 135 entry->src_id = dw[0] & 0xff; ··· 137 135 entry->ring_id = dw[2] & 0xff; 138 136 entry->vmid = (dw[2] >> 8) & 0xff; 139 137 140 - adev->irq.ih.rptr += 16; 138 + ih->rptr += 
16; 141 139 } 142 140 143 - static void si_ih_set_rptr(struct amdgpu_device *adev) 141 + static void si_ih_set_rptr(struct amdgpu_device *adev, 142 + struct amdgpu_ih_ring *ih) 144 143 { 145 - WREG32(IH_RB_RPTR, adev->irq.ih.rptr); 144 + WREG32(IH_RB_RPTR, ih->rptr); 146 145 } 147 146 148 147 static int si_ih_early_init(void *handle)
+23 -20
drivers/gpu/drm/amd/amdgpu/tonga_ih.c
··· 193 193 * Used by cz_irq_process(VI). 194 194 * Returns the value of the wptr. 195 195 */ 196 - static u32 tonga_ih_get_wptr(struct amdgpu_device *adev) 196 + static u32 tonga_ih_get_wptr(struct amdgpu_device *adev, 197 + struct amdgpu_ih_ring *ih) 197 198 { 198 199 u32 wptr, tmp; 199 200 200 201 if (adev->irq.ih.use_bus_addr) 201 - wptr = le32_to_cpu(adev->irq.ih.ring[adev->irq.ih.wptr_offs]); 202 + wptr = le32_to_cpu(ih->ring[ih->wptr_offs]); 202 203 else 203 - wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]); 204 + wptr = le32_to_cpu(adev->wb.wb[ih->wptr_offs]); 204 205 205 206 if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) { 206 207 wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); ··· 210 209 * this should allow us to catchup. 211 210 */ 212 211 dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", 213 - wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask); 214 - adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask; 212 + wptr, ih->rptr, (wptr + 16) & ih->ptr_mask); 213 + ih->rptr = (wptr + 16) & ih->ptr_mask; 215 214 tmp = RREG32(mmIH_RB_CNTL); 216 215 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); 217 216 WREG32(mmIH_RB_CNTL, tmp); 218 217 } 219 - return (wptr & adev->irq.ih.ptr_mask); 218 + return (wptr & ih->ptr_mask); 220 219 } 221 220 222 221 /** ··· 228 227 * position and also advance the position. 229 228 */ 230 229 static void tonga_ih_decode_iv(struct amdgpu_device *adev, 231 - struct amdgpu_iv_entry *entry) 230 + struct amdgpu_ih_ring *ih, 231 + struct amdgpu_iv_entry *entry) 232 232 { 233 233 /* wptr/rptr are in bytes! 
*/ 234 - u32 ring_index = adev->irq.ih.rptr >> 2; 234 + u32 ring_index = ih->rptr >> 2; 235 235 uint32_t dw[4]; 236 236 237 - dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]); 238 - dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]); 239 - dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]); 240 - dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]); 237 + dw[0] = le32_to_cpu(ih->ring[ring_index + 0]); 238 + dw[1] = le32_to_cpu(ih->ring[ring_index + 1]); 239 + dw[2] = le32_to_cpu(ih->ring[ring_index + 2]); 240 + dw[3] = le32_to_cpu(ih->ring[ring_index + 3]); 241 241 242 242 entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY; 243 243 entry->src_id = dw[0] & 0xff; ··· 248 246 entry->pasid = (dw[2] >> 16) & 0xffff; 249 247 250 248 /* wptr/rptr are in bytes! */ 251 - adev->irq.ih.rptr += 16; 249 + ih->rptr += 16; 252 250 } 253 251 254 252 /** ··· 258 256 * 259 257 * Set the IH ring buffer rptr. 260 258 */ 261 - static void tonga_ih_set_rptr(struct amdgpu_device *adev) 259 + static void tonga_ih_set_rptr(struct amdgpu_device *adev, 260 + struct amdgpu_ih_ring *ih) 262 261 { 263 - if (adev->irq.ih.use_doorbell) { 262 + if (ih->use_doorbell) { 264 263 /* XXX check if swapping is necessary on BE */ 265 - if (adev->irq.ih.use_bus_addr) 266 - adev->irq.ih.ring[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr; 264 + if (ih->use_bus_addr) 265 + ih->ring[ih->rptr_offs] = ih->rptr; 267 266 else 268 - adev->wb.wb[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr; 269 - WDOORBELL32(adev->irq.ih.doorbell_index, adev->irq.ih.rptr); 267 + adev->wb.wb[ih->rptr_offs] = ih->rptr; 268 + WDOORBELL32(ih->doorbell_index, ih->rptr); 270 269 } else { 271 - WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr); 270 + WREG32(mmIH_RB_RPTR, ih->rptr); 272 271 } 273 272 } 274 273
+29 -27
drivers/gpu/drm/amd/amdgpu/vega10_ih.c
··· 191 191 * ring buffer overflow and deal with it. 192 192 * Returns the value of the wptr. 193 193 */ 194 - static u32 vega10_ih_get_wptr(struct amdgpu_device *adev) 194 + static u32 vega10_ih_get_wptr(struct amdgpu_device *adev, 195 + struct amdgpu_ih_ring *ih) 195 196 { 196 197 u32 wptr, tmp; 197 198 198 - if (adev->irq.ih.use_bus_addr) 199 - wptr = le32_to_cpu(adev->irq.ih.ring[adev->irq.ih.wptr_offs]); 199 + if (ih->use_bus_addr) 200 + wptr = le32_to_cpu(ih->ring[ih->wptr_offs]); 200 201 else 201 - wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]); 202 + wptr = le32_to_cpu(adev->wb.wb[ih->wptr_offs]); 202 203 203 204 if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) { 204 205 wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); ··· 208 207 * from the last not overwritten vector (wptr + 32). Hopefully 209 208 * this should allow us to catchup. 210 209 */ 211 - tmp = (wptr + 32) & adev->irq.ih.ptr_mask; 210 + tmp = (wptr + 32) & ih->ptr_mask; 212 211 dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", 213 - wptr, adev->irq.ih.rptr, tmp); 214 - adev->irq.ih.rptr = tmp; 212 + wptr, ih->rptr, tmp); 213 + ih->rptr = tmp; 215 214 216 215 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL)); 217 216 tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); 218 217 WREG32_NO_KIQ(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL), tmp); 219 218 } 220 - return (wptr & adev->irq.ih.ptr_mask); 219 + return (wptr & ih->ptr_mask); 221 220 } 222 221 223 222 /** ··· 229 228 * position and also advance the position. 230 229 */ 231 230 static void vega10_ih_decode_iv(struct amdgpu_device *adev, 232 - struct amdgpu_iv_entry *entry) 231 + struct amdgpu_ih_ring *ih, 232 + struct amdgpu_iv_entry *entry) 233 233 { 234 234 /* wptr/rptr are in bytes! 
*/ 235 - u32 ring_index = adev->irq.ih.rptr >> 2; 235 + u32 ring_index = ih->rptr >> 2; 236 236 uint32_t dw[8]; 237 237 238 - dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]); 239 - dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]); 240 - dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]); 241 - dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]); 242 - dw[4] = le32_to_cpu(adev->irq.ih.ring[ring_index + 4]); 243 - dw[5] = le32_to_cpu(adev->irq.ih.ring[ring_index + 5]); 244 - dw[6] = le32_to_cpu(adev->irq.ih.ring[ring_index + 6]); 245 - dw[7] = le32_to_cpu(adev->irq.ih.ring[ring_index + 7]); 238 + dw[0] = le32_to_cpu(ih->ring[ring_index + 0]); 239 + dw[1] = le32_to_cpu(ih->ring[ring_index + 1]); 240 + dw[2] = le32_to_cpu(ih->ring[ring_index + 2]); 241 + dw[3] = le32_to_cpu(ih->ring[ring_index + 3]); 242 + dw[4] = le32_to_cpu(ih->ring[ring_index + 4]); 243 + dw[5] = le32_to_cpu(ih->ring[ring_index + 5]); 244 + dw[6] = le32_to_cpu(ih->ring[ring_index + 6]); 245 + dw[7] = le32_to_cpu(ih->ring[ring_index + 7]); 246 246 247 247 entry->client_id = dw[0] & 0xff; 248 248 entry->src_id = (dw[0] >> 8) & 0xff; ··· 259 257 entry->src_data[2] = dw[6]; 260 258 entry->src_data[3] = dw[7]; 261 259 262 - 263 260 /* wptr/rptr are in bytes! */ 264 - adev->irq.ih.rptr += 32; 261 + ih->rptr += 32; 265 262 } 266 263 267 264 /** ··· 270 269 * 271 270 * Set the IH ring buffer rptr. 
272 271 */ 273 - static void vega10_ih_set_rptr(struct amdgpu_device *adev) 272 + static void vega10_ih_set_rptr(struct amdgpu_device *adev, 273 + struct amdgpu_ih_ring *ih) 274 274 { 275 - if (adev->irq.ih.use_doorbell) { 275 + if (ih->use_doorbell) { 276 276 /* XXX check if swapping is necessary on BE */ 277 - if (adev->irq.ih.use_bus_addr) 278 - adev->irq.ih.ring[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr; 277 + if (ih->use_bus_addr) 278 + ih->ring[ih->rptr_offs] = ih->rptr; 279 279 else 280 - adev->wb.wb[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr; 281 - WDOORBELL32(adev->irq.ih.doorbell_index, adev->irq.ih.rptr); 280 + adev->wb.wb[ih->rptr_offs] = ih->rptr; 281 + WDOORBELL32(ih->doorbell_index, ih->rptr); 282 282 } else { 283 - WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, adev->irq.ih.rptr); 283 + WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr); 284 284 } 285 285 } 286 286