Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/msm: Support multiple ringbuffers

Add the infrastructure to support the idea of multiple ringbuffers.
Assign each ringbuffer an id and use that as an index for the various
ring specific operations.

The biggest delta is to support legacy fences. Each fence gets its own
sequence number but the legacy functions expect to use a unique integer.
To handle this we return a unique identifier for each submission but
map it to a specific ring/sequence under the covers. Newer users use
a dma_fence pointer anyway so they don't care about the actual sequence
ID or ring.

The actual mechanics for multiple ringbuffers are very target specific
so this code just allows for the possibility but still only defines
one ringbuffer for each target family.

Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
Signed-off-by: Rob Clark <robdclark@gmail.com>

Authored by Jordan Crouse; committed by Rob Clark.
f97decac cd414f3d

+1256 -215
+5 -4
drivers/gpu/drm/msm/adreno/a3xx_gpu.c
··· 44 44 45 45 static bool a3xx_me_init(struct msm_gpu *gpu) 46 46 { 47 - struct msm_ringbuffer *ring = gpu->rb; 47 + struct msm_ringbuffer *ring = gpu->rb[0]; 48 48 49 49 OUT_PKT3(ring, CP_ME_INIT, 17); 50 50 OUT_RING(ring, 0x000003f7); ··· 65 65 OUT_RING(ring, 0x00000000); 66 66 OUT_RING(ring, 0x00000000); 67 67 68 - gpu->funcs->flush(gpu); 68 + gpu->funcs->flush(gpu, ring); 69 69 return a3xx_idle(gpu); 70 70 } 71 71 ··· 339 339 static bool a3xx_idle(struct msm_gpu *gpu) 340 340 { 341 341 /* wait for ringbuffer to drain: */ 342 - if (!adreno_idle(gpu)) 342 + if (!adreno_idle(gpu, gpu->rb[0])) 343 343 return false; 344 344 345 345 /* then wait for GPU to finish: */ ··· 446 446 .recover = a3xx_recover, 447 447 .submit = adreno_submit, 448 448 .flush = adreno_flush, 449 + .active_ring = adreno_active_ring, 449 450 .irq = a3xx_irq, 450 451 .destroy = a3xx_destroy, 451 452 #ifdef CONFIG_DEBUG_FS ··· 492 491 adreno_gpu->registers = a3xx_registers; 493 492 adreno_gpu->reg_offsets = a3xx_register_offsets; 494 493 495 - ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs); 494 + ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); 496 495 if (ret) 497 496 goto fail; 498 497
+5 -4
drivers/gpu/drm/msm/adreno/a4xx_gpu.c
··· 116 116 117 117 static bool a4xx_me_init(struct msm_gpu *gpu) 118 118 { 119 - struct msm_ringbuffer *ring = gpu->rb; 119 + struct msm_ringbuffer *ring = gpu->rb[0]; 120 120 121 121 OUT_PKT3(ring, CP_ME_INIT, 17); 122 122 OUT_RING(ring, 0x000003f7); ··· 137 137 OUT_RING(ring, 0x00000000); 138 138 OUT_RING(ring, 0x00000000); 139 139 140 - gpu->funcs->flush(gpu); 140 + gpu->funcs->flush(gpu, ring); 141 141 return a4xx_idle(gpu); 142 142 } 143 143 ··· 337 337 static bool a4xx_idle(struct msm_gpu *gpu) 338 338 { 339 339 /* wait for ringbuffer to drain: */ 340 - if (!adreno_idle(gpu)) 340 + if (!adreno_idle(gpu, gpu->rb[0])) 341 341 return false; 342 342 343 343 /* then wait for GPU to finish: */ ··· 534 534 .recover = a4xx_recover, 535 535 .submit = adreno_submit, 536 536 .flush = adreno_flush, 537 + .active_ring = adreno_active_ring, 537 538 .irq = a4xx_irq, 538 539 .destroy = a4xx_destroy, 539 540 #ifdef CONFIG_DEBUG_FS ··· 574 573 adreno_gpu->registers = a4xx_registers; 575 574 adreno_gpu->reg_offsets = a4xx_register_offsets; 576 575 577 - ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs); 576 + ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); 578 577 if (ret) 579 578 goto fail; 580 579
+28 -26
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
··· 117 117 struct msm_file_private *ctx) 118 118 { 119 119 struct msm_drm_private *priv = gpu->dev->dev_private; 120 - struct msm_ringbuffer *ring = gpu->rb; 120 + struct msm_ringbuffer *ring = submit->ring; 121 121 unsigned int i, ibs = 0; 122 122 123 123 for (i = 0; i < submit->nr_cmds; i++) { ··· 138 138 } 139 139 140 140 OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1); 141 - OUT_RING(ring, submit->fence->seqno); 141 + OUT_RING(ring, submit->seqno); 142 142 143 143 OUT_PKT7(ring, CP_EVENT_WRITE, 4); 144 144 OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31)); 145 - OUT_RING(ring, lower_32_bits(rbmemptr(gpu, fence))); 146 - OUT_RING(ring, upper_32_bits(rbmemptr(gpu, fence))); 147 - OUT_RING(ring, submit->fence->seqno); 145 + OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence))); 146 + OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence))); 147 + OUT_RING(ring, submit->seqno); 148 148 149 - gpu->funcs->flush(gpu); 149 + gpu->funcs->flush(gpu, ring); 150 150 } 151 151 152 152 static const struct { ··· 262 262 static int a5xx_me_init(struct msm_gpu *gpu) 263 263 { 264 264 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); 265 - struct msm_ringbuffer *ring = gpu->rb; 265 + struct msm_ringbuffer *ring = gpu->rb[0]; 266 266 267 267 OUT_PKT7(ring, CP_ME_INIT, 8); 268 268 ··· 293 293 OUT_RING(ring, 0x00000000); 294 294 OUT_RING(ring, 0x00000000); 295 295 296 - gpu->funcs->flush(gpu); 297 - 298 - return a5xx_idle(gpu) ? 0 : -EINVAL; 296 + gpu->funcs->flush(gpu, ring); 297 + return a5xx_idle(gpu, ring) ? 
0 : -EINVAL; 299 298 } 300 299 301 300 static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu, ··· 580 581 * ticking correctly 581 582 */ 582 583 if (adreno_is_a530(adreno_gpu)) { 583 - OUT_PKT7(gpu->rb, CP_EVENT_WRITE, 1); 584 - OUT_RING(gpu->rb, 0x0F); 584 + OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1); 585 + OUT_RING(gpu->rb[0], 0x0F); 585 586 586 - gpu->funcs->flush(gpu); 587 - if (!a5xx_idle(gpu)) 587 + gpu->funcs->flush(gpu, gpu->rb[0]); 588 + if (!a5xx_idle(gpu, gpu->rb[0])) 588 589 return -EINVAL; 589 590 } 590 591 ··· 597 598 */ 598 599 ret = a5xx_zap_shader_init(gpu); 599 600 if (!ret) { 600 - OUT_PKT7(gpu->rb, CP_SET_SECURE_MODE, 1); 601 - OUT_RING(gpu->rb, 0x00000000); 601 + OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1); 602 + OUT_RING(gpu->rb[0], 0x00000000); 602 603 603 - gpu->funcs->flush(gpu); 604 - if (!a5xx_idle(gpu)) 604 + gpu->funcs->flush(gpu, gpu->rb[0]); 605 + if (!a5xx_idle(gpu, gpu->rb[0])) 605 606 return -EINVAL; 606 607 } else { 607 608 /* Print a warning so if we die, we know why */ ··· 675 676 A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT); 676 677 } 677 678 678 - bool a5xx_idle(struct msm_gpu *gpu) 679 + bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring) 679 680 { 680 681 /* wait for CP to drain ringbuffer: */ 681 - if (!adreno_idle(gpu)) 682 + if (!adreno_idle(gpu, ring)) 682 683 return false; 683 684 684 685 if (spin_until(_a5xx_check_idle(gpu))) { 685 - DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X\n", 686 + DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n", 686 687 gpu->name, __builtin_return_address(0), 687 688 gpu_read(gpu, REG_A5XX_RBBM_STATUS), 688 - gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS)); 689 - 689 + gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS), 690 + gpu_read(gpu, REG_A5XX_CP_RB_RPTR), 691 + gpu_read(gpu, REG_A5XX_CP_RB_WPTR)); 690 692 return false; 691 693 } 692 694 ··· 818 818 { 819 819 struct drm_device *dev = gpu->dev; 820 820 struct 
msm_drm_private *priv = dev->dev_private; 821 + struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu); 821 822 822 - dev_err(dev->dev, "gpu fault fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n", 823 - gpu->memptrs->fence, 823 + dev_err(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n", 824 + ring ? ring->id : -1, ring ? ring->seqno : 0, 824 825 gpu_read(gpu, REG_A5XX_RBBM_STATUS), 825 826 gpu_read(gpu, REG_A5XX_CP_RB_RPTR), 826 827 gpu_read(gpu, REG_A5XX_CP_RB_WPTR), ··· 1011 1010 .recover = a5xx_recover, 1012 1011 .submit = a5xx_submit, 1013 1012 .flush = adreno_flush, 1013 + .active_ring = adreno_active_ring, 1014 1014 .irq = a5xx_irq, 1015 1015 .destroy = a5xx_destroy, 1016 1016 #ifdef CONFIG_DEBUG_FS ··· 1047 1045 1048 1046 a5xx_gpu->lm_leakage = 0x4E001A; 1049 1047 1050 - ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs); 1048 + ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); 1051 1049 if (ret) { 1052 1050 a5xx_destroy(&(a5xx_gpu->base.base)); 1053 1051 return ERR_PTR(ret);
+1 -1
drivers/gpu/drm/msm/adreno/a5xx_gpu.h
··· 55 55 return -ETIMEDOUT; 56 56 } 57 57 58 - bool a5xx_idle(struct msm_gpu *gpu); 58 + bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring); 59 59 void a5xx_set_hwcg(struct msm_gpu *gpu, bool state); 60 60 61 61 #endif /* __A5XX_GPU_H__ */
+3 -3
drivers/gpu/drm/msm/adreno/a5xx_power.c
··· 173 173 { 174 174 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); 175 175 struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); 176 - struct msm_ringbuffer *ring = gpu->rb; 176 + struct msm_ringbuffer *ring = gpu->rb[0]; 177 177 178 178 if (!a5xx_gpu->gpmu_dwords) 179 179 return 0; ··· 192 192 OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1); 193 193 OUT_RING(ring, 1); 194 194 195 - gpu->funcs->flush(gpu); 195 + gpu->funcs->flush(gpu, ring); 196 196 197 - if (!a5xx_idle(gpu)) { 197 + if (!a5xx_idle(gpu, ring)) { 198 198 DRM_ERROR("%s: Unable to load GPMU firmware. GPMU will not be active\n", 199 199 gpu->name); 200 200 return -EINVAL;
+83 -55
drivers/gpu/drm/msm/adreno/adreno_gpu.c
··· 21 21 #include "msm_gem.h" 22 22 #include "msm_mmu.h" 23 23 24 - #define RB_SIZE SZ_32K 25 24 #define RB_BLKSIZE 32 26 25 27 26 int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value) ··· 162 163 int adreno_hw_init(struct msm_gpu *gpu) 163 164 { 164 165 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); 165 - int ret; 166 + int ret, i; 166 167 167 168 DBG("%s", gpu->name); 168 169 ··· 170 171 if (ret) 171 172 return ret; 172 173 173 - ret = msm_gem_get_iova(gpu->rb->bo, gpu->aspace, &gpu->rb_iova); 174 - if (ret) { 175 - gpu->rb_iova = 0; 176 - dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret); 177 - return ret; 174 + for (i = 0; i < gpu->nr_rings; i++) { 175 + struct msm_ringbuffer *ring = gpu->rb[i]; 176 + 177 + if (!ring) 178 + continue; 179 + 180 + ret = msm_gem_get_iova(ring->bo, gpu->aspace, &ring->iova); 181 + if (ret) { 182 + ring->iova = 0; 183 + dev_err(gpu->dev->dev, 184 + "could not map ringbuffer %d: %d\n", i, ret); 185 + return ret; 186 + } 187 + 188 + ring->cur = ring->start; 189 + 190 + /* reset completed fence seqno: */ 191 + ring->memptrs->fence = ring->seqno; 192 + ring->memptrs->rptr = 0; 178 193 } 179 - 180 - /* reset ringbuffer: */ 181 - gpu->rb->cur = gpu->rb->start; 182 - 183 - /* reset completed fence seqno: */ 184 - gpu->memptrs->fence = gpu->fctx->completed_fence; 185 - gpu->memptrs->rptr = 0; 186 194 187 195 /* Setup REG_CP_RB_CNTL: */ 188 196 adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL, 189 - /* size is log2(quad-words): */ 190 - AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) | 191 - AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)) | 192 - (adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0)); 197 + /* size is log2(quad-words): */ 198 + AXXX_CP_RB_CNTL_BUFSZ(ilog2(MSM_GPU_RINGBUFFER_SZ / 8)) | 199 + AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)) | 200 + (adreno_is_a430(adreno_gpu) ? 
AXXX_CP_RB_CNTL_NO_UPDATE : 0)); 193 201 194 - /* Setup ringbuffer address: */ 202 + /* Setup ringbuffer address - use ringbuffer[0] for GPU init */ 195 203 adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE, 196 - REG_ADRENO_CP_RB_BASE_HI, gpu->rb_iova); 204 + REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova); 197 205 198 206 if (!adreno_is_a430(adreno_gpu)) { 199 207 adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR, 200 - REG_ADRENO_CP_RB_RPTR_ADDR_HI, rbmemptr(gpu, rptr)); 208 + REG_ADRENO_CP_RB_RPTR_ADDR_HI, 209 + rbmemptr(gpu->rb[0], rptr)); 201 210 } 202 211 203 212 return 0; ··· 217 210 } 218 211 219 212 /* Use this helper to read rptr, since a430 doesn't update rptr in memory */ 220 - static uint32_t get_rptr(struct adreno_gpu *adreno_gpu) 213 + static uint32_t get_rptr(struct adreno_gpu *adreno_gpu, 214 + struct msm_ringbuffer *ring) 221 215 { 222 - struct msm_gpu *gpu = &adreno_gpu->base; 223 - 224 216 if (adreno_is_a430(adreno_gpu)) 225 - return gpu->memptrs->rptr = adreno_gpu_read( 217 + return ring->memptrs->rptr = adreno_gpu_read( 226 218 adreno_gpu, REG_ADRENO_CP_RB_RPTR); 227 219 else 228 - return gpu->memptrs->rptr; 220 + return ring->memptrs->rptr; 221 + } 222 + 223 + struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu) 224 + { 225 + return gpu->rb[0]; 229 226 } 230 227 231 228 void adreno_recover(struct msm_gpu *gpu) ··· 255 244 { 256 245 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); 257 246 struct msm_drm_private *priv = gpu->dev->dev_private; 258 - struct msm_ringbuffer *ring = gpu->rb; 247 + struct msm_ringbuffer *ring = submit->ring; 259 248 unsigned i; 260 249 261 250 for (i = 0; i < submit->nr_cmds; i++) { ··· 278 267 } 279 268 280 269 OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1); 281 - OUT_RING(ring, submit->fence->seqno); 270 + OUT_RING(ring, submit->seqno); 282 271 283 272 if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) { 284 273 /* Flush HLSQ lazy updates to make sure there is nothing ··· 294 283 295 
284 OUT_PKT3(ring, CP_EVENT_WRITE, 3); 296 285 OUT_RING(ring, CACHE_FLUSH_TS); 297 - OUT_RING(ring, rbmemptr(gpu, fence)); 298 - OUT_RING(ring, submit->fence->seqno); 286 + OUT_RING(ring, rbmemptr(ring, fence)); 287 + OUT_RING(ring, submit->seqno); 299 288 300 289 /* we could maybe be clever and only CP_COND_EXEC the interrupt: */ 301 290 OUT_PKT3(ring, CP_INTERRUPT, 1); ··· 321 310 } 322 311 #endif 323 312 324 - gpu->funcs->flush(gpu); 313 + gpu->funcs->flush(gpu, ring); 325 314 } 326 315 327 - void adreno_flush(struct msm_gpu *gpu) 316 + void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) 328 317 { 329 318 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); 330 319 uint32_t wptr; ··· 334 323 * to account for the possibility that the last command fit exactly into 335 324 * the ringbuffer and rb->next hasn't wrapped to zero yet 336 325 */ 337 - wptr = get_wptr(gpu->rb) & ((gpu->rb->size / 4) - 1); 326 + wptr = get_wptr(ring) % (MSM_GPU_RINGBUFFER_SZ >> 2); 338 327 339 328 /* ensure writes to ringbuffer have hit system memory: */ 340 329 mb(); ··· 342 331 adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr); 343 332 } 344 333 345 - bool adreno_idle(struct msm_gpu *gpu) 334 + bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring) 346 335 { 347 336 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); 348 - uint32_t wptr = get_wptr(gpu->rb); 337 + uint32_t wptr = get_wptr(ring); 349 338 350 339 /* wait for CP to drain ringbuffer: */ 351 - if (!spin_until(get_rptr(adreno_gpu) == wptr)) 340 + if (!spin_until(get_rptr(adreno_gpu, ring) == wptr)) 352 341 return true; 353 342 354 343 /* TODO maybe we need to reset GPU here to recover from hang? 
*/ 355 - DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name); 344 + DRM_ERROR("%s: timeout waiting to drain ringbuffer %d!\n", gpu->name, 345 + ring->id); 356 346 return false; 357 347 } 358 348 ··· 368 356 adreno_gpu->rev.major, adreno_gpu->rev.minor, 369 357 adreno_gpu->rev.patchid); 370 358 371 - seq_printf(m, "fence: %d/%d\n", gpu->memptrs->fence, 372 - gpu->fctx->last_fence); 373 - seq_printf(m, "rptr: %d\n", get_rptr(adreno_gpu)); 374 - seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb)); 359 + for (i = 0; i < gpu->nr_rings; i++) { 360 + struct msm_ringbuffer *ring = gpu->rb[i]; 361 + 362 + seq_printf(m, "rb %d: fence: %d/%d\n", i, 363 + ring->memptrs->fence, ring->seqno); 364 + 365 + seq_printf(m, " rptr: %d\n", 366 + get_rptr(adreno_gpu, ring)); 367 + seq_printf(m, "rb wptr: %d\n", get_wptr(ring)); 368 + } 375 369 376 370 /* dump these out in a form that can be parsed by demsm: */ 377 371 seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name); ··· 403 385 void adreno_dump_info(struct msm_gpu *gpu) 404 386 { 405 387 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); 388 + int i; 406 389 407 390 printk("revision: %d (%d.%d.%d.%d)\n", 408 391 adreno_gpu->info->revn, adreno_gpu->rev.core, 409 392 adreno_gpu->rev.major, adreno_gpu->rev.minor, 410 393 adreno_gpu->rev.patchid); 411 394 412 - printk("fence: %d/%d\n", gpu->memptrs->fence, 413 - gpu->fctx->last_fence); 414 - printk("rptr: %d\n", get_rptr(adreno_gpu)); 415 - printk("rb wptr: %d\n", get_wptr(gpu->rb)); 395 + for (i = 0; i < gpu->nr_rings; i++) { 396 + struct msm_ringbuffer *ring = gpu->rb[i]; 397 + 398 + printk("rb %d: fence: %d/%d\n", i, 399 + ring->memptrs->fence, 400 + ring->seqno); 401 + 402 + printk("rptr: %d\n", get_rptr(adreno_gpu, ring)); 403 + printk("rb wptr: %d\n", get_wptr(ring)); 404 + } 416 405 } 417 406 418 407 /* would be nice to not have to duplicate the _show() stuff with printk(): */ ··· 442 417 } 443 418 } 444 419 445 - static uint32_t ring_freewords(struct 
msm_gpu *gpu) 420 + static uint32_t ring_freewords(struct msm_ringbuffer *ring) 446 421 { 447 - struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); 448 - uint32_t size = gpu->rb->size / 4; 449 - uint32_t wptr = get_wptr(gpu->rb); 450 - uint32_t rptr = get_rptr(adreno_gpu); 422 + struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu); 423 + uint32_t size = MSM_GPU_RINGBUFFER_SZ >> 2; 424 + uint32_t wptr = get_wptr(ring); 425 + uint32_t rptr = get_rptr(adreno_gpu, ring); 451 426 return (rptr + (size - 1) - wptr) % size; 452 427 } 453 428 454 - void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords) 429 + void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords) 455 430 { 456 - if (spin_until(ring_freewords(gpu) >= ndwords)) 457 - DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name); 431 + if (spin_until(ring_freewords(ring) >= ndwords)) 432 + DRM_DEV_ERROR(ring->gpu->dev->dev, 433 + "timeout waiting for space in ringubffer %d\n", 434 + ring->id); 458 435 } 459 436 460 437 int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, 461 - struct adreno_gpu *adreno_gpu, const struct adreno_gpu_funcs *funcs) 438 + struct adreno_gpu *adreno_gpu, 439 + const struct adreno_gpu_funcs *funcs, int nr_rings) 462 440 { 463 441 struct adreno_platform_config *config = pdev->dev.platform_data; 464 442 struct msm_gpu_config adreno_gpu_config = { 0 }; ··· 488 460 adreno_gpu_config.va_start = SZ_16M; 489 461 adreno_gpu_config.va_end = 0xffffffff; 490 462 491 - adreno_gpu_config.ringsz = RB_SIZE; 463 + adreno_gpu_config.nr_rings = nr_rings; 492 464 493 465 pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD); 494 466 pm_runtime_use_autosuspend(&pdev->dev);
+11 -9
drivers/gpu/drm/msm/adreno/adreno_gpu.h
··· 208 208 void adreno_recover(struct msm_gpu *gpu); 209 209 void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, 210 210 struct msm_file_private *ctx); 211 - void adreno_flush(struct msm_gpu *gpu); 212 - bool adreno_idle(struct msm_gpu *gpu); 211 + void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring); 212 + bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring); 213 213 #ifdef CONFIG_DEBUG_FS 214 214 void adreno_show(struct msm_gpu *gpu, struct seq_file *m); 215 215 #endif 216 216 void adreno_dump_info(struct msm_gpu *gpu); 217 217 void adreno_dump(struct msm_gpu *gpu); 218 - void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords); 218 + void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords); 219 + struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu); 219 220 220 221 int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, 221 - struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs); 222 + struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs, 223 + int nr_rings); 222 224 void adreno_gpu_cleanup(struct adreno_gpu *gpu); 223 225 224 226 ··· 229 227 static inline void 230 228 OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt) 231 229 { 232 - adreno_wait_ring(ring->gpu, cnt+1); 230 + adreno_wait_ring(ring, cnt+1); 233 231 OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF)); 234 232 } 235 233 ··· 237 235 static inline void 238 236 OUT_PKT2(struct msm_ringbuffer *ring) 239 237 { 240 - adreno_wait_ring(ring->gpu, 1); 238 + adreno_wait_ring(ring, 1); 241 239 OUT_RING(ring, CP_TYPE2_PKT); 242 240 } 243 241 244 242 static inline void 245 243 OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt) 246 244 { 247 - adreno_wait_ring(ring->gpu, cnt+1); 245 + adreno_wait_ring(ring, cnt+1); 248 246 OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8)); 249 247 } 250 248 ··· 266 264 static inline void 267 265 
OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt) 268 266 { 269 - adreno_wait_ring(ring->gpu, cnt + 1); 267 + adreno_wait_ring(ring, cnt + 1); 270 268 OUT_RING(ring, PKT4(regindx, cnt)); 271 269 } 272 270 273 271 static inline void 274 272 OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt) 275 273 { 276 - adreno_wait_ring(ring->gpu, cnt + 1); 274 + adreno_wait_ring(ring, cnt + 1); 277 275 OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) | 278 276 ((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23)); 279 277 }
+17 -6
drivers/gpu/drm/msm/msm_drv.c
··· 507 507 mutex_unlock(&init_lock); 508 508 } 509 509 510 - static int context_init(struct drm_file *file) 510 + static int context_init(struct drm_device *dev, struct drm_file *file) 511 511 { 512 512 struct msm_file_private *ctx; 513 513 ··· 515 515 if (!ctx) 516 516 return -ENOMEM; 517 517 518 - msm_submitqueue_init(ctx); 518 + msm_submitqueue_init(dev, ctx); 519 519 520 520 file->driver_priv = ctx; 521 521 ··· 529 529 */ 530 530 load_gpu(dev); 531 531 532 - return context_init(file); 532 + return context_init(dev, file); 533 533 } 534 534 535 535 static void context_close(struct msm_file_private *ctx) ··· 743 743 struct msm_drm_private *priv = dev->dev_private; 744 744 struct drm_msm_wait_fence *args = data; 745 745 ktime_t timeout = to_ktime(args->timeout); 746 + struct msm_gpu_submitqueue *queue; 747 + struct msm_gpu *gpu = priv->gpu; 748 + int ret; 746 749 747 750 if (args->pad) { 748 751 DRM_ERROR("invalid pad: %08x\n", args->pad); 749 752 return -EINVAL; 750 753 } 751 754 752 - if (!priv->gpu) 755 + if (!gpu) 753 756 return 0; 754 757 755 - return msm_wait_fence(priv->gpu->fctx, args->fence, &timeout, true); 758 + queue = msm_submitqueue_get(file->driver_priv, args->queueid); 759 + if (!queue) 760 + return -ENOENT; 761 + 762 + ret = msm_wait_fence(gpu->rb[queue->prio]->fctx, args->fence, &timeout, 763 + true); 764 + 765 + msm_submitqueue_put(queue); 766 + return ret; 756 767 } 757 768 758 769 static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data, ··· 813 802 if (args->flags & ~MSM_SUBMITQUEUE_FLAGS) 814 803 return -EINVAL; 815 804 816 - return msm_submitqueue_create(file->driver_priv, args->prio, 805 + return msm_submitqueue_create(dev, file->driver_priv, args->prio, 817 806 args->flags, &args->id); 818 807 } 819 808
+5 -3
drivers/gpu/drm/msm/msm_drv.h
··· 74 74 spinlock_t lock; 75 75 }; 76 76 77 + #define MSM_GPU_MAX_RINGS 1 78 + 77 79 struct msm_drm_private { 78 80 79 81 struct drm_device *dev; ··· 320 318 u32 msm_readl(const void __iomem *addr); 321 319 322 320 struct msm_gpu_submitqueue; 323 - int msm_submitqueue_init(struct msm_file_private *ctx); 321 + int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx); 324 322 struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx, 325 323 u32 id); 326 - int msm_submitqueue_create(struct msm_file_private *ctx, u32 prio, 327 - u32 flags, u32 *id); 324 + int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx, 325 + u32 prio, u32 flags, u32 *id); 328 326 int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id); 329 327 void msm_submitqueue_close(struct msm_file_private *ctx); 330 328
+1 -1
drivers/gpu/drm/msm/msm_fence.c
··· 31 31 return ERR_PTR(-ENOMEM); 32 32 33 33 fctx->dev = dev; 34 - fctx->name = name; 34 + strncpy(fctx->name, name, sizeof(fctx->name)); 35 35 fctx->context = dma_fence_context_alloc(1); 36 36 init_waitqueue_head(&fctx->event); 37 37 spin_lock_init(&fctx->spinlock);
+1 -1
drivers/gpu/drm/msm/msm_fence.h
··· 22 22 23 23 struct msm_fence_context { 24 24 struct drm_device *dev; 25 - const char *name; 25 + char name[32]; 26 26 unsigned context; 27 27 /* last_fence == completed_fence --> no pending work */ 28 28 uint32_t last_fence; /* last assigned fence */
+3 -1
drivers/gpu/drm/msm/msm_gem.h
··· 138 138 struct msm_gem_submit { 139 139 struct drm_device *dev; 140 140 struct msm_gpu *gpu; 141 - struct list_head node; /* node in gpu submit_list */ 141 + struct list_head node; /* node in ring submit list */ 142 142 struct list_head bo_list; 143 143 struct ww_acquire_ctx ticket; 144 + uint32_t seqno; /* Sequence number of the submit on the ring */ 144 145 struct dma_fence *fence; 145 146 struct msm_gpu_submitqueue *queue; 146 147 struct pid *pid; /* submitting process */ 147 148 bool valid; /* true if no cmdstream patching needed */ 149 + struct msm_ringbuffer *ring; 148 150 unsigned int nr_cmds; 149 151 unsigned int nr_bos; 150 152 struct {
+8 -4
drivers/gpu/drm/msm/msm_gem_submit.c
··· 51 51 submit->pid = get_pid(task_pid(current)); 52 52 submit->cmd = (void *)&submit->bos[nr_bos]; 53 53 submit->queue = queue; 54 + submit->ring = gpu->rb[queue->prio]; 54 55 55 56 /* initially, until copy_from_user() and bo lookup succeeds: */ 56 57 submit->nr_bos = 0; ··· 248 247 if (no_implicit) 249 248 continue; 250 249 251 - ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write); 250 + ret = msm_gem_sync_object(&msm_obj->base, submit->ring->fctx, 251 + write); 252 252 if (ret) 253 253 break; 254 254 } ··· 412 410 struct dma_fence *in_fence = NULL; 413 411 struct sync_file *sync_file = NULL; 414 412 struct msm_gpu_submitqueue *queue; 413 + struct msm_ringbuffer *ring; 415 414 int out_fence_fd = -1; 416 415 unsigned i; 417 416 int ret; ··· 433 430 if (!queue) 434 431 return -ENOENT; 435 432 433 + ring = gpu->rb[queue->prio]; 434 + 436 435 if (args->flags & MSM_SUBMIT_FENCE_FD_IN) { 437 436 in_fence = sync_file_get_fence(args->fence_fd); 438 437 ··· 445 440 * Wait if the fence is from a foreign context, or if the fence 446 441 * array contains any fence from a foreign context. 447 442 */ 448 - if (!dma_fence_match_context(in_fence, gpu->fctx->context)) { 443 + if (!dma_fence_match_context(in_fence, ring->fctx->context)) { 449 444 ret = dma_fence_wait(in_fence, true); 450 445 if (ret) 451 446 return ret; ··· 548 543 549 544 submit->nr_cmds = i; 550 545 551 - submit->fence = msm_fence_alloc(gpu->fctx); 552 - 546 + submit->fence = msm_fence_alloc(ring->fctx); 553 547 if (IS_ERR(submit->fence)) { 554 548 ret = PTR_ERR(submit->fence); 555 549 submit->fence = NULL;
+111 -52
drivers/gpu/drm/msm/msm_gpu.c
··· 221 221 * Hangcheck detection for locked gpu: 222 222 */ 223 223 224 + static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring, 225 + uint32_t fence) 226 + { 227 + struct msm_gem_submit *submit; 228 + 229 + list_for_each_entry(submit, &ring->submits, node) { 230 + if (submit->seqno > fence) 231 + break; 232 + 233 + msm_update_fence(submit->ring->fctx, 234 + submit->fence->seqno); 235 + } 236 + } 237 + 224 238 static void retire_submits(struct msm_gpu *gpu); 225 239 226 240 static void recover_worker(struct work_struct *work) ··· 242 228 struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work); 243 229 struct drm_device *dev = gpu->dev; 244 230 struct msm_gem_submit *submit; 245 - uint32_t fence = gpu->memptrs->fence; 231 + struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu); 232 + uint64_t fence; 233 + int i; 246 234 247 - msm_update_fence(gpu->fctx, fence + 1); 235 + /* Update all the rings with the latest and greatest fence */ 236 + for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) { 237 + struct msm_ringbuffer *ring = gpu->rb[i]; 238 + 239 + fence = ring->memptrs->fence; 240 + 241 + /* 242 + * For the current (faulting?) 
ring/submit advance the fence by 243 + * one more to clear the faulting submit 244 + */ 245 + if (ring == cur_ring) 246 + fence = fence + 1; 247 + 248 + update_fences(gpu, ring, fence); 249 + } 248 250 249 251 mutex_lock(&dev->struct_mutex); 250 252 253 + 251 254 dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name); 252 - list_for_each_entry(submit, &gpu->submit_list, node) { 253 - if (submit->fence->seqno == (fence + 1)) { 255 + fence = cur_ring->memptrs->fence + 1; 256 + 257 + list_for_each_entry(submit, &cur_ring->submits, node) { 258 + if (submit->seqno == fence) { 254 259 struct task_struct *task; 255 260 256 261 rcu_read_lock(); ··· 291 258 gpu->funcs->recover(gpu); 292 259 pm_runtime_put_sync(&gpu->pdev->dev); 293 260 294 - /* replay the remaining submits after the one that hung: */ 295 - list_for_each_entry(submit, &gpu->submit_list, node) { 296 - gpu->funcs->submit(gpu, submit, NULL); 261 + /* 262 + * Replay all remaining submits starting with highest priority 263 + * ring 264 + */ 265 + 266 + for (i = gpu->nr_rings - 1; i >= 0; i--) { 267 + struct msm_ringbuffer *ring = gpu->rb[i]; 268 + 269 + list_for_each_entry(submit, &ring->submits, node) 270 + gpu->funcs->submit(gpu, submit, NULL); 297 271 } 298 272 } 299 273 ··· 321 281 struct msm_gpu *gpu = (struct msm_gpu *)data; 322 282 struct drm_device *dev = gpu->dev; 323 283 struct msm_drm_private *priv = dev->dev_private; 324 - uint32_t fence = gpu->memptrs->fence; 284 + struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu); 285 + uint32_t fence = ring->memptrs->fence; 325 286 326 - if (fence != gpu->hangcheck_fence) { 287 + if (fence != ring->hangcheck_fence) { 327 288 /* some progress has been made.. ya! */ 328 - gpu->hangcheck_fence = fence; 329 - } else if (fence < gpu->fctx->last_fence) { 289 + ring->hangcheck_fence = fence; 290 + } else if (fence < ring->seqno) { 330 291 /* no progress and not done.. hung! 
*/ 331 - gpu->hangcheck_fence = fence; 332 - dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n", 333 - gpu->name); 292 + ring->hangcheck_fence = fence; 293 + dev_err(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n", 294 + gpu->name, ring->id); 334 295 dev_err(dev->dev, "%s: completed fence: %u\n", 335 296 gpu->name, fence); 336 297 dev_err(dev->dev, "%s: submitted fence: %u\n", 337 - gpu->name, gpu->fctx->last_fence); 298 + gpu->name, ring->seqno); 299 + 338 300 queue_work(priv->wq, &gpu->recover_work); 339 301 } 340 302 341 303 /* if still more pending work, reset the hangcheck timer: */ 342 - if (gpu->fctx->last_fence > gpu->hangcheck_fence) 304 + if (ring->seqno > ring->hangcheck_fence) 343 305 hangcheck_timer_reset(gpu); 344 306 345 307 /* workaround for missing irq: */ ··· 470 428 static void retire_submits(struct msm_gpu *gpu) 471 429 { 472 430 struct drm_device *dev = gpu->dev; 431 + struct msm_gem_submit *submit, *tmp; 432 + int i; 473 433 474 434 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 475 435 476 - while (!list_empty(&gpu->submit_list)) { 477 - struct msm_gem_submit *submit; 436 + /* Retire the commits starting with highest priority */ 437 + for (i = gpu->nr_rings - 1; i >= 0; i--) { 438 + struct msm_ringbuffer *ring = gpu->rb[i]; 478 439 479 - submit = list_first_entry(&gpu->submit_list, 480 - struct msm_gem_submit, node); 481 - 482 - if (dma_fence_is_signaled(submit->fence)) { 483 - retire_submit(gpu, submit); 484 - } else { 485 - break; 440 + list_for_each_entry_safe(submit, tmp, &ring->submits, node) { 441 + if (dma_fence_is_signaled(submit->fence)) 442 + retire_submit(gpu, submit); 486 443 } 487 444 } 488 445 } ··· 490 449 { 491 450 struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work); 492 451 struct drm_device *dev = gpu->dev; 493 - uint32_t fence = gpu->memptrs->fence; 452 + int i; 494 453 495 - msm_update_fence(gpu->fctx, fence); 454 + for (i = 0; i < gpu->nr_rings; i++) 455 + update_fences(gpu, gpu->rb[i], 
gpu->rb[i]->memptrs->fence); 496 456 497 457 mutex_lock(&dev->struct_mutex); 498 458 retire_submits(gpu); ··· 514 472 { 515 473 struct drm_device *dev = gpu->dev; 516 474 struct msm_drm_private *priv = dev->dev_private; 475 + struct msm_ringbuffer *ring = submit->ring; 517 476 int i; 518 477 519 478 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); ··· 523 480 524 481 msm_gpu_hw_init(gpu); 525 482 526 - list_add_tail(&submit->node, &gpu->submit_list); 483 + submit->seqno = ++ring->seqno; 484 + 485 + list_add_tail(&submit->node, &ring->submits); 527 486 528 487 msm_rd_dump_submit(submit); 529 488 ··· 650 605 struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs, 651 606 const char *name, struct msm_gpu_config *config) 652 607 { 653 - int ret; 608 + int i, ret, nr_rings = config->nr_rings; 609 + void *memptrs; 610 + uint64_t memptrs_iova; 654 611 655 612 if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs))) 656 613 gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs); ··· 660 613 gpu->dev = drm; 661 614 gpu->funcs = funcs; 662 615 gpu->name = name; 663 - gpu->fctx = msm_fence_context_alloc(drm, name); 664 - if (IS_ERR(gpu->fctx)) { 665 - ret = PTR_ERR(gpu->fctx); 666 - gpu->fctx = NULL; 667 - goto fail; 668 - } 669 616 670 617 INIT_LIST_HEAD(&gpu->active_list); 671 618 INIT_WORK(&gpu->retire_work, retire_worker); 672 619 INIT_WORK(&gpu->recover_work, recover_worker); 673 620 674 - INIT_LIST_HEAD(&gpu->submit_list); 675 621 676 622 setup_timer(&gpu->hangcheck_timer, hangcheck_handler, 677 623 (unsigned long)gpu); ··· 729 689 goto fail; 730 690 } 731 691 732 - gpu->memptrs = msm_gem_kernel_new(drm, sizeof(*gpu->memptrs_bo), 692 + memptrs = msm_gem_kernel_new(drm, sizeof(*gpu->memptrs_bo), 733 693 MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo, 734 - &gpu->memptrs_iova); 694 + &memptrs_iova); 735 695 736 - if (IS_ERR(gpu->memptrs)) { 737 - ret = PTR_ERR(gpu->memptrs); 738 - gpu->memptrs = NULL; 696 + if (IS_ERR(memptrs)) { 697 + ret = PTR_ERR(memptrs); 739 698 
dev_err(drm->dev, "could not allocate memptrs: %d\n", ret); 740 699 goto fail; 741 700 } 742 701 743 - /* Create ringbuffer: */ 744 - gpu->rb = msm_ringbuffer_new(gpu, config->ringsz); 745 - if (IS_ERR(gpu->rb)) { 746 - ret = PTR_ERR(gpu->rb); 747 - gpu->rb = NULL; 748 - dev_err(drm->dev, "could not create ringbuffer: %d\n", ret); 749 - goto fail; 702 + if (nr_rings > ARRAY_SIZE(gpu->rb)) { 703 + DRM_DEV_INFO_ONCE(drm->dev, "Only creating %lu ringbuffers\n", 704 + ARRAY_SIZE(gpu->rb)); 705 + nr_rings = ARRAY_SIZE(gpu->rb); 750 706 } 707 + 708 + /* Create ringbuffer(s): */ 709 + for (i = 0; i < nr_rings; i++) { 710 + gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova); 711 + 712 + if (IS_ERR(gpu->rb[i])) { 713 + ret = PTR_ERR(gpu->rb[i]); 714 + dev_err(drm->dev, 715 + "could not create ringbuffer %d: %d\n", i, ret); 716 + goto fail; 717 + } 718 + 719 + memptrs += sizeof(struct msm_rbmemptrs); 720 + memptrs_iova += sizeof(struct msm_rbmemptrs); 721 + } 722 + 723 + gpu->nr_rings = nr_rings; 751 724 752 725 return 0; 753 726 754 727 fail: 728 + for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) { 729 + msm_ringbuffer_destroy(gpu->rb[i]); 730 + gpu->rb[i] = NULL; 731 + } 732 + 755 733 if (gpu->memptrs_bo) { 756 734 msm_gem_put_vaddr(gpu->memptrs_bo); 757 735 msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace); ··· 782 724 783 725 void msm_gpu_cleanup(struct msm_gpu *gpu) 784 726 { 727 + int i; 728 + 785 729 DBG("%s", gpu->name); 786 730 787 731 WARN_ON(!list_empty(&gpu->active_list)); 788 732 789 733 bs_fini(gpu); 790 734 791 - if (gpu->rb) { 792 - if (gpu->rb_iova) 793 - msm_gem_put_iova(gpu->rb->bo, gpu->aspace); 794 - msm_ringbuffer_destroy(gpu->rb); 735 + for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) { 736 + msm_ringbuffer_destroy(gpu->rb[i]); 737 + gpu->rb[i] = NULL; 795 738 } 796 739 797 740 if (gpu->memptrs_bo) {
+18 -24
drivers/gpu/drm/msm/msm_gpu.h
··· 33 33 const char *irqname; 34 34 uint64_t va_start; 35 35 uint64_t va_end; 36 - unsigned int ringsz; 36 + unsigned int nr_rings; 37 37 }; 38 38 39 39 /* So far, with hardware that I've seen to date, we can have: ··· 57 57 int (*pm_resume)(struct msm_gpu *gpu); 58 58 void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit, 59 59 struct msm_file_private *ctx); 60 - void (*flush)(struct msm_gpu *gpu); 60 + void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring); 61 61 irqreturn_t (*irq)(struct msm_gpu *irq); 62 + struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu); 62 63 void (*recover)(struct msm_gpu *gpu); 63 64 void (*destroy)(struct msm_gpu *gpu); 64 65 #ifdef CONFIG_DEBUG_FS 65 66 /* show GPU status in debugfs: */ 66 67 void (*show)(struct msm_gpu *gpu, struct seq_file *m); 67 68 #endif 68 - }; 69 - 70 - #define rbmemptr(gpu, member) \ 71 - ((gpu)->memptrs_iova + offsetof(struct msm_rbmemptrs, member)) 72 - 73 - struct msm_rbmemptrs { 74 - volatile uint32_t rptr; 75 - volatile uint32_t fence; 76 69 }; 77 70 78 71 struct msm_gpu { ··· 86 93 const struct msm_gpu_perfcntr *perfcntrs; 87 94 uint32_t num_perfcntrs; 88 95 89 - /* ringbuffer: */ 90 - struct msm_ringbuffer *rb; 91 - uint64_t rb_iova; 96 + struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS]; 97 + int nr_rings; 92 98 93 99 /* list of GEM active objects: */ 94 100 struct list_head active_list; 95 - 96 - /* fencing: */ 97 - struct msm_fence_context *fctx; 98 101 99 102 /* does gpu need hw_init? 
*/ 100 103 bool needs_hw_init; ··· 122 133 #define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */ 123 134 #define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD) 124 135 struct timer_list hangcheck_timer; 125 - uint32_t hangcheck_fence; 126 136 struct work_struct recover_work; 127 137 128 - struct list_head submit_list; 129 - 130 - struct msm_rbmemptrs *memptrs; 131 138 struct drm_gem_object *memptrs_bo; 132 - uint64_t memptrs_iova; 133 - 134 - 135 139 }; 140 + 141 + /* It turns out that all targets use the same ringbuffer size */ 142 + #define MSM_GPU_RINGBUFFER_SZ SZ_32K 136 143 137 144 static inline bool msm_gpu_active(struct msm_gpu *gpu) 138 145 { 139 - return gpu->fctx->last_fence > gpu->memptrs->fence; 146 + int i; 147 + 148 + for (i = 0; i < gpu->nr_rings; i++) { 149 + struct msm_ringbuffer *ring = gpu->rb[i]; 150 + 151 + if (ring->seqno > ring->memptrs->fence) 152 + return true; 153 + } 154 + 155 + return false; 140 156 } 141 157 142 158 /* Perf-Counters:
+24 -10
drivers/gpu/drm/msm/msm_ringbuffer.c
··· 18 18 #include "msm_ringbuffer.h" 19 19 #include "msm_gpu.h" 20 20 21 - struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size) 21 + struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, 22 + void *memptrs, uint64_t memptrs_iova) 22 23 { 23 24 struct msm_ringbuffer *ring; 25 + char name[32]; 24 26 int ret; 25 27 26 - if (WARN_ON(!is_power_of_2(size))) 27 - return ERR_PTR(-EINVAL); 28 + /* We assume everwhere that MSM_GPU_RINGBUFFER_SZ is a power of 2 */ 29 + BUILD_BUG_ON(!is_power_of_2(MSM_GPU_RINGBUFFER_SZ)); 28 30 29 31 ring = kzalloc(sizeof(*ring), GFP_KERNEL); 30 32 if (!ring) { ··· 35 33 } 36 34 37 35 ring->gpu = gpu; 38 - 36 + ring->id = id; 39 37 /* Pass NULL for the iova pointer - we will map it later */ 40 - ring->start = msm_gem_kernel_new(gpu->dev, size, MSM_BO_WC, 41 - gpu->aspace, &ring->bo, NULL); 38 + ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ, 39 + MSM_BO_WC, gpu->aspace, &ring->bo, NULL); 42 40 43 41 if (IS_ERR(ring->start)) { 44 42 ret = PTR_ERR(ring->start); 45 43 ring->start = 0; 46 44 goto fail; 47 45 } 48 - ring->end = ring->start + (size / 4); 46 + ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2); 49 47 ring->cur = ring->start; 50 48 51 - ring->size = size; 49 + ring->memptrs = memptrs; 50 + ring->memptrs_iova = memptrs_iova; 51 + 52 + INIT_LIST_HEAD(&ring->submits); 53 + 54 + snprintf(name, sizeof(name), "gpu-ring-%d", ring->id); 55 + 56 + ring->fctx = msm_fence_context_alloc(gpu->dev, name); 52 57 53 58 return ring; 54 59 55 60 fail: 56 - if (ring) 57 - msm_ringbuffer_destroy(ring); 61 + msm_ringbuffer_destroy(ring); 58 62 return ERR_PTR(ret); 59 63 } 60 64 61 65 void msm_ringbuffer_destroy(struct msm_ringbuffer *ring) 62 66 { 67 + if (IS_ERR_OR_NULL(ring)) 68 + return; 69 + 70 + msm_fence_context_free(ring->fctx); 71 + 63 72 if (ring->bo) { 73 + msm_gem_put_iova(ring->bo, ring->gpu->aspace); 64 74 msm_gem_put_vaddr(ring->bo); 65 75 
drm_gem_object_unreference_unlocked(ring->bo); 66 76 }
+22 -6
drivers/gpu/drm/msm/msm_ringbuffer.h
··· 20 20 21 21 #include "msm_drv.h" 22 22 23 - struct msm_ringbuffer { 24 - struct msm_gpu *gpu; 25 - int size; 26 - struct drm_gem_object *bo; 27 - uint32_t *start, *end, *cur; 23 + #define rbmemptr(ring, member) \ 24 + ((ring)->memptrs_iova + offsetof(struct msm_rbmemptrs, member)) 25 + 26 + struct msm_rbmemptrs { 27 + volatile uint32_t rptr; 28 + volatile uint32_t fence; 28 29 }; 29 30 30 - struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size); 31 + struct msm_ringbuffer { 32 + struct msm_gpu *gpu; 33 + int id; 34 + struct drm_gem_object *bo; 35 + uint32_t *start, *end, *cur; 36 + struct list_head submits; 37 + uint64_t iova; 38 + uint32_t seqno; 39 + uint32_t hangcheck_fence; 40 + struct msm_rbmemptrs *memptrs; 41 + uint64_t memptrs_iova; 42 + struct msm_fence_context *fctx; 43 + }; 44 + 45 + struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, 46 + void *memptrs, uint64_t memptrs_iova); 31 47 void msm_ringbuffer_destroy(struct msm_ringbuffer *ring); 32 48 33 49 /* ringbuffer helpers (the parts that are same for a3xx/a2xx/z180..) */
+22 -5
drivers/gpu/drm/msm/msm_submitqueue.c
··· 60 60 msm_submitqueue_put(entry); 61 61 } 62 62 63 - int msm_submitqueue_create(struct msm_file_private *ctx, u32 prio, u32 flags, 64 - u32 *id) 63 + int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx, 64 + u32 prio, u32 flags, u32 *id) 65 65 { 66 + struct msm_drm_private *priv = drm->dev_private; 66 67 struct msm_gpu_submitqueue *queue; 67 68 68 69 if (!ctx) ··· 76 75 77 76 kref_init(&queue->ref); 78 77 queue->flags = flags; 79 - queue->prio = prio; 78 + 79 + if (priv->gpu) { 80 + if (prio >= priv->gpu->nr_rings) 81 + return -EINVAL; 82 + 83 + queue->prio = prio; 84 + } 80 85 81 86 write_lock(&ctx->queuelock); 82 87 ··· 98 91 return 0; 99 92 } 100 93 101 - int msm_submitqueue_init(struct msm_file_private *ctx) 94 + int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx) 102 95 { 96 + struct msm_drm_private *priv = drm->dev_private; 97 + int default_prio; 98 + 103 99 if (!ctx) 104 100 return 0; 101 + 102 + /* 103 + * Select priority 2 as the "default priority" unless nr_rings is less 104 + * than 2 and then pick the lowest pirority 105 + */ 106 + default_prio = priv->gpu ? 107 + clamp_t(uint32_t, 2, 0, priv->gpu->nr_rings - 1) : 0; 105 108 106 109 INIT_LIST_HEAD(&ctx->submitqueues); 107 110 108 111 rwlock_init(&ctx->queuelock); 109 112 110 - return msm_submitqueue_create(ctx, 2, 0, NULL); 113 + return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL); 111 114 } 112 115 113 116 int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
+887
include/dt-bindings/msm/msm-bus-ids.h
··· 1 + /* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. 2 + * 3 + * This program is free software; you can redistribute it and/or modify 4 + * it under the terms of the GNU General Public License version 2 and 5 + * only version 2 as published by the Free Software Foundation. 6 + * 7 + * This program is distributed in the hope that it will be useful, 8 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 9 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 10 + * GNU General Public License for more details. 11 + */ 12 + 13 + #ifndef __MSM_BUS_IDS_H 14 + #define __MSM_BUS_IDS_H 15 + 16 + /* Aggregation types */ 17 + #define AGG_SCHEME_NONE 0 18 + #define AGG_SCHEME_LEG 1 19 + #define AGG_SCHEME_1 2 20 + 21 + /* Topology related enums */ 22 + #define MSM_BUS_FAB_DEFAULT 0 23 + #define MSM_BUS_FAB_APPSS 0 24 + #define MSM_BUS_FAB_SYSTEM 1024 25 + #define MSM_BUS_FAB_MMSS 2048 26 + #define MSM_BUS_FAB_SYSTEM_FPB 3072 27 + #define MSM_BUS_FAB_CPSS_FPB 4096 28 + 29 + #define MSM_BUS_FAB_BIMC 0 30 + #define MSM_BUS_FAB_SYS_NOC 1024 31 + #define MSM_BUS_FAB_MMSS_NOC 2048 32 + #define MSM_BUS_FAB_OCMEM_NOC 3072 33 + #define MSM_BUS_FAB_PERIPH_NOC 4096 34 + #define MSM_BUS_FAB_CONFIG_NOC 5120 35 + #define MSM_BUS_FAB_OCMEM_VNOC 6144 36 + #define MSM_BUS_FAB_MMSS_AHB 2049 37 + #define MSM_BUS_FAB_A0_NOC 6145 38 + #define MSM_BUS_FAB_A1_NOC 6146 39 + #define MSM_BUS_FAB_A2_NOC 6147 40 + #define MSM_BUS_FAB_GNOC 6148 41 + #define MSM_BUS_FAB_CR_VIRT 6149 42 + 43 + #define MSM_BUS_MASTER_FIRST 1 44 + #define MSM_BUS_MASTER_AMPSS_M0 1 45 + #define MSM_BUS_MASTER_AMPSS_M1 2 46 + #define MSM_BUS_APPSS_MASTER_FAB_MMSS 3 47 + #define MSM_BUS_APPSS_MASTER_FAB_SYSTEM 4 48 + #define MSM_BUS_SYSTEM_MASTER_FAB_APPSS 5 49 + #define MSM_BUS_MASTER_SPS 6 50 + #define MSM_BUS_MASTER_ADM_PORT0 7 51 + #define MSM_BUS_MASTER_ADM_PORT1 8 52 + #define MSM_BUS_SYSTEM_MASTER_ADM1_PORT0 9 53 + #define MSM_BUS_MASTER_ADM1_PORT1 10 54 + #define 
MSM_BUS_MASTER_LPASS_PROC 11 55 + #define MSM_BUS_MASTER_MSS_PROCI 12 56 + #define MSM_BUS_MASTER_MSS_PROCD 13 57 + #define MSM_BUS_MASTER_MSS_MDM_PORT0 14 58 + #define MSM_BUS_MASTER_LPASS 15 59 + #define MSM_BUS_SYSTEM_MASTER_CPSS_FPB 16 60 + #define MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB 17 61 + #define MSM_BUS_SYSTEM_MASTER_MMSS_FPB 18 62 + #define MSM_BUS_MASTER_ADM1_CI 19 63 + #define MSM_BUS_MASTER_ADM0_CI 20 64 + #define MSM_BUS_MASTER_MSS_MDM_PORT1 21 65 + #define MSM_BUS_MASTER_MDP_PORT0 22 66 + #define MSM_BUS_MASTER_MDP_PORT1 23 67 + #define MSM_BUS_MMSS_MASTER_ADM1_PORT0 24 68 + #define MSM_BUS_MASTER_ROTATOR 25 69 + #define MSM_BUS_MASTER_GRAPHICS_3D 26 70 + #define MSM_BUS_MASTER_JPEG_DEC 27 71 + #define MSM_BUS_MASTER_GRAPHICS_2D_CORE0 28 72 + #define MSM_BUS_MASTER_VFE 29 73 + #define MSM_BUS_MASTER_VFE0 MSM_BUS_MASTER_VFE 74 + #define MSM_BUS_MASTER_VPE 30 75 + #define MSM_BUS_MASTER_JPEG_ENC 31 76 + #define MSM_BUS_MASTER_GRAPHICS_2D_CORE1 32 77 + #define MSM_BUS_MMSS_MASTER_APPS_FAB 33 78 + #define MSM_BUS_MASTER_HD_CODEC_PORT0 34 79 + #define MSM_BUS_MASTER_HD_CODEC_PORT1 35 80 + #define MSM_BUS_MASTER_SPDM 36 81 + #define MSM_BUS_MASTER_RPM 37 82 + #define MSM_BUS_MASTER_MSS 38 83 + #define MSM_BUS_MASTER_RIVA 39 84 + #define MSM_BUS_MASTER_SNOC_VMEM 40 85 + #define MSM_BUS_MASTER_MSS_SW_PROC 41 86 + #define MSM_BUS_MASTER_MSS_FW_PROC 42 87 + #define MSM_BUS_MASTER_HMSS 43 88 + #define MSM_BUS_MASTER_GSS_NAV 44 89 + #define MSM_BUS_MASTER_PCIE 45 90 + #define MSM_BUS_MASTER_SATA 46 91 + #define MSM_BUS_MASTER_CRYPTO 47 92 + #define MSM_BUS_MASTER_VIDEO_CAP 48 93 + #define MSM_BUS_MASTER_GRAPHICS_3D_PORT1 49 94 + #define MSM_BUS_MASTER_VIDEO_ENC 50 95 + #define MSM_BUS_MASTER_VIDEO_DEC 51 96 + #define MSM_BUS_MASTER_LPASS_AHB 52 97 + #define MSM_BUS_MASTER_QDSS_BAM 53 98 + #define MSM_BUS_MASTER_SNOC_CFG 54 99 + #define MSM_BUS_MASTER_CRYPTO_CORE0 55 100 + #define MSM_BUS_MASTER_CRYPTO_CORE1 56 101 + #define MSM_BUS_MASTER_MSS_NAV 57 102 + #define 
MSM_BUS_MASTER_OCMEM_DMA 58 103 + #define MSM_BUS_MASTER_WCSS 59 104 + #define MSM_BUS_MASTER_QDSS_ETR 60 105 + #define MSM_BUS_MASTER_USB3 61 106 + #define MSM_BUS_MASTER_JPEG 62 107 + #define MSM_BUS_MASTER_VIDEO_P0 63 108 + #define MSM_BUS_MASTER_VIDEO_P1 64 109 + #define MSM_BUS_MASTER_MSS_PROC 65 110 + #define MSM_BUS_MASTER_JPEG_OCMEM 66 111 + #define MSM_BUS_MASTER_MDP_OCMEM 67 112 + #define MSM_BUS_MASTER_VIDEO_P0_OCMEM 68 113 + #define MSM_BUS_MASTER_VIDEO_P1_OCMEM 69 114 + #define MSM_BUS_MASTER_VFE_OCMEM 70 115 + #define MSM_BUS_MASTER_CNOC_ONOC_CFG 71 116 + #define MSM_BUS_MASTER_RPM_INST 72 117 + #define MSM_BUS_MASTER_RPM_DATA 73 118 + #define MSM_BUS_MASTER_RPM_SYS 74 119 + #define MSM_BUS_MASTER_DEHR 75 120 + #define MSM_BUS_MASTER_QDSS_DAP 76 121 + #define MSM_BUS_MASTER_TIC 77 122 + #define MSM_BUS_MASTER_SDCC_1 78 123 + #define MSM_BUS_MASTER_SDCC_3 79 124 + #define MSM_BUS_MASTER_SDCC_4 80 125 + #define MSM_BUS_MASTER_SDCC_2 81 126 + #define MSM_BUS_MASTER_TSIF 82 127 + #define MSM_BUS_MASTER_BAM_DMA 83 128 + #define MSM_BUS_MASTER_BLSP_2 84 129 + #define MSM_BUS_MASTER_USB_HSIC 85 130 + #define MSM_BUS_MASTER_BLSP_1 86 131 + #define MSM_BUS_MASTER_USB_HS 87 132 + #define MSM_BUS_MASTER_PNOC_CFG 88 133 + #define MSM_BUS_MASTER_V_OCMEM_GFX3D 89 134 + #define MSM_BUS_MASTER_IPA 90 135 + #define MSM_BUS_MASTER_QPIC 91 136 + #define MSM_BUS_MASTER_MDPE 92 137 + #define MSM_BUS_MASTER_USB_HS2 93 138 + #define MSM_BUS_MASTER_VPU 94 139 + #define MSM_BUS_MASTER_UFS 95 140 + #define MSM_BUS_MASTER_BCAST 96 141 + #define MSM_BUS_MASTER_CRYPTO_CORE2 97 142 + #define MSM_BUS_MASTER_EMAC 98 143 + #define MSM_BUS_MASTER_VPU_1 99 144 + #define MSM_BUS_MASTER_PCIE_1 100 145 + #define MSM_BUS_MASTER_USB3_1 101 146 + #define MSM_BUS_MASTER_CNOC_MNOC_MMSS_CFG 102 147 + #define MSM_BUS_MASTER_CNOC_MNOC_CFG 103 148 + #define MSM_BUS_MASTER_TCU_0 104 149 + #define MSM_BUS_MASTER_TCU_1 105 150 + #define MSM_BUS_MASTER_CPP 106 151 + #define MSM_BUS_MASTER_AUDIO 107 
152 + #define MSM_BUS_MASTER_PCIE_2 108 153 + #define MSM_BUS_MASTER_VFE1 109 154 + #define MSM_BUS_MASTER_XM_USB_HS1 110 155 + #define MSM_BUS_MASTER_PCNOC_BIMC_1 111 156 + #define MSM_BUS_MASTER_BIMC_PCNOC 112 157 + #define MSM_BUS_MASTER_XI_USB_HSIC 113 158 + #define MSM_BUS_MASTER_SGMII 114 159 + #define MSM_BUS_SPMI_FETCHER 115 160 + #define MSM_BUS_MASTER_GNOC_BIMC 116 161 + #define MSM_BUS_MASTER_CRVIRT_A2NOC 117 162 + #define MSM_BUS_MASTER_CNOC_A2NOC 118 163 + #define MSM_BUS_MASTER_WLAN 119 164 + #define MSM_BUS_MASTER_MSS_CE 120 165 + #define MSM_BUS_MASTER_CDSP_PROC 121 166 + #define MSM_BUS_MASTER_GNOC_SNOC 122 167 + #define MSM_BUS_MASTER_PIMEM 123 168 + #define MSM_BUS_MASTER_MASTER_LAST 124 169 + 170 + #define MSM_BUS_SYSTEM_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB 171 + #define MSM_BUS_CPSS_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_CPSS_FPB 172 + 173 + #define MSM_BUS_SNOC_MM_INT_0 10000 174 + #define MSM_BUS_SNOC_MM_INT_1 10001 175 + #define MSM_BUS_SNOC_MM_INT_2 10002 176 + #define MSM_BUS_SNOC_MM_INT_BIMC 10003 177 + #define MSM_BUS_SNOC_INT_0 10004 178 + #define MSM_BUS_SNOC_INT_1 10005 179 + #define MSM_BUS_SNOC_INT_BIMC 10006 180 + #define MSM_BUS_SNOC_BIMC_0_MAS 10007 181 + #define MSM_BUS_SNOC_BIMC_1_MAS 10008 182 + #define MSM_BUS_SNOC_QDSS_INT 10009 183 + #define MSM_BUS_PNOC_SNOC_MAS 10010 184 + #define MSM_BUS_PNOC_SNOC_SLV 10011 185 + #define MSM_BUS_PNOC_INT_0 10012 186 + #define MSM_BUS_PNOC_INT_1 10013 187 + #define MSM_BUS_PNOC_M_0 10014 188 + #define MSM_BUS_PNOC_M_1 10015 189 + #define MSM_BUS_BIMC_SNOC_MAS 10016 190 + #define MSM_BUS_BIMC_SNOC_SLV 10017 191 + #define MSM_BUS_PNOC_SLV_0 10018 192 + #define MSM_BUS_PNOC_SLV_1 10019 193 + #define MSM_BUS_PNOC_SLV_2 10020 194 + #define MSM_BUS_PNOC_SLV_3 10021 195 + #define MSM_BUS_PNOC_SLV_4 10022 196 + #define MSM_BUS_PNOC_SLV_8 10023 197 + #define MSM_BUS_PNOC_SLV_9 10024 198 + #define MSM_BUS_SNOC_BIMC_0_SLV 10025 199 + #define MSM_BUS_SNOC_BIMC_1_SLV 10026 200 + #define 
MSM_BUS_MNOC_BIMC_MAS 10027 201 + #define MSM_BUS_MNOC_BIMC_SLV 10028 202 + #define MSM_BUS_BIMC_MNOC_MAS 10029 203 + #define MSM_BUS_BIMC_MNOC_SLV 10030 204 + #define MSM_BUS_SNOC_BIMC_MAS 10031 205 + #define MSM_BUS_SNOC_BIMC_SLV 10032 206 + #define MSM_BUS_CNOC_SNOC_MAS 10033 207 + #define MSM_BUS_CNOC_SNOC_SLV 10034 208 + #define MSM_BUS_SNOC_CNOC_MAS 10035 209 + #define MSM_BUS_SNOC_CNOC_SLV 10036 210 + #define MSM_BUS_OVNOC_SNOC_MAS 10037 211 + #define MSM_BUS_OVNOC_SNOC_SLV 10038 212 + #define MSM_BUS_SNOC_OVNOC_MAS 10039 213 + #define MSM_BUS_SNOC_OVNOC_SLV 10040 214 + #define MSM_BUS_SNOC_PNOC_MAS 10041 215 + #define MSM_BUS_SNOC_PNOC_SLV 10042 216 + #define MSM_BUS_BIMC_INT_APPS_EBI 10043 217 + #define MSM_BUS_BIMC_INT_APPS_SNOC 10044 218 + #define MSM_BUS_SNOC_BIMC_2_MAS 10045 219 + #define MSM_BUS_SNOC_BIMC_2_SLV 10046 220 + #define MSM_BUS_PNOC_SLV_5 10047 221 + #define MSM_BUS_PNOC_SLV_7 10048 222 + #define MSM_BUS_PNOC_INT_2 10049 223 + #define MSM_BUS_PNOC_INT_3 10050 224 + #define MSM_BUS_PNOC_INT_4 10051 225 + #define MSM_BUS_PNOC_INT_5 10052 226 + #define MSM_BUS_PNOC_INT_6 10053 227 + #define MSM_BUS_PNOC_INT_7 10054 228 + #define MSM_BUS_BIMC_SNOC_1_MAS 10055 229 + #define MSM_BUS_BIMC_SNOC_1_SLV 10056 230 + #define MSM_BUS_PNOC_A1NOC_MAS 10057 231 + #define MSM_BUS_PNOC_A1NOC_SLV 10058 232 + #define MSM_BUS_CNOC_A1NOC_MAS 10059 233 + #define MSM_BUS_A0NOC_SNOC_MAS 10060 234 + #define MSM_BUS_A0NOC_SNOC_SLV 10061 235 + #define MSM_BUS_A1NOC_SNOC_SLV 10062 236 + #define MSM_BUS_A1NOC_SNOC_MAS 10063 237 + #define MSM_BUS_A2NOC_SNOC_MAS 10064 238 + #define MSM_BUS_A2NOC_SNOC_SLV 10065 239 + #define MSM_BUS_SNOC_INT_2 10066 240 + #define MSM_BUS_A0NOC_QDSS_INT 10067 241 + #define MSM_BUS_INT_LAST 10068 242 + 243 + #define MSM_BUS_INT_TEST_ID 20000 244 + #define MSM_BUS_INT_TEST_LAST 20050 245 + 246 + #define MSM_BUS_SLAVE_FIRST 512 247 + #define MSM_BUS_SLAVE_EBI_CH0 512 248 + #define MSM_BUS_SLAVE_EBI_CH1 513 249 + #define MSM_BUS_SLAVE_AMPSS_L2 
514 250 + #define MSM_BUS_APPSS_SLAVE_FAB_MMSS 515 251 + #define MSM_BUS_APPSS_SLAVE_FAB_SYSTEM 516 252 + #define MSM_BUS_SYSTEM_SLAVE_FAB_APPS 517 253 + #define MSM_BUS_SLAVE_SPS 518 254 + #define MSM_BUS_SLAVE_SYSTEM_IMEM 519 255 + #define MSM_BUS_SLAVE_AMPSS 520 256 + #define MSM_BUS_SLAVE_MSS 521 257 + #define MSM_BUS_SLAVE_LPASS 522 258 + #define MSM_BUS_SYSTEM_SLAVE_CPSS_FPB 523 259 + #define MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB 524 260 + #define MSM_BUS_SYSTEM_SLAVE_MMSS_FPB 525 261 + #define MSM_BUS_SLAVE_CORESIGHT 526 262 + #define MSM_BUS_SLAVE_RIVA 527 263 + #define MSM_BUS_SLAVE_SMI 528 264 + #define MSM_BUS_MMSS_SLAVE_FAB_APPS 529 265 + #define MSM_BUS_MMSS_SLAVE_FAB_APPS_1 530 266 + #define MSM_BUS_SLAVE_MM_IMEM 531 267 + #define MSM_BUS_SLAVE_CRYPTO 532 268 + #define MSM_BUS_SLAVE_SPDM 533 269 + #define MSM_BUS_SLAVE_RPM 534 270 + #define MSM_BUS_SLAVE_RPM_MSG_RAM 535 271 + #define MSM_BUS_SLAVE_MPM 536 272 + #define MSM_BUS_SLAVE_PMIC1_SSBI1_A 537 273 + #define MSM_BUS_SLAVE_PMIC1_SSBI1_B 538 274 + #define MSM_BUS_SLAVE_PMIC1_SSBI1_C 539 275 + #define MSM_BUS_SLAVE_PMIC2_SSBI2_A 540 276 + #define MSM_BUS_SLAVE_PMIC2_SSBI2_B 541 277 + #define MSM_BUS_SLAVE_GSBI1_UART 542 278 + #define MSM_BUS_SLAVE_GSBI2_UART 543 279 + #define MSM_BUS_SLAVE_GSBI3_UART 544 280 + #define MSM_BUS_SLAVE_GSBI4_UART 545 281 + #define MSM_BUS_SLAVE_GSBI5_UART 546 282 + #define MSM_BUS_SLAVE_GSBI6_UART 547 283 + #define MSM_BUS_SLAVE_GSBI7_UART 548 284 + #define MSM_BUS_SLAVE_GSBI8_UART 549 285 + #define MSM_BUS_SLAVE_GSBI9_UART 550 286 + #define MSM_BUS_SLAVE_GSBI10_UART 551 287 + #define MSM_BUS_SLAVE_GSBI11_UART 552 288 + #define MSM_BUS_SLAVE_GSBI12_UART 553 289 + #define MSM_BUS_SLAVE_GSBI1_QUP 554 290 + #define MSM_BUS_SLAVE_GSBI2_QUP 555 291 + #define MSM_BUS_SLAVE_GSBI3_QUP 556 292 + #define MSM_BUS_SLAVE_GSBI4_QUP 557 293 + #define MSM_BUS_SLAVE_GSBI5_QUP 558 294 + #define MSM_BUS_SLAVE_GSBI6_QUP 559 295 + #define MSM_BUS_SLAVE_GSBI7_QUP 560 296 + #define 
MSM_BUS_SLAVE_GSBI8_QUP 561 297 + #define MSM_BUS_SLAVE_GSBI9_QUP 562 298 + #define MSM_BUS_SLAVE_GSBI10_QUP 563 299 + #define MSM_BUS_SLAVE_GSBI11_QUP 564 300 + #define MSM_BUS_SLAVE_GSBI12_QUP 565 301 + #define MSM_BUS_SLAVE_EBI2_NAND 566 302 + #define MSM_BUS_SLAVE_EBI2_CS0 567 303 + #define MSM_BUS_SLAVE_EBI2_CS1 568 304 + #define MSM_BUS_SLAVE_EBI2_CS2 569 305 + #define MSM_BUS_SLAVE_EBI2_CS3 570 306 + #define MSM_BUS_SLAVE_EBI2_CS4 571 307 + #define MSM_BUS_SLAVE_EBI2_CS5 572 308 + #define MSM_BUS_SLAVE_USB_FS1 573 309 + #define MSM_BUS_SLAVE_USB_FS2 574 310 + #define MSM_BUS_SLAVE_TSIF 575 311 + #define MSM_BUS_SLAVE_MSM_TSSC 576 312 + #define MSM_BUS_SLAVE_MSM_PDM 577 313 + #define MSM_BUS_SLAVE_MSM_DIMEM 578 314 + #define MSM_BUS_SLAVE_MSM_TCSR 579 315 + #define MSM_BUS_SLAVE_MSM_PRNG 580 316 + #define MSM_BUS_SLAVE_GSS 581 317 + #define MSM_BUS_SLAVE_SATA 582 318 + #define MSM_BUS_SLAVE_USB3 583 319 + #define MSM_BUS_SLAVE_WCSS 584 320 + #define MSM_BUS_SLAVE_OCIMEM 585 321 + #define MSM_BUS_SLAVE_SNOC_OCMEM 586 322 + #define MSM_BUS_SLAVE_SERVICE_SNOC 587 323 + #define MSM_BUS_SLAVE_QDSS_STM 588 324 + #define MSM_BUS_SLAVE_CAMERA_CFG 589 325 + #define MSM_BUS_SLAVE_DISPLAY_CFG 590 326 + #define MSM_BUS_SLAVE_OCMEM_CFG 591 327 + #define MSM_BUS_SLAVE_CPR_CFG 592 328 + #define MSM_BUS_SLAVE_CPR_XPU_CFG 593 329 + #define MSM_BUS_SLAVE_MISC_CFG 594 330 + #define MSM_BUS_SLAVE_MISC_XPU_CFG 595 331 + #define MSM_BUS_SLAVE_VENUS_CFG 596 332 + #define MSM_BUS_SLAVE_MISC_VENUS_CFG 597 333 + #define MSM_BUS_SLAVE_GRAPHICS_3D_CFG 598 334 + #define MSM_BUS_SLAVE_MMSS_CLK_CFG 599 335 + #define MSM_BUS_SLAVE_MMSS_CLK_XPU_CFG 600 336 + #define MSM_BUS_SLAVE_MNOC_MPU_CFG 601 337 + #define MSM_BUS_SLAVE_ONOC_MPU_CFG 602 338 + #define MSM_BUS_SLAVE_SERVICE_MNOC 603 339 + #define MSM_BUS_SLAVE_OCMEM 604 340 + #define MSM_BUS_SLAVE_SERVICE_ONOC 605 341 + #define MSM_BUS_SLAVE_SDCC_1 606 342 + #define MSM_BUS_SLAVE_SDCC_3 607 343 + #define MSM_BUS_SLAVE_SDCC_2 608 344 + 
#define MSM_BUS_SLAVE_SDCC_4 609 345 + #define MSM_BUS_SLAVE_BAM_DMA 610 346 + #define MSM_BUS_SLAVE_BLSP_2 611 347 + #define MSM_BUS_SLAVE_USB_HSIC 612 348 + #define MSM_BUS_SLAVE_BLSP_1 613 349 + #define MSM_BUS_SLAVE_USB_HS 614 350 + #define MSM_BUS_SLAVE_PDM 615 351 + #define MSM_BUS_SLAVE_PERIPH_APU_CFG 616 352 + #define MSM_BUS_SLAVE_PNOC_MPU_CFG 617 353 + #define MSM_BUS_SLAVE_PRNG 618 354 + #define MSM_BUS_SLAVE_SERVICE_PNOC 619 355 + #define MSM_BUS_SLAVE_CLK_CTL 620 356 + #define MSM_BUS_SLAVE_CNOC_MSS 621 357 + #define MSM_BUS_SLAVE_SECURITY 622 358 + #define MSM_BUS_SLAVE_TCSR 623 359 + #define MSM_BUS_SLAVE_TLMM 624 360 + #define MSM_BUS_SLAVE_CRYPTO_0_CFG 625 361 + #define MSM_BUS_SLAVE_CRYPTO_1_CFG 626 362 + #define MSM_BUS_SLAVE_IMEM_CFG 627 363 + #define MSM_BUS_SLAVE_MESSAGE_RAM 628 364 + #define MSM_BUS_SLAVE_BIMC_CFG 629 365 + #define MSM_BUS_SLAVE_BOOT_ROM 630 366 + #define MSM_BUS_SLAVE_CNOC_MNOC_MMSS_CFG 631 367 + #define MSM_BUS_SLAVE_PMIC_ARB 632 368 + #define MSM_BUS_SLAVE_SPDM_WRAPPER 633 369 + #define MSM_BUS_SLAVE_DEHR_CFG 634 370 + #define MSM_BUS_SLAVE_QDSS_CFG 635 371 + #define MSM_BUS_SLAVE_RBCPR_CFG 636 372 + #define MSM_BUS_SLAVE_RBCPR_QDSS_APU_CFG 637 373 + #define MSM_BUS_SLAVE_SNOC_MPU_CFG 638 374 + #define MSM_BUS_SLAVE_CNOC_ONOC_CFG 639 375 + #define MSM_BUS_SLAVE_CNOC_MNOC_CFG 640 376 + #define MSM_BUS_SLAVE_PNOC_CFG 641 377 + #define MSM_BUS_SLAVE_SNOC_CFG 642 378 + #define MSM_BUS_SLAVE_EBI1_DLL_CFG 643 379 + #define MSM_BUS_SLAVE_PHY_APU_CFG 644 380 + #define MSM_BUS_SLAVE_EBI1_PHY_CFG 645 381 + #define MSM_BUS_SLAVE_SERVICE_CNOC 646 382 + #define MSM_BUS_SLAVE_IPS_CFG 647 383 + #define MSM_BUS_SLAVE_QPIC 648 384 + #define MSM_BUS_SLAVE_DSI_CFG 649 385 + #define MSM_BUS_SLAVE_UFS_CFG 650 386 + #define MSM_BUS_SLAVE_RBCPR_CX_CFG 651 387 + #define MSM_BUS_SLAVE_RBCPR_MX_CFG 652 388 + #define MSM_BUS_SLAVE_PCIE_CFG 653 389 + #define MSM_BUS_SLAVE_USB_PHYS_CFG 654 390 + #define MSM_BUS_SLAVE_VIDEO_CAP_CFG 655 391 + #define 
MSM_BUS_SLAVE_AVSYNC_CFG 656 392 + #define MSM_BUS_SLAVE_CRYPTO_2_CFG 657 393 + #define MSM_BUS_SLAVE_VPU_CFG 658 394 + #define MSM_BUS_SLAVE_BCAST_CFG 659 395 + #define MSM_BUS_SLAVE_KLM_CFG 660 396 + #define MSM_BUS_SLAVE_GENI_IR_CFG 661 397 + #define MSM_BUS_SLAVE_OCMEM_GFX 662 398 + #define MSM_BUS_SLAVE_CATS_128 663 399 + #define MSM_BUS_SLAVE_OCMEM_64 664 400 + #define MSM_BUS_SLAVE_PCIE_0 665 401 + #define MSM_BUS_SLAVE_PCIE_1 666 402 + #define MSM_BUS_SLAVE_PCIE_0_CFG 667 403 + #define MSM_BUS_SLAVE_PCIE_1_CFG 668 404 + #define MSM_BUS_SLAVE_SRVC_MNOC 669 405 + #define MSM_BUS_SLAVE_USB_HS2 670 406 + #define MSM_BUS_SLAVE_AUDIO 671 407 + #define MSM_BUS_SLAVE_TCU 672 408 + #define MSM_BUS_SLAVE_APPSS 673 409 + #define MSM_BUS_SLAVE_PCIE_PARF 674 410 + #define MSM_BUS_SLAVE_USB3_PHY_CFG 675 411 + #define MSM_BUS_SLAVE_IPA_CFG 676 412 + #define MSM_BUS_SLAVE_A0NOC_SNOC 677 413 + #define MSM_BUS_SLAVE_A1NOC_SNOC 678 414 + #define MSM_BUS_SLAVE_A2NOC_SNOC 679 415 + #define MSM_BUS_SLAVE_HMSS_L3 680 416 + #define MSM_BUS_SLAVE_PIMEM_CFG 681 417 + #define MSM_BUS_SLAVE_DCC_CFG 682 418 + #define MSM_BUS_SLAVE_QDSS_RBCPR_APU_CFG 683 419 + #define MSM_BUS_SLAVE_PCIE_2_CFG 684 420 + #define MSM_BUS_SLAVE_PCIE20_AHB2PHY 685 421 + #define MSM_BUS_SLAVE_A0NOC_CFG 686 422 + #define MSM_BUS_SLAVE_A1NOC_CFG 687 423 + #define MSM_BUS_SLAVE_A2NOC_CFG 688 424 + #define MSM_BUS_SLAVE_A1NOC_MPU_CFG 689 425 + #define MSM_BUS_SLAVE_A2NOC_MPU_CFG 690 426 + #define MSM_BUS_SLAVE_A0NOC_SMMU_CFG 691 427 + #define MSM_BUS_SLAVE_A1NOC_SMMU_CFG 692 428 + #define MSM_BUS_SLAVE_A2NOC_SMMU_CFG 693 429 + #define MSM_BUS_SLAVE_LPASS_SMMU_CFG 694 430 + #define MSM_BUS_SLAVE_MMAGIC_CFG 695 431 + #define MSM_BUS_SLAVE_VENUS_THROTTLE_CFG 696 432 + #define MSM_BUS_SLAVE_SSC_CFG 697 433 + #define MSM_BUS_SLAVE_DSA_CFG 698 434 + #define MSM_BUS_SLAVE_DSA_MPU_CFG 699 435 + #define MSM_BUS_SLAVE_DISPLAY_THROTTLE_CFG 700 436 + #define MSM_BUS_SLAVE_SMMU_CPP_CFG 701 437 + #define 
MSM_BUS_SLAVE_SMMU_JPEG_CFG 702 438 + #define MSM_BUS_SLAVE_SMMU_MDP_CFG 703 439 + #define MSM_BUS_SLAVE_SMMU_ROTATOR_CFG 704 440 + #define MSM_BUS_SLAVE_SMMU_VENUS_CFG 705 441 + #define MSM_BUS_SLAVE_SMMU_VFE_CFG 706 442 + #define MSM_BUS_SLAVE_A0NOC_MPU_CFG 707 443 + #define MSM_BUS_SLAVE_VMEM_CFG 708 444 + #define MSM_BUS_SLAVE_CAMERA_THROTTLE_CFG 709 445 + #define MSM_BUS_SLAVE_VMEM 710 446 + #define MSM_BUS_SLAVE_AHB2PHY 711 447 + #define MSM_BUS_SLAVE_PIMEM 712 448 + #define MSM_BUS_SLAVE_SNOC_VMEM 713 449 + #define MSM_BUS_SLAVE_PCIE_2 714 450 + #define MSM_BUS_SLAVE_RBCPR_MX 715 451 + #define MSM_BUS_SLAVE_RBCPR_CX 716 452 + #define MSM_BUS_SLAVE_BIMC_PCNOC 717 453 + #define MSM_BUS_SLAVE_PCNOC_BIMC_1 718 454 + #define MSM_BUS_SLAVE_SGMII 719 455 + #define MSM_BUS_SLAVE_SPMI_FETCHER 720 456 + #define MSM_BUS_PNOC_SLV_6 721 457 + #define MSM_BUS_SLAVE_MMSS_SMMU_CFG 722 458 + #define MSM_BUS_SLAVE_WLAN 723 459 + #define MSM_BUS_SLAVE_CRVIRT_A2NOC 724 460 + #define MSM_BUS_SLAVE_CNOC_A2NOC 725 461 + #define MSM_BUS_SLAVE_GLM 726 462 + #define MSM_BUS_SLAVE_GNOC_BIMC 727 463 + #define MSM_BUS_SLAVE_GNOC_SNOC 728 464 + #define MSM_BUS_SLAVE_QM_CFG 729 465 + #define MSM_BUS_SLAVE_TLMM_EAST 730 466 + #define MSM_BUS_SLAVE_TLMM_NORTH 731 467 + #define MSM_BUS_SLAVE_TLMM_WEST 732 468 + #define MSM_BUS_SLAVE_SKL 733 469 + #define MSM_BUS_SLAVE_LPASS_TCM 734 470 + #define MSM_BUS_SLAVE_TLMM_SOUTH 735 471 + #define MSM_BUS_SLAVE_TLMM_CENTER 736 472 + #define MSM_BUS_MSS_NAV_CE_MPU_CFG 737 473 + #define MSM_BUS_SLAVE_A2NOC_THROTTLE_CFG 738 474 + #define MSM_BUS_SLAVE_CDSP 739 475 + #define MSM_BUS_SLAVE_CDSP_SMMU_CFG 740 476 + #define MSM_BUS_SLAVE_LPASS_MPU_CFG 741 477 + #define MSM_BUS_SLAVE_CSI_PHY_CFG 742 478 + #define MSM_BUS_SLAVE_LAST 743 479 + 480 + #define MSM_BUS_SYSTEM_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB 481 + #define MSM_BUS_CPSS_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_CPSS_FPB 482 + 483 + /* 484 + * ID's used in RPM messages 485 + */ 486 + 
#define ICBID_MASTER_APPSS_PROC 0 487 + #define ICBID_MASTER_MSS_PROC 1 488 + #define ICBID_MASTER_MNOC_BIMC 2 489 + #define ICBID_MASTER_SNOC_BIMC 3 490 + #define ICBID_MASTER_SNOC_BIMC_0 ICBID_MASTER_SNOC_BIMC 491 + #define ICBID_MASTER_CNOC_MNOC_MMSS_CFG 4 492 + #define ICBID_MASTER_CNOC_MNOC_CFG 5 493 + #define ICBID_MASTER_GFX3D 6 494 + #define ICBID_MASTER_JPEG 7 495 + #define ICBID_MASTER_MDP 8 496 + #define ICBID_MASTER_MDP0 ICBID_MASTER_MDP 497 + #define ICBID_MASTER_MDPS ICBID_MASTER_MDP 498 + #define ICBID_MASTER_VIDEO 9 499 + #define ICBID_MASTER_VIDEO_P0 ICBID_MASTER_VIDEO 500 + #define ICBID_MASTER_VIDEO_P1 10 501 + #define ICBID_MASTER_VFE 11 502 + #define ICBID_MASTER_VFE0 ICBID_MASTER_VFE 503 + #define ICBID_MASTER_CNOC_ONOC_CFG 12 504 + #define ICBID_MASTER_JPEG_OCMEM 13 505 + #define ICBID_MASTER_MDP_OCMEM 14 506 + #define ICBID_MASTER_VIDEO_P0_OCMEM 15 507 + #define ICBID_MASTER_VIDEO_P1_OCMEM 16 508 + #define ICBID_MASTER_VFE_OCMEM 17 509 + #define ICBID_MASTER_LPASS_AHB 18 510 + #define ICBID_MASTER_QDSS_BAM 19 511 + #define ICBID_MASTER_SNOC_CFG 20 512 + #define ICBID_MASTER_BIMC_SNOC 21 513 + #define ICBID_MASTER_BIMC_SNOC_0 ICBID_MASTER_BIMC_SNOC 514 + #define ICBID_MASTER_CNOC_SNOC 22 515 + #define ICBID_MASTER_CRYPTO 23 516 + #define ICBID_MASTER_CRYPTO_CORE0 ICBID_MASTER_CRYPTO 517 + #define ICBID_MASTER_CRYPTO_CORE1 24 518 + #define ICBID_MASTER_LPASS_PROC 25 519 + #define ICBID_MASTER_MSS 26 520 + #define ICBID_MASTER_MSS_NAV 27 521 + #define ICBID_MASTER_OCMEM_DMA 28 522 + #define ICBID_MASTER_PNOC_SNOC 29 523 + #define ICBID_MASTER_WCSS 30 524 + #define ICBID_MASTER_QDSS_ETR 31 525 + #define ICBID_MASTER_USB3 32 526 + #define ICBID_MASTER_USB3_0 ICBID_MASTER_USB3 527 + #define ICBID_MASTER_SDCC_1 33 528 + #define ICBID_MASTER_SDCC_3 34 529 + #define ICBID_MASTER_SDCC_2 35 530 + #define ICBID_MASTER_SDCC_4 36 531 + #define ICBID_MASTER_TSIF 37 532 + #define ICBID_MASTER_BAM_DMA 38 533 + #define ICBID_MASTER_BLSP_2 39 534 + #define 
ICBID_MASTER_USB_HSIC 40 535 + #define ICBID_MASTER_BLSP_1 41 536 + #define ICBID_MASTER_USB_HS 42 537 + #define ICBID_MASTER_USB_HS1 ICBID_MASTER_USB_HS 538 + #define ICBID_MASTER_PNOC_CFG 43 539 + #define ICBID_MASTER_SNOC_PNOC 44 540 + #define ICBID_MASTER_RPM_INST 45 541 + #define ICBID_MASTER_RPM_DATA 46 542 + #define ICBID_MASTER_RPM_SYS 47 543 + #define ICBID_MASTER_DEHR 48 544 + #define ICBID_MASTER_QDSS_DAP 49 545 + #define ICBID_MASTER_SPDM 50 546 + #define ICBID_MASTER_TIC 51 547 + #define ICBID_MASTER_SNOC_CNOC 52 548 + #define ICBID_MASTER_GFX3D_OCMEM 53 549 + #define ICBID_MASTER_GFX3D_GMEM ICBID_MASTER_GFX3D_OCMEM 550 + #define ICBID_MASTER_OVIRT_SNOC 54 551 + #define ICBID_MASTER_SNOC_OVIRT 55 552 + #define ICBID_MASTER_SNOC_GVIRT ICBID_MASTER_SNOC_OVIRT 553 + #define ICBID_MASTER_ONOC_OVIRT 56 554 + #define ICBID_MASTER_USB_HS2 57 555 + #define ICBID_MASTER_QPIC 58 556 + #define ICBID_MASTER_IPA 59 557 + #define ICBID_MASTER_DSI 60 558 + #define ICBID_MASTER_MDP1 61 559 + #define ICBID_MASTER_MDPE ICBID_MASTER_MDP1 560 + #define ICBID_MASTER_VPU_PROC 62 561 + #define ICBID_MASTER_VPU 63 562 + #define ICBID_MASTER_VPU0 ICBID_MASTER_VPU 563 + #define ICBID_MASTER_CRYPTO_CORE2 64 564 + #define ICBID_MASTER_PCIE_0 65 565 + #define ICBID_MASTER_PCIE_1 66 566 + #define ICBID_MASTER_SATA 67 567 + #define ICBID_MASTER_UFS 68 568 + #define ICBID_MASTER_USB3_1 69 569 + #define ICBID_MASTER_VIDEO_OCMEM 70 570 + #define ICBID_MASTER_VPU1 71 571 + #define ICBID_MASTER_VCAP 72 572 + #define ICBID_MASTER_EMAC 73 573 + #define ICBID_MASTER_BCAST 74 574 + #define ICBID_MASTER_MMSS_PROC 75 575 + #define ICBID_MASTER_SNOC_BIMC_1 76 576 + #define ICBID_MASTER_SNOC_PCNOC 77 577 + #define ICBID_MASTER_AUDIO 78 578 + #define ICBID_MASTER_MM_INT_0 79 579 + #define ICBID_MASTER_MM_INT_1 80 580 + #define ICBID_MASTER_MM_INT_2 81 581 + #define ICBID_MASTER_MM_INT_BIMC 82 582 + #define ICBID_MASTER_MSS_INT 83 583 + #define ICBID_MASTER_PCNOC_CFG 84 584 + #define 
ICBID_MASTER_PCNOC_INT_0 85 585 + #define ICBID_MASTER_PCNOC_INT_1 86 586 + #define ICBID_MASTER_PCNOC_M_0 87 587 + #define ICBID_MASTER_PCNOC_M_1 88 588 + #define ICBID_MASTER_PCNOC_S_0 89 589 + #define ICBID_MASTER_PCNOC_S_1 90 590 + #define ICBID_MASTER_PCNOC_S_2 91 591 + #define ICBID_MASTER_PCNOC_S_3 92 592 + #define ICBID_MASTER_PCNOC_S_4 93 593 + #define ICBID_MASTER_PCNOC_S_6 94 594 + #define ICBID_MASTER_PCNOC_S_7 95 595 + #define ICBID_MASTER_PCNOC_S_8 96 596 + #define ICBID_MASTER_PCNOC_S_9 97 597 + #define ICBID_MASTER_QDSS_INT 98 598 + #define ICBID_MASTER_SNOC_INT_0 99 599 + #define ICBID_MASTER_SNOC_INT_1 100 600 + #define ICBID_MASTER_SNOC_INT_BIMC 101 601 + #define ICBID_MASTER_TCU_0 102 602 + #define ICBID_MASTER_TCU_1 103 603 + #define ICBID_MASTER_BIMC_INT_0 104 604 + #define ICBID_MASTER_BIMC_INT_1 105 605 + #define ICBID_MASTER_CAMERA 106 606 + #define ICBID_MASTER_RICA 107 607 + #define ICBID_MASTER_SNOC_BIMC_2 108 608 + #define ICBID_MASTER_BIMC_SNOC_1 109 609 + #define ICBID_MASTER_A0NOC_SNOC 110 610 + #define ICBID_MASTER_A1NOC_SNOC 111 611 + #define ICBID_MASTER_A2NOC_SNOC 112 612 + #define ICBID_MASTER_PIMEM 113 613 + #define ICBID_MASTER_SNOC_VMEM 114 614 + #define ICBID_MASTER_CPP 115 615 + #define ICBID_MASTER_CNOC_A1NOC 116 616 + #define ICBID_MASTER_PNOC_A1NOC 117 617 + #define ICBID_MASTER_HMSS 118 618 + #define ICBID_MASTER_PCIE_2 119 619 + #define ICBID_MASTER_ROTATOR 120 620 + #define ICBID_MASTER_VENUS_VMEM 121 621 + #define ICBID_MASTER_DCC 122 622 + #define ICBID_MASTER_MCDMA 123 623 + #define ICBID_MASTER_PCNOC_INT_2 124 624 + #define ICBID_MASTER_PCNOC_INT_3 125 625 + #define ICBID_MASTER_PCNOC_INT_4 126 626 + #define ICBID_MASTER_PCNOC_INT_5 127 627 + #define ICBID_MASTER_PCNOC_INT_6 128 628 + #define ICBID_MASTER_PCNOC_S_5 129 629 + #define ICBID_MASTER_SENSORS_AHB 130 630 + #define ICBID_MASTER_SENSORS_PROC 131 631 + #define ICBID_MASTER_QSPI 132 632 + #define ICBID_MASTER_VFE1 133 633 + #define ICBID_MASTER_SNOC_INT_2 
134 634 + #define ICBID_MASTER_SMMNOC_BIMC 135 635 + #define ICBID_MASTER_CRVIRT_A1NOC 136 636 + #define ICBID_MASTER_XM_USB_HS1 137 637 + #define ICBID_MASTER_XI_USB_HS1 138 638 + #define ICBID_MASTER_PCNOC_BIMC_1 139 639 + #define ICBID_MASTER_BIMC_PCNOC 140 640 + #define ICBID_MASTER_XI_HSIC 141 641 + #define ICBID_MASTER_SGMII 142 642 + #define ICBID_MASTER_SPMI_FETCHER 143 643 + #define ICBID_MASTER_GNOC_BIMC 144 644 + #define ICBID_MASTER_CRVIRT_A2NOC 145 645 + #define ICBID_MASTER_CNOC_A2NOC 146 646 + #define ICBID_MASTER_WLAN 147 647 + #define ICBID_MASTER_MSS_CE 148 648 + #define ICBID_MASTER_CDSP_PROC 149 649 + #define ICBID_MASTER_GNOC_SNOC 150 650 + 651 + #define ICBID_SLAVE_EBI1 0 652 + #define ICBID_SLAVE_APPSS_L2 1 653 + #define ICBID_SLAVE_BIMC_SNOC 2 654 + #define ICBID_SLAVE_BIMC_SNOC_0 ICBID_SLAVE_BIMC_SNOC 655 + #define ICBID_SLAVE_CAMERA_CFG 3 656 + #define ICBID_SLAVE_DISPLAY_CFG 4 657 + #define ICBID_SLAVE_OCMEM_CFG 5 658 + #define ICBID_SLAVE_CPR_CFG 6 659 + #define ICBID_SLAVE_CPR_XPU_CFG 7 660 + #define ICBID_SLAVE_MISC_CFG 8 661 + #define ICBID_SLAVE_MISC_XPU_CFG 9 662 + #define ICBID_SLAVE_VENUS_CFG 10 663 + #define ICBID_SLAVE_GFX3D_CFG 11 664 + #define ICBID_SLAVE_MMSS_CLK_CFG 12 665 + #define ICBID_SLAVE_MMSS_CLK_XPU_CFG 13 666 + #define ICBID_SLAVE_MNOC_MPU_CFG 14 667 + #define ICBID_SLAVE_ONOC_MPU_CFG 15 668 + #define ICBID_SLAVE_MNOC_BIMC 16 669 + #define ICBID_SLAVE_SERVICE_MNOC 17 670 + #define ICBID_SLAVE_OCMEM 18 671 + #define ICBID_SLAVE_GMEM ICBID_SLAVE_OCMEM 672 + #define ICBID_SLAVE_SERVICE_ONOC 19 673 + #define ICBID_SLAVE_APPSS 20 674 + #define ICBID_SLAVE_LPASS 21 675 + #define ICBID_SLAVE_USB3 22 676 + #define ICBID_SLAVE_USB3_0 ICBID_SLAVE_USB3 677 + #define ICBID_SLAVE_WCSS 23 678 + #define ICBID_SLAVE_SNOC_BIMC 24 679 + #define ICBID_SLAVE_SNOC_BIMC_0 ICBID_SLAVE_SNOC_BIMC 680 + #define ICBID_SLAVE_SNOC_CNOC 25 681 + #define ICBID_SLAVE_IMEM 26 682 + #define ICBID_SLAVE_OCIMEM ICBID_SLAVE_IMEM 683 + #define 
ICBID_SLAVE_SNOC_OVIRT 27 684 + #define ICBID_SLAVE_SNOC_GVIRT ICBID_SLAVE_SNOC_OVIRT 685 + #define ICBID_SLAVE_SNOC_PNOC 28 686 + #define ICBID_SLAVE_SNOC_PCNOC ICBID_SLAVE_SNOC_PNOC 687 + #define ICBID_SLAVE_SERVICE_SNOC 29 688 + #define ICBID_SLAVE_QDSS_STM 30 689 + #define ICBID_SLAVE_SDCC_1 31 690 + #define ICBID_SLAVE_SDCC_3 32 691 + #define ICBID_SLAVE_SDCC_2 33 692 + #define ICBID_SLAVE_SDCC_4 34 693 + #define ICBID_SLAVE_TSIF 35 694 + #define ICBID_SLAVE_BAM_DMA 36 695 + #define ICBID_SLAVE_BLSP_2 37 696 + #define ICBID_SLAVE_USB_HSIC 38 697 + #define ICBID_SLAVE_BLSP_1 39 698 + #define ICBID_SLAVE_USB_HS 40 699 + #define ICBID_SLAVE_USB_HS1 ICBID_SLAVE_USB_HS 700 + #define ICBID_SLAVE_PDM 41 701 + #define ICBID_SLAVE_PERIPH_APU_CFG 42 702 + #define ICBID_SLAVE_PNOC_MPU_CFG 43 703 + #define ICBID_SLAVE_PRNG 44 704 + #define ICBID_SLAVE_PNOC_SNOC 45 705 + #define ICBID_SLAVE_PCNOC_SNOC ICBID_SLAVE_PNOC_SNOC 706 + #define ICBID_SLAVE_SERVICE_PNOC 46 707 + #define ICBID_SLAVE_CLK_CTL 47 708 + #define ICBID_SLAVE_CNOC_MSS 48 709 + #define ICBID_SLAVE_PCNOC_MSS ICBID_SLAVE_CNOC_MSS 710 + #define ICBID_SLAVE_SECURITY 49 711 + #define ICBID_SLAVE_TCSR 50 712 + #define ICBID_SLAVE_TLMM 51 713 + #define ICBID_SLAVE_CRYPTO_0_CFG 52 714 + #define ICBID_SLAVE_CRYPTO_1_CFG 53 715 + #define ICBID_SLAVE_IMEM_CFG 54 716 + #define ICBID_SLAVE_MESSAGE_RAM 55 717 + #define ICBID_SLAVE_BIMC_CFG 56 718 + #define ICBID_SLAVE_BOOT_ROM 57 719 + #define ICBID_SLAVE_CNOC_MNOC_MMSS_CFG 58 720 + #define ICBID_SLAVE_PMIC_ARB 59 721 + #define ICBID_SLAVE_SPDM_WRAPPER 60 722 + #define ICBID_SLAVE_DEHR_CFG 61 723 + #define ICBID_SLAVE_MPM 62 724 + #define ICBID_SLAVE_QDSS_CFG 63 725 + #define ICBID_SLAVE_RBCPR_CFG 64 726 + #define ICBID_SLAVE_RBCPR_CX_CFG ICBID_SLAVE_RBCPR_CFG 727 + #define ICBID_SLAVE_RBCPR_QDSS_APU_CFG 65 728 + #define ICBID_SLAVE_CNOC_MNOC_CFG 66 729 + #define ICBID_SLAVE_SNOC_MPU_CFG 67 730 + #define ICBID_SLAVE_CNOC_ONOC_CFG 68 731 + #define ICBID_SLAVE_PNOC_CFG 69 
732 + #define ICBID_SLAVE_SNOC_CFG 70 733 + #define ICBID_SLAVE_EBI1_DLL_CFG 71 734 + #define ICBID_SLAVE_PHY_APU_CFG 72 735 + #define ICBID_SLAVE_EBI1_PHY_CFG 73 736 + #define ICBID_SLAVE_RPM 74 737 + #define ICBID_SLAVE_CNOC_SNOC 75 738 + #define ICBID_SLAVE_SERVICE_CNOC 76 739 + #define ICBID_SLAVE_OVIRT_SNOC 77 740 + #define ICBID_SLAVE_OVIRT_OCMEM 78 741 + #define ICBID_SLAVE_USB_HS2 79 742 + #define ICBID_SLAVE_QPIC 80 743 + #define ICBID_SLAVE_IPS_CFG 81 744 + #define ICBID_SLAVE_DSI_CFG 82 745 + #define ICBID_SLAVE_USB3_1 83 746 + #define ICBID_SLAVE_PCIE_0 84 747 + #define ICBID_SLAVE_PCIE_1 85 748 + #define ICBID_SLAVE_PSS_SMMU_CFG 86 749 + #define ICBID_SLAVE_CRYPTO_2_CFG 87 750 + #define ICBID_SLAVE_PCIE_0_CFG 88 751 + #define ICBID_SLAVE_PCIE_1_CFG 89 752 + #define ICBID_SLAVE_SATA_CFG 90 753 + #define ICBID_SLAVE_SPSS_GENI_IR 91 754 + #define ICBID_SLAVE_UFS_CFG 92 755 + #define ICBID_SLAVE_AVSYNC_CFG 93 756 + #define ICBID_SLAVE_VPU_CFG 94 757 + #define ICBID_SLAVE_USB_PHY_CFG 95 758 + #define ICBID_SLAVE_RBCPR_MX_CFG 96 759 + #define ICBID_SLAVE_PCIE_PARF 97 760 + #define ICBID_SLAVE_VCAP_CFG 98 761 + #define ICBID_SLAVE_EMAC_CFG 99 762 + #define ICBID_SLAVE_BCAST_CFG 100 763 + #define ICBID_SLAVE_KLM_CFG 101 764 + #define ICBID_SLAVE_DISPLAY_PWM 102 765 + #define ICBID_SLAVE_GENI 103 766 + #define ICBID_SLAVE_SNOC_BIMC_1 104 767 + #define ICBID_SLAVE_AUDIO 105 768 + #define ICBID_SLAVE_CATS_0 106 769 + #define ICBID_SLAVE_CATS_1 107 770 + #define ICBID_SLAVE_MM_INT_0 108 771 + #define ICBID_SLAVE_MM_INT_1 109 772 + #define ICBID_SLAVE_MM_INT_2 110 773 + #define ICBID_SLAVE_MM_INT_BIMC 111 774 + #define ICBID_SLAVE_MMU_MODEM_XPU_CFG 112 775 + #define ICBID_SLAVE_MSS_INT 113 776 + #define ICBID_SLAVE_PCNOC_INT_0 114 777 + #define ICBID_SLAVE_PCNOC_INT_1 115 778 + #define ICBID_SLAVE_PCNOC_M_0 116 779 + #define ICBID_SLAVE_PCNOC_M_1 117 780 + #define ICBID_SLAVE_PCNOC_S_0 118 781 + #define ICBID_SLAVE_PCNOC_S_1 119 782 + #define ICBID_SLAVE_PCNOC_S_2 
120 783 + #define ICBID_SLAVE_PCNOC_S_3 121 784 + #define ICBID_SLAVE_PCNOC_S_4 122 785 + #define ICBID_SLAVE_PCNOC_S_6 123 786 + #define ICBID_SLAVE_PCNOC_S_7 124 787 + #define ICBID_SLAVE_PCNOC_S_8 125 788 + #define ICBID_SLAVE_PCNOC_S_9 126 789 + #define ICBID_SLAVE_PRNG_XPU_CFG 127 790 + #define ICBID_SLAVE_QDSS_INT 128 791 + #define ICBID_SLAVE_RPM_XPU_CFG 129 792 + #define ICBID_SLAVE_SNOC_INT_0 130 793 + #define ICBID_SLAVE_SNOC_INT_1 131 794 + #define ICBID_SLAVE_SNOC_INT_BIMC 132 795 + #define ICBID_SLAVE_TCU 133 796 + #define ICBID_SLAVE_BIMC_INT_0 134 797 + #define ICBID_SLAVE_BIMC_INT_1 135 798 + #define ICBID_SLAVE_RICA_CFG 136 799 + #define ICBID_SLAVE_SNOC_BIMC_2 137 800 + #define ICBID_SLAVE_BIMC_SNOC_1 138 801 + #define ICBID_SLAVE_PNOC_A1NOC 139 802 + #define ICBID_SLAVE_SNOC_VMEM 140 803 + #define ICBID_SLAVE_A0NOC_SNOC 141 804 + #define ICBID_SLAVE_A1NOC_SNOC 142 805 + #define ICBID_SLAVE_A2NOC_SNOC 143 806 + #define ICBID_SLAVE_A0NOC_CFG 144 807 + #define ICBID_SLAVE_A0NOC_MPU_CFG 145 808 + #define ICBID_SLAVE_A0NOC_SMMU_CFG 146 809 + #define ICBID_SLAVE_A1NOC_CFG 147 810 + #define ICBID_SLAVE_A1NOC_MPU_CFG 148 811 + #define ICBID_SLAVE_A1NOC_SMMU_CFG 149 812 + #define ICBID_SLAVE_A2NOC_CFG 150 813 + #define ICBID_SLAVE_A2NOC_MPU_CFG 151 814 + #define ICBID_SLAVE_A2NOC_SMMU_CFG 152 815 + #define ICBID_SLAVE_AHB2PHY 153 816 + #define ICBID_SLAVE_CAMERA_THROTTLE_CFG 154 817 + #define ICBID_SLAVE_DCC_CFG 155 818 + #define ICBID_SLAVE_DISPLAY_THROTTLE_CFG 156 819 + #define ICBID_SLAVE_DSA_CFG 157 820 + #define ICBID_SLAVE_DSA_MPU_CFG 158 821 + #define ICBID_SLAVE_SSC_MPU_CFG 159 822 + #define ICBID_SLAVE_HMSS_L3 160 823 + #define ICBID_SLAVE_LPASS_SMMU_CFG 161 824 + #define ICBID_SLAVE_MMAGIC_CFG 162 825 + #define ICBID_SLAVE_PCIE20_AHB2PHY 163 826 + #define ICBID_SLAVE_PCIE_2 164 827 + #define ICBID_SLAVE_PCIE_2_CFG 165 828 + #define ICBID_SLAVE_PIMEM 166 829 + #define ICBID_SLAVE_PIMEM_CFG 167 830 + #define ICBID_SLAVE_QDSS_RBCPR_APU_CFG 168 831 
+ #define ICBID_SLAVE_RBCPR_CX 169 832 + #define ICBID_SLAVE_RBCPR_MX 170 833 + #define ICBID_SLAVE_SMMU_CPP_CFG 171 834 + #define ICBID_SLAVE_SMMU_JPEG_CFG 172 835 + #define ICBID_SLAVE_SMMU_MDP_CFG 173 836 + #define ICBID_SLAVE_SMMU_ROTATOR_CFG 174 837 + #define ICBID_SLAVE_SMMU_VENUS_CFG 175 838 + #define ICBID_SLAVE_SMMU_VFE_CFG 176 839 + #define ICBID_SLAVE_SSC_CFG 177 840 + #define ICBID_SLAVE_VENUS_THROTTLE_CFG 178 841 + #define ICBID_SLAVE_VMEM 179 842 + #define ICBID_SLAVE_VMEM_CFG 180 843 + #define ICBID_SLAVE_QDSS_MPU_CFG 181 844 + #define ICBID_SLAVE_USB3_PHY_CFG 182 845 + #define ICBID_SLAVE_IPA_CFG 183 846 + #define ICBID_SLAVE_PCNOC_INT_2 184 847 + #define ICBID_SLAVE_PCNOC_INT_3 185 848 + #define ICBID_SLAVE_PCNOC_INT_4 186 849 + #define ICBID_SLAVE_PCNOC_INT_5 187 850 + #define ICBID_SLAVE_PCNOC_INT_6 188 851 + #define ICBID_SLAVE_PCNOC_S_5 189 852 + #define ICBID_SLAVE_QSPI 190 853 + #define ICBID_SLAVE_A1NOC_MS_MPU_CFG 191 854 + #define ICBID_SLAVE_A2NOC_MS_MPU_CFG 192 855 + #define ICBID_SLAVE_MODEM_Q6_SMMU_CFG 193 856 + #define ICBID_SLAVE_MSS_MPU_CFG 194 857 + #define ICBID_SLAVE_MSS_PROC_MS_MPU_CFG 195 858 + #define ICBID_SLAVE_SKL 196 859 + #define ICBID_SLAVE_SNOC_INT_2 197 860 + #define ICBID_SLAVE_SMMNOC_BIMC 198 861 + #define ICBID_SLAVE_CRVIRT_A1NOC 199 862 + #define ICBID_SLAVE_SGMII 200 863 + #define ICBID_SLAVE_QHS4_APPS 201 864 + #define ICBID_SLAVE_BIMC_PCNOC 202 865 + #define ICBID_SLAVE_PCNOC_BIMC_1 203 866 + #define ICBID_SLAVE_SPMI_FETCHER 204 867 + #define ICBID_SLAVE_MMSS_SMMU_CFG 205 868 + #define ICBID_SLAVE_WLAN 206 869 + #define ICBID_SLAVE_CRVIRT_A2NOC 207 870 + #define ICBID_SLAVE_CNOC_A2NOC 208 871 + #define ICBID_SLAVE_GLM 209 872 + #define ICBID_SLAVE_GNOC_BIMC 210 873 + #define ICBID_SLAVE_GNOC_SNOC 211 874 + #define ICBID_SLAVE_QM_CFG 212 875 + #define ICBID_SLAVE_TLMM_EAST 213 876 + #define ICBID_SLAVE_TLMM_NORTH 214 877 + #define ICBID_SLAVE_TLMM_WEST 215 878 + #define ICBID_SLAVE_LPASS_TCM 216 879 + #define 
ICBID_SLAVE_TLMM_SOUTH 217 880 + #define ICBID_SLAVE_TLMM_CENTER 218 881 + #define ICBID_SLAVE_MSS_NAV_CE_MPU_CFG 219 882 + #define ICBID_SLAVE_A2NOC_THROTTLE_CFG 220 883 + #define ICBID_SLAVE_CDSP 221 884 + #define ICBID_SLAVE_CDSP_SMMU_CFG 222 885 + #define ICBID_SLAVE_LPASS_MPU_CFG 223 886 + #define ICBID_SLAVE_CSI_PHY_CFG 224 887 + #endif
+1
include/uapi/drm/msm_drm.h
··· 232 232 __u32 fence; /* in */ 233 233 __u32 pad; 234 234 struct drm_msm_timespec timeout; /* in */ 235 + __u32 queueid; /* in, submitqueue id */ 235 236 }; 236 237 237 238 /* madvise provides a way to tell the kernel in case a buffers contents