Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

drm/amdgpu: Modify unmap_queue format for gfx9 (v6)

1. Modify the unmap_queue packet on gfx9. Add a trailing fence to track
   preemption completion.
2. Modify the emit_ce_meta and emit_de_meta functions for resumed IBs.

v2: Restyle code not to use ternary operator.
v3: Modify code format.
v4: Enable Mid-Command Buffer Preemption for gfx9 by default.
v5: Optimize the flag bit set for emit_fence.
v6: Modify log message for preemption timeout.
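The core of item 1 is a CPU-side poll on a trailing fence that the CP writes once
preemption completes. Below is a minimal host-side sketch of that handshake; the
names trail_fence_mem, mock_gpu_signal and poll_trailing_fence are illustrative
stand-ins, not driver API (in the driver, the CP writes ring->trail_seq to
ring->trail_fence_cpu_addr and gfx_v9_0_ring_preempt_ib() polls it):

/*
 * Sketch of the trailing-fence handshake, assuming a host-side mock of
 * the fence memory instead of real GPU-visible memory.
 */
#include <stdint.h>
#include <stdio.h>

#define USEC_TIMEOUT 100000	/* stand-in for adev->usec_timeout */

static volatile uint32_t trail_fence_mem;	/* ~ ring->trail_fence_cpu_addr */

static void mock_gpu_signal(uint32_t seq)
{
	trail_fence_mem = seq;	/* the CP would write this via RELEASE_MEM */
}

static int poll_trailing_fence(uint32_t trail_seq)
{
	int i;

	for (i = 0; i < USEC_TIMEOUT; i++) {
		if (trail_fence_mem == trail_seq)
			return 0;	/* preemption completed */
		/* the kernel code udelay(1)s per iteration; this mock just spins */
	}
	return -1;			/* timed out */
}

int main(void)
{
	uint32_t trail_seq = 1;

	mock_gpu_signal(trail_seq);
	printf("preempt %s\n", poll_trailing_fence(trail_seq) ? "timeout" : "done");
	return 0;
}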

Cc: Christian Koenig <Christian.Koenig@amd.com>
Cc: Michel Dänzer <michel@daenzer.net>
Cc: Luben Tuikov <Luben.Tuikov@amd.com>
Signed-off-by: Jiadong.Zhu <Jiadong.Zhu@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Acked-by: Huang Rui <ray.huang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Jiadong.Zhu, committed by Alex Deucher
be254550 0c97a19a

3 files changed, +156 -29

drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h  +1

···
 #define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
 #define AMDGPU_FENCE_FLAG_INT (1 << 1)
 #define AMDGPU_FENCE_FLAG_TC_WB_ONLY (1 << 2)
+#define AMDGPU_FENCE_FLAG_EXEC (1 << 3)
 
 #define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c  +153 -29

···
 static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
 				struct amdgpu_cu_info *cu_info);
 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
-static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
+static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
 static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
 					   void *ras_error_status);
···
 			   PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
 
 	if (action == PREEMPT_QUEUES_NO_UNMAP) {
-		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
-		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
-		amdgpu_ring_write(kiq_ring, seq);
+		amdgpu_ring_write(kiq_ring, lower_32_bits(ring->wptr & ring->buf_mask));
+		amdgpu_ring_write(kiq_ring, 0);
+		amdgpu_ring_write(kiq_ring, 0);
+
 	} else {
 		amdgpu_ring_write(kiq_ring, 0);
 		amdgpu_ring_write(kiq_ring, 0);
···
 
 	control |= ib->length_dw | (vmid << 24);
 
-	if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
+	if (ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
 		control |= INDIRECT_BUFFER_PRE_ENB(1);
 
+		if (flags & AMDGPU_IB_PREEMPTED)
+			control |= INDIRECT_BUFFER_PRE_RESUME(1);
+
 		if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
-			gfx_v9_0_ring_emit_de_meta(ring);
+			gfx_v9_0_ring_emit_de_meta(ring,
+						   (!amdgpu_sriov_vf(ring->adev) &&
+						    flags & AMDGPU_IB_PREEMPTED) ?
+						   true : false);
 	}
 
 	amdgpu_ring_write(ring, header);
···
 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
 	bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
+	bool exec = flags & AMDGPU_FENCE_FLAG_EXEC;
+	uint32_t dw2 = 0;
 
 	/* RELEASE_MEM - flush caches, send int */
 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
-	amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
-					       EOP_TC_NC_ACTION_EN) :
-					      (EOP_TCL1_ACTION_EN |
-					       EOP_TC_ACTION_EN |
-					       EOP_TC_WB_ACTION_EN |
-					       EOP_TC_MD_ACTION_EN)) |
-				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
-				 EVENT_INDEX(5)));
+
+	if (writeback) {
+		dw2 = EOP_TC_NC_ACTION_EN;
+	} else {
+		dw2 = EOP_TCL1_ACTION_EN | EOP_TC_ACTION_EN |
+		      EOP_TC_MD_ACTION_EN;
+	}
+	dw2 |= EOP_TC_WB_ACTION_EN | EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
+	       EVENT_INDEX(5);
+	if (exec)
+		dw2 |= EOP_EXEC;
+
+	amdgpu_ring_write(ring, dw2);
 	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
 
 	/*
···
 	amdgpu_ring_write(ring, 0);
 }
 
-static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
+static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume)
 {
+	struct amdgpu_device *adev = ring->adev;
 	struct v9_ce_ib_state ce_payload = {0};
-	uint64_t csa_addr;
+	uint64_t offset, ce_payload_gpu_addr;
+	void *ce_payload_cpu_addr;
 	int cnt;
 
 	cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
-	csa_addr = amdgpu_csa_vaddr(ring->adev);
+
+	if (ring->is_mes_queue) {
+		offset = offsetof(struct amdgpu_mes_ctx_meta_data,
+				  gfx[0].gfx_meta_data) +
+			offsetof(struct v9_gfx_meta_data, ce_payload);
+		ce_payload_gpu_addr =
+			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
+		ce_payload_cpu_addr =
+			amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
+	} else {
+		offset = offsetof(struct v9_gfx_meta_data, ce_payload);
+		ce_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
+		ce_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
+	}
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
 				 WRITE_DATA_DST_SEL(8) |
 				 WR_CONFIRM) |
 				 WRITE_DATA_CACHE_POLICY(0));
-	amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
-	amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
-	amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2);
+	amdgpu_ring_write(ring, lower_32_bits(ce_payload_gpu_addr));
+	amdgpu_ring_write(ring, upper_32_bits(ce_payload_gpu_addr));
+
+	if (resume)
+		amdgpu_ring_write_multiple(ring, ce_payload_cpu_addr,
+					   sizeof(ce_payload) >> 2);
+	else
+		amdgpu_ring_write_multiple(ring, (void *)&ce_payload,
+					   sizeof(ce_payload) >> 2);
 }
 
-static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
+static int gfx_v9_0_ring_preempt_ib(struct amdgpu_ring *ring)
 {
+	int i, r = 0;
+	struct amdgpu_device *adev = ring->adev;
+	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+	struct amdgpu_ring *kiq_ring = &kiq->ring;
+	unsigned long flags;
+
+	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+		return -EINVAL;
+
+	spin_lock_irqsave(&kiq->ring_lock, flags);
+
+	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
+		spin_unlock_irqrestore(&kiq->ring_lock, flags);
+		return -ENOMEM;
+	}
+
+	/* assert preemption condition */
+	amdgpu_ring_set_preempt_cond_exec(ring, false);
+
+	ring->trail_seq += 1;
+	amdgpu_ring_alloc(ring, 13);
+	gfx_v9_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
+				 ring->trail_seq, AMDGPU_FENCE_FLAG_EXEC);
+	/* reset the CP_VMID_PREEMPT after trailing fence */
+	amdgpu_ring_emit_wreg(ring,
+			      SOC15_REG_OFFSET(GC, 0, mmCP_VMID_PREEMPT),
+			      0x0);
+
+	/* assert IB preemption, emit the trailing fence */
+	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
+				   ring->trail_fence_gpu_addr,
+				   ring->trail_seq);
+
+	amdgpu_ring_commit(kiq_ring);
+	spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+	/* poll the trailing fence */
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (ring->trail_seq ==
+		    le32_to_cpu(*ring->trail_fence_cpu_addr))
+			break;
+		udelay(1);
+	}
+
+	if (i >= adev->usec_timeout) {
+		r = -EINVAL;
+		DRM_WARN("ring %d timeout to preempt ib\n", ring->idx);
+	}
+
+	amdgpu_ring_commit(ring);
+
+	/* deassert preemption condition */
+	amdgpu_ring_set_preempt_cond_exec(ring, true);
+	return r;
+}
+
+static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
+{
+	struct amdgpu_device *adev = ring->adev;
 	struct v9_de_ib_state de_payload = {0};
-	uint64_t csa_addr, gds_addr;
+	uint64_t offset, gds_addr, de_payload_gpu_addr;
+	void *de_payload_cpu_addr;
 	int cnt;
 
-	csa_addr = amdgpu_csa_vaddr(ring->adev);
-	gds_addr = csa_addr + 4096;
+	if (ring->is_mes_queue) {
+		offset = offsetof(struct amdgpu_mes_ctx_meta_data,
+				  gfx[0].gfx_meta_data) +
+			offsetof(struct v9_gfx_meta_data, de_payload);
+		de_payload_gpu_addr =
+			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
+		de_payload_cpu_addr =
+			amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
+
+		offset = offsetof(struct amdgpu_mes_ctx_meta_data,
+				  gfx[0].gds_backup) +
+			offsetof(struct v9_gfx_meta_data, de_payload);
+		gds_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
+	} else {
+		offset = offsetof(struct v9_gfx_meta_data, de_payload);
+		de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
+		de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
+
+		gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
+				 AMDGPU_CSA_SIZE - adev->gds.gds_size,
+				 PAGE_SIZE);
+	}
+
 	de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
 	de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
···
 				 WRITE_DATA_DST_SEL(8) |
 				 WR_CONFIRM) |
 				 WRITE_DATA_CACHE_POLICY(0));
-	amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
-	amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
-	amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
+	amdgpu_ring_write(ring, lower_32_bits(de_payload_gpu_addr));
+	amdgpu_ring_write(ring, upper_32_bits(de_payload_gpu_addr));
+
+	if (resume)
+		amdgpu_ring_write_multiple(ring, de_payload_cpu_addr,
+					   sizeof(de_payload) >> 2);
+	else
+		amdgpu_ring_write_multiple(ring, (void *)&de_payload,
+					   sizeof(de_payload) >> 2);
 }
 
 static void gfx_v9_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
···
 {
 	uint32_t dw2 = 0;
 
-	if (amdgpu_sriov_vf(ring->adev))
-		gfx_v9_0_ring_emit_ce_meta(ring);
+	gfx_v9_0_ring_emit_ce_meta(ring,
+				   (!amdgpu_sriov_vf(ring->adev) &&
+				    flags & AMDGPU_IB_PREEMPTED) ? true : false);
 
 	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
 	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
···
 	.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
 	.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
 	.patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
+	.preempt_ib = gfx_v9_0_ring_preempt_ib,
 	.emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
 	.emit_wreg = gfx_v9_0_ring_emit_wreg,
 	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
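The new .preempt_ib entry is reached through the ring-funcs table; amdgpu wraps
that dispatch in the amdgpu_ring_preempt_ib() macro in amdgpu_ring.h. Below is a
minimal sketch of the dispatch pattern with simplified stand-in types, not the
real amdgpu structs:

/*
 * Sketch of the function-pointer dispatch that .preempt_ib plugs into;
 * struct ring and struct ring_funcs are simplified stand-ins for
 * amdgpu_ring and amdgpu_ring_funcs.
 */
#include <stdio.h>

struct ring;

struct ring_funcs {
	int (*preempt_ib)(struct ring *ring);	/* new callback */
};

struct ring {
	const struct ring_funcs *funcs;
	int idx;
};

static int gfx_v9_0_ring_preempt_ib_stub(struct ring *ring)
{
	printf("preempting ring %d\n", ring->idx);
	return 0;
}

static const struct ring_funcs gfx_v9_0_ring_funcs = {
	.preempt_ib = gfx_v9_0_ring_preempt_ib_stub,
};

/* mirrors the amdgpu_ring_preempt_ib() wrapper macro */
#define ring_preempt_ib(r) ((r)->funcs->preempt_ib(r))

int main(void)
{
	struct ring gfx = { .funcs = &gfx_v9_0_ring_funcs, .idx = 0 };

	return ring_preempt_ib(&gfx);
}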
drivers/gpu/drm/amd/amdgpu/soc15d.h  +2

···
  * 2 - Bypass
  */
 #define INDIRECT_BUFFER_PRE_ENB(x) ((x) << 21)
+#define INDIRECT_BUFFER_PRE_RESUME(x) ((x) << 30)
 #define PACKET3_COPY_DATA 0x40
 #define PACKET3_PFP_SYNC_ME 0x42
 #define PACKET3_COND_WRITE 0x45
···
 #define EOP_TC_ACTION_EN (1 << 17) /* L2 */
 #define EOP_TC_NC_ACTION_EN (1 << 19)
 #define EOP_TC_MD_ACTION_EN (1 << 21) /* L2 metadata */
+#define EOP_EXEC (1 << 28) /* For Trailing Fence */
 
 #define DATA_SEL(x) ((x) << 29)
 /* 0 - discard
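As a worked example, the sketch below recomputes the RELEASE_MEM dw2 value that
the restructured gfx_v9_0_ring_emit_fence() emits for a trailing fence. EOP_EXEC,
EOP_TC_ACTION_EN, EOP_TC_NC_ACTION_EN and EOP_TC_MD_ACTION_EN come from the hunks
above; the remaining encodings (EOP_TC_WB_ACTION_EN, EOP_TCL1_ACTION_EN,
EVENT_TYPE/EVENT_INDEX, CACHE_FLUSH_AND_INV_TS_EVENT) are assumed from soc15d.h:

/*
 * Worked example of the dw2 composition in the new emit_fence path.
 * Bit values not shown in the diff above are assumed from soc15d.h.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EOP_TC_WB_ACTION_EN	(1 << 15)	/* assumed: L2 writeback */
#define EOP_TCL1_ACTION_EN	(1 << 16)	/* assumed */
#define EOP_TC_ACTION_EN	(1 << 17)	/* L2 */
#define EOP_TC_NC_ACTION_EN	(1 << 19)
#define EOP_TC_MD_ACTION_EN	(1 << 21)	/* L2 metadata */
#define EOP_EXEC		(1 << 28)	/* For Trailing Fence */
#define EVENT_TYPE(x)		((x) << 0)	/* assumed encoding */
#define EVENT_INDEX(x)		((x) << 8)	/* assumed encoding */
#define CACHE_FLUSH_AND_INV_TS_EVENT 0x14	/* assumed value */

static uint32_t release_mem_dw2(bool writeback, bool exec)
{
	uint32_t dw2;

	/* mirrors the if/else split the patch introduces */
	if (writeback)
		dw2 = EOP_TC_NC_ACTION_EN;
	else
		dw2 = EOP_TCL1_ACTION_EN | EOP_TC_ACTION_EN |
		      EOP_TC_MD_ACTION_EN;

	dw2 |= EOP_TC_WB_ACTION_EN | EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
	       EVENT_INDEX(5);
	if (exec)
		dw2 |= EOP_EXEC;	/* trailing-fence execution bit */

	return dw2;
}

int main(void)
{
	printf("trailing fence dw2: 0x%08x\n", release_mem_dw2(false, true));
	return 0;
}

Called with writeback=false and exec=true, this reproduces the dw2 value used by
the trailing fence that gfx_v9_0_ring_preempt_ib() emits via
AMDGPU_FENCE_FLAG_EXEC.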