Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu: provide a generic function interface for reading/writing register by KIQ

Move amdgpu_virt_kiq_rreg/amdgpu_virt_kiq_wreg function to amdgpu_gfx.c,
and rename them to amdgpu_kiq_rreg/amdgpu_kiq_wreg. Make it generic and
flexible.

Signed-off-by: chen gong <curry.gong@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

chen gong and committed by
Alex Deucher
d33a99c4 a6c44d25

+108 -104
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 217 217 uint32_t ret; 218 218 219 219 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) 220 - return amdgpu_virt_kiq_rreg(adev, reg); 220 + return amdgpu_kiq_rreg(adev, reg); 221 221 222 222 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX)) 223 223 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4)); ··· 295 295 } 296 296 297 297 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) 298 - return amdgpu_virt_kiq_wreg(adev, reg, v); 298 + return amdgpu_kiq_wreg(adev, reg, v); 299 299 300 300 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX)) 301 301 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
+94 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
··· 296 296 297 297 spin_lock_init(&kiq->ring_lock); 298 298 299 - r = amdgpu_device_wb_get(adev, &adev->virt.reg_val_offs); 299 + r = amdgpu_device_wb_get(adev, &kiq->reg_val_offs); 300 300 if (r) 301 301 return r; 302 302 ··· 321 321 322 322 void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring) 323 323 { 324 - amdgpu_device_wb_free(ring->adev, ring->adev->virt.reg_val_offs); 324 + amdgpu_device_wb_free(ring->adev, ring->adev->gfx.kiq.reg_val_offs); 325 325 amdgpu_ring_fini(ring); 326 326 } 327 327 ··· 657 657 DRM_ERROR("CP ECC ERROR IRQ\n"); 658 658 amdgpu_ras_interrupt_dispatch(adev, &ih_data); 659 659 return 0; 660 + } 661 + 662 + uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg) 663 + { 664 + signed long r, cnt = 0; 665 + unsigned long flags; 666 + uint32_t seq; 667 + struct amdgpu_kiq *kiq = &adev->gfx.kiq; 668 + struct amdgpu_ring *ring = &kiq->ring; 669 + 670 + BUG_ON(!ring->funcs->emit_rreg); 671 + 672 + spin_lock_irqsave(&kiq->ring_lock, flags); 673 + amdgpu_ring_alloc(ring, 32); 674 + amdgpu_ring_emit_rreg(ring, reg); 675 + amdgpu_fence_emit_polling(ring, &seq); 676 + amdgpu_ring_commit(ring); 677 + spin_unlock_irqrestore(&kiq->ring_lock, flags); 678 + 679 + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); 680 + 681 + /* don't wait anymore for gpu reset case because this way may 682 + * block gpu_recover() routine forever, e.g. this virt_kiq_rreg 683 + * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will 684 + * never return if we keep waiting in virt_kiq_rreg, which cause 685 + * gpu_recover() hang there. 
686 + * 687 + * also don't wait anymore for IRQ context 688 + * */ 689 + if (r < 1 && (adev->in_gpu_reset || in_interrupt())) 690 + goto failed_kiq_read; 691 + 692 + might_sleep(); 693 + while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { 694 + msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); 695 + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); 696 + } 697 + 698 + if (cnt > MAX_KIQ_REG_TRY) 699 + goto failed_kiq_read; 700 + 701 + return adev->wb.wb[kiq->reg_val_offs]; 702 + 703 + failed_kiq_read: 704 + pr_err("failed to read reg:%x\n", reg); 705 + return ~0; 706 + } 707 + 708 + void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) 709 + { 710 + signed long r, cnt = 0; 711 + unsigned long flags; 712 + uint32_t seq; 713 + struct amdgpu_kiq *kiq = &adev->gfx.kiq; 714 + struct amdgpu_ring *ring = &kiq->ring; 715 + 716 + BUG_ON(!ring->funcs->emit_wreg); 717 + 718 + spin_lock_irqsave(&kiq->ring_lock, flags); 719 + amdgpu_ring_alloc(ring, 32); 720 + amdgpu_ring_emit_wreg(ring, reg, v); 721 + amdgpu_fence_emit_polling(ring, &seq); 722 + amdgpu_ring_commit(ring); 723 + spin_unlock_irqrestore(&kiq->ring_lock, flags); 724 + 725 + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); 726 + 727 + /* don't wait anymore for gpu reset case because this way may 728 + * block gpu_recover() routine forever, e.g. this virt_kiq_rreg 729 + * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will 730 + * never return if we keep waiting in virt_kiq_rreg, which cause 731 + * gpu_recover() hang there. 
732 + * 733 + * also don't wait anymore for IRQ context 734 + * */ 735 + if (r < 1 && (adev->in_gpu_reset || in_interrupt())) 736 + goto failed_kiq_write; 737 + 738 + might_sleep(); 739 + while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { 740 + 741 + msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); 742 + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); 743 + } 744 + 745 + if (cnt > MAX_KIQ_REG_TRY) 746 + goto failed_kiq_write; 747 + 748 + return; 749 + 750 + failed_kiq_write: 751 + pr_err("failed to write reg:%x\n", reg); 660 752 }
+3
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
··· 94 94 struct amdgpu_ring ring; 95 95 struct amdgpu_irq_src irq; 96 96 const struct kiq_pm4_funcs *pmf; 97 + uint32_t reg_val_offs; 97 98 }; 98 99 99 100 /* ··· 376 375 int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev, 377 376 struct amdgpu_irq_src *source, 378 377 struct amdgpu_iv_entry *entry); 378 + uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); 379 + void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v); 379 380 #endif
-92
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
··· 45 45 adev->pg_flags = 0; 46 46 } 47 47 48 - uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg) 49 - { 50 - signed long r, cnt = 0; 51 - unsigned long flags; 52 - uint32_t seq; 53 - struct amdgpu_kiq *kiq = &adev->gfx.kiq; 54 - struct amdgpu_ring *ring = &kiq->ring; 55 - 56 - BUG_ON(!ring->funcs->emit_rreg); 57 - 58 - spin_lock_irqsave(&kiq->ring_lock, flags); 59 - amdgpu_ring_alloc(ring, 32); 60 - amdgpu_ring_emit_rreg(ring, reg); 61 - amdgpu_fence_emit_polling(ring, &seq); 62 - amdgpu_ring_commit(ring); 63 - spin_unlock_irqrestore(&kiq->ring_lock, flags); 64 - 65 - r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); 66 - 67 - /* don't wait anymore for gpu reset case because this way may 68 - * block gpu_recover() routine forever, e.g. this virt_kiq_rreg 69 - * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will 70 - * never return if we keep waiting in virt_kiq_rreg, which cause 71 - * gpu_recover() hang there. 72 - * 73 - * also don't wait anymore for IRQ context 74 - * */ 75 - if (r < 1 && (adev->in_gpu_reset || in_interrupt())) 76 - goto failed_kiq_read; 77 - 78 - might_sleep(); 79 - while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { 80 - msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); 81 - r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); 82 - } 83 - 84 - if (cnt > MAX_KIQ_REG_TRY) 85 - goto failed_kiq_read; 86 - 87 - return adev->wb.wb[adev->virt.reg_val_offs]; 88 - 89 - failed_kiq_read: 90 - pr_err("failed to read reg:%x\n", reg); 91 - return ~0; 92 - } 93 - 94 - void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) 95 - { 96 - signed long r, cnt = 0; 97 - unsigned long flags; 98 - uint32_t seq; 99 - struct amdgpu_kiq *kiq = &adev->gfx.kiq; 100 - struct amdgpu_ring *ring = &kiq->ring; 101 - 102 - BUG_ON(!ring->funcs->emit_wreg); 103 - 104 - spin_lock_irqsave(&kiq->ring_lock, flags); 105 - amdgpu_ring_alloc(ring, 32); 106 - amdgpu_ring_emit_wreg(ring, reg, v); 107 - amdgpu_fence_emit_polling(ring, 
&seq); 108 - amdgpu_ring_commit(ring); 109 - spin_unlock_irqrestore(&kiq->ring_lock, flags); 110 - 111 - r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); 112 - 113 - /* don't wait anymore for gpu reset case because this way may 114 - * block gpu_recover() routine forever, e.g. this virt_kiq_rreg 115 - * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will 116 - * never return if we keep waiting in virt_kiq_rreg, which cause 117 - * gpu_recover() hang there. 118 - * 119 - * also don't wait anymore for IRQ context 120 - * */ 121 - if (r < 1 && (adev->in_gpu_reset || in_interrupt())) 122 - goto failed_kiq_write; 123 - 124 - might_sleep(); 125 - while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { 126 - 127 - msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); 128 - r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); 129 - } 130 - 131 - if (cnt > MAX_KIQ_REG_TRY) 132 - goto failed_kiq_write; 133 - 134 - return; 135 - 136 - failed_kiq_write: 137 - pr_err("failed to write reg:%x\n", reg); 138 - } 139 - 140 48 void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, 141 49 uint32_t reg0, uint32_t reg1, 142 50 uint32_t ref, uint32_t mask)
-2
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
··· 287 287 288 288 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); 289 289 void amdgpu_virt_init_setting(struct amdgpu_device *adev); 290 - uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); 291 - void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v); 292 290 void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, 293 291 uint32_t reg0, uint32_t rreg1, 294 292 uint32_t ref, uint32_t mask);
+3 -2
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
··· 4737 4737 static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) 4738 4738 { 4739 4739 struct amdgpu_device *adev = ring->adev; 4740 + struct amdgpu_kiq *kiq = &adev->gfx.kiq; 4740 4741 4741 4742 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); 4742 4743 amdgpu_ring_write(ring, 0 | /* src: register*/ ··· 4746 4745 amdgpu_ring_write(ring, reg); 4747 4746 amdgpu_ring_write(ring, 0); 4748 4747 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + 4749 - adev->virt.reg_val_offs * 4)); 4748 + kiq->reg_val_offs * 4)); 4750 4749 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + 4751 - adev->virt.reg_val_offs * 4)); 4750 + kiq->reg_val_offs * 4)); 4752 4751 } 4753 4752 4754 4753 static void gfx_v10_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
+3 -2
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
··· 6449 6449 static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) 6450 6450 { 6451 6451 struct amdgpu_device *adev = ring->adev; 6452 + struct amdgpu_kiq *kiq = &adev->gfx.kiq; 6452 6453 6453 6454 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); 6454 6455 amdgpu_ring_write(ring, 0 | /* src: register*/ ··· 6458 6457 amdgpu_ring_write(ring, reg); 6459 6458 amdgpu_ring_write(ring, 0); 6460 6459 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + 6461 - adev->virt.reg_val_offs * 4)); 6460 + kiq->reg_val_offs * 4)); 6462 6461 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + 6463 - adev->virt.reg_val_offs * 4)); 6462 + kiq->reg_val_offs * 4)); 6464 6463 } 6465 6464 6466 6465 static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
+3 -2
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
··· 5213 5213 static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) 5214 5214 { 5215 5215 struct amdgpu_device *adev = ring->adev; 5216 + struct amdgpu_kiq *kiq = &adev->gfx.kiq; 5216 5217 5217 5218 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); 5218 5219 amdgpu_ring_write(ring, 0 | /* src: register*/ ··· 5222 5221 amdgpu_ring_write(ring, reg); 5223 5222 amdgpu_ring_write(ring, 0); 5224 5223 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + 5225 - adev->virt.reg_val_offs * 4)); 5224 + kiq->reg_val_offs * 4)); 5226 5225 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + 5227 - adev->virt.reg_val_offs * 4)); 5226 + kiq->reg_val_offs * 4)); 5228 5227 } 5229 5228 5230 5229 static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,