Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu/sdma5.2: implement ring reset callback for sdma5.2

Implement sdma queue reset callback via MMIO.

v2: enter/exit safemode for mmio queue reset.

Signed-off-by: Jiadong Zhu <Jiadong.Zhu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Jiadong Zhu; committed by Alex Deucher.
5682cd86 1fd7c37e

Diffstat: +91 lines added, 0 removed.
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
/**
 * sdma_v5_2_reset_queue - reset an SDMA 5.2 GFX queue via MMIO
 * @ring: the SDMA ring to reset (must be one of adev->sdma.instance[].ring)
 * @vmid: unused by this implementation
 *
 * Resets a single SDMA instance by stopping its ring/IB execution,
 * freezing and halting the engine, pulsing the per-instance
 * GRBM_SOFT_RESET SDMA bit, then unfreezing and resuming the queue.
 * Performed under RLC safe mode (v2 of the patch added the
 * enter/exit_safe_mode bracketing).
 *
 * Returns 0 on success, -EINVAL under SR-IOV or if @ring does not belong
 * to any SDMA instance, -ETIMEDOUT if the engine never reports frozen
 * while also not reporting idle, or the result of
 * sdma_v5_2_gfx_resume_instance() otherwise.
 */
static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
{
	struct amdgpu_device *adev = ring->adev;
	int i, j, r;
	u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, preempt, soft_reset, stat1_reg;

	/* MMIO-based reset is host-only; VFs cannot touch these registers. */
	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	/* Resolve which SDMA instance owns this ring. */
	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (ring == &adev->sdma.instance[i].ring)
			break;
	}

	if (i == adev->sdma.num_instances) {
		DRM_ERROR("sdma instance not found\n");
		return -EINVAL;
	}

	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);

	/* stop queue */
	ib_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
	ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
	WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);

	rb_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
	WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);

	/*engine stop SDMA1_F32_CNTL.HALT to 1 and SDMAx_FREEZE freeze bit to 1 */
	freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE));
	freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 1);
	WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze);

	/* Poll (up to usec_timeout us) for the engine to acknowledge FROZEN. */
	for (j = 0; j < adev->usec_timeout; j++) {
		freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE));

		if (REG_GET_FIELD(freeze, SDMA0_FREEZE, FROZEN) & 1)
			break;
		udelay(1);
	}

	if (j == adev->usec_timeout) {
		/*
		 * Freeze never latched; only proceed with the soft reset if
		 * STATUS1 shows the engine idle. NOTE(review): the 0x3FF mask
		 * presumably covers the per-queue idle status bits — confirm
		 * against the SDMA 5.2 register spec.
		 */
		stat1_reg = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_STATUS1_REG));
		if ((stat1_reg & 0x3FF) != 0x3FF) {
			DRM_ERROR("cannot soft reset as sdma not idle\n");
			r = -ETIMEDOUT;
			goto err0;
		}
	}

	/* Halt the F32 microcontroller before resetting. */
	f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
	f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
	WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);

	/* Disable UTC L1 access for the duration of the reset. */
	cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
	cntl = REG_SET_FIELD(cntl, SDMA0_CNTL, UTC_L1_ENABLE, 0);
	WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), cntl);

	/* soft reset SDMA_GFX_PREEMPT.IB_PREEMPT = 0 mmGRBM_SOFT_RESET.SOFT_RESET_SDMA0/1 = 1 */
	preempt = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT));
	preempt = REG_SET_FIELD(preempt, SDMA0_GFX_PREEMPT, IB_PREEMPT, 0);
	WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT), preempt);

	/*
	 * Assert the per-instance SDMA soft-reset bit: SOFT_RESET_SDMA0
	 * shifted left by the instance index selects SDMA0/SDMA1.
	 */
	soft_reset = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
	soft_reset |= 1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i;

	WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);

	udelay(50);

	/* Deassert the soft-reset bit. */
	soft_reset &= ~(1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i);

	WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);

	/* unfreeze and unhalt */
	freeze = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE));
	freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 0);
	WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze);

	/*
	 * Bring the GFX queue back up. NOTE(review): the final 'true'
	 * argument's meaning is not visible here — presumably "restore
	 * ring state"; confirm against sdma_v5_2_gfx_resume_instance().
	 */
	r = sdma_v5_2_gfx_resume_instance(adev, i, true);

err0:
	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
	return r;
}

/*
 * The same diff also wires this function into sdma_v5_2_ring_funcs:
 *     .reset = sdma_v5_2_reset_queue,
 * (the enclosing struct initializer is outside the visible chunk).
 */
void sdma_v5_2_set_ring_funcs(struct amdgpu_device *adev)