Merge tag 'drm-next-2025-04-05' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
"Weekly fixes, mostly from the end of last week, this week was very
quiet, maybe you scared everyone away. It's mostly amdgpu, and xe,
with some i915, adp and bridge bits, since I think this is overly
quiet I'd expect rc2 to be a bit more lively.

bridge:
- tda998x: Select CONFIG_DRM_KMS_HELPER

amdgpu:
- Guard against potential division by 0 in fan code
- Zero RPM support for SMU 14.0.2
- Properly handle SI and CIK support being disabled
- PSR fixes
- DML2 fixes
- DP Link training fix
- Vblank fixes
- RAS fixes
- Partitioning fix
- SDMA fix
- SMU 13.0.x fixes
- ROM fetching fix
- MES fixes
- Queue reset fix

xe:
- Fix NULL pointer dereference on error path
- Add missing HW workaround for BMG
- Fix survivability mode not triggering
- Fix build warning when DRM_FBDEV_EMULATION is not set

i915:
- Bounds check for scalers in DSC prefill latency computation
- Fix build by adding a missing include

adp:
- Fix error handling in plane setup"

* tag 'drm-next-2025-04-05' of https://gitlab.freedesktop.org/drm/kernel: (34 commits)
drm/i2c: tda998x: select CONFIG_DRM_KMS_HELPER
drm/amdgpu/gfx12: fix num_mec
drm/amdgpu/gfx11: fix num_mec
drm/amd/pm: Add gpu_metrics_v1_8
drm/amdgpu: Prefer shadow rom when available
drm/amd/pm: Update smu metrics table for smu_v13_0_6
drm/amd/pm: Remove host limit metrics support
Remove unnecessary firmware version check for gc v9_4_2
drm/amdgpu: stop unmapping MQD for kernel queues v3
Revert "drm/amdgpu/sdma_v4_4_2: update VM flush implementation for SDMA"
drm/amdgpu: Parse all deferred errors with UMC aca handle
drm/amdgpu: Update ta ras block
drm/amdgpu: Add NPS2 to DPX compatible mode
drm/amdgpu: Use correct gfx deferred error count
drm/amd/display: Actually do immediate vblank disable
drm/amd/display: prevent hang on link training fail
Revert "drm/amd/display: dml2 soc dscclk use DPM table clk setting"
drm/amd/display: Increase vblank offdelay for PSR panels
drm/amd: Handle being compiled without SI or CIK support better
drm/amd/pm: Add zero RPM enabled OD setting support for SMU14.0.2
...

+537 -701
+2 -2
drivers/gpu/drm/adp/adp_drv.c
··· 232 232 ALL_CRTCS, &adp_plane_funcs, 233 233 plane_formats, ARRAY_SIZE(plane_formats), 234 234 NULL, DRM_PLANE_TYPE_PRIMARY, "plane"); 235 - if (!plane) { 235 + if (IS_ERR(plane)) { 236 236 drm_err(drm, "failed to allocate plane"); 237 - return ERR_PTR(-ENOMEM); 237 + return plane; 238 238 } 239 239 240 240 drm_plane_helper_add(plane, &adp_plane_helper_funcs);
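The adp fix above switches the plane-allocation check from a NULL test to the kernel's ERR_PTR convention, since the DRM helper hands back an encoded error pointer (not NULL) on failure, and then propagates that pointer as-is. A minimal sketch of the pattern, with a made-up widget allocator standing in for the DRM call:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct widget {
        int id;
};

/* Hypothetical allocator following the ERR_PTR convention. */
static struct widget *widget_alloc(void)
{
        struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

        if (!w)
                return ERR_PTR(-ENOMEM);        /* errno encoded in the pointer */
        return w;
}

static struct widget *widget_setup(void)
{
        struct widget *w = widget_alloc();

        /* A NULL check would miss the failure; decode it instead. */
        if (IS_ERR(w))
                return w;       /* propagate the encoded error as-is */
        w->id = 1;
        return w;
}

Returning the encoded pointer rather than a fresh -ENOMEM preserves the real error code, which is what the adp change does.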
+4
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
··· 195 195 { 196 196 const struct aca_bank_ops *bank_ops = handle->bank_ops; 197 197 198 + /* Parse all deferred errors with UMC aca handle */ 199 + if (ACA_BANK_ERR_IS_DEFFERED(bank)) 200 + return handle->hwip == ACA_HWIP_TYPE_UMC; 201 + 198 202 if (!aca_bank_hwip_is_matched(bank, handle->hwip)) 199 203 return false; 200 204
-8
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
··· 80 80 (ACA_REG__STATUS__POISON((bank)->regs[ACA_REG_IDX_STATUS]) || \ 81 81 ACA_REG__STATUS__DEFERRED((bank)->regs[ACA_REG_IDX_STATUS])) 82 82 83 - #define ACA_BANK_ERR_CE_DE_DECODE(bank) \ 84 - (ACA_BANK_ERR_IS_DEFFERED(bank) ? ACA_ERROR_TYPE_DEFERRED : \ 85 - ACA_ERROR_TYPE_CE) 86 - 87 - #define ACA_BANK_ERR_UE_DE_DECODE(bank) \ 88 - (ACA_BANK_ERR_IS_DEFFERED(bank) ? ACA_ERROR_TYPE_DEFERRED : \ 89 - ACA_ERROR_TYPE_UE) 90 - 91 83 enum aca_reg_idx { 92 84 ACA_REG_IDX_CTL = 0, 93 85 ACA_REG_IDX_STATUS = 1,
+27 -7
drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
··· 447 447 return true; 448 448 } 449 449 450 + static bool amdgpu_prefer_rom_resource(struct amdgpu_device *adev) 451 + { 452 + struct resource *res = &adev->pdev->resource[PCI_ROM_RESOURCE]; 453 + 454 + return (res->flags & IORESOURCE_ROM_SHADOW); 455 + } 456 + 450 457 static bool amdgpu_get_bios_dgpu(struct amdgpu_device *adev) 451 458 { 452 459 if (amdgpu_atrm_get_bios(adev)) { ··· 472 465 goto success; 473 466 } 474 467 475 - if (amdgpu_read_platform_bios(adev)) { 476 - dev_info(adev->dev, "Fetched VBIOS from platform\n"); 477 - goto success; 478 - } 468 + if (amdgpu_prefer_rom_resource(adev)) { 469 + if (amdgpu_read_bios(adev)) { 470 + dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n"); 471 + goto success; 472 + } 479 473 480 - if (amdgpu_read_bios(adev)) { 481 - dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n"); 482 - goto success; 474 + if (amdgpu_read_platform_bios(adev)) { 475 + dev_info(adev->dev, "Fetched VBIOS from platform\n"); 476 + goto success; 477 + } 478 + 479 + } else { 480 + if (amdgpu_read_platform_bios(adev)) { 481 + dev_info(adev->dev, "Fetched VBIOS from platform\n"); 482 + goto success; 483 + } 484 + 485 + if (amdgpu_read_bios(adev)) { 486 + dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n"); 487 + goto success; 488 + } 483 489 } 484 490 485 491 if (amdgpu_read_bios_from_rom(adev)) {
+24 -20
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 1809 1809 }; 1810 1810 1811 1811 static const struct pci_device_id pciidlist[] = { 1812 - #ifdef CONFIG_DRM_AMDGPU_SI 1813 1812 {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, 1814 1813 {0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, 1815 1814 {0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, ··· 1881 1882 {0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY}, 1882 1883 {0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY}, 1883 1884 {0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY}, 1884 - #endif 1885 - #ifdef CONFIG_DRM_AMDGPU_CIK 1886 1885 /* Kaveri */ 1887 1886 {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, 1888 1887 {0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, ··· 1963 1966 {0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, 1964 1967 {0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, 1965 1968 {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, 1966 - #endif 1967 1969 /* topaz */ 1968 1970 {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, 1969 1971 {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, ··· 2309 2313 return -ENOTSUPP; 2310 2314 } 2311 2315 2316 + switch (flags & AMD_ASIC_MASK) { 2317 + case CHIP_TAHITI: 2318 + case CHIP_PITCAIRN: 2319 + case CHIP_VERDE: 2320 + case CHIP_OLAND: 2321 + case CHIP_HAINAN: 2312 2322 #ifdef CONFIG_DRM_AMDGPU_SI 2313 - if (!amdgpu_si_support) { 2314 - switch (flags & AMD_ASIC_MASK) { 2315 - case CHIP_TAHITI: 2316 - case CHIP_PITCAIRN: 2317 - case CHIP_VERDE: 2318 - case CHIP_OLAND: 2319 - case CHIP_HAINAN: 2323 + if (!amdgpu_si_support) { 2320 2324 dev_info(&pdev->dev, 2321 2325 "SI support provided by radeon.\n"); 2322 2326 dev_info(&pdev->dev, ··· 2324 2328 ); 2325 2329 return -ENODEV; 2326 2330 } 2327 - } 2331 + break; 2332 + #else 2333 + dev_info(&pdev->dev, "amdgpu is built without SI support.\n"); 2334 + return -ENODEV; 2328 2335 #endif 2336 + case CHIP_KAVERI: 2337 + case CHIP_BONAIRE: 2338 + case CHIP_HAWAII: 2339 + case CHIP_KABINI: 2340 + case CHIP_MULLINS: 2329 2341 #ifdef CONFIG_DRM_AMDGPU_CIK 2330 - if (!amdgpu_cik_support) { 2331 - switch (flags & AMD_ASIC_MASK) { 2332 - case CHIP_KAVERI: 2333 - case CHIP_BONAIRE: 2334 - case CHIP_HAWAII: 2335 - case CHIP_KABINI: 2336 - case CHIP_MULLINS: 2342 + if (!amdgpu_cik_support) { 2337 2343 dev_info(&pdev->dev, 2338 2344 "CIK support provided by radeon.\n"); 2339 2345 dev_info(&pdev->dev, ··· 2343 2345 ); 2344 2346 return -ENODEV; 2345 2347 } 2346 - } 2348 + break; 2349 + #else 2350 + dev_info(&pdev->dev, "amdgpu is built without CIK support.\n"); 2351 + return -ENODEV; 2347 2352 #endif 2353 + default: 2354 + break; 2355 + } 2348 2356 2349 2357 adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev); 2350 2358 if (IS_ERR(adev))
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
··· 77 77 "jpeg", 78 78 "ih", 79 79 "mpio", 80 + "mmsch", 80 81 }; 81 82 82 83 const char *ras_mca_block_string[] = {
+7
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
··· 98 98 AMDGPU_RAS_BLOCK__JPEG, 99 99 AMDGPU_RAS_BLOCK__IH, 100 100 AMDGPU_RAS_BLOCK__MPIO, 101 + AMDGPU_RAS_BLOCK__MMSCH, 101 102 102 103 AMDGPU_RAS_BLOCK__LAST, 103 104 AMDGPU_RAS_BLOCK__ANY = -1 ··· 796 795 return TA_RAS_BLOCK__VCN; 797 796 case AMDGPU_RAS_BLOCK__JPEG: 798 797 return TA_RAS_BLOCK__JPEG; 798 + case AMDGPU_RAS_BLOCK__IH: 799 + return TA_RAS_BLOCK__IH; 800 + case AMDGPU_RAS_BLOCK__MPIO: 801 + return TA_RAS_BLOCK__MPIO; 802 + case AMDGPU_RAS_BLOCK__MMSCH: 803 + return TA_RAS_BLOCK__MMSCH; 799 804 default: 800 805 WARN_ONCE(1, "RAS ERROR: unexpected block id %d\n", block); 801 806 return TA_RAS_BLOCK__UMC;
+8 -50
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
··· 608 608 size_t size, loff_t *pos) 609 609 { 610 610 struct amdgpu_ring *ring = file_inode(f)->i_private; 611 - volatile u32 *mqd; 612 - u32 *kbuf; 613 - int r, i; 614 - uint32_t value, result; 611 + ssize_t bytes = min_t(ssize_t, ring->mqd_size - *pos, size); 612 + void *from = ((u8 *)ring->mqd_ptr) + *pos; 615 613 616 - if (*pos & 3 || size & 3) 617 - return -EINVAL; 614 + if (*pos > ring->mqd_size) 615 + return 0; 618 616 619 - kbuf = kmalloc(ring->mqd_size, GFP_KERNEL); 620 - if (!kbuf) 621 - return -ENOMEM; 617 + if (copy_to_user(buf, from, bytes)) 618 + return -EFAULT; 622 619 623 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 624 - if (unlikely(r != 0)) 625 - goto err_free; 626 - 627 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd); 628 - if (r) 629 - goto err_unreserve; 630 - 631 - /* 632 - * Copy to local buffer to avoid put_user(), which might fault 633 - * and acquire mmap_sem, under reservation_ww_class_mutex. 634 - */ 635 - for (i = 0; i < ring->mqd_size/sizeof(u32); i++) 636 - kbuf[i] = mqd[i]; 637 - 638 - amdgpu_bo_kunmap(ring->mqd_obj); 639 - amdgpu_bo_unreserve(ring->mqd_obj); 640 - 641 - result = 0; 642 - while (size) { 643 - if (*pos >= ring->mqd_size) 644 - break; 645 - 646 - value = kbuf[*pos/4]; 647 - r = put_user(value, (uint32_t *)buf); 648 - if (r) 649 - goto err_free; 650 - buf += 4; 651 - result += 4; 652 - size -= 4; 653 - *pos += 4; 654 - } 655 - 656 - kfree(kbuf); 657 - return result; 658 - 659 - err_unreserve: 660 - amdgpu_bo_unreserve(ring->mqd_obj); 661 - err_free: 662 - kfree(kbuf); 663 - return r; 620 + *pos += bytes; 621 + return bytes; 664 622 } 665 623 666 624 static const struct file_operations amdgpu_debugfs_mqd_fops = {
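The debugfs rewrite above drops the reserve/kmap/copy dance and reads straight from the persistently mapped MQD, clamping the copy to what remains past *pos. The same clamp-and-copy shape is what the generic helper below provides; this is only an illustrative debugfs read op over a hypothetical buffer, not the driver's code:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>

/* Hypothetical blob exposed read-only through debugfs. */
static u8 demo_blob[256];

static ssize_t demo_blob_read(struct file *f, char __user *buf,
                              size_t size, loff_t *pos)
{
        /* Bounds-checks against sizeof(demo_blob), copies, advances *pos. */
        return simple_read_from_buffer(buf, size, pos,
                                       demo_blob, sizeof(demo_blob));
}

static const struct file_operations demo_blob_fops = {
        .owner = THIS_MODULE,
        .read = demo_blob_read,
        .llseek = default_llseek,
};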
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
··· 1172 1172 break; 1173 1173 case ACA_SMU_TYPE_CE: 1174 1174 count = ext_error_code == 6 ? count : 0ULL; 1175 - bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank); 1175 + bank->aca_err_type = ACA_ERROR_TYPE_CE; 1176 1176 ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, count); 1177 1177 break; 1178 1178 default:
+2 -1
drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
··· 473 473 break; 474 474 case AMDGPU_DPX_PARTITION_MODE: 475 475 num_xcp = 2; 476 - nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE); 476 + nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) | 477 + BIT(AMDGPU_NPS2_PARTITION_MODE); 477 478 break; 478 479 case AMDGPU_TPX_PARTITION_MODE: 479 480 num_xcp = 3;
+11 -77
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
··· 6851 6851 static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) 6852 6852 { 6853 6853 int r, i; 6854 - struct amdgpu_ring *ring; 6855 6854 6856 6855 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 6857 - ring = &adev->gfx.gfx_ring[i]; 6858 - 6859 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 6860 - if (unlikely(r != 0)) 6861 - return r; 6862 - 6863 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 6864 - if (!r) { 6865 - r = gfx_v10_0_kgq_init_queue(ring, false); 6866 - amdgpu_bo_kunmap(ring->mqd_obj); 6867 - ring->mqd_ptr = NULL; 6868 - } 6869 - amdgpu_bo_unreserve(ring->mqd_obj); 6856 + r = gfx_v10_0_kgq_init_queue(&adev->gfx.gfx_ring[i], false); 6870 6857 if (r) 6871 6858 return r; 6872 6859 } ··· 7160 7173 7161 7174 static int gfx_v10_0_kiq_resume(struct amdgpu_device *adev) 7162 7175 { 7163 - struct amdgpu_ring *ring; 7164 - int r; 7165 - 7166 - ring = &adev->gfx.kiq[0].ring; 7167 - 7168 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 7169 - if (unlikely(r != 0)) 7170 - return r; 7171 - 7172 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 7173 - if (unlikely(r != 0)) { 7174 - amdgpu_bo_unreserve(ring->mqd_obj); 7175 - return r; 7176 - } 7177 - 7178 - gfx_v10_0_kiq_init_queue(ring); 7179 - amdgpu_bo_kunmap(ring->mqd_obj); 7180 - ring->mqd_ptr = NULL; 7181 - amdgpu_bo_unreserve(ring->mqd_obj); 7176 + gfx_v10_0_kiq_init_queue(&adev->gfx.kiq[0].ring); 7182 7177 return 0; 7183 7178 } 7184 7179 7185 7180 static int gfx_v10_0_kcq_resume(struct amdgpu_device *adev) 7186 7181 { 7187 - struct amdgpu_ring *ring = NULL; 7188 - int r = 0, i; 7182 + int i, r; 7189 7183 7190 7184 gfx_v10_0_cp_compute_enable(adev, true); 7191 7185 7192 7186 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 7193 - ring = &adev->gfx.compute_ring[i]; 7194 - 7195 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 7196 - if (unlikely(r != 0)) 7197 - goto done; 7198 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 7199 - if (!r) { 7200 - r = gfx_v10_0_kcq_init_queue(ring, false); 7201 - amdgpu_bo_kunmap(ring->mqd_obj); 7202 - ring->mqd_ptr = NULL; 7203 - } 7204 - amdgpu_bo_unreserve(ring->mqd_obj); 7187 + r = gfx_v10_0_kcq_init_queue(&adev->gfx.compute_ring[i], 7188 + false); 7205 7189 if (r) 7206 - goto done; 7190 + return r; 7207 7191 } 7208 7192 7209 - r = amdgpu_gfx_enable_kcq(adev, 0); 7210 - done: 7211 - return r; 7193 + return amdgpu_gfx_enable_kcq(adev, 0); 7212 7194 } 7213 7195 7214 7196 static int gfx_v10_0_cp_resume(struct amdgpu_device *adev) ··· 9535 9579 if (r) 9536 9580 return r; 9537 9581 9538 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 9539 - if (unlikely(r != 0)) { 9540 - DRM_ERROR("fail to resv mqd_obj\n"); 9541 - return r; 9542 - } 9543 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 9544 - if (!r) { 9545 - r = gfx_v10_0_kgq_init_queue(ring, true); 9546 - amdgpu_bo_kunmap(ring->mqd_obj); 9547 - ring->mqd_ptr = NULL; 9548 - } 9549 - amdgpu_bo_unreserve(ring->mqd_obj); 9582 + r = gfx_v10_0_kgq_init_queue(ring, true); 9550 9583 if (r) { 9551 - DRM_ERROR("fail to unresv mqd_obj\n"); 9584 + DRM_ERROR("fail to init kgq\n"); 9552 9585 return r; 9553 9586 } 9554 9587 ··· 9594 9649 return r; 9595 9650 } 9596 9651 9597 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 9598 - if (unlikely(r != 0)) { 9599 - dev_err(adev->dev, "fail to resv mqd_obj\n"); 9600 - return r; 9601 - } 9602 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 9603 - if (!r) { 9604 - r = gfx_v10_0_kcq_init_queue(ring, true); 9605 - 
amdgpu_bo_kunmap(ring->mqd_obj); 9606 - ring->mqd_ptr = NULL; 9607 - } 9608 - amdgpu_bo_unreserve(ring->mqd_obj); 9652 + r = gfx_v10_0_kcq_init_queue(ring, true); 9609 9653 if (r) { 9610 - dev_err(adev->dev, "fail to unresv mqd_obj\n"); 9654 + dev_err(adev->dev, "fail to init kcq\n"); 9611 9655 return r; 9612 9656 } 9613 9657
+11 -79
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
··· 1581 1581 adev->gfx.me.num_me = 1; 1582 1582 adev->gfx.me.num_pipe_per_me = 1; 1583 1583 adev->gfx.me.num_queue_per_pipe = 1; 1584 - adev->gfx.mec.num_mec = 2; 1584 + adev->gfx.mec.num_mec = 1; 1585 1585 adev->gfx.mec.num_pipe_per_mec = 4; 1586 1586 adev->gfx.mec.num_queue_per_pipe = 4; 1587 1587 break; ··· 4115 4115 static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) 4116 4116 { 4117 4117 int r, i; 4118 - struct amdgpu_ring *ring; 4119 4118 4120 4119 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 4121 - ring = &adev->gfx.gfx_ring[i]; 4122 - 4123 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 4124 - if (unlikely(r != 0)) 4125 - return r; 4126 - 4127 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 4128 - if (!r) { 4129 - r = gfx_v11_0_kgq_init_queue(ring, false); 4130 - amdgpu_bo_kunmap(ring->mqd_obj); 4131 - ring->mqd_ptr = NULL; 4132 - } 4133 - amdgpu_bo_unreserve(ring->mqd_obj); 4120 + r = gfx_v11_0_kgq_init_queue(&adev->gfx.gfx_ring[i], false); 4134 4121 if (r) 4135 4122 return r; 4136 4123 } ··· 4439 4452 4440 4453 static int gfx_v11_0_kiq_resume(struct amdgpu_device *adev) 4441 4454 { 4442 - struct amdgpu_ring *ring; 4443 - int r; 4444 - 4445 - ring = &adev->gfx.kiq[0].ring; 4446 - 4447 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 4448 - if (unlikely(r != 0)) 4449 - return r; 4450 - 4451 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 4452 - if (unlikely(r != 0)) { 4453 - amdgpu_bo_unreserve(ring->mqd_obj); 4454 - return r; 4455 - } 4456 - 4457 - gfx_v11_0_kiq_init_queue(ring); 4458 - amdgpu_bo_kunmap(ring->mqd_obj); 4459 - ring->mqd_ptr = NULL; 4460 - amdgpu_bo_unreserve(ring->mqd_obj); 4461 - ring->sched.ready = true; 4455 + gfx_v11_0_kiq_init_queue(&adev->gfx.kiq[0].ring); 4462 4456 return 0; 4463 4457 } 4464 4458 4465 4459 static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev) 4466 4460 { 4467 - struct amdgpu_ring *ring = NULL; 4468 - int r = 0, i; 4461 + int i, r; 4469 4462 4470 4463 if (!amdgpu_async_gfx_ring) 4471 4464 gfx_v11_0_cp_compute_enable(adev, true); 4472 4465 4473 4466 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4474 - ring = &adev->gfx.compute_ring[i]; 4475 - 4476 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 4477 - if (unlikely(r != 0)) 4478 - goto done; 4479 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 4480 - if (!r) { 4481 - r = gfx_v11_0_kcq_init_queue(ring, false); 4482 - amdgpu_bo_kunmap(ring->mqd_obj); 4483 - ring->mqd_ptr = NULL; 4484 - } 4485 - amdgpu_bo_unreserve(ring->mqd_obj); 4467 + r = gfx_v11_0_kcq_init_queue(&adev->gfx.compute_ring[i], false); 4486 4468 if (r) 4487 - goto done; 4469 + return r; 4488 4470 } 4489 4471 4490 - r = amdgpu_gfx_enable_kcq(adev, 0); 4491 - done: 4492 - return r; 4472 + return amdgpu_gfx_enable_kcq(adev, 0); 4493 4473 } 4494 4474 4495 4475 static int gfx_v11_0_cp_resume(struct amdgpu_device *adev) ··· 6621 6667 if (r) 6622 6668 return r; 6623 6669 6624 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 6625 - if (unlikely(r != 0)) { 6626 - dev_err(adev->dev, "fail to resv mqd_obj\n"); 6627 - return r; 6628 - } 6629 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 6630 - if (!r) { 6631 - r = gfx_v11_0_kgq_init_queue(ring, true); 6632 - amdgpu_bo_kunmap(ring->mqd_obj); 6633 - ring->mqd_ptr = NULL; 6634 - } 6635 - amdgpu_bo_unreserve(ring->mqd_obj); 6670 + r = gfx_v11_0_kgq_init_queue(ring, true); 6636 6671 if (r) { 6637 - dev_err(adev->dev, "fail to unresv mqd_obj\n"); 6672 + dev_err(adev->dev, "failed to init kgq\n"); 6638 6673 
return r; 6639 6674 } 6640 6675 ··· 6650 6707 return r; 6651 6708 } 6652 6709 6653 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 6654 - if (unlikely(r != 0)) { 6655 - dev_err(adev->dev, "fail to resv mqd_obj\n"); 6656 - return r; 6657 - } 6658 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 6659 - if (!r) { 6660 - r = gfx_v11_0_kcq_init_queue(ring, true); 6661 - amdgpu_bo_kunmap(ring->mqd_obj); 6662 - ring->mqd_ptr = NULL; 6663 - } 6664 - amdgpu_bo_unreserve(ring->mqd_obj); 6710 + r = gfx_v11_0_kcq_init_queue(ring, true); 6665 6711 if (r) { 6666 - dev_err(adev->dev, "fail to unresv mqd_obj\n"); 6712 + dev_err(adev->dev, "fail to init kcq\n"); 6667 6713 return r; 6668 6714 } 6669 6715 r = amdgpu_mes_map_legacy_queue(adev, ring);
+16 -88
drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
··· 1355 1355 adev->gfx.me.num_me = 1; 1356 1356 adev->gfx.me.num_pipe_per_me = 1; 1357 1357 adev->gfx.me.num_queue_per_pipe = 1; 1358 - adev->gfx.mec.num_mec = 2; 1358 + adev->gfx.mec.num_mec = 1; 1359 1359 adev->gfx.mec.num_pipe_per_mec = 2; 1360 1360 adev->gfx.mec.num_queue_per_pipe = 4; 1361 1361 break; ··· 3001 3001 3002 3002 static int gfx_v12_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) 3003 3003 { 3004 - int r, i; 3005 - struct amdgpu_ring *ring; 3004 + int i, r; 3006 3005 3007 3006 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 3008 - ring = &adev->gfx.gfx_ring[i]; 3009 - 3010 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 3011 - if (unlikely(r != 0)) 3012 - goto done; 3013 - 3014 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 3015 - if (!r) { 3016 - r = gfx_v12_0_kgq_init_queue(ring, false); 3017 - amdgpu_bo_kunmap(ring->mqd_obj); 3018 - ring->mqd_ptr = NULL; 3019 - } 3020 - amdgpu_bo_unreserve(ring->mqd_obj); 3007 + r = gfx_v12_0_kgq_init_queue(&adev->gfx.gfx_ring[i], false); 3021 3008 if (r) 3022 - goto done; 3009 + return r; 3023 3010 } 3024 3011 3025 3012 r = amdgpu_gfx_enable_kgq(adev, 0); 3026 3013 if (r) 3027 - goto done; 3014 + return r; 3028 3015 3029 - r = gfx_v12_0_cp_gfx_start(adev); 3030 - if (r) 3031 - goto done; 3032 - 3033 - done: 3034 - return r; 3016 + return gfx_v12_0_cp_gfx_start(adev); 3035 3017 } 3036 3018 3037 3019 static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m, ··· 3326 3344 3327 3345 static int gfx_v12_0_kiq_resume(struct amdgpu_device *adev) 3328 3346 { 3329 - struct amdgpu_ring *ring; 3330 - int r; 3331 - 3332 - ring = &adev->gfx.kiq[0].ring; 3333 - 3334 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 3335 - if (unlikely(r != 0)) 3336 - return r; 3337 - 3338 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 3339 - if (unlikely(r != 0)) { 3340 - amdgpu_bo_unreserve(ring->mqd_obj); 3341 - return r; 3342 - } 3343 - 3344 - gfx_v12_0_kiq_init_queue(ring); 3345 - amdgpu_bo_kunmap(ring->mqd_obj); 3346 - ring->mqd_ptr = NULL; 3347 - amdgpu_bo_unreserve(ring->mqd_obj); 3348 - ring->sched.ready = true; 3347 + gfx_v12_0_kiq_init_queue(&adev->gfx.kiq[0].ring); 3348 + adev->gfx.kiq[0].ring.sched.ready = true; 3349 3349 return 0; 3350 3350 } 3351 3351 3352 3352 static int gfx_v12_0_kcq_resume(struct amdgpu_device *adev) 3353 3353 { 3354 - struct amdgpu_ring *ring = NULL; 3355 - int r = 0, i; 3354 + int i, r; 3356 3355 3357 3356 if (!amdgpu_async_gfx_ring) 3358 3357 gfx_v12_0_cp_compute_enable(adev, true); 3359 3358 3360 3359 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 3361 - ring = &adev->gfx.compute_ring[i]; 3362 - 3363 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 3364 - if (unlikely(r != 0)) 3365 - goto done; 3366 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 3367 - if (!r) { 3368 - r = gfx_v12_0_kcq_init_queue(ring, false); 3369 - amdgpu_bo_kunmap(ring->mqd_obj); 3370 - ring->mqd_ptr = NULL; 3371 - } 3372 - amdgpu_bo_unreserve(ring->mqd_obj); 3360 + r = gfx_v12_0_kcq_init_queue(&adev->gfx.compute_ring[i], false); 3373 3361 if (r) 3374 - goto done; 3362 + return r; 3375 3363 } 3376 3364 3377 - r = amdgpu_gfx_enable_kcq(adev, 0); 3378 - done: 3379 - return r; 3365 + return amdgpu_gfx_enable_kcq(adev, 0); 3380 3366 } 3381 3367 3382 3368 static int gfx_v12_0_cp_resume(struct amdgpu_device *adev) ··· 5174 5224 return r; 5175 5225 } 5176 5226 5177 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 5178 - if (unlikely(r != 0)) { 5179 - dev_err(adev->dev, "fail to resv mqd_obj\n"); 5180 
- return r; 5181 - } 5182 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 5183 - if (!r) { 5184 - r = gfx_v12_0_kgq_init_queue(ring, true); 5185 - amdgpu_bo_kunmap(ring->mqd_obj); 5186 - ring->mqd_ptr = NULL; 5187 - } 5188 - amdgpu_bo_unreserve(ring->mqd_obj); 5227 + r = gfx_v12_0_kgq_init_queue(ring, true); 5189 5228 if (r) { 5190 - DRM_ERROR("fail to unresv mqd_obj\n"); 5229 + dev_err(adev->dev, "failed to init kgq\n"); 5191 5230 return r; 5192 5231 } 5193 5232 ··· 5203 5264 return r; 5204 5265 } 5205 5266 5206 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 5207 - if (unlikely(r != 0)) { 5208 - DRM_ERROR("fail to resv mqd_obj\n"); 5209 - return r; 5210 - } 5211 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 5212 - if (!r) { 5213 - r = gfx_v12_0_kcq_init_queue(ring, true); 5214 - amdgpu_bo_kunmap(ring->mqd_obj); 5215 - ring->mqd_ptr = NULL; 5216 - } 5217 - amdgpu_bo_unreserve(ring->mqd_obj); 5267 + r = gfx_v12_0_kcq_init_queue(ring, true); 5218 5268 if (r) { 5219 - DRM_ERROR("fail to unresv mqd_obj\n"); 5269 + dev_err(adev->dev, "failed to init kcq\n"); 5220 5270 return r; 5221 5271 } 5222 5272 r = amdgpu_mes_map_legacy_queue(adev, ring);
+5 -40
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
··· 4683 4683 4684 4684 static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev) 4685 4685 { 4686 - struct amdgpu_ring *ring; 4687 - int r; 4688 - 4689 - ring = &adev->gfx.kiq[0].ring; 4690 - 4691 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 4692 - if (unlikely(r != 0)) 4693 - return r; 4694 - 4695 - r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr); 4696 - if (unlikely(r != 0)) { 4697 - amdgpu_bo_unreserve(ring->mqd_obj); 4698 - return r; 4699 - } 4700 - 4701 - gfx_v8_0_kiq_init_queue(ring); 4702 - amdgpu_bo_kunmap(ring->mqd_obj); 4703 - ring->mqd_ptr = NULL; 4704 - amdgpu_bo_unreserve(ring->mqd_obj); 4686 + gfx_v8_0_kiq_init_queue(&adev->gfx.kiq[0].ring); 4705 4687 return 0; 4706 4688 } 4707 4689 4708 4690 static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev) 4709 4691 { 4710 - struct amdgpu_ring *ring = NULL; 4711 - int r = 0, i; 4692 + int i, r; 4712 4693 4713 4694 gfx_v8_0_cp_compute_enable(adev, true); 4714 4695 4715 4696 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4716 - ring = &adev->gfx.compute_ring[i]; 4717 - 4718 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 4719 - if (unlikely(r != 0)) 4720 - goto done; 4721 - r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr); 4722 - if (!r) { 4723 - r = gfx_v8_0_kcq_init_queue(ring); 4724 - amdgpu_bo_kunmap(ring->mqd_obj); 4725 - ring->mqd_ptr = NULL; 4726 - } 4727 - amdgpu_bo_unreserve(ring->mqd_obj); 4697 + r = gfx_v8_0_kcq_init_queue(&adev->gfx.compute_ring[i]); 4728 4698 if (r) 4729 - goto done; 4699 + return r; 4730 4700 } 4731 4701 4732 4702 gfx_v8_0_set_mec_doorbell_range(adev); 4733 4703 4734 - r = gfx_v8_0_kiq_kcq_enable(adev); 4735 - if (r) 4736 - goto done; 4737 - 4738 - done: 4739 - return r; 4704 + return gfx_v8_0_kiq_kcq_enable(adev); 4740 4705 } 4741 4706 4742 4707 static int gfx_v8_0_cp_test_all_rings(struct amdgpu_device *adev)
+8 -50
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
··· 1269 1269 adev->gfx.mec_fw_write_wait = false; 1270 1270 1271 1271 if ((amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1)) && 1272 + (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2)) && 1272 1273 ((adev->gfx.mec_fw_version < 0x000001a5) || 1273 1274 (adev->gfx.mec_feature_version < 46) || 1274 1275 (adev->gfx.pfp_fw_version < 0x000000b7) || ··· 3891 3890 3892 3891 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev) 3893 3892 { 3894 - struct amdgpu_ring *ring; 3895 - int r; 3896 - 3897 - ring = &adev->gfx.kiq[0].ring; 3898 - 3899 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 3900 - if (unlikely(r != 0)) 3901 - return r; 3902 - 3903 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 3904 - if (unlikely(r != 0)) { 3905 - amdgpu_bo_unreserve(ring->mqd_obj); 3906 - return r; 3907 - } 3908 - 3909 - gfx_v9_0_kiq_init_queue(ring); 3910 - amdgpu_bo_kunmap(ring->mqd_obj); 3911 - ring->mqd_ptr = NULL; 3912 - amdgpu_bo_unreserve(ring->mqd_obj); 3893 + gfx_v9_0_kiq_init_queue(&adev->gfx.kiq[0].ring); 3913 3894 return 0; 3914 3895 } 3915 3896 3916 3897 static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev) 3917 3898 { 3918 - struct amdgpu_ring *ring = NULL; 3919 - int r = 0, i; 3899 + int i, r; 3920 3900 3921 3901 gfx_v9_0_cp_compute_enable(adev, true); 3922 3902 3923 3903 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 3924 - ring = &adev->gfx.compute_ring[i]; 3925 - 3926 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 3927 - if (unlikely(r != 0)) 3928 - goto done; 3929 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 3930 - if (!r) { 3931 - r = gfx_v9_0_kcq_init_queue(ring, false); 3932 - amdgpu_bo_kunmap(ring->mqd_obj); 3933 - ring->mqd_ptr = NULL; 3934 - } 3935 - amdgpu_bo_unreserve(ring->mqd_obj); 3904 + r = gfx_v9_0_kcq_init_queue(&adev->gfx.compute_ring[i], false); 3936 3905 if (r) 3937 - goto done; 3906 + return r; 3938 3907 } 3939 3908 3940 - r = amdgpu_gfx_enable_kcq(adev, 0); 3941 - done: 3942 - return r; 3909 + return amdgpu_gfx_enable_kcq(adev, 0); 3943 3910 } 3944 3911 3945 3912 static int gfx_v9_0_cp_resume(struct amdgpu_device *adev) ··· 7288 7319 return r; 7289 7320 } 7290 7321 7291 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 7292 - if (unlikely(r != 0)){ 7293 - dev_err(adev->dev, "fail to resv mqd_obj\n"); 7294 - return r; 7295 - } 7296 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 7297 - if (!r) { 7298 - r = gfx_v9_0_kcq_init_queue(ring, true); 7299 - amdgpu_bo_kunmap(ring->mqd_obj); 7300 - ring->mqd_ptr = NULL; 7301 - } 7302 - amdgpu_bo_unreserve(ring->mqd_obj); 7322 + r = gfx_v9_0_kcq_init_queue(ring, true); 7303 7323 if (r) { 7304 - dev_err(adev->dev, "fail to unresv mqd_obj\n"); 7324 + dev_err(adev->dev, "fail to init kcq\n"); 7305 7325 return r; 7306 7326 } 7307 7327 spin_lock_irqsave(&kiq->ring_lock, flags);
+13 -53
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
··· 867 867 868 868 switch (type) { 869 869 case ACA_SMU_TYPE_UE: 870 - bank->aca_err_type = ACA_BANK_ERR_UE_DE_DECODE(bank); 870 + bank->aca_err_type = ACA_ERROR_TYPE_UE; 871 871 ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, 1ULL); 872 872 break; 873 873 case ACA_SMU_TYPE_CE: 874 - bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank); 875 - ret = aca_error_cache_log_bank_error(handle, &info, 876 - bank->aca_err_type, 874 + bank->aca_err_type = ACA_ERROR_TYPE_CE; 875 + ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, 877 876 ACA_REG__MISC0__ERRCNT(misc0)); 878 877 break; 879 878 default: ··· 2167 2168 2168 2169 static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id) 2169 2170 { 2170 - struct amdgpu_ring *ring; 2171 - int r; 2172 - 2173 - ring = &adev->gfx.kiq[xcc_id].ring; 2174 - 2175 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 2176 - if (unlikely(r != 0)) 2177 - return r; 2178 - 2179 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 2180 - if (unlikely(r != 0)) { 2181 - amdgpu_bo_unreserve(ring->mqd_obj); 2182 - return r; 2183 - } 2184 - 2185 - gfx_v9_4_3_xcc_kiq_init_queue(ring, xcc_id); 2186 - amdgpu_bo_kunmap(ring->mqd_obj); 2187 - ring->mqd_ptr = NULL; 2188 - amdgpu_bo_unreserve(ring->mqd_obj); 2171 + gfx_v9_4_3_xcc_kiq_init_queue(&adev->gfx.kiq[xcc_id].ring, xcc_id); 2189 2172 return 0; 2190 2173 } 2191 2174 2192 2175 static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id) 2193 2176 { 2194 - struct amdgpu_ring *ring = NULL; 2195 - int r = 0, i; 2177 + struct amdgpu_ring *ring; 2178 + int i, r; 2196 2179 2197 2180 gfx_v9_4_3_xcc_cp_compute_enable(adev, true, xcc_id); 2198 2181 2199 2182 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 2200 - ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings]; 2183 + ring = &adev->gfx.compute_ring[i + xcc_id * 2184 + adev->gfx.num_compute_rings]; 2201 2185 2202 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 2203 - if (unlikely(r != 0)) 2204 - goto done; 2205 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 2206 - if (!r) { 2207 - r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false); 2208 - amdgpu_bo_kunmap(ring->mqd_obj); 2209 - ring->mqd_ptr = NULL; 2210 - } 2211 - amdgpu_bo_unreserve(ring->mqd_obj); 2186 + r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false); 2212 2187 if (r) 2213 - goto done; 2188 + return r; 2214 2189 } 2215 2190 2216 - r = amdgpu_gfx_enable_kcq(adev, xcc_id); 2217 - done: 2218 - return r; 2191 + return amdgpu_gfx_enable_kcq(adev, xcc_id); 2219 2192 } 2220 2193 2221 2194 static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id) ··· 3559 3588 return r; 3560 3589 } 3561 3590 3562 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 3563 - if (unlikely(r != 0)){ 3564 - dev_err(adev->dev, "fail to resv mqd_obj\n"); 3565 - return r; 3566 - } 3567 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 3568 - if (!r) { 3569 - r = gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true); 3570 - amdgpu_bo_kunmap(ring->mqd_obj); 3571 - ring->mqd_ptr = NULL; 3572 - } 3573 - amdgpu_bo_unreserve(ring->mqd_obj); 3591 + r = gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true); 3574 3592 if (r) { 3575 - dev_err(adev->dev, "fail to unresv mqd_obj\n"); 3593 + dev_err(adev->dev, "fail to init kcq\n"); 3576 3594 return r; 3577 3595 } 3578 3596 spin_lock_irqsave(&kiq->ring_lock, flags);
+1 -1
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
··· 1328 1328 1ULL); 1329 1329 break; 1330 1330 case ACA_SMU_TYPE_CE: 1331 - bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank); 1331 + bank->aca_err_type = ACA_ERROR_TYPE_CE; 1332 1332 ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, 1333 1333 ACA_REG__MISC0__ERRCNT(misc0)); 1334 1334 break;
+1 -1
drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
··· 751 751 1ULL); 752 752 break; 753 753 case ACA_SMU_TYPE_CE: 754 - bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank); 754 + bank->aca_err_type = ACA_ERROR_TYPE_CE; 755 755 ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, 756 756 ACA_REG__MISC0__ERRCNT(misc0)); 757 757 break;
+15 -64
drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
··· 31 31 #include "amdgpu_ucode.h" 32 32 #include "amdgpu_trace.h" 33 33 #include "amdgpu_reset.h" 34 - #include "gc/gc_9_0_sh_mask.h" 35 34 36 35 #include "sdma/sdma_4_4_2_offset.h" 37 36 #include "sdma/sdma_4_4_2_sh_mask.h" ··· 1290 1291 seq, 0xffffffff, 4); 1291 1292 } 1292 1293 1293 - /* 1294 - * sdma_v4_4_2_get_invalidate_req - Construct the VM_INVALIDATE_ENG0_REQ register value 1295 - * @vmid: The VMID to invalidate 1296 - * @flush_type: The type of flush (0 = legacy, 1 = lightweight, 2 = heavyweight) 1294 + 1295 + /** 1296 + * sdma_v4_4_2_ring_emit_vm_flush - vm flush using sDMA 1297 1297 * 1298 - * This function constructs the VM_INVALIDATE_ENG0_REQ register value for the specified VMID 1299 - * and flush type. It ensures that all relevant page table cache levels (L1 PTEs, L2 PTEs, and 1300 - * L2 PDEs) are invalidated. 1301 - */ 1302 - static uint32_t sdma_v4_4_2_get_invalidate_req(unsigned int vmid, 1303 - uint32_t flush_type) 1304 - { 1305 - u32 req = 0; 1306 - 1307 - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, 1308 - PER_VMID_INVALIDATE_REQ, 1 << vmid); 1309 - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type); 1310 - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1); 1311 - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1); 1312 - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1); 1313 - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1); 1314 - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1); 1315 - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, 1316 - CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0); 1317 - 1318 - return req; 1319 - } 1320 - 1321 - /* 1322 - * sdma_v4_4_2_ring_emit_vm_flush - Emit VM flush commands for SDMA 1323 - * @ring: The SDMA ring 1324 - * @vmid: The VMID to flush 1325 - * @pd_addr: The page directory address 1298 + * @ring: amdgpu_ring pointer 1299 + * @vmid: vmid number to use 1300 + * @pd_addr: address 1326 1301 * 1327 - * This function emits the necessary register writes and waits to perform a VM flush for the 1328 - * specified VMID. It updates the PTB address registers and issues a VM invalidation request 1329 - * using the specified VM invalidation engine. 1302 + * Update the page table base and flush the VM TLB 1303 + * using sDMA. 
1330 1304 */ 1331 1305 static void sdma_v4_4_2_ring_emit_vm_flush(struct amdgpu_ring *ring, 1332 - unsigned int vmid, uint64_t pd_addr) 1306 + unsigned vmid, uint64_t pd_addr) 1333 1307 { 1334 - struct amdgpu_device *adev = ring->adev; 1335 - uint32_t req = sdma_v4_4_2_get_invalidate_req(vmid, 0); 1336 - unsigned int eng = ring->vm_inv_eng; 1337 - struct amdgpu_vmhub *hub = &adev->vmhub[ring->vm_hub]; 1338 - 1339 - amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + 1340 - (hub->ctx_addr_distance * vmid), 1341 - lower_32_bits(pd_addr)); 1342 - 1343 - amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + 1344 - (hub->ctx_addr_distance * vmid), 1345 - upper_32_bits(pd_addr)); 1346 - /* 1347 - * Construct and emit the VM invalidation packet 1348 - */ 1349 - amdgpu_ring_write(ring, 1350 - SDMA_PKT_VM_INVALIDATION_HEADER_OP(SDMA_OP_VM_INVALIDATE) | 1351 - SDMA_PKT_VM_INVALIDATION_HEADER_SUB_OP(SDMA_SUBOP_VM_INVALIDATE) | 1352 - SDMA_PKT_VM_INVALIDATION_HEADER_XCC0_ENG_ID(0x1f) | 1353 - SDMA_PKT_VM_INVALIDATION_HEADER_XCC1_ENG_ID(0x1f) | 1354 - SDMA_PKT_VM_INVALIDATION_HEADER_MMHUB_ENG_ID(eng)); 1355 - amdgpu_ring_write(ring, SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_INVALIDATEREQ(req)); 1356 - amdgpu_ring_write(ring, 0); 1357 - amdgpu_ring_write(ring, SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_INVALIDATEACK(BIT(vmid))); 1308 + amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); 1358 1309 } 1359 1310 1360 1311 static void sdma_v4_4_2_ring_emit_wreg(struct amdgpu_ring *ring, ··· 2126 2177 3 + /* hdp invalidate */ 2127 2178 6 + /* sdma_v4_4_2_ring_emit_pipeline_sync */ 2128 2179 /* sdma_v4_4_2_ring_emit_vm_flush */ 2129 - 4 + 2 * 3 + 2180 + SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + 2181 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 + 2130 2182 10 + 10 + 10, /* sdma_v4_4_2_ring_emit_fence x3 for user fence, vm fence */ 2131 2183 .emit_ib_size = 7 + 6, /* sdma_v4_4_2_ring_emit_ib */ 2132 2184 .emit_ib = sdma_v4_4_2_ring_emit_ib, ··· 2159 2209 3 + /* hdp invalidate */ 2160 2210 6 + /* sdma_v4_4_2_ring_emit_pipeline_sync */ 2161 2211 /* sdma_v4_4_2_ring_emit_vm_flush */ 2162 - 4 + 2 * 3 + 2212 + SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + 2213 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 + 2163 2214 10 + 10 + 10, /* sdma_v4_4_2_ring_emit_fence x3 for user fence, vm fence */ 2164 2215 .emit_ib_size = 7 + 6, /* sdma_v4_4_2_ring_emit_ib */ 2165 2216 .emit_ib = sdma_v4_4_2_ring_emit_ib, ··· 2546 2595 1ULL); 2547 2596 break; 2548 2597 case ACA_SMU_TYPE_CE: 2549 - bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank); 2598 + bank->aca_err_type = ACA_ERROR_TYPE_CE; 2550 2599 ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, 2551 2600 ACA_REG__MISC0__ERRCNT(misc0)); 2552 2601 break;
+3
drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
··· 92 92 TA_RAS_BLOCK__MCA, 93 93 TA_RAS_BLOCK__VCN, 94 94 TA_RAS_BLOCK__JPEG, 95 + TA_RAS_BLOCK__IH, 96 + TA_RAS_BLOCK__MPIO, 97 + TA_RAS_BLOCK__MMSCH, 95 98 TA_NUM_BLOCK_MAX 96 99 }; 97 100
+2 -1
drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
··· 85 85 86 86 return (amdgpu_ras_is_poison_mode_supported(adev) && 87 87 (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && 88 - (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1)); 88 + ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1) || 89 + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Poison) == 1))); 89 90 } 90 91 91 92 bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
+1 -1
drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
··· 1965 1965 1ULL); 1966 1966 break; 1967 1967 case ACA_SMU_TYPE_CE: 1968 - bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank); 1968 + bank->aca_err_type = ACA_ERROR_TYPE_CE; 1969 1969 ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, 1970 1970 ACA_REG__MISC0__ERRCNT(misc0)); 1971 1971 break;
-70
drivers/gpu/drm/amd/amdgpu/vega10_sdma_pkt_open.h
··· 64 64 #define HEADER_BARRIER 5 65 65 #define SDMA_OP_AQL_COPY 0 66 66 #define SDMA_OP_AQL_BARRIER_OR 0 67 - /* vm invalidation is only available for GC9.4.3/GC9.4.4/GC9.5.0 */ 68 - #define SDMA_OP_VM_INVALIDATE 8 69 - #define SDMA_SUBOP_VM_INVALIDATE 4 70 67 71 68 /*define for op field*/ 72 69 #define SDMA_PKT_HEADER_op_offset 0 ··· 3331 3334 #define SDMA_AQL_PKT_BARRIER_OR_COMPLETION_SIGNAL_HI_completion_signal_63_32_shift 0 3332 3335 #define SDMA_AQL_PKT_BARRIER_OR_COMPLETION_SIGNAL_HI_COMPLETION_SIGNAL_63_32(x) (((x) & SDMA_AQL_PKT_BARRIER_OR_COMPLETION_SIGNAL_HI_completion_signal_63_32_mask) << SDMA_AQL_PKT_BARRIER_OR_COMPLETION_SIGNAL_HI_completion_signal_63_32_shift) 3333 3336 3334 - /* 3335 - ** Definitions for SDMA_PKT_VM_INVALIDATION packet 3336 - */ 3337 - 3338 - /*define for HEADER word*/ 3339 - /*define for op field*/ 3340 - #define SDMA_PKT_VM_INVALIDATION_HEADER_op_offset 0 3341 - #define SDMA_PKT_VM_INVALIDATION_HEADER_op_mask 0x000000FF 3342 - #define SDMA_PKT_VM_INVALIDATION_HEADER_op_shift 0 3343 - #define SDMA_PKT_VM_INVALIDATION_HEADER_OP(x) ((x & SDMA_PKT_VM_INVALIDATION_HEADER_op_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_op_shift) 3344 - 3345 - /*define for sub_op field*/ 3346 - #define SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_offset 0 3347 - #define SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_mask 0x000000FF 3348 - #define SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_shift 8 3349 - #define SDMA_PKT_VM_INVALIDATION_HEADER_SUB_OP(x) ((x & SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_shift) 3350 - 3351 - /*define for xcc0_eng_id field*/ 3352 - #define SDMA_PKT_VM_INVALIDATION_HEADER_xcc0_eng_id_offset 0 3353 - #define SDMA_PKT_VM_INVALIDATION_HEADER_xcc0_eng_id_mask 0x0000001F 3354 - #define SDMA_PKT_VM_INVALIDATION_HEADER_xcc0_eng_id_shift 16 3355 - #define SDMA_PKT_VM_INVALIDATION_HEADER_XCC0_ENG_ID(x) ((x & SDMA_PKT_VM_INVALIDATION_HEADER_xcc0_eng_id_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_xcc0_eng_id_shift) 3356 - 3357 - /*define for xcc1_eng_id field*/ 3358 - #define SDMA_PKT_VM_INVALIDATION_HEADER_xcc1_eng_id_offset 0 3359 - #define SDMA_PKT_VM_INVALIDATION_HEADER_xcc1_eng_id_mask 0x0000001F 3360 - #define SDMA_PKT_VM_INVALIDATION_HEADER_xcc1_eng_id_shift 21 3361 - #define SDMA_PKT_VM_INVALIDATION_HEADER_XCC1_ENG_ID(x) ((x & SDMA_PKT_VM_INVALIDATION_HEADER_xcc1_eng_id_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_xcc1_eng_id_shift) 3362 - 3363 - /*define for mmhub_eng_id field*/ 3364 - #define SDMA_PKT_VM_INVALIDATION_HEADER_mmhub_eng_id_offset 0 3365 - #define SDMA_PKT_VM_INVALIDATION_HEADER_mmhub_eng_id_mask 0x0000001F 3366 - #define SDMA_PKT_VM_INVALIDATION_HEADER_mmhub_eng_id_shift 26 3367 - #define SDMA_PKT_VM_INVALIDATION_HEADER_MMHUB_ENG_ID(x) ((x & SDMA_PKT_VM_INVALIDATION_HEADER_mmhub_eng_id_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_mmhub_eng_id_shift) 3368 - 3369 - /*define for INVALIDATEREQ word*/ 3370 - /*define for invalidatereq field*/ 3371 - #define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_offset 1 3372 - #define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_mask 0xFFFFFFFF 3373 - #define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_shift 0 3374 - #define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_INVALIDATEREQ(x) ((x & SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_mask) << SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_shift) 3375 - 3376 - /*define for ADDRESSRANGELO word*/ 3377 - /*define for addressrangelo field*/ 3378 - #define 
SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_offset 2 3379 - #define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_mask 0xFFFFFFFF 3380 - #define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_shift 0 3381 - #define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_ADDRESSRANGELO(x) ((x & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_shift) 3382 - 3383 - /*define for ADDRESSRANGEHI word*/ 3384 - /*define for invalidateack field*/ 3385 - #define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_offset 3 3386 - #define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_mask 0x0000FFFF 3387 - #define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_shift 0 3388 - #define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_INVALIDATEACK(x) ((x & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_shift) 3389 - 3390 - /*define for addressrangehi field*/ 3391 - #define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_offset 3 3392 - #define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_mask 0x0000001F 3393 - #define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_shift 16 3394 - #define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_ADDRESSRANGEHI(x) ((x & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_shift) 3395 - 3396 - /*define for reserved field*/ 3397 - #define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_offset 3 3398 - #define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_mask 0x000001FF 3399 - #define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_shift 23 3400 - #define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_RESERVED(x) ((x & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_shift) 3401 3337 3402 3338 #endif /* __SDMA_PKT_OPEN_H_ */
+34 -7
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 8707 8707 int offdelay; 8708 8708 8709 8709 if (acrtc_state) { 8710 - if (amdgpu_ip_version(adev, DCE_HWIP, 0) < 8711 - IP_VERSION(3, 5, 0) || 8712 - acrtc_state->stream->link->psr_settings.psr_version < 8713 - DC_PSR_VERSION_UNSUPPORTED || 8714 - !(adev->flags & AMD_IS_APU)) { 8715 - timing = &acrtc_state->stream->timing; 8710 + timing = &acrtc_state->stream->timing; 8716 8711 8717 - /* at least 2 frames */ 8712 + /* 8713 + * Depending on when the HW latching event of double-buffered 8714 + * registers happen relative to the PSR SDP deadline, and how 8715 + * bad the Panel clock has drifted since the last ALPM off 8716 + * event, there can be up to 3 frames of delay between sending 8717 + * the PSR exit cmd to DMUB fw, and when the panel starts 8718 + * displaying live frames. 8719 + * 8720 + * We can set: 8721 + * 8722 + * 20/100 * offdelay_ms = 3_frames_ms 8723 + * => offdelay_ms = 5 * 3_frames_ms 8724 + * 8725 + * This ensures that `3_frames_ms` will only be experienced as a 8726 + * 20% delay on top how long the display has been static, and 8727 + * thus make the delay less perceivable. 8728 + */ 8729 + if (acrtc_state->stream->link->psr_settings.psr_version < 8730 + DC_PSR_VERSION_UNSUPPORTED) { 8731 + offdelay = DIV64_U64_ROUND_UP((u64)5 * 3 * 10 * 8732 + timing->v_total * 8733 + timing->h_total, 8734 + timing->pix_clk_100hz); 8735 + config.offdelay_ms = offdelay ?: 30; 8736 + } else if (amdgpu_ip_version(adev, DCE_HWIP, 0) < 8737 + IP_VERSION(3, 5, 0) || 8738 + !(adev->flags & AMD_IS_APU)) { 8739 + /* 8740 + * Older HW and DGPU have issues with instant off; 8741 + * use a 2 frame offdelay. 8742 + */ 8718 8743 offdelay = DIV64_U64_ROUND_UP((u64)20 * 8719 8744 timing->v_total * 8720 8745 timing->h_total, ··· 8747 8722 8748 8723 config.offdelay_ms = offdelay ?: 30; 8749 8724 } else { 8725 + /* offdelay_ms = 0 will never disable vblank */ 8726 + config.offdelay_ms = 1; 8750 8727 config.disable_immediate = true; 8751 8728 } 8752 8729
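With the change above, PSR-capable panels get a vblank off-delay sized so that the up-to-three-frame PSR exit latency described in the comment amounts to at most roughly 20% on top of how long the display has already been static: offdelay_ms = ceil(5 * 3 * 10 * v_total * h_total / pix_clk_100hz). As a worked example on a hypothetical 1920x1080@60 CEA timing (v_total 1125, h_total 2200, 148.5 MHz pixel clock, so pix_clk_100hz = 1485000) that comes to 250 ms, roughly 15 frame times; the snippet below just mirrors the arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Mirrors DIV64_U64_ROUND_UP(5 * 3 * 10 * v_total * h_total, pix_clk_100hz)
 * from the hunk above; the timing values below are hypothetical. */
static uint64_t psr_offdelay_ms(uint64_t v_total, uint64_t h_total,
                                uint64_t pix_clk_100hz)
{
        uint64_t num = 5ULL * 3 * 10 * v_total * h_total;

        return (num + pix_clk_100hz - 1) / pix_clk_100hz;       /* round up */
}

int main(void)
{
        /* 1920x1080@60: v_total 1125, h_total 2200, 148.5 MHz pixel clock */
        printf("offdelay_ms = %llu\n",
               (unsigned long long)psr_offdelay_ms(1125, 2200, 1485000));
        return 0;
}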
+1 -1
drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
··· 590 590 p->out_states->state_array[i].dtbclk_mhz = max_dtbclk_mhz; 591 591 p->out_states->state_array[i].phyclk_mhz = max_phyclk_mhz; 592 592 593 + p->out_states->state_array[i].dscclk_mhz = max_dispclk_mhz / 3.0; 593 594 p->out_states->state_array[i].phyclk_mhz = max_phyclk_mhz; 594 595 p->out_states->state_array[i].dtbclk_mhz = max_dtbclk_mhz; 595 596 596 597 /* Dependent states. */ 597 - p->out_states->state_array[i].dscclk_mhz = p->in_states->state_array[i].dscclk_mhz; 598 598 p->out_states->state_array[i].dram_speed_mts = p->in_states->state_array[i].dram_speed_mts; 599 599 p->out_states->state_array[i].fabricclk_mhz = p->in_states->state_array[i].fabricclk_mhz; 600 600 p->out_states->state_array[i].socclk_mhz = p->in_states->state_array[i].socclk_mhz;
+5 -1
drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
··· 3033 3033 dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst); 3034 3034 3035 3035 phyd32clk = get_phyd32clk_src(link); 3036 - dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk); 3036 + if (link->cur_link_settings.link_rate == LINK_RATE_UNKNOWN) { 3037 + dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst); 3038 + } else { 3039 + dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk); 3040 + } 3037 3041 } else { 3038 3042 if (dccg->funcs->enable_symclk_se) 3039 3043 dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst,
+5 -2
drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
··· 936 936 if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) { 937 937 if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { 938 938 dccg->funcs->set_dpstreamclk(dccg, DPREFCLK, tg->inst, dp_hpo_inst); 939 - 940 - dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk); 939 + if (link->cur_link_settings.link_rate == LINK_RATE_UNKNOWN) { 940 + dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst); 941 + } else { 942 + dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk); 943 + } 941 944 } else { 942 945 dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst, 943 946 link_enc->transmitter - TRANSMITTER_UNIPHY_A);
+114
drivers/gpu/drm/amd/include/kgd_pp_interface.h
··· 341 341 #define MAX_CLKS 4 342 342 #define NUM_VCN 4 343 343 #define NUM_JPEG_ENG 32 344 + #define NUM_JPEG_ENG_V1 40 344 345 #define MAX_XCC 8 345 346 #define NUM_XCP 8 346 347 struct seq_file; ··· 375 374 uint64_t gfx_busy_acc[MAX_XCC]; 376 375 /* Total App Clock Counter Accumulated */ 377 376 uint64_t gfx_below_host_limit_acc[MAX_XCC]; 377 + }; 378 + 379 + struct amdgpu_xcp_metrics_v1_2 { 380 + /* Utilization Instantaneous (%) */ 381 + uint32_t gfx_busy_inst[MAX_XCC]; 382 + uint16_t jpeg_busy[NUM_JPEG_ENG_V1]; 383 + uint16_t vcn_busy[NUM_VCN]; 384 + /* Utilization Accumulated (%) */ 385 + uint64_t gfx_busy_acc[MAX_XCC]; 386 + /* Total App Clock Counter Accumulated */ 387 + uint64_t gfx_below_host_limit_ppt_acc[MAX_XCC]; 388 + uint64_t gfx_below_host_limit_thm_acc[MAX_XCC]; 389 + uint64_t gfx_low_utilization_acc[MAX_XCC]; 390 + uint64_t gfx_below_host_limit_total_acc[MAX_XCC]; 378 391 }; 379 392 380 393 struct amd_pm_funcs { ··· 1100 1085 1101 1086 /* XCP metrics stats */ 1102 1087 struct amdgpu_xcp_metrics_v1_1 xcp_stats[NUM_XCP]; 1088 + 1089 + /* PCIE other end recovery counter */ 1090 + uint32_t pcie_lc_perf_other_end_recovery; 1091 + }; 1092 + 1093 + struct gpu_metrics_v1_8 { 1094 + struct metrics_table_header common_header; 1095 + 1096 + /* Temperature (Celsius) */ 1097 + uint16_t temperature_hotspot; 1098 + uint16_t temperature_mem; 1099 + uint16_t temperature_vrsoc; 1100 + 1101 + /* Power (Watts) */ 1102 + uint16_t curr_socket_power; 1103 + 1104 + /* Utilization (%) */ 1105 + uint16_t average_gfx_activity; 1106 + uint16_t average_umc_activity; // memory controller 1107 + 1108 + /* VRAM max bandwidthi (in GB/sec) at max memory clock */ 1109 + uint64_t mem_max_bandwidth; 1110 + 1111 + /* Energy (15.259uJ (2^-16) units) */ 1112 + uint64_t energy_accumulator; 1113 + 1114 + /* Driver attached timestamp (in ns) */ 1115 + uint64_t system_clock_counter; 1116 + 1117 + /* Accumulation cycle counter */ 1118 + uint32_t accumulation_counter; 1119 + 1120 + /* Accumulated throttler residencies */ 1121 + uint32_t prochot_residency_acc; 1122 + uint32_t ppt_residency_acc; 1123 + uint32_t socket_thm_residency_acc; 1124 + uint32_t vr_thm_residency_acc; 1125 + uint32_t hbm_thm_residency_acc; 1126 + 1127 + /* Clock Lock Status. 
Each bit corresponds to clock instance */ 1128 + uint32_t gfxclk_lock_status; 1129 + 1130 + /* Link width (number of lanes) and speed (in 0.1 GT/s) */ 1131 + uint16_t pcie_link_width; 1132 + uint16_t pcie_link_speed; 1133 + 1134 + /* XGMI bus width and bitrate (in Gbps) */ 1135 + uint16_t xgmi_link_width; 1136 + uint16_t xgmi_link_speed; 1137 + 1138 + /* Utilization Accumulated (%) */ 1139 + uint32_t gfx_activity_acc; 1140 + uint32_t mem_activity_acc; 1141 + 1142 + /*PCIE accumulated bandwidth (GB/sec) */ 1143 + uint64_t pcie_bandwidth_acc; 1144 + 1145 + /*PCIE instantaneous bandwidth (GB/sec) */ 1146 + uint64_t pcie_bandwidth_inst; 1147 + 1148 + /* PCIE L0 to recovery state transition accumulated count */ 1149 + uint64_t pcie_l0_to_recov_count_acc; 1150 + 1151 + /* PCIE replay accumulated count */ 1152 + uint64_t pcie_replay_count_acc; 1153 + 1154 + /* PCIE replay rollover accumulated count */ 1155 + uint64_t pcie_replay_rover_count_acc; 1156 + 1157 + /* PCIE NAK sent accumulated count */ 1158 + uint32_t pcie_nak_sent_count_acc; 1159 + 1160 + /* PCIE NAK received accumulated count */ 1161 + uint32_t pcie_nak_rcvd_count_acc; 1162 + 1163 + /* XGMI accumulated data transfer size(KiloBytes) */ 1164 + uint64_t xgmi_read_data_acc[NUM_XGMI_LINKS]; 1165 + uint64_t xgmi_write_data_acc[NUM_XGMI_LINKS]; 1166 + 1167 + /* XGMI link status(active/inactive) */ 1168 + uint16_t xgmi_link_status[NUM_XGMI_LINKS]; 1169 + 1170 + uint16_t padding; 1171 + 1172 + /* PMFW attached timestamp (10ns resolution) */ 1173 + uint64_t firmware_timestamp; 1174 + 1175 + /* Current clocks (Mhz) */ 1176 + uint16_t current_gfxclk[MAX_GFX_CLKS]; 1177 + uint16_t current_socclk[MAX_CLKS]; 1178 + uint16_t current_vclk0[MAX_CLKS]; 1179 + uint16_t current_dclk0[MAX_CLKS]; 1180 + uint16_t current_uclk; 1181 + 1182 + /* Number of current partition */ 1183 + uint16_t num_partition; 1184 + 1185 + /* XCP metrics stats */ 1186 + struct amdgpu_xcp_metrics_v1_2 xcp_stats[NUM_XCP]; 1103 1187 1104 1188 /* PCIE other end recovery counter */ 1105 1189 uint32_t pcie_lc_perf_other_end_recovery;
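The new gpu_metrics_v1_8 (with the amdgpu_xcp_metrics_v1_2 per-XCP stats) is exposed the same way as earlier revisions: a packed blob led by the common metrics_table_header, so consumers should check the revision fields before interpreting the rest. A rough userspace sketch, assuming the usual amdgpu sysfs location and that v1_8 reports format revision 1 / content revision 8:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Common header from kgd_pp_interface.h. */
struct metrics_table_header {
        uint16_t structure_size;
        uint8_t format_revision;
        uint8_t content_revision;
};

int main(void)
{
        /* Assumed path; substitute the right cardN for your device. */
        FILE *f = fopen("/sys/class/drm/card0/device/gpu_metrics", "rb");
        uint8_t buf[4096];
        struct metrics_table_header hdr;
        size_t n;

        if (!f)
                return 1;
        n = fread(buf, 1, sizeof(buf), f);
        fclose(f);

        if (n < sizeof(hdr))
                return 1;
        memcpy(&hdr, buf, sizeof(hdr));
        /* For gpu_metrics_v1_8 this should report format 1, content 8. */
        printf("size=%u format=%u content=%u\n",
               (unsigned)hdr.structure_size, (unsigned)hdr.format_revision,
               (unsigned)hdr.content_revision);
        return 0;
}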
+2 -2
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
··· 267 267 if (hwmgr->thermal_controller.fanInfo.bNoFan || 268 268 (hwmgr->thermal_controller.fanInfo. 269 269 ucTachometerPulsesPerRevolution == 0) || 270 - speed == 0 || 270 + (!speed || speed > UINT_MAX/8) || 271 271 (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) || 272 272 (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) 273 - return 0; 273 + return -EINVAL; 274 274 275 275 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) 276 276 smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
+2 -2
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
··· 307 307 int result = 0; 308 308 309 309 if (hwmgr->thermal_controller.fanInfo.bNoFan || 310 - speed == 0 || 310 + (!speed || speed > UINT_MAX/8) || 311 311 (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) || 312 312 (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) 313 - return -1; 313 + return -EINVAL; 314 314 315 315 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) 316 316 result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
+1 -1
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
··· 191 191 uint32_t tach_period, crystal_clock_freq; 192 192 int result = 0; 193 193 194 - if (!speed) 194 + if (!speed || speed > UINT_MAX/8) 195 195 return -EINVAL; 196 196 197 197 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) {
+5 -2
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
··· 127 127 VOLTAGE_GUARDBAND_COUNT 128 128 } GFX_GUARDBAND_e; 129 129 130 - #define SMU_METRICS_TABLE_VERSION 0xF 130 + #define SMU_METRICS_TABLE_VERSION 0x10 131 131 132 132 // Unified metrics table for smu_v13_0_6 133 133 typedef struct __attribute__((packed, aligned(4))) { ··· 241 241 uint32_t PCIeOtherEndRecoveryAcc; // The Pcie counter itself is accumulated 242 242 243 243 //Total App Clock Counter 244 - uint64_t GfxclkBelowHostLimitAcc[8]; 244 + uint64_t GfxclkBelowHostLimitPptAcc[8]; 245 + uint64_t GfxclkBelowHostLimitThmAcc[8]; 246 + uint64_t GfxclkBelowHostLimitTotalAcc[8]; 247 + uint64_t GfxclkLowUtilizationAcc[8]; 245 248 } MetricsTableV0_t; 246 249 247 250 // Metrics table for smu_v13_0_6 APUS
+3
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
··· 1267 1267 uint32_t crystal_clock_freq = 2500; 1268 1268 uint32_t tach_period; 1269 1269 1270 + if (!speed || speed > UINT_MAX/8) 1271 + return -EINVAL; 1272 + 1270 1273 tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); 1271 1274 WREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT, 1272 1275 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT),
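The arcturus hunk above, like the powerplay hwmgr changes earlier and the smu13 one below, now also rejects speed > UINT_MAX/8. Besides the zero-RPM divide-by-zero, the concern is that the 8 * speed term in the tach-period math is 32-bit, so an absurdly large RPM request can wrap, in the worst case back to zero. A tiny standalone illustration (the request value is hypothetical):

#include <limits.h>
#include <stdio.h>

int main(void)
{
        unsigned int speed = 0x20000000u;       /* hypothetical huge RPM request */
        unsigned int denom = 8 * speed;         /* wraps to 0 in 32-bit math */

        printf("8 * %u wraps to %u; requests above %u are now rejected\n",
               speed, denom, UINT_MAX / 8);
        return 0;
}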
+1 -1
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
··· 1226 1226 uint32_t tach_period; 1227 1227 int ret; 1228 1228 1229 - if (!speed) 1229 + if (!speed || speed > UINT_MAX/8) 1230 1230 return -EINVAL; 1231 1231 1232 1232 ret = smu_v13_0_auto_fan_control(smu, 0);
-15
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
··· 109 109 SMU_CAP(OTHER_END_METRICS), 110 110 SMU_CAP(SET_UCLK_MAX), 111 111 SMU_CAP(PCIE_METRICS), 112 - SMU_CAP(HST_LIMIT_METRICS), 113 112 SMU_CAP(MCA_DEBUG_MODE), 114 113 SMU_CAP(PER_INST_METRICS), 115 114 SMU_CAP(CTF_LIMIT), ··· 324 325 325 326 if (fw_ver >= 0x05550E00) 326 327 smu_v13_0_6_cap_set(smu, SMU_CAP(OTHER_END_METRICS)); 327 - if (fw_ver >= 0x05551000) 328 - smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS)); 329 328 if (fw_ver >= 0x05550B00) 330 329 smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS)); 331 330 if (fw_ver >= 0x5551200) ··· 339 342 SMU_CAP(RMA_MSG), 340 343 SMU_CAP(ACA_SYND), 341 344 SMU_CAP(OTHER_END_METRICS), 342 - SMU_CAP(HST_LIMIT_METRICS), 343 345 SMU_CAP(PER_INST_METRICS) }; 344 346 uint32_t fw_ver = smu->smc_fw_version; 345 347 ··· 383 387 smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG)); 384 388 smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND)); 385 389 386 - if (fw_ver >= 0x04556F00) 387 - smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS)); 388 390 if (fw_ver >= 0x04556A00) 389 391 smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS)); 390 392 } else { ··· 402 408 smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG)); 403 409 if (fw_ver < 0x00555600) 404 410 smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND)); 405 - if (pgm == 0 && fw_ver >= 0x557900) 406 - smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS)); 407 411 } 408 412 if (((pgm == 7) && (fw_ver >= 0x7550700)) || 409 413 ((pgm == 0) && (fw_ver >= 0x00557900)) || ··· 2666 2674 gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] = 2667 2675 SMUQ10_ROUND(GET_GPU_METRIC_FIELD(GfxBusyAcc, 2668 2676 version)[inst]); 2669 - 2670 - if (smu_v13_0_6_cap_supported( 2671 - smu, SMU_CAP(HST_LIMIT_METRICS))) 2672 - gpu_metrics->xcp_stats[i].gfx_below_host_limit_acc[idx] = 2673 - SMUQ10_ROUND(GET_GPU_METRIC_FIELD 2674 - (GfxclkBelowHostLimitAcc, version) 2675 - [inst]); 2676 2677 idx++; 2677 2678 } 2678 2679 }
+54 -1
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
··· 79 79 #define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8
80 80 #define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9
81 81 #define PP_OD_FEATURE_FAN_MINIMUM_PWM 10
82 + #define PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE 11
82 83 
83 84 static struct cmn2asic_msg_mapping smu_v14_0_2_message_map[SMU_MSG_MAX_COUNT] = {
84 85 	MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
··· 1053 1052 		od_min_setting = overdrive_lowerlimits->FanMinimumPwm;
1054 1053 		od_max_setting = overdrive_upperlimits->FanMinimumPwm;
1055 1054 		break;
1055 + 	case PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE:
1056 + 		od_min_setting = overdrive_lowerlimits->FanZeroRpmEnable;
1057 + 		od_max_setting = overdrive_upperlimits->FanZeroRpmEnable;
1058 + 		break;
1056 1059 	default:
1057 1060 		od_min_setting = od_max_setting = INT_MAX;
1058 1061 		break;
··· 1332 1327 						  &min_value,
1333 1328 						  &max_value);
1334 1329 		size += sysfs_emit_at(buf, size, "MINIMUM_PWM: %u %u\n",
1330 + 				      min_value, max_value);
1331 + 		break;
1332 + 
1333 + 	case SMU_OD_FAN_ZERO_RPM_ENABLE:
1334 + 		if (!smu_v14_0_2_is_od_feature_supported(smu,
1335 + 							 PP_OD_FEATURE_ZERO_FAN_BIT))
1336 + 			break;
1337 + 
1338 + 		size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_ENABLE:\n");
1339 + 		size += sysfs_emit_at(buf, size, "%d\n",
1340 + 				      (int)od_table->OverDriveTable.FanZeroRpmEnable);
1341 + 
1342 + 		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
1343 + 		smu_v14_0_2_get_od_setting_limits(smu,
1344 + 						  PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE,
1345 + 						  &min_value,
1346 + 						  &max_value);
1347 + 		size += sysfs_emit_at(buf, size, "ZERO_RPM_ENABLE: %u %u\n",
1335 1348 				      min_value, max_value);
1336 1349 		break;
1337 1350 
··· 2293 2270 		OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE |
2294 2271 		OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET |
2295 2272 		OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE |
2296 - 		OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET;
2273 + 		OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET |
2274 + 		OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE |
2275 + 		OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET;
2297 2276 }
2298 2277 
2299 2278 static int smu_v14_0_2_get_overdrive_table(struct smu_context *smu,
··· 2374 2349 			user_od_table_bak.OverDriveTable.FanTargetTemperature;
2375 2350 		user_od_table->OverDriveTable.FanMinimumPwm =
2376 2351 			user_od_table_bak.OverDriveTable.FanMinimumPwm;
2352 + 		user_od_table->OverDriveTable.FanZeroRpmEnable =
2353 + 			user_od_table_bak.OverDriveTable.FanZeroRpmEnable;
2377 2354 	}
2378 2355 
2379 2356 	smu_v14_0_2_set_supported_od_feature_mask(smu);
··· 2422 2395 		}
2423 2396 		od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
2424 2397 		od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
2398 + 		break;
2399 + 	case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE:
2400 + 		od_table->OverDriveTable.FanZeroRpmEnable =
2401 + 			boot_overdrive_table->OverDriveTable.FanZeroRpmEnable;
2402 + 		od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
2425 2403 		break;
2426 2404 	case PP_OD_EDIT_ACOUSTIC_LIMIT:
2427 2405 		od_table->OverDriveTable.AcousticLimitRpmThreshold =
··· 2708 2676 		od_table->OverDriveTable.FanMinimumPwm = input[0];
2709 2677 		od_table->OverDriveTable.FanMode = FAN_MODE_AUTO;
2710 2678 		od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
2679 + 		break;
2680 + 
2681 + 	case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE:
2682 + 		if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) {
2683 + 			dev_warn(adev->dev, "Zero RPM setting not supported!\n");
2684 + 			return -ENOTSUPP;
2685 + 		}
2686 + 
2687 + 		smu_v14_0_2_get_od_setting_limits(smu,
2688 + 						  PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE,
2689 + 						  &minimum,
2690 + 						  &maximum);
2691 + 		if (input[0] < minimum ||
2692 + 		    input[0] > maximum) {
2693 + 			dev_info(adev->dev, "zero RPM enable setting(%ld) must be within [%d, %d]!\n",
2694 + 				 input[0], minimum, maximum);
2695 + 			return -EINVAL;
2696 + 		}
2697 + 
2698 + 		od_table->OverDriveTable.FanZeroRpmEnable = input[0];
2699 + 		od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
2711 2700 		break;
2712 2701 
2713 2702 	case PP_OD_RESTORE_DEFAULT_TABLE:
+3
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
··· 1083 1083 case METRICS_VERSION(1, 7): 1084 1084 structure_size = sizeof(struct gpu_metrics_v1_7); 1085 1085 break; 1086 + case METRICS_VERSION(1, 8): 1087 + structure_size = sizeof(struct gpu_metrics_v1_8); 1088 + break; 1086 1089 case METRICS_VERSION(2, 0): 1087 1090 structure_size = sizeof(struct gpu_metrics_v2_0); 1088 1091 break;
+7 -6
drivers/gpu/drm/bridge/Kconfig
··· 91 91 Support for i.MX8MP DPI-to-LVDS on-SoC encoder. 92 92 93 93 config DRM_I2C_NXP_TDA998X 94 - tristate "NXP Semiconductors TDA998X HDMI encoder" 95 - default m if DRM_TILCDC 96 - select CEC_CORE if CEC_NOTIFIER 97 - select SND_SOC_HDMI_CODEC if SND_SOC 98 - help 99 - Support for NXP Semiconductors TDA998X HDMI encoders. 94 + tristate "NXP Semiconductors TDA998X HDMI encoder" 95 + default m if DRM_TILCDC 96 + select CEC_CORE if CEC_NOTIFIER 97 + select DRM_KMS_HELPER 98 + select SND_SOC_HDMI_CODEC if SND_SOC 99 + help 100 + Support for NXP Semiconductors TDA998X HDMI encoders. 100 101 101 102 config DRM_ITE_IT6263 102 103 tristate "ITE IT6263 LVDS/HDMI bridge"
+2
drivers/gpu/drm/i915/display/intel_fbdev.h
··· 6 6 #ifndef __INTEL_FBDEV_H__ 7 7 #define __INTEL_FBDEV_H__ 8 8 9 + #include <linux/types.h> 10 + 9 11 struct drm_fb_helper; 10 12 struct drm_fb_helper_surface_size; 11 13 struct drm_i915_private;
+4 -1
drivers/gpu/drm/i915/display/skl_watermark.c
··· 2314 2314 static int 2315 2315 dsc_prefill_latency(const struct intel_crtc_state *crtc_state) 2316 2316 { 2317 + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2317 2318 const struct intel_crtc_scaler_state *scaler_state = 2318 2319 &crtc_state->scaler_state; 2319 2320 int linetime = DIV_ROUND_UP(1000 * crtc_state->hw.adjusted_mode.htotal, ··· 2324 2323 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ? 2 : 1; 2325 2324 u32 dsc_prefill_latency = 0; 2326 2325 2327 - if (!crtc_state->dsc.compression_enable || !num_scaler_users) 2326 + if (!crtc_state->dsc.compression_enable || 2327 + !num_scaler_users || 2328 + num_scaler_users > crtc->num_scalers) 2328 2329 return dsc_prefill_latency; 2329 2330 2330 2331 dsc_prefill_latency = DIV_ROUND_UP(15 * linetime * chroma_downscaling_factor, 10);
+1 -1
drivers/gpu/drm/xe/Kconfig
··· 53 53 config DRM_XE_DISPLAY 54 54 bool "Enable display support" 55 55 depends on DRM_XE && DRM_XE=m && HAS_IOPORT 56 - select FB_IOMEM_HELPERS 56 + select FB_IOMEM_HELPERS if DRM_FBDEV_EMULATION 57 57 select I2C 58 58 select I2C_ALGOBIT 59 59 default y
+4
drivers/gpu/drm/xe/regs/xe_engine_regs.h
··· 130 130 #define RING_EXECLIST_STATUS_LO(base) XE_REG((base) + 0x234) 131 131 #define RING_EXECLIST_STATUS_HI(base) XE_REG((base) + 0x234 + 4) 132 132 133 + #define RING_IDLEDLY(base) XE_REG((base) + 0x23c) 134 + #define INHIBIT_SWITCH_UNTIL_PREEMPTED REG_BIT(31) 135 + #define IDLE_DELAY REG_GENMASK(20, 0) 136 + 133 137 #define RING_CONTEXT_CONTROL(base) XE_REG((base) + 0x244, XE_REG_OPTION_MASKED) 134 138 #define CTX_CTRL_PXP_ENABLE REG_BIT(10) 135 139 #define CTX_CTRL_OAC_CONTEXT_ENABLE REG_BIT(8)
+15 -2
drivers/gpu/drm/xe/xe_device.c
··· 53 53 #include "xe_pxp.h"
54 54 #include "xe_query.h"
55 55 #include "xe_shrinker.h"
56 + #include "xe_survivability_mode.h"
56 57 #include "xe_sriov.h"
57 58 #include "xe_tile.h"
58 59 #include "xe_ttm_stolen_mgr.h"
··· 706 705 	sriov_update_device_info(xe);
707 706 
708 707 	err = xe_pcode_probe_early(xe);
709 - 	if (err)
710 - 		return err;
708 + 	if (err) {
709 + 		int save_err = err;
710 + 
711 + 		/*
712 + 		 * Try to leave the device in survivability mode if
713 + 		 * possible, but still return the previous error for
714 + 		 * error propagation
715 + 		 */
716 + 		err = xe_survivability_mode_enable(xe);
717 + 		if (err)
718 + 			return err;
719 + 
720 + 		return save_err;
721 + 	}
711 722 
712 723 	err = wait_for_lmem_ready(xe);
713 724 	if (err)
+1 -7
drivers/gpu/drm/xe/xe_eu_stall.c
··· 222 222 goto exit_free; 223 223 } 224 224 225 - ret = devm_add_action_or_reset(xe->drm.dev, xe_eu_stall_fini, gt); 226 - if (ret) 227 - goto exit_destroy; 228 - 229 - return 0; 230 - exit_destroy: 231 - destroy_workqueue(gt->eu_stall->buf_ptr_poll_wq); 225 + return devm_add_action_or_reset(xe->drm.dev, xe_eu_stall_fini, gt); 232 226 exit_free: 233 227 mutex_destroy(&gt->eu_stall->stream_lock); 234 228 kfree(gt->eu_stall);
+38 -16
drivers/gpu/drm/xe/xe_gt_clock.c
··· 16 16 #include "xe_macros.h" 17 17 #include "xe_mmio.h" 18 18 19 - static u32 get_crystal_clock_freq(u32 rpm_config_reg) 19 + #define f19_2_mhz 19200000 20 + #define f24_mhz 24000000 21 + #define f25_mhz 25000000 22 + #define f38_4_mhz 38400000 23 + #define ts_base_83 83333 24 + #define ts_base_52 52083 25 + #define ts_base_80 80000 26 + 27 + static void read_crystal_clock(struct xe_gt *gt, u32 rpm_config_reg, u32 *freq, 28 + u32 *timestamp_base) 20 29 { 21 - const u32 f19_2_mhz = 19200000; 22 - const u32 f24_mhz = 24000000; 23 - const u32 f25_mhz = 25000000; 24 - const u32 f38_4_mhz = 38400000; 25 30 u32 crystal_clock = REG_FIELD_GET(RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK, 26 31 rpm_config_reg); 27 32 28 33 switch (crystal_clock) { 29 34 case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ: 30 - return f24_mhz; 35 + *freq = f24_mhz; 36 + *timestamp_base = ts_base_83; 37 + return; 31 38 case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ: 32 - return f19_2_mhz; 39 + *freq = f19_2_mhz; 40 + *timestamp_base = ts_base_52; 41 + return; 33 42 case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ: 34 - return f38_4_mhz; 43 + *freq = f38_4_mhz; 44 + *timestamp_base = ts_base_52; 45 + return; 35 46 case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ: 36 - return f25_mhz; 47 + *freq = f25_mhz; 48 + *timestamp_base = ts_base_80; 49 + return; 37 50 default: 38 - XE_WARN_ON("NOT_POSSIBLE"); 39 - return 0; 51 + xe_gt_warn(gt, "Invalid crystal clock frequency: %u", crystal_clock); 52 + *freq = 0; 53 + *timestamp_base = 0; 54 + return; 40 55 } 41 56 } 42 57 43 - int xe_gt_clock_init(struct xe_gt *gt) 58 + static void check_ctc_mode(struct xe_gt *gt) 44 59 { 45 - u32 c0 = xe_mmio_read32(&gt->mmio, RPM_CONFIG0); 46 - u32 freq = 0; 47 - 48 60 /* 49 61 * CTC_MODE[0] = 1 is definitely not supported for Xe2 and later 50 62 * platforms. In theory it could be a valid setting for pre-Xe2 ··· 69 57 */ 70 58 if (xe_mmio_read32(&gt->mmio, CTC_MODE) & CTC_SOURCE_DIVIDE_LOGIC) 71 59 xe_gt_warn(gt, "CTC_MODE[0] is set; this is unexpected and undocumented\n"); 60 + } 72 61 73 - freq = get_crystal_clock_freq(c0); 62 + int xe_gt_clock_init(struct xe_gt *gt) 63 + { 64 + u32 freq; 65 + u32 c0; 66 + 67 + if (!IS_SRIOV_VF(gt_to_xe(gt))) 68 + check_ctc_mode(gt); 69 + 70 + c0 = xe_mmio_read32(&gt->mmio, RPM_CONFIG0); 71 + read_crystal_clock(gt, c0, &freq, &gt->info.timestamp_base); 74 72 75 73 /* 76 74 * Now figure out how the command stream's timestamp
+2
drivers/gpu/drm/xe/xe_gt_types.h
··· 121 121 enum xe_gt_type type; 122 122 /** @info.reference_clock: clock frequency */ 123 123 u32 reference_clock; 124 + /** @info.timestamp_base: GT timestamp base */ 125 + u32 timestamp_base; 124 126 /** 125 127 * @info.engine_mask: mask of engines present on GT. Some of 126 128 * them may be reserved in runtime and not available for user.
+33
drivers/gpu/drm/xe/xe_hw_engine.c
··· 8 8 #include <linux/nospec.h> 9 9 10 10 #include <drm/drm_managed.h> 11 + #include <drm/drm_print.h> 11 12 #include <uapi/drm/xe_drm.h> 13 + #include <generated/xe_wa_oob.h> 12 14 13 15 #include "regs/xe_engine_regs.h" 14 16 #include "regs/xe_gt_regs.h" ··· 23 21 #include "xe_gsc.h" 24 22 #include "xe_gt.h" 25 23 #include "xe_gt_ccs_mode.h" 24 + #include "xe_gt_clock.h" 26 25 #include "xe_gt_printk.h" 27 26 #include "xe_gt_mcr.h" 28 27 #include "xe_gt_topology.h" ··· 567 564 xe_reg_whitelist_process_engine(hwe); 568 565 } 569 566 567 + static void adjust_idledly(struct xe_hw_engine *hwe) 568 + { 569 + struct xe_gt *gt = hwe->gt; 570 + u32 idledly, maxcnt; 571 + u32 idledly_units_ps = 8 * gt->info.timestamp_base; 572 + u32 maxcnt_units_ns = 640; 573 + bool inhibit_switch = 0; 574 + 575 + if (!IS_SRIOV_VF(gt_to_xe(hwe->gt)) && XE_WA(gt, 16023105232)) { 576 + idledly = xe_mmio_read32(&gt->mmio, RING_IDLEDLY(hwe->mmio_base)); 577 + maxcnt = xe_mmio_read32(&gt->mmio, RING_PWRCTX_MAXCNT(hwe->mmio_base)); 578 + 579 + inhibit_switch = idledly & INHIBIT_SWITCH_UNTIL_PREEMPTED; 580 + idledly = REG_FIELD_GET(IDLE_DELAY, idledly); 581 + idledly = DIV_ROUND_CLOSEST(idledly * idledly_units_ps, 1000); 582 + maxcnt = REG_FIELD_GET(IDLE_WAIT_TIME, maxcnt); 583 + maxcnt *= maxcnt_units_ns; 584 + 585 + if (xe_gt_WARN_ON(gt, idledly >= maxcnt || inhibit_switch)) { 586 + idledly = DIV_ROUND_CLOSEST(((maxcnt - 1) * maxcnt_units_ns), 587 + idledly_units_ps); 588 + idledly = DIV_ROUND_CLOSEST(idledly, 1000); 589 + xe_mmio_write32(&gt->mmio, RING_IDLEDLY(hwe->mmio_base), idledly); 590 + } 591 + } 592 + } 593 + 570 594 static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe, 571 595 enum xe_hw_engine_id id) 572 596 { ··· 633 603 /* We reserve the highest BCS instance for USM */ 634 604 if (xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY) 635 605 gt->usm.reserved_bcs_instance = hwe->instance; 606 + 607 + /* Ensure IDLEDLY is lower than MAXCNT */ 608 + adjust_idledly(hwe); 636 609 637 610 return devm_add_action_or_reset(xe->drm.dev, hw_engine_fini, hwe); 638 611
+7 -9
drivers/gpu/drm/xe/xe_pci.c
··· 803 803 return err; 804 804 805 805 err = xe_device_probe_early(xe); 806 - 807 - /* 808 - * In Boot Survivability mode, no drm card is exposed and driver is 809 - * loaded with bare minimum to allow for firmware to be flashed through 810 - * mei. If early probe fails, check if survivability mode is flagged by 811 - * HW to be enabled. In that case enable it and return success. 812 - */ 813 806 if (err) { 814 - if (xe_survivability_mode_required(xe) && 815 - xe_survivability_mode_enable(xe)) 807 + /* 808 + * In Boot Survivability mode, no drm card is exposed and driver 809 + * is loaded with bare minimum to allow for firmware to be 810 + * flashed through mei. If early probe failed, but it managed to 811 + * enable survivability mode, return success. 812 + */ 813 + if (xe_survivability_mode_is_enabled(xe)) 816 814 return 0; 817 815 818 816 return err;
+22 -9
drivers/gpu/drm/xe/xe_survivability_mode.c
··· 155 155 if (ret) 156 156 return ret; 157 157 158 + /* Make sure xe_heci_gsc_init() knows about survivability mode */ 159 + survivability->mode = true; 160 + 158 161 ret = xe_heci_gsc_init(xe); 159 - if (ret) 162 + if (ret) { 163 + /* 164 + * But if it fails, device can't enter survivability 165 + * so move it back for correct error handling 166 + */ 167 + survivability->mode = false; 160 168 return ret; 169 + } 161 170 162 171 xe_vsec_init(xe); 163 172 164 - survivability->mode = true; 165 173 dev_err(dev, "In Survivability Mode\n"); 166 174 167 175 return 0; ··· 186 178 return xe->survivability.mode; 187 179 } 188 180 189 - /** 190 - * xe_survivability_mode_required - checks if survivability mode is required 191 - * @xe: xe device instance 181 + /* 182 + * survivability_mode_requested - check if it's possible to enable 183 + * survivability mode and that was requested by firmware 192 184 * 193 - * This function reads the boot status from Pcode 185 + * This function reads the boot status from Pcode. 194 186 * 195 - * Return: true if boot status indicates failure, false otherwise 187 + * Return: true if platform support is available and boot status indicates 188 + * failure, false otherwise. 196 189 */ 197 - bool xe_survivability_mode_required(struct xe_device *xe) 190 + static bool survivability_mode_requested(struct xe_device *xe) 198 191 { 199 192 struct xe_survivability *survivability = &xe->survivability; 200 193 struct xe_mmio *mmio = xe_root_tile_mmio(xe); ··· 217 208 * 218 209 * Initialize survivability information and enable survivability mode 219 210 * 220 - * Return: 0 for success, negative error code otherwise. 211 + * Return: 0 if survivability mode is enabled or not requested; negative error 212 + * code otherwise. 221 213 */ 222 214 int xe_survivability_mode_enable(struct xe_device *xe) 223 215 { 224 216 struct xe_survivability *survivability = &xe->survivability; 225 217 struct xe_survivability_info *info; 226 218 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); 219 + 220 + if (!survivability_mode_requested(xe)) 221 + return 0; 227 222 228 223 survivability->size = MAX_SCRATCH_MMIO; 229 224
-1
drivers/gpu/drm/xe/xe_survivability_mode.h
··· 12 12 13 13 int xe_survivability_mode_enable(struct xe_device *xe); 14 14 bool xe_survivability_mode_is_enabled(struct xe_device *xe); 15 - bool xe_survivability_mode_required(struct xe_device *xe); 16 15 17 16 #endif /* _XE_SURVIVABILITY_MODE_H_ */
+6
drivers/gpu/drm/xe/xe_wa.c
··· 622 622 FUNC(xe_rtp_match_first_render_or_compute)), 623 623 XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, RES_CHK_SPR_DIS)) 624 624 }, 625 + { XE_RTP_NAME("16023105232"), 626 + XE_RTP_RULES(MEDIA_VERSION_RANGE(1301, 3000), OR, 627 + GRAPHICS_VERSION_RANGE(2001, 3001)), 628 + XE_RTP_ACTIONS(SET(RING_PSMI_CTL(0), RC_SEMA_IDLE_MSG_DISABLE, 629 + XE_RTP_ACTION_FLAG(ENGINE_BASE))) 630 + }, 625 631 }; 626 632 627 633 static const struct xe_rtp_entry_sr lrc_was[] = {
+2
drivers/gpu/drm/xe/xe_wa_oob.rules
··· 53 53 GRAPHICS_VERSION_RANGE(1270, 1274) 54 54 1508761755 GRAPHICS_VERSION(1255) 55 55 GRAPHICS_VERSION(1260), GRAPHICS_STEP(A0, B0) 56 + 16023105232 GRAPHICS_VERSION_RANGE(2001, 3001) 57 + MEDIA_VERSION_RANGE(1301, 3000)