Merge tag 'drm-next-2025-04-05' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
"Weekly fixes, mostly from the end of last week, this week was very
quiet, maybe you scared everyone away. It's mostly amdgpu, and xe,
with some i915, adp and bridge bits, since I think this is overly
quiet I'd expect rc2 to be a bit more lively.

bridge:
- tda998x: Select CONFIG_DRM_KMS_HELPER

amdgpu:
- Guard against potential division by 0 in fan code
- Zero RPM support for SMU 14.0.2
- Properly handle SI and CIK support being disabled
- PSR fixes
- DML2 fixes
- DP Link training fix
- Vblank fixes
- RAS fixes
- Partitioning fix
- SDMA fix
- SMU 13.0.x fixes
- ROM fetching fix
- MES fixes
- Queue reset fix

xe:
- Fix NULL pointer dereference on error path
- Add missing HW workaround for BMG
- Fix survivability mode not triggering
- Fix build warning when DRM_FBDEV_EMULATION is not set

i915:
- Bounds check for scalers in DSC prefill latency computation
- Fix build by adding a missing include

adp:
- Fix error handling in plane setup"

* tag 'drm-next-2025-04-05' of https://gitlab.freedesktop.org/drm/kernel: (34 commits)
drm/i2c: tda998x: select CONFIG_DRM_KMS_HELPER
drm/amdgpu/gfx12: fix num_mec
drm/amdgpu/gfx11: fix num_mec
drm/amd/pm: Add gpu_metrics_v1_8
drm/amdgpu: Prefer shadow rom when available
drm/amd/pm: Update smu metrics table for smu_v13_0_6
drm/amd/pm: Remove host limit metrics support
Remove unnecessary firmware version check for gc v9_4_2
drm/amdgpu: stop unmapping MQD for kernel queues v3
Revert "drm/amdgpu/sdma_v4_4_2: update VM flush implementation for SDMA"
drm/amdgpu: Parse all deferred errors with UMC aca handle
drm/amdgpu: Update ta ras block
drm/amdgpu: Add NPS2 to DPX compatible mode
drm/amdgpu: Use correct gfx deferred error count
drm/amd/display: Actually do immediate vblank disable
drm/amd/display: prevent hang on link training fail
Revert "drm/amd/display: dml2 soc dscclk use DPM table clk setting"
drm/amd/display: Increase vblank offdelay for PSR panels
drm/amd: Handle being compiled without SI or CIK support better
drm/amd/pm: Add zero RPM enabled OD setting support for SMU14.0.2
...

+537 -701
+2 -2
drivers/gpu/drm/adp/adp_drv.c
··· 232 ALL_CRTCS, &adp_plane_funcs, 233 plane_formats, ARRAY_SIZE(plane_formats), 234 NULL, DRM_PLANE_TYPE_PRIMARY, "plane"); 235 - if (!plane) { 236 drm_err(drm, "failed to allocate plane"); 237 - return ERR_PTR(-ENOMEM); 238 } 239 240 drm_plane_helper_add(plane, &adp_plane_helper_funcs);
··· 232 ALL_CRTCS, &adp_plane_funcs, 233 plane_formats, ARRAY_SIZE(plane_formats), 234 NULL, DRM_PLANE_TYPE_PRIMARY, "plane"); 235 + if (IS_ERR(plane)) { 236 drm_err(drm, "failed to allocate plane"); 237 + return plane; 238 } 239 240 drm_plane_helper_add(plane, &adp_plane_helper_funcs);
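The adp fix above is the usual kernel ERR_PTR convention: an allocator that can fail with a specific errno returns an encoded error pointer rather than NULL, so the caller must test with IS_ERR() and propagate with PTR_ERR(). A minimal sketch of the pattern, using hypothetical names rather than the real drmm_universal_plane_alloc() signature:

    #include <linux/err.h>
    #include <linux/slab.h>

    struct example_plane { int id; };

    /* Hypothetical allocator: on failure it returns an encoded errno,
     * never NULL, so a bare NULL check would miss the error entirely. */
    static struct example_plane *example_plane_alloc(void)
    {
            struct example_plane *plane = kzalloc(sizeof(*plane), GFP_KERNEL);

            if (!plane)
                    return ERR_PTR(-ENOMEM);
            return plane;
    }

    static int example_plane_setup(void)
    {
            struct example_plane *plane = example_plane_alloc();

            if (IS_ERR(plane))              /* not: if (!plane) */
                    return PTR_ERR(plane);  /* propagate the encoded errno */

            /* ... initialise and register the plane ... */
            return 0;
    }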
+4
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
··· 195 { 196 const struct aca_bank_ops *bank_ops = handle->bank_ops; 197 198 if (!aca_bank_hwip_is_matched(bank, handle->hwip)) 199 return false; 200
··· 195 { 196 const struct aca_bank_ops *bank_ops = handle->bank_ops; 197 198 + /* Parse all deferred errors with UMC aca handle */ 199 + if (ACA_BANK_ERR_IS_DEFFERED(bank)) 200 + return handle->hwip == ACA_HWIP_TYPE_UMC; 201 + 202 if (!aca_bank_hwip_is_matched(bank, handle->hwip)) 203 return false; 204
-8
drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
··· 80 (ACA_REG__STATUS__POISON((bank)->regs[ACA_REG_IDX_STATUS]) || \ 81 ACA_REG__STATUS__DEFERRED((bank)->regs[ACA_REG_IDX_STATUS])) 82 83 - #define ACA_BANK_ERR_CE_DE_DECODE(bank) \ 84 - (ACA_BANK_ERR_IS_DEFFERED(bank) ? ACA_ERROR_TYPE_DEFERRED : \ 85 - ACA_ERROR_TYPE_CE) 86 - 87 - #define ACA_BANK_ERR_UE_DE_DECODE(bank) \ 88 - (ACA_BANK_ERR_IS_DEFFERED(bank) ? ACA_ERROR_TYPE_DEFERRED : \ 89 - ACA_ERROR_TYPE_UE) 90 - 91 enum aca_reg_idx { 92 ACA_REG_IDX_CTL = 0, 93 ACA_REG_IDX_STATUS = 1,
··· 80 (ACA_REG__STATUS__POISON((bank)->regs[ACA_REG_IDX_STATUS]) || \ 81 ACA_REG__STATUS__DEFERRED((bank)->regs[ACA_REG_IDX_STATUS])) 82 83 enum aca_reg_idx { 84 ACA_REG_IDX_CTL = 0, 85 ACA_REG_IDX_STATUS = 1,
+27 -7
drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
··· 447 return true; 448 } 449 450 static bool amdgpu_get_bios_dgpu(struct amdgpu_device *adev) 451 { 452 if (amdgpu_atrm_get_bios(adev)) { ··· 472 goto success; 473 } 474 475 - if (amdgpu_read_platform_bios(adev)) { 476 - dev_info(adev->dev, "Fetched VBIOS from platform\n"); 477 - goto success; 478 - } 479 480 - if (amdgpu_read_bios(adev)) { 481 - dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n"); 482 - goto success; 483 } 484 485 if (amdgpu_read_bios_from_rom(adev)) {
··· 447 return true; 448 } 449 450 + static bool amdgpu_prefer_rom_resource(struct amdgpu_device *adev) 451 + { 452 + struct resource *res = &adev->pdev->resource[PCI_ROM_RESOURCE]; 453 + 454 + return (res->flags & IORESOURCE_ROM_SHADOW); 455 + } 456 + 457 static bool amdgpu_get_bios_dgpu(struct amdgpu_device *adev) 458 { 459 if (amdgpu_atrm_get_bios(adev)) { ··· 465 goto success; 466 } 467 468 + if (amdgpu_prefer_rom_resource(adev)) { 469 + if (amdgpu_read_bios(adev)) { 470 + dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n"); 471 + goto success; 472 + } 473 474 + if (amdgpu_read_platform_bios(adev)) { 475 + dev_info(adev->dev, "Fetched VBIOS from platform\n"); 476 + goto success; 477 + } 478 + 479 + } else { 480 + if (amdgpu_read_platform_bios(adev)) { 481 + dev_info(adev->dev, "Fetched VBIOS from platform\n"); 482 + goto success; 483 + } 484 + 485 + if (amdgpu_read_bios(adev)) { 486 + dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n"); 487 + goto success; 488 + } 489 } 490 491 if (amdgpu_read_bios_from_rom(adev)) {
+24 -20
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 1809 }; 1810 1811 static const struct pci_device_id pciidlist[] = { 1812 - #ifdef CONFIG_DRM_AMDGPU_SI 1813 {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, 1814 {0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, 1815 {0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, ··· 1881 {0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY}, 1882 {0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY}, 1883 {0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY}, 1884 - #endif 1885 - #ifdef CONFIG_DRM_AMDGPU_CIK 1886 /* Kaveri */ 1887 {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, 1888 {0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, ··· 1963 {0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, 1964 {0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, 1965 {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, 1966 - #endif 1967 /* topaz */ 1968 {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, 1969 {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, ··· 2309 return -ENOTSUPP; 2310 } 2311 2312 #ifdef CONFIG_DRM_AMDGPU_SI 2313 - if (!amdgpu_si_support) { 2314 - switch (flags & AMD_ASIC_MASK) { 2315 - case CHIP_TAHITI: 2316 - case CHIP_PITCAIRN: 2317 - case CHIP_VERDE: 2318 - case CHIP_OLAND: 2319 - case CHIP_HAINAN: 2320 dev_info(&pdev->dev, 2321 "SI support provided by radeon.\n"); 2322 dev_info(&pdev->dev, ··· 2324 ); 2325 return -ENODEV; 2326 } 2327 - } 2328 #endif 2329 #ifdef CONFIG_DRM_AMDGPU_CIK 2330 - if (!amdgpu_cik_support) { 2331 - switch (flags & AMD_ASIC_MASK) { 2332 - case CHIP_KAVERI: 2333 - case CHIP_BONAIRE: 2334 - case CHIP_HAWAII: 2335 - case CHIP_KABINI: 2336 - case CHIP_MULLINS: 2337 dev_info(&pdev->dev, 2338 "CIK support provided by radeon.\n"); 2339 dev_info(&pdev->dev, ··· 2343 ); 2344 return -ENODEV; 2345 } 2346 - } 2347 #endif 2348 2349 adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev); 2350 if (IS_ERR(adev))
··· 1809 }; 1810 1811 static const struct pci_device_id pciidlist[] = { 1812 {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, 1813 {0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, 1814 {0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, ··· 1882 {0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY}, 1883 {0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY}, 1884 {0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY}, 1885 /* Kaveri */ 1886 {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, 1887 {0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, ··· 1966 {0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, 1967 {0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, 1968 {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, 1969 /* topaz */ 1970 {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, 1971 {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, ··· 2313 return -ENOTSUPP; 2314 } 2315 2316 + switch (flags & AMD_ASIC_MASK) { 2317 + case CHIP_TAHITI: 2318 + case CHIP_PITCAIRN: 2319 + case CHIP_VERDE: 2320 + case CHIP_OLAND: 2321 + case CHIP_HAINAN: 2322 #ifdef CONFIG_DRM_AMDGPU_SI 2323 + if (!amdgpu_si_support) { 2324 dev_info(&pdev->dev, 2325 "SI support provided by radeon.\n"); 2326 dev_info(&pdev->dev, ··· 2328 ); 2329 return -ENODEV; 2330 } 2331 + break; 2332 + #else 2333 + dev_info(&pdev->dev, "amdgpu is built without SI support.\n"); 2334 + return -ENODEV; 2335 #endif 2336 + case CHIP_KAVERI: 2337 + case CHIP_BONAIRE: 2338 + case CHIP_HAWAII: 2339 + case CHIP_KABINI: 2340 + case CHIP_MULLINS: 2341 #ifdef CONFIG_DRM_AMDGPU_CIK 2342 + if (!amdgpu_cik_support) { 2343 dev_info(&pdev->dev, 2344 "CIK support provided by radeon.\n"); 2345 dev_info(&pdev->dev, ··· 2345 ); 2346 return -ENODEV; 2347 } 2348 + break; 2349 + #else 2350 + dev_info(&pdev->dev, "amdgpu is built without CIK support.\n"); 2351 + return -ENODEV; 2352 #endif 2353 + default: 2354 + break; 2355 + } 2356 2357 adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev); 2358 if (IS_ERR(adev))
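For context, a usage sketch assuming the long-standing amdgpu.si_support / amdgpu.cik_support module parameters referenced by the messages above: on kernels built with CONFIG_DRM_AMDGPU_SI / CONFIG_DRM_AMDGPU_CIK, binding these ASICs to amdgpu instead of radeon stays opt-in, e.g.

    modprobe amdgpu si_support=1 cik_support=1

With the reworked switch, a kernel built without SI or CIK support now rejects those ASICs cleanly at probe time with -ENODEV and a "built without ... support" message, instead of falling through to the common init path.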
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
··· 77 "jpeg", 78 "ih", 79 "mpio", 80 }; 81 82 const char *ras_mca_block_string[] = {
··· 77 "jpeg", 78 "ih", 79 "mpio", 80 + "mmsch", 81 }; 82 83 const char *ras_mca_block_string[] = {
+7
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
··· 98 AMDGPU_RAS_BLOCK__JPEG, 99 AMDGPU_RAS_BLOCK__IH, 100 AMDGPU_RAS_BLOCK__MPIO, 101 102 AMDGPU_RAS_BLOCK__LAST, 103 AMDGPU_RAS_BLOCK__ANY = -1 ··· 796 return TA_RAS_BLOCK__VCN; 797 case AMDGPU_RAS_BLOCK__JPEG: 798 return TA_RAS_BLOCK__JPEG; 799 default: 800 WARN_ONCE(1, "RAS ERROR: unexpected block id %d\n", block); 801 return TA_RAS_BLOCK__UMC;
··· 98 AMDGPU_RAS_BLOCK__JPEG, 99 AMDGPU_RAS_BLOCK__IH, 100 AMDGPU_RAS_BLOCK__MPIO, 101 + AMDGPU_RAS_BLOCK__MMSCH, 102 103 AMDGPU_RAS_BLOCK__LAST, 104 AMDGPU_RAS_BLOCK__ANY = -1 ··· 795 return TA_RAS_BLOCK__VCN; 796 case AMDGPU_RAS_BLOCK__JPEG: 797 return TA_RAS_BLOCK__JPEG; 798 + case AMDGPU_RAS_BLOCK__IH: 799 + return TA_RAS_BLOCK__IH; 800 + case AMDGPU_RAS_BLOCK__MPIO: 801 + return TA_RAS_BLOCK__MPIO; 802 + case AMDGPU_RAS_BLOCK__MMSCH: 803 + return TA_RAS_BLOCK__MMSCH; 804 default: 805 WARN_ONCE(1, "RAS ERROR: unexpected block id %d\n", block); 806 return TA_RAS_BLOCK__UMC;
+8 -50
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
··· 608 size_t size, loff_t *pos) 609 { 610 struct amdgpu_ring *ring = file_inode(f)->i_private; 611 - volatile u32 *mqd; 612 - u32 *kbuf; 613 - int r, i; 614 - uint32_t value, result; 615 616 - if (*pos & 3 || size & 3) 617 - return -EINVAL; 618 619 - kbuf = kmalloc(ring->mqd_size, GFP_KERNEL); 620 - if (!kbuf) 621 - return -ENOMEM; 622 623 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 624 - if (unlikely(r != 0)) 625 - goto err_free; 626 - 627 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd); 628 - if (r) 629 - goto err_unreserve; 630 - 631 - /* 632 - * Copy to local buffer to avoid put_user(), which might fault 633 - * and acquire mmap_sem, under reservation_ww_class_mutex. 634 - */ 635 - for (i = 0; i < ring->mqd_size/sizeof(u32); i++) 636 - kbuf[i] = mqd[i]; 637 - 638 - amdgpu_bo_kunmap(ring->mqd_obj); 639 - amdgpu_bo_unreserve(ring->mqd_obj); 640 - 641 - result = 0; 642 - while (size) { 643 - if (*pos >= ring->mqd_size) 644 - break; 645 - 646 - value = kbuf[*pos/4]; 647 - r = put_user(value, (uint32_t *)buf); 648 - if (r) 649 - goto err_free; 650 - buf += 4; 651 - result += 4; 652 - size -= 4; 653 - *pos += 4; 654 - } 655 - 656 - kfree(kbuf); 657 - return result; 658 - 659 - err_unreserve: 660 - amdgpu_bo_unreserve(ring->mqd_obj); 661 - err_free: 662 - kfree(kbuf); 663 - return r; 664 } 665 666 static const struct file_operations amdgpu_debugfs_mqd_fops = {
··· 608 size_t size, loff_t *pos) 609 { 610 struct amdgpu_ring *ring = file_inode(f)->i_private; 611 + ssize_t bytes = min_t(ssize_t, ring->mqd_size - *pos, size); 612 + void *from = ((u8 *)ring->mqd_ptr) + *pos; 613 614 + if (*pos > ring->mqd_size) 615 + return 0; 616 617 + if (copy_to_user(buf, from, bytes)) 618 + return -EFAULT; 619 620 + *pos += bytes; 621 + return bytes; 622 } 623 624 static const struct file_operations amdgpu_debugfs_mqd_fops = {
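The rewritten debugfs handler is a simple bounded copy out of the MQD, which now stays mapped for the lifetime of the ring. Assuming the mapping is ordinary kernel memory, the same behaviour could also be expressed with the generic helper; this is an illustrative sketch, not what the patch does:

    static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf,
                                           size_t size, loff_t *pos)
    {
            struct amdgpu_ring *ring = file_inode(f)->i_private;

            /* Bounded copy from the always-mapped MQD backing memory. */
            return simple_read_from_buffer(buf, size, pos,
                                           ring->mqd_ptr, ring->mqd_size);
    }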
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
··· 1172 break; 1173 case ACA_SMU_TYPE_CE: 1174 count = ext_error_code == 6 ? count : 0ULL; 1175 - bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank); 1176 ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, count); 1177 break; 1178 default:
··· 1172 break; 1173 case ACA_SMU_TYPE_CE: 1174 count = ext_error_code == 6 ? count : 0ULL; 1175 + bank->aca_err_type = ACA_ERROR_TYPE_CE; 1176 ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, count); 1177 break; 1178 default:
+2 -1
drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
··· 473 break; 474 case AMDGPU_DPX_PARTITION_MODE: 475 num_xcp = 2; 476 - nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE); 477 break; 478 case AMDGPU_TPX_PARTITION_MODE: 479 num_xcp = 3;
··· 473 break; 474 case AMDGPU_DPX_PARTITION_MODE: 475 num_xcp = 2; 476 + nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) | 477 + BIT(AMDGPU_NPS2_PARTITION_MODE); 478 break; 479 case AMDGPU_TPX_PARTITION_MODE: 480 num_xcp = 3;
+11 -77
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
··· 6851 static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) 6852 { 6853 int r, i; 6854 - struct amdgpu_ring *ring; 6855 6856 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 6857 - ring = &adev->gfx.gfx_ring[i]; 6858 - 6859 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 6860 - if (unlikely(r != 0)) 6861 - return r; 6862 - 6863 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 6864 - if (!r) { 6865 - r = gfx_v10_0_kgq_init_queue(ring, false); 6866 - amdgpu_bo_kunmap(ring->mqd_obj); 6867 - ring->mqd_ptr = NULL; 6868 - } 6869 - amdgpu_bo_unreserve(ring->mqd_obj); 6870 if (r) 6871 return r; 6872 } ··· 7160 7161 static int gfx_v10_0_kiq_resume(struct amdgpu_device *adev) 7162 { 7163 - struct amdgpu_ring *ring; 7164 - int r; 7165 - 7166 - ring = &adev->gfx.kiq[0].ring; 7167 - 7168 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 7169 - if (unlikely(r != 0)) 7170 - return r; 7171 - 7172 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 7173 - if (unlikely(r != 0)) { 7174 - amdgpu_bo_unreserve(ring->mqd_obj); 7175 - return r; 7176 - } 7177 - 7178 - gfx_v10_0_kiq_init_queue(ring); 7179 - amdgpu_bo_kunmap(ring->mqd_obj); 7180 - ring->mqd_ptr = NULL; 7181 - amdgpu_bo_unreserve(ring->mqd_obj); 7182 return 0; 7183 } 7184 7185 static int gfx_v10_0_kcq_resume(struct amdgpu_device *adev) 7186 { 7187 - struct amdgpu_ring *ring = NULL; 7188 - int r = 0, i; 7189 7190 gfx_v10_0_cp_compute_enable(adev, true); 7191 7192 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 7193 - ring = &adev->gfx.compute_ring[i]; 7194 - 7195 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 7196 - if (unlikely(r != 0)) 7197 - goto done; 7198 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 7199 - if (!r) { 7200 - r = gfx_v10_0_kcq_init_queue(ring, false); 7201 - amdgpu_bo_kunmap(ring->mqd_obj); 7202 - ring->mqd_ptr = NULL; 7203 - } 7204 - amdgpu_bo_unreserve(ring->mqd_obj); 7205 if (r) 7206 - goto done; 7207 } 7208 7209 - r = amdgpu_gfx_enable_kcq(adev, 0); 7210 - done: 7211 - return r; 7212 } 7213 7214 static int gfx_v10_0_cp_resume(struct amdgpu_device *adev) ··· 9535 if (r) 9536 return r; 9537 9538 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 9539 - if (unlikely(r != 0)) { 9540 - DRM_ERROR("fail to resv mqd_obj\n"); 9541 - return r; 9542 - } 9543 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 9544 - if (!r) { 9545 - r = gfx_v10_0_kgq_init_queue(ring, true); 9546 - amdgpu_bo_kunmap(ring->mqd_obj); 9547 - ring->mqd_ptr = NULL; 9548 - } 9549 - amdgpu_bo_unreserve(ring->mqd_obj); 9550 if (r) { 9551 - DRM_ERROR("fail to unresv mqd_obj\n"); 9552 return r; 9553 } 9554 ··· 9594 return r; 9595 } 9596 9597 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 9598 - if (unlikely(r != 0)) { 9599 - dev_err(adev->dev, "fail to resv mqd_obj\n"); 9600 - return r; 9601 - } 9602 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 9603 - if (!r) { 9604 - r = gfx_v10_0_kcq_init_queue(ring, true); 9605 - amdgpu_bo_kunmap(ring->mqd_obj); 9606 - ring->mqd_ptr = NULL; 9607 - } 9608 - amdgpu_bo_unreserve(ring->mqd_obj); 9609 if (r) { 9610 - dev_err(adev->dev, "fail to unresv mqd_obj\n"); 9611 return r; 9612 } 9613
··· 6851 static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) 6852 { 6853 int r, i; 6854 6855 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 6856 + r = gfx_v10_0_kgq_init_queue(&adev->gfx.gfx_ring[i], false); 6857 if (r) 6858 return r; 6859 } ··· 7173 7174 static int gfx_v10_0_kiq_resume(struct amdgpu_device *adev) 7175 { 7176 + gfx_v10_0_kiq_init_queue(&adev->gfx.kiq[0].ring); 7177 return 0; 7178 } 7179 7180 static int gfx_v10_0_kcq_resume(struct amdgpu_device *adev) 7181 { 7182 + int i, r; 7183 7184 gfx_v10_0_cp_compute_enable(adev, true); 7185 7186 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 7187 + r = gfx_v10_0_kcq_init_queue(&adev->gfx.compute_ring[i], 7188 + false); 7189 if (r) 7190 + return r; 7191 } 7192 7193 + return amdgpu_gfx_enable_kcq(adev, 0); 7194 } 7195 7196 static int gfx_v10_0_cp_resume(struct amdgpu_device *adev) ··· 9579 if (r) 9580 return r; 9581 9582 + r = gfx_v10_0_kgq_init_queue(ring, true); 9583 if (r) { 9584 + DRM_ERROR("fail to init kgq\n"); 9585 return r; 9586 } 9587 ··· 9649 return r; 9650 } 9651 9652 + r = gfx_v10_0_kcq_init_queue(ring, true); 9653 if (r) { 9654 + dev_err(adev->dev, "fail to init kcq\n"); 9655 return r; 9656 } 9657
+11 -79
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
··· 1581 adev->gfx.me.num_me = 1; 1582 adev->gfx.me.num_pipe_per_me = 1; 1583 adev->gfx.me.num_queue_per_pipe = 1; 1584 - adev->gfx.mec.num_mec = 2; 1585 adev->gfx.mec.num_pipe_per_mec = 4; 1586 adev->gfx.mec.num_queue_per_pipe = 4; 1587 break; ··· 4115 static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) 4116 { 4117 int r, i; 4118 - struct amdgpu_ring *ring; 4119 4120 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 4121 - ring = &adev->gfx.gfx_ring[i]; 4122 - 4123 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 4124 - if (unlikely(r != 0)) 4125 - return r; 4126 - 4127 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 4128 - if (!r) { 4129 - r = gfx_v11_0_kgq_init_queue(ring, false); 4130 - amdgpu_bo_kunmap(ring->mqd_obj); 4131 - ring->mqd_ptr = NULL; 4132 - } 4133 - amdgpu_bo_unreserve(ring->mqd_obj); 4134 if (r) 4135 return r; 4136 } ··· 4439 4440 static int gfx_v11_0_kiq_resume(struct amdgpu_device *adev) 4441 { 4442 - struct amdgpu_ring *ring; 4443 - int r; 4444 - 4445 - ring = &adev->gfx.kiq[0].ring; 4446 - 4447 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 4448 - if (unlikely(r != 0)) 4449 - return r; 4450 - 4451 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 4452 - if (unlikely(r != 0)) { 4453 - amdgpu_bo_unreserve(ring->mqd_obj); 4454 - return r; 4455 - } 4456 - 4457 - gfx_v11_0_kiq_init_queue(ring); 4458 - amdgpu_bo_kunmap(ring->mqd_obj); 4459 - ring->mqd_ptr = NULL; 4460 - amdgpu_bo_unreserve(ring->mqd_obj); 4461 - ring->sched.ready = true; 4462 return 0; 4463 } 4464 4465 static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev) 4466 { 4467 - struct amdgpu_ring *ring = NULL; 4468 - int r = 0, i; 4469 4470 if (!amdgpu_async_gfx_ring) 4471 gfx_v11_0_cp_compute_enable(adev, true); 4472 4473 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4474 - ring = &adev->gfx.compute_ring[i]; 4475 - 4476 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 4477 - if (unlikely(r != 0)) 4478 - goto done; 4479 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 4480 - if (!r) { 4481 - r = gfx_v11_0_kcq_init_queue(ring, false); 4482 - amdgpu_bo_kunmap(ring->mqd_obj); 4483 - ring->mqd_ptr = NULL; 4484 - } 4485 - amdgpu_bo_unreserve(ring->mqd_obj); 4486 if (r) 4487 - goto done; 4488 } 4489 4490 - r = amdgpu_gfx_enable_kcq(adev, 0); 4491 - done: 4492 - return r; 4493 } 4494 4495 static int gfx_v11_0_cp_resume(struct amdgpu_device *adev) ··· 6621 if (r) 6622 return r; 6623 6624 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 6625 - if (unlikely(r != 0)) { 6626 - dev_err(adev->dev, "fail to resv mqd_obj\n"); 6627 - return r; 6628 - } 6629 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 6630 - if (!r) { 6631 - r = gfx_v11_0_kgq_init_queue(ring, true); 6632 - amdgpu_bo_kunmap(ring->mqd_obj); 6633 - ring->mqd_ptr = NULL; 6634 - } 6635 - amdgpu_bo_unreserve(ring->mqd_obj); 6636 if (r) { 6637 - dev_err(adev->dev, "fail to unresv mqd_obj\n"); 6638 return r; 6639 } 6640 ··· 6650 return r; 6651 } 6652 6653 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 6654 - if (unlikely(r != 0)) { 6655 - dev_err(adev->dev, "fail to resv mqd_obj\n"); 6656 - return r; 6657 - } 6658 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 6659 - if (!r) { 6660 - r = gfx_v11_0_kcq_init_queue(ring, true); 6661 - amdgpu_bo_kunmap(ring->mqd_obj); 6662 - ring->mqd_ptr = NULL; 6663 - } 6664 - amdgpu_bo_unreserve(ring->mqd_obj); 6665 if (r) { 6666 - dev_err(adev->dev, "fail to unresv mqd_obj\n"); 6667 return r; 6668 } 6669 r = amdgpu_mes_map_legacy_queue(adev, 
ring);
··· 1581 adev->gfx.me.num_me = 1; 1582 adev->gfx.me.num_pipe_per_me = 1; 1583 adev->gfx.me.num_queue_per_pipe = 1; 1584 + adev->gfx.mec.num_mec = 1; 1585 adev->gfx.mec.num_pipe_per_mec = 4; 1586 adev->gfx.mec.num_queue_per_pipe = 4; 1587 break; ··· 4115 static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) 4116 { 4117 int r, i; 4118 4119 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 4120 + r = gfx_v11_0_kgq_init_queue(&adev->gfx.gfx_ring[i], false); 4121 if (r) 4122 return r; 4123 } ··· 4452 4453 static int gfx_v11_0_kiq_resume(struct amdgpu_device *adev) 4454 { 4455 + gfx_v11_0_kiq_init_queue(&adev->gfx.kiq[0].ring); 4456 return 0; 4457 } 4458 4459 static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev) 4460 { 4461 + int i, r; 4462 4463 if (!amdgpu_async_gfx_ring) 4464 gfx_v11_0_cp_compute_enable(adev, true); 4465 4466 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4467 + r = gfx_v11_0_kcq_init_queue(&adev->gfx.compute_ring[i], false); 4468 if (r) 4469 + return r; 4470 } 4471 4472 + return amdgpu_gfx_enable_kcq(adev, 0); 4473 } 4474 4475 static int gfx_v11_0_cp_resume(struct amdgpu_device *adev) ··· 6667 if (r) 6668 return r; 6669 6670 + r = gfx_v11_0_kgq_init_queue(ring, true); 6671 if (r) { 6672 + dev_err(adev->dev, "failed to init kgq\n"); 6673 return r; 6674 } 6675 ··· 6707 return r; 6708 } 6709 6710 + r = gfx_v11_0_kcq_init_queue(ring, true); 6711 if (r) { 6712 + dev_err(adev->dev, "fail to init kcq\n"); 6713 return r; 6714 } 6715 r = amdgpu_mes_map_legacy_queue(adev, ring);
+16 -88
drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
··· 1355 adev->gfx.me.num_me = 1; 1356 adev->gfx.me.num_pipe_per_me = 1; 1357 adev->gfx.me.num_queue_per_pipe = 1; 1358 - adev->gfx.mec.num_mec = 2; 1359 adev->gfx.mec.num_pipe_per_mec = 2; 1360 adev->gfx.mec.num_queue_per_pipe = 4; 1361 break; ··· 3001 3002 static int gfx_v12_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) 3003 { 3004 - int r, i; 3005 - struct amdgpu_ring *ring; 3006 3007 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 3008 - ring = &adev->gfx.gfx_ring[i]; 3009 - 3010 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 3011 - if (unlikely(r != 0)) 3012 - goto done; 3013 - 3014 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 3015 - if (!r) { 3016 - r = gfx_v12_0_kgq_init_queue(ring, false); 3017 - amdgpu_bo_kunmap(ring->mqd_obj); 3018 - ring->mqd_ptr = NULL; 3019 - } 3020 - amdgpu_bo_unreserve(ring->mqd_obj); 3021 if (r) 3022 - goto done; 3023 } 3024 3025 r = amdgpu_gfx_enable_kgq(adev, 0); 3026 if (r) 3027 - goto done; 3028 3029 - r = gfx_v12_0_cp_gfx_start(adev); 3030 - if (r) 3031 - goto done; 3032 - 3033 - done: 3034 - return r; 3035 } 3036 3037 static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m, ··· 3326 3327 static int gfx_v12_0_kiq_resume(struct amdgpu_device *adev) 3328 { 3329 - struct amdgpu_ring *ring; 3330 - int r; 3331 - 3332 - ring = &adev->gfx.kiq[0].ring; 3333 - 3334 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 3335 - if (unlikely(r != 0)) 3336 - return r; 3337 - 3338 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 3339 - if (unlikely(r != 0)) { 3340 - amdgpu_bo_unreserve(ring->mqd_obj); 3341 - return r; 3342 - } 3343 - 3344 - gfx_v12_0_kiq_init_queue(ring); 3345 - amdgpu_bo_kunmap(ring->mqd_obj); 3346 - ring->mqd_ptr = NULL; 3347 - amdgpu_bo_unreserve(ring->mqd_obj); 3348 - ring->sched.ready = true; 3349 return 0; 3350 } 3351 3352 static int gfx_v12_0_kcq_resume(struct amdgpu_device *adev) 3353 { 3354 - struct amdgpu_ring *ring = NULL; 3355 - int r = 0, i; 3356 3357 if (!amdgpu_async_gfx_ring) 3358 gfx_v12_0_cp_compute_enable(adev, true); 3359 3360 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 3361 - ring = &adev->gfx.compute_ring[i]; 3362 - 3363 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 3364 - if (unlikely(r != 0)) 3365 - goto done; 3366 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 3367 - if (!r) { 3368 - r = gfx_v12_0_kcq_init_queue(ring, false); 3369 - amdgpu_bo_kunmap(ring->mqd_obj); 3370 - ring->mqd_ptr = NULL; 3371 - } 3372 - amdgpu_bo_unreserve(ring->mqd_obj); 3373 if (r) 3374 - goto done; 3375 } 3376 3377 - r = amdgpu_gfx_enable_kcq(adev, 0); 3378 - done: 3379 - return r; 3380 } 3381 3382 static int gfx_v12_0_cp_resume(struct amdgpu_device *adev) ··· 5174 return r; 5175 } 5176 5177 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 5178 - if (unlikely(r != 0)) { 5179 - dev_err(adev->dev, "fail to resv mqd_obj\n"); 5180 - return r; 5181 - } 5182 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 5183 - if (!r) { 5184 - r = gfx_v12_0_kgq_init_queue(ring, true); 5185 - amdgpu_bo_kunmap(ring->mqd_obj); 5186 - ring->mqd_ptr = NULL; 5187 - } 5188 - amdgpu_bo_unreserve(ring->mqd_obj); 5189 if (r) { 5190 - DRM_ERROR("fail to unresv mqd_obj\n"); 5191 return r; 5192 } 5193 ··· 5203 return r; 5204 } 5205 5206 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 5207 - if (unlikely(r != 0)) { 5208 - DRM_ERROR("fail to resv mqd_obj\n"); 5209 - return r; 5210 - } 5211 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 5212 - if (!r) { 5213 - r = 
gfx_v12_0_kcq_init_queue(ring, true); 5214 - amdgpu_bo_kunmap(ring->mqd_obj); 5215 - ring->mqd_ptr = NULL; 5216 - } 5217 - amdgpu_bo_unreserve(ring->mqd_obj); 5218 if (r) { 5219 - DRM_ERROR("fail to unresv mqd_obj\n"); 5220 return r; 5221 } 5222 r = amdgpu_mes_map_legacy_queue(adev, ring);
··· 1355 adev->gfx.me.num_me = 1; 1356 adev->gfx.me.num_pipe_per_me = 1; 1357 adev->gfx.me.num_queue_per_pipe = 1; 1358 + adev->gfx.mec.num_mec = 1; 1359 adev->gfx.mec.num_pipe_per_mec = 2; 1360 adev->gfx.mec.num_queue_per_pipe = 4; 1361 break; ··· 3001 3002 static int gfx_v12_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev) 3003 { 3004 + int i, r; 3005 3006 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 3007 + r = gfx_v12_0_kgq_init_queue(&adev->gfx.gfx_ring[i], false); 3008 if (r) 3009 + return r; 3010 } 3011 3012 r = amdgpu_gfx_enable_kgq(adev, 0); 3013 if (r) 3014 + return r; 3015 3016 + return gfx_v12_0_cp_gfx_start(adev); 3017 } 3018 3019 static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m, ··· 3344 3345 static int gfx_v12_0_kiq_resume(struct amdgpu_device *adev) 3346 { 3347 + gfx_v12_0_kiq_init_queue(&adev->gfx.kiq[0].ring); 3348 + adev->gfx.kiq[0].ring.sched.ready = true; 3349 return 0; 3350 } 3351 3352 static int gfx_v12_0_kcq_resume(struct amdgpu_device *adev) 3353 { 3354 + int i, r; 3355 3356 if (!amdgpu_async_gfx_ring) 3357 gfx_v12_0_cp_compute_enable(adev, true); 3358 3359 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 3360 + r = gfx_v12_0_kcq_init_queue(&adev->gfx.compute_ring[i], false); 3361 if (r) 3362 + return r; 3363 } 3364 3365 + return amdgpu_gfx_enable_kcq(adev, 0); 3366 } 3367 3368 static int gfx_v12_0_cp_resume(struct amdgpu_device *adev) ··· 5224 return r; 5225 } 5226 5227 + r = gfx_v12_0_kgq_init_queue(ring, true); 5228 if (r) { 5229 + dev_err(adev->dev, "failed to init kgq\n"); 5230 return r; 5231 } 5232 ··· 5264 return r; 5265 } 5266 5267 + r = gfx_v12_0_kcq_init_queue(ring, true); 5268 if (r) { 5269 + dev_err(adev->dev, "failed to init kcq\n"); 5270 return r; 5271 } 5272 r = amdgpu_mes_map_legacy_queue(adev, ring);
+5 -40
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
··· 4683 4684 static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev) 4685 { 4686 - struct amdgpu_ring *ring; 4687 - int r; 4688 - 4689 - ring = &adev->gfx.kiq[0].ring; 4690 - 4691 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 4692 - if (unlikely(r != 0)) 4693 - return r; 4694 - 4695 - r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr); 4696 - if (unlikely(r != 0)) { 4697 - amdgpu_bo_unreserve(ring->mqd_obj); 4698 - return r; 4699 - } 4700 - 4701 - gfx_v8_0_kiq_init_queue(ring); 4702 - amdgpu_bo_kunmap(ring->mqd_obj); 4703 - ring->mqd_ptr = NULL; 4704 - amdgpu_bo_unreserve(ring->mqd_obj); 4705 return 0; 4706 } 4707 4708 static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev) 4709 { 4710 - struct amdgpu_ring *ring = NULL; 4711 - int r = 0, i; 4712 4713 gfx_v8_0_cp_compute_enable(adev, true); 4714 4715 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4716 - ring = &adev->gfx.compute_ring[i]; 4717 - 4718 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 4719 - if (unlikely(r != 0)) 4720 - goto done; 4721 - r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr); 4722 - if (!r) { 4723 - r = gfx_v8_0_kcq_init_queue(ring); 4724 - amdgpu_bo_kunmap(ring->mqd_obj); 4725 - ring->mqd_ptr = NULL; 4726 - } 4727 - amdgpu_bo_unreserve(ring->mqd_obj); 4728 if (r) 4729 - goto done; 4730 } 4731 4732 gfx_v8_0_set_mec_doorbell_range(adev); 4733 4734 - r = gfx_v8_0_kiq_kcq_enable(adev); 4735 - if (r) 4736 - goto done; 4737 - 4738 - done: 4739 - return r; 4740 } 4741 4742 static int gfx_v8_0_cp_test_all_rings(struct amdgpu_device *adev)
··· 4683 4684 static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev) 4685 { 4686 + gfx_v8_0_kiq_init_queue(&adev->gfx.kiq[0].ring); 4687 return 0; 4688 } 4689 4690 static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev) 4691 { 4692 + int i, r; 4693 4694 gfx_v8_0_cp_compute_enable(adev, true); 4695 4696 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 4697 + r = gfx_v8_0_kcq_init_queue(&adev->gfx.compute_ring[i]); 4698 if (r) 4699 + return r; 4700 } 4701 4702 gfx_v8_0_set_mec_doorbell_range(adev); 4703 4704 + return gfx_v8_0_kiq_kcq_enable(adev); 4705 } 4706 4707 static int gfx_v8_0_cp_test_all_rings(struct amdgpu_device *adev)
+8 -50
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
··· 1269 adev->gfx.mec_fw_write_wait = false; 1270 1271 if ((amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1)) && 1272 ((adev->gfx.mec_fw_version < 0x000001a5) || 1273 (adev->gfx.mec_feature_version < 46) || 1274 (adev->gfx.pfp_fw_version < 0x000000b7) || ··· 3891 3892 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev) 3893 { 3894 - struct amdgpu_ring *ring; 3895 - int r; 3896 - 3897 - ring = &adev->gfx.kiq[0].ring; 3898 - 3899 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 3900 - if (unlikely(r != 0)) 3901 - return r; 3902 - 3903 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 3904 - if (unlikely(r != 0)) { 3905 - amdgpu_bo_unreserve(ring->mqd_obj); 3906 - return r; 3907 - } 3908 - 3909 - gfx_v9_0_kiq_init_queue(ring); 3910 - amdgpu_bo_kunmap(ring->mqd_obj); 3911 - ring->mqd_ptr = NULL; 3912 - amdgpu_bo_unreserve(ring->mqd_obj); 3913 return 0; 3914 } 3915 3916 static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev) 3917 { 3918 - struct amdgpu_ring *ring = NULL; 3919 - int r = 0, i; 3920 3921 gfx_v9_0_cp_compute_enable(adev, true); 3922 3923 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 3924 - ring = &adev->gfx.compute_ring[i]; 3925 - 3926 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 3927 - if (unlikely(r != 0)) 3928 - goto done; 3929 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 3930 - if (!r) { 3931 - r = gfx_v9_0_kcq_init_queue(ring, false); 3932 - amdgpu_bo_kunmap(ring->mqd_obj); 3933 - ring->mqd_ptr = NULL; 3934 - } 3935 - amdgpu_bo_unreserve(ring->mqd_obj); 3936 if (r) 3937 - goto done; 3938 } 3939 3940 - r = amdgpu_gfx_enable_kcq(adev, 0); 3941 - done: 3942 - return r; 3943 } 3944 3945 static int gfx_v9_0_cp_resume(struct amdgpu_device *adev) ··· 7288 return r; 7289 } 7290 7291 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 7292 - if (unlikely(r != 0)){ 7293 - dev_err(adev->dev, "fail to resv mqd_obj\n"); 7294 - return r; 7295 - } 7296 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 7297 - if (!r) { 7298 - r = gfx_v9_0_kcq_init_queue(ring, true); 7299 - amdgpu_bo_kunmap(ring->mqd_obj); 7300 - ring->mqd_ptr = NULL; 7301 - } 7302 - amdgpu_bo_unreserve(ring->mqd_obj); 7303 if (r) { 7304 - dev_err(adev->dev, "fail to unresv mqd_obj\n"); 7305 return r; 7306 } 7307 spin_lock_irqsave(&kiq->ring_lock, flags);
··· 1269 adev->gfx.mec_fw_write_wait = false; 1270 1271 if ((amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1)) && 1272 + (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2)) && 1273 ((adev->gfx.mec_fw_version < 0x000001a5) || 1274 (adev->gfx.mec_feature_version < 46) || 1275 (adev->gfx.pfp_fw_version < 0x000000b7) || ··· 3890 3891 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev) 3892 { 3893 + gfx_v9_0_kiq_init_queue(&adev->gfx.kiq[0].ring); 3894 return 0; 3895 } 3896 3897 static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev) 3898 { 3899 + int i, r; 3900 3901 gfx_v9_0_cp_compute_enable(adev, true); 3902 3903 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 3904 + r = gfx_v9_0_kcq_init_queue(&adev->gfx.compute_ring[i], false); 3905 if (r) 3906 + return r; 3907 } 3908 3909 + return amdgpu_gfx_enable_kcq(adev, 0); 3910 } 3911 3912 static int gfx_v9_0_cp_resume(struct amdgpu_device *adev) ··· 7319 return r; 7320 } 7321 7322 + r = gfx_v9_0_kcq_init_queue(ring, true); 7323 if (r) { 7324 + dev_err(adev->dev, "fail to init kcq\n"); 7325 return r; 7326 } 7327 spin_lock_irqsave(&kiq->ring_lock, flags);
+13 -53
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
··· 867 868 switch (type) { 869 case ACA_SMU_TYPE_UE: 870 - bank->aca_err_type = ACA_BANK_ERR_UE_DE_DECODE(bank); 871 ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, 1ULL); 872 break; 873 case ACA_SMU_TYPE_CE: 874 - bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank); 875 - ret = aca_error_cache_log_bank_error(handle, &info, 876 - bank->aca_err_type, 877 ACA_REG__MISC0__ERRCNT(misc0)); 878 break; 879 default: ··· 2167 2168 static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id) 2169 { 2170 - struct amdgpu_ring *ring; 2171 - int r; 2172 - 2173 - ring = &adev->gfx.kiq[xcc_id].ring; 2174 - 2175 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 2176 - if (unlikely(r != 0)) 2177 - return r; 2178 - 2179 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 2180 - if (unlikely(r != 0)) { 2181 - amdgpu_bo_unreserve(ring->mqd_obj); 2182 - return r; 2183 - } 2184 - 2185 - gfx_v9_4_3_xcc_kiq_init_queue(ring, xcc_id); 2186 - amdgpu_bo_kunmap(ring->mqd_obj); 2187 - ring->mqd_ptr = NULL; 2188 - amdgpu_bo_unreserve(ring->mqd_obj); 2189 return 0; 2190 } 2191 2192 static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id) 2193 { 2194 - struct amdgpu_ring *ring = NULL; 2195 - int r = 0, i; 2196 2197 gfx_v9_4_3_xcc_cp_compute_enable(adev, true, xcc_id); 2198 2199 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 2200 - ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings]; 2201 2202 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 2203 - if (unlikely(r != 0)) 2204 - goto done; 2205 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 2206 - if (!r) { 2207 - r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false); 2208 - amdgpu_bo_kunmap(ring->mqd_obj); 2209 - ring->mqd_ptr = NULL; 2210 - } 2211 - amdgpu_bo_unreserve(ring->mqd_obj); 2212 if (r) 2213 - goto done; 2214 } 2215 2216 - r = amdgpu_gfx_enable_kcq(adev, xcc_id); 2217 - done: 2218 - return r; 2219 } 2220 2221 static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id) ··· 3559 return r; 3560 } 3561 3562 - r = amdgpu_bo_reserve(ring->mqd_obj, false); 3563 - if (unlikely(r != 0)){ 3564 - dev_err(adev->dev, "fail to resv mqd_obj\n"); 3565 - return r; 3566 - } 3567 - r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); 3568 - if (!r) { 3569 - r = gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true); 3570 - amdgpu_bo_kunmap(ring->mqd_obj); 3571 - ring->mqd_ptr = NULL; 3572 - } 3573 - amdgpu_bo_unreserve(ring->mqd_obj); 3574 if (r) { 3575 - dev_err(adev->dev, "fail to unresv mqd_obj\n"); 3576 return r; 3577 } 3578 spin_lock_irqsave(&kiq->ring_lock, flags);
··· 867 868 switch (type) { 869 case ACA_SMU_TYPE_UE: 870 + bank->aca_err_type = ACA_ERROR_TYPE_UE; 871 ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, 1ULL); 872 break; 873 case ACA_SMU_TYPE_CE: 874 + bank->aca_err_type = ACA_ERROR_TYPE_CE; 875 + ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, 876 ACA_REG__MISC0__ERRCNT(misc0)); 877 break; 878 default: ··· 2168 2169 static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id) 2170 { 2171 + gfx_v9_4_3_xcc_kiq_init_queue(&adev->gfx.kiq[xcc_id].ring, xcc_id); 2172 return 0; 2173 } 2174 2175 static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id) 2176 { 2177 + struct amdgpu_ring *ring; 2178 + int i, r; 2179 2180 gfx_v9_4_3_xcc_cp_compute_enable(adev, true, xcc_id); 2181 2182 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 2183 + ring = &adev->gfx.compute_ring[i + xcc_id * 2184 + adev->gfx.num_compute_rings]; 2185 2186 + r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false); 2187 if (r) 2188 + return r; 2189 } 2190 2191 + return amdgpu_gfx_enable_kcq(adev, xcc_id); 2192 } 2193 2194 static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id) ··· 3588 return r; 3589 } 3590 3591 + r = gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true); 3592 if (r) { 3593 + dev_err(adev->dev, "fail to init kcq\n"); 3594 return r; 3595 } 3596 spin_lock_irqsave(&kiq->ring_lock, flags);
+1 -1
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
··· 1328 1ULL); 1329 break; 1330 case ACA_SMU_TYPE_CE: 1331 - bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank); 1332 ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, 1333 ACA_REG__MISC0__ERRCNT(misc0)); 1334 break;
··· 1328 1ULL); 1329 break; 1330 case ACA_SMU_TYPE_CE: 1331 + bank->aca_err_type = ACA_ERROR_TYPE_CE; 1332 ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, 1333 ACA_REG__MISC0__ERRCNT(misc0)); 1334 break;
+1 -1
drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c
··· 751 1ULL); 752 break; 753 case ACA_SMU_TYPE_CE: 754 - bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank); 755 ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, 756 ACA_REG__MISC0__ERRCNT(misc0)); 757 break;
··· 751 1ULL); 752 break; 753 case ACA_SMU_TYPE_CE: 754 + bank->aca_err_type = ACA_ERROR_TYPE_CE; 755 ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, 756 ACA_REG__MISC0__ERRCNT(misc0)); 757 break;
+15 -64
drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
··· 31 #include "amdgpu_ucode.h" 32 #include "amdgpu_trace.h" 33 #include "amdgpu_reset.h" 34 - #include "gc/gc_9_0_sh_mask.h" 35 36 #include "sdma/sdma_4_4_2_offset.h" 37 #include "sdma/sdma_4_4_2_sh_mask.h" ··· 1290 seq, 0xffffffff, 4); 1291 } 1292 1293 - /* 1294 - * sdma_v4_4_2_get_invalidate_req - Construct the VM_INVALIDATE_ENG0_REQ register value 1295 - * @vmid: The VMID to invalidate 1296 - * @flush_type: The type of flush (0 = legacy, 1 = lightweight, 2 = heavyweight) 1297 * 1298 - * This function constructs the VM_INVALIDATE_ENG0_REQ register value for the specified VMID 1299 - * and flush type. It ensures that all relevant page table cache levels (L1 PTEs, L2 PTEs, and 1300 - * L2 PDEs) are invalidated. 1301 - */ 1302 - static uint32_t sdma_v4_4_2_get_invalidate_req(unsigned int vmid, 1303 - uint32_t flush_type) 1304 - { 1305 - u32 req = 0; 1306 - 1307 - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, 1308 - PER_VMID_INVALIDATE_REQ, 1 << vmid); 1309 - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type); 1310 - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1); 1311 - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1); 1312 - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1); 1313 - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1); 1314 - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1); 1315 - req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, 1316 - CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0); 1317 - 1318 - return req; 1319 - } 1320 - 1321 - /* 1322 - * sdma_v4_4_2_ring_emit_vm_flush - Emit VM flush commands for SDMA 1323 - * @ring: The SDMA ring 1324 - * @vmid: The VMID to flush 1325 - * @pd_addr: The page directory address 1326 * 1327 - * This function emits the necessary register writes and waits to perform a VM flush for the 1328 - * specified VMID. It updates the PTB address registers and issues a VM invalidation request 1329 - * using the specified VM invalidation engine. 
1330 */ 1331 static void sdma_v4_4_2_ring_emit_vm_flush(struct amdgpu_ring *ring, 1332 - unsigned int vmid, uint64_t pd_addr) 1333 { 1334 - struct amdgpu_device *adev = ring->adev; 1335 - uint32_t req = sdma_v4_4_2_get_invalidate_req(vmid, 0); 1336 - unsigned int eng = ring->vm_inv_eng; 1337 - struct amdgpu_vmhub *hub = &adev->vmhub[ring->vm_hub]; 1338 - 1339 - amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + 1340 - (hub->ctx_addr_distance * vmid), 1341 - lower_32_bits(pd_addr)); 1342 - 1343 - amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + 1344 - (hub->ctx_addr_distance * vmid), 1345 - upper_32_bits(pd_addr)); 1346 - /* 1347 - * Construct and emit the VM invalidation packet 1348 - */ 1349 - amdgpu_ring_write(ring, 1350 - SDMA_PKT_VM_INVALIDATION_HEADER_OP(SDMA_OP_VM_INVALIDATE) | 1351 - SDMA_PKT_VM_INVALIDATION_HEADER_SUB_OP(SDMA_SUBOP_VM_INVALIDATE) | 1352 - SDMA_PKT_VM_INVALIDATION_HEADER_XCC0_ENG_ID(0x1f) | 1353 - SDMA_PKT_VM_INVALIDATION_HEADER_XCC1_ENG_ID(0x1f) | 1354 - SDMA_PKT_VM_INVALIDATION_HEADER_MMHUB_ENG_ID(eng)); 1355 - amdgpu_ring_write(ring, SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_INVALIDATEREQ(req)); 1356 - amdgpu_ring_write(ring, 0); 1357 - amdgpu_ring_write(ring, SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_INVALIDATEACK(BIT(vmid))); 1358 } 1359 1360 static void sdma_v4_4_2_ring_emit_wreg(struct amdgpu_ring *ring, ··· 2126 3 + /* hdp invalidate */ 2127 6 + /* sdma_v4_4_2_ring_emit_pipeline_sync */ 2128 /* sdma_v4_4_2_ring_emit_vm_flush */ 2129 - 4 + 2 * 3 + 2130 10 + 10 + 10, /* sdma_v4_4_2_ring_emit_fence x3 for user fence, vm fence */ 2131 .emit_ib_size = 7 + 6, /* sdma_v4_4_2_ring_emit_ib */ 2132 .emit_ib = sdma_v4_4_2_ring_emit_ib, ··· 2159 3 + /* hdp invalidate */ 2160 6 + /* sdma_v4_4_2_ring_emit_pipeline_sync */ 2161 /* sdma_v4_4_2_ring_emit_vm_flush */ 2162 - 4 + 2 * 3 + 2163 10 + 10 + 10, /* sdma_v4_4_2_ring_emit_fence x3 for user fence, vm fence */ 2164 .emit_ib_size = 7 + 6, /* sdma_v4_4_2_ring_emit_ib */ 2165 .emit_ib = sdma_v4_4_2_ring_emit_ib, ··· 2546 1ULL); 2547 break; 2548 case ACA_SMU_TYPE_CE: 2549 - bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank); 2550 ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, 2551 ACA_REG__MISC0__ERRCNT(misc0)); 2552 break;
··· 31 #include "amdgpu_ucode.h" 32 #include "amdgpu_trace.h" 33 #include "amdgpu_reset.h" 34 35 #include "sdma/sdma_4_4_2_offset.h" 36 #include "sdma/sdma_4_4_2_sh_mask.h" ··· 1291 seq, 0xffffffff, 4); 1292 } 1293 1294 + 1295 + /** 1296 + * sdma_v4_4_2_ring_emit_vm_flush - vm flush using sDMA 1297 * 1298 + * @ring: amdgpu_ring pointer 1299 + * @vmid: vmid number to use 1300 + * @pd_addr: address 1301 * 1302 + * Update the page table base and flush the VM TLB 1303 + * using sDMA. 1304 */ 1305 static void sdma_v4_4_2_ring_emit_vm_flush(struct amdgpu_ring *ring, 1306 + unsigned vmid, uint64_t pd_addr) 1307 { 1308 + amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); 1309 } 1310 1311 static void sdma_v4_4_2_ring_emit_wreg(struct amdgpu_ring *ring, ··· 2177 3 + /* hdp invalidate */ 2178 6 + /* sdma_v4_4_2_ring_emit_pipeline_sync */ 2179 /* sdma_v4_4_2_ring_emit_vm_flush */ 2180 + SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + 2181 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 + 2182 10 + 10 + 10, /* sdma_v4_4_2_ring_emit_fence x3 for user fence, vm fence */ 2183 .emit_ib_size = 7 + 6, /* sdma_v4_4_2_ring_emit_ib */ 2184 .emit_ib = sdma_v4_4_2_ring_emit_ib, ··· 2209 3 + /* hdp invalidate */ 2210 6 + /* sdma_v4_4_2_ring_emit_pipeline_sync */ 2211 /* sdma_v4_4_2_ring_emit_vm_flush */ 2212 + SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + 2213 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 + 2214 10 + 10 + 10, /* sdma_v4_4_2_ring_emit_fence x3 for user fence, vm fence */ 2215 .emit_ib_size = 7 + 6, /* sdma_v4_4_2_ring_emit_ib */ 2216 .emit_ib = sdma_v4_4_2_ring_emit_ib, ··· 2595 1ULL); 2596 break; 2597 case ACA_SMU_TYPE_CE: 2598 + bank->aca_err_type = ACA_ERROR_TYPE_CE; 2599 ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, 2600 ACA_REG__MISC0__ERRCNT(misc0)); 2601 break;
+3
drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
··· 92 TA_RAS_BLOCK__MCA, 93 TA_RAS_BLOCK__VCN, 94 TA_RAS_BLOCK__JPEG, 95 TA_NUM_BLOCK_MAX 96 }; 97
··· 92 TA_RAS_BLOCK__MCA, 93 TA_RAS_BLOCK__VCN, 94 TA_RAS_BLOCK__JPEG, 95 + TA_RAS_BLOCK__IH, 96 + TA_RAS_BLOCK__MPIO, 97 + TA_RAS_BLOCK__MMSCH, 98 TA_NUM_BLOCK_MAX 99 }; 100
+2 -1
drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
··· 85 86 return (amdgpu_ras_is_poison_mode_supported(adev) && 87 (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && 88 - (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1)); 89 } 90 91 bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
··· 85 86 return (amdgpu_ras_is_poison_mode_supported(adev) && 87 (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && 88 + ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1) || 89 + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Poison) == 1))); 90 } 91 92 bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
+1 -1
drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
··· 1965 1ULL); 1966 break; 1967 case ACA_SMU_TYPE_CE: 1968 - bank->aca_err_type = ACA_BANK_ERR_CE_DE_DECODE(bank); 1969 ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, 1970 ACA_REG__MISC0__ERRCNT(misc0)); 1971 break;
··· 1965 1ULL); 1966 break; 1967 case ACA_SMU_TYPE_CE: 1968 + bank->aca_err_type = ACA_ERROR_TYPE_CE; 1969 ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, 1970 ACA_REG__MISC0__ERRCNT(misc0)); 1971 break;
-70
drivers/gpu/drm/amd/amdgpu/vega10_sdma_pkt_open.h
··· 64 #define HEADER_BARRIER 5 65 #define SDMA_OP_AQL_COPY 0 66 #define SDMA_OP_AQL_BARRIER_OR 0 67 - /* vm invalidation is only available for GC9.4.3/GC9.4.4/GC9.5.0 */ 68 - #define SDMA_OP_VM_INVALIDATE 8 69 - #define SDMA_SUBOP_VM_INVALIDATE 4 70 71 /*define for op field*/ 72 #define SDMA_PKT_HEADER_op_offset 0 ··· 3331 #define SDMA_AQL_PKT_BARRIER_OR_COMPLETION_SIGNAL_HI_completion_signal_63_32_shift 0 3332 #define SDMA_AQL_PKT_BARRIER_OR_COMPLETION_SIGNAL_HI_COMPLETION_SIGNAL_63_32(x) (((x) & SDMA_AQL_PKT_BARRIER_OR_COMPLETION_SIGNAL_HI_completion_signal_63_32_mask) << SDMA_AQL_PKT_BARRIER_OR_COMPLETION_SIGNAL_HI_completion_signal_63_32_shift) 3333 3334 - /* 3335 - ** Definitions for SDMA_PKT_VM_INVALIDATION packet 3336 - */ 3337 - 3338 - /*define for HEADER word*/ 3339 - /*define for op field*/ 3340 - #define SDMA_PKT_VM_INVALIDATION_HEADER_op_offset 0 3341 - #define SDMA_PKT_VM_INVALIDATION_HEADER_op_mask 0x000000FF 3342 - #define SDMA_PKT_VM_INVALIDATION_HEADER_op_shift 0 3343 - #define SDMA_PKT_VM_INVALIDATION_HEADER_OP(x) ((x & SDMA_PKT_VM_INVALIDATION_HEADER_op_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_op_shift) 3344 - 3345 - /*define for sub_op field*/ 3346 - #define SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_offset 0 3347 - #define SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_mask 0x000000FF 3348 - #define SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_shift 8 3349 - #define SDMA_PKT_VM_INVALIDATION_HEADER_SUB_OP(x) ((x & SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_sub_op_shift) 3350 - 3351 - /*define for xcc0_eng_id field*/ 3352 - #define SDMA_PKT_VM_INVALIDATION_HEADER_xcc0_eng_id_offset 0 3353 - #define SDMA_PKT_VM_INVALIDATION_HEADER_xcc0_eng_id_mask 0x0000001F 3354 - #define SDMA_PKT_VM_INVALIDATION_HEADER_xcc0_eng_id_shift 16 3355 - #define SDMA_PKT_VM_INVALIDATION_HEADER_XCC0_ENG_ID(x) ((x & SDMA_PKT_VM_INVALIDATION_HEADER_xcc0_eng_id_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_xcc0_eng_id_shift) 3356 - 3357 - /*define for xcc1_eng_id field*/ 3358 - #define SDMA_PKT_VM_INVALIDATION_HEADER_xcc1_eng_id_offset 0 3359 - #define SDMA_PKT_VM_INVALIDATION_HEADER_xcc1_eng_id_mask 0x0000001F 3360 - #define SDMA_PKT_VM_INVALIDATION_HEADER_xcc1_eng_id_shift 21 3361 - #define SDMA_PKT_VM_INVALIDATION_HEADER_XCC1_ENG_ID(x) ((x & SDMA_PKT_VM_INVALIDATION_HEADER_xcc1_eng_id_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_xcc1_eng_id_shift) 3362 - 3363 - /*define for mmhub_eng_id field*/ 3364 - #define SDMA_PKT_VM_INVALIDATION_HEADER_mmhub_eng_id_offset 0 3365 - #define SDMA_PKT_VM_INVALIDATION_HEADER_mmhub_eng_id_mask 0x0000001F 3366 - #define SDMA_PKT_VM_INVALIDATION_HEADER_mmhub_eng_id_shift 26 3367 - #define SDMA_PKT_VM_INVALIDATION_HEADER_MMHUB_ENG_ID(x) ((x & SDMA_PKT_VM_INVALIDATION_HEADER_mmhub_eng_id_mask) << SDMA_PKT_VM_INVALIDATION_HEADER_mmhub_eng_id_shift) 3368 - 3369 - /*define for INVALIDATEREQ word*/ 3370 - /*define for invalidatereq field*/ 3371 - #define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_offset 1 3372 - #define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_mask 0xFFFFFFFF 3373 - #define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_shift 0 3374 - #define SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_INVALIDATEREQ(x) ((x & SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_mask) << SDMA_PKT_VM_INVALIDATION_INVALIDATEREQ_invalidatereq_shift) 3375 - 3376 - /*define for ADDRESSRANGELO word*/ 3377 - /*define for addressrangelo field*/ 3378 - #define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_offset 2 3379 - #define 
SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_mask 0xFFFFFFFF 3380 - #define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_shift 0 3381 - #define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_ADDRESSRANGELO(x) ((x & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGELO_addressrangelo_shift) 3382 - 3383 - /*define for ADDRESSRANGEHI word*/ 3384 - /*define for invalidateack field*/ 3385 - #define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_offset 3 3386 - #define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_mask 0x0000FFFF 3387 - #define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_shift 0 3388 - #define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_INVALIDATEACK(x) ((x & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_invalidateack_shift) 3389 - 3390 - /*define for addressrangehi field*/ 3391 - #define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_offset 3 3392 - #define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_mask 0x0000001F 3393 - #define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_shift 16 3394 - #define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_ADDRESSRANGEHI(x) ((x & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_addressrangehi_shift) 3395 - 3396 - /*define for reserved field*/ 3397 - #define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_offset 3 3398 - #define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_mask 0x000001FF 3399 - #define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_shift 23 3400 - #define SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_RESERVED(x) ((x & SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_mask) << SDMA_PKT_VM_INVALIDATION_ADDRESSRANGEHI_reserved_shift) 3401 3402 #endif /* __SDMA_PKT_OPEN_H_ */
··· 64 #define HEADER_BARRIER 5 65 #define SDMA_OP_AQL_COPY 0 66 #define SDMA_OP_AQL_BARRIER_OR 0 67 68 /*define for op field*/ 69 #define SDMA_PKT_HEADER_op_offset 0 ··· 3334 #define SDMA_AQL_PKT_BARRIER_OR_COMPLETION_SIGNAL_HI_completion_signal_63_32_shift 0 3335 #define SDMA_AQL_PKT_BARRIER_OR_COMPLETION_SIGNAL_HI_COMPLETION_SIGNAL_63_32(x) (((x) & SDMA_AQL_PKT_BARRIER_OR_COMPLETION_SIGNAL_HI_completion_signal_63_32_mask) << SDMA_AQL_PKT_BARRIER_OR_COMPLETION_SIGNAL_HI_completion_signal_63_32_shift) 3336 3337 3338 #endif /* __SDMA_PKT_OPEN_H_ */
+34 -7
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 8707 int offdelay; 8708 8709 if (acrtc_state) { 8710 - if (amdgpu_ip_version(adev, DCE_HWIP, 0) < 8711 - IP_VERSION(3, 5, 0) || 8712 - acrtc_state->stream->link->psr_settings.psr_version < 8713 - DC_PSR_VERSION_UNSUPPORTED || 8714 - !(adev->flags & AMD_IS_APU)) { 8715 - timing = &acrtc_state->stream->timing; 8716 8717 - /* at least 2 frames */ 8718 offdelay = DIV64_U64_ROUND_UP((u64)20 * 8719 timing->v_total * 8720 timing->h_total, ··· 8747 8748 config.offdelay_ms = offdelay ?: 30; 8749 } else { 8750 config.disable_immediate = true; 8751 } 8752
··· 8707 int offdelay; 8708 8709 if (acrtc_state) { 8710 + timing = &acrtc_state->stream->timing; 8711 8712 + /* 8713 + * Depending on when the HW latching event of double-buffered 8714 + * registers happen relative to the PSR SDP deadline, and how 8715 + * bad the Panel clock has drifted since the last ALPM off 8716 + * event, there can be up to 3 frames of delay between sending 8717 + * the PSR exit cmd to DMUB fw, and when the panel starts 8718 + * displaying live frames. 8719 + * 8720 + * We can set: 8721 + * 8722 + * 20/100 * offdelay_ms = 3_frames_ms 8723 + * => offdelay_ms = 5 * 3_frames_ms 8724 + * 8725 + * This ensures that `3_frames_ms` will only be experienced as a 8726 + * 20% delay on top how long the display has been static, and 8727 + * thus make the delay less perceivable. 8728 + */ 8729 + if (acrtc_state->stream->link->psr_settings.psr_version < 8730 + DC_PSR_VERSION_UNSUPPORTED) { 8731 + offdelay = DIV64_U64_ROUND_UP((u64)5 * 3 * 10 * 8732 + timing->v_total * 8733 + timing->h_total, 8734 + timing->pix_clk_100hz); 8735 + config.offdelay_ms = offdelay ?: 30; 8736 + } else if (amdgpu_ip_version(adev, DCE_HWIP, 0) < 8737 + IP_VERSION(3, 5, 0) || 8738 + !(adev->flags & AMD_IS_APU)) { 8739 + /* 8740 + * Older HW and DGPU have issues with instant off; 8741 + * use a 2 frame offdelay. 8742 + */ 8743 offdelay = DIV64_U64_ROUND_UP((u64)20 * 8744 timing->v_total * 8745 timing->h_total, ··· 8722 8723 config.offdelay_ms = offdelay ?: 30; 8724 } else { 8725 + /* offdelay_ms = 0 will never disable vblank */ 8726 + config.offdelay_ms = 1; 8727 config.disable_immediate = true; 8728 } 8729
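To make the 5 * 3-frame formula in the PSR branch concrete, a worked example with an assumed CEA 1080p60 timing (h_total = 2200, v_total = 1125, pix_clk_100hz = 1485000, i.e. 148.5 MHz):

    /* one frame   = 10 * 2200 * 1125 / 1485000 ms ~= 16.7 ms
     * three frames ~= 50 ms, so offdelay = 5 * 50 ms = 250 ms */
    offdelay = DIV64_U64_ROUND_UP((u64)5 * 3 * 10 * 1125 * 2200, 1485000); /* 250 */

So vblank stays enabled for roughly a quarter of a second after the last update on such a PSR panel, versus the ~33 ms two-frame offdelay kept for older hardware and dGPUs.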
+1 -1
drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
··· 590 p->out_states->state_array[i].dtbclk_mhz = max_dtbclk_mhz; 591 p->out_states->state_array[i].phyclk_mhz = max_phyclk_mhz; 592 593 p->out_states->state_array[i].phyclk_mhz = max_phyclk_mhz; 594 p->out_states->state_array[i].dtbclk_mhz = max_dtbclk_mhz; 595 596 /* Dependent states. */ 597 - p->out_states->state_array[i].dscclk_mhz = p->in_states->state_array[i].dscclk_mhz; 598 p->out_states->state_array[i].dram_speed_mts = p->in_states->state_array[i].dram_speed_mts; 599 p->out_states->state_array[i].fabricclk_mhz = p->in_states->state_array[i].fabricclk_mhz; 600 p->out_states->state_array[i].socclk_mhz = p->in_states->state_array[i].socclk_mhz;
··· 590 p->out_states->state_array[i].dtbclk_mhz = max_dtbclk_mhz; 591 p->out_states->state_array[i].phyclk_mhz = max_phyclk_mhz; 592 593 + p->out_states->state_array[i].dscclk_mhz = max_dispclk_mhz / 3.0; 594 p->out_states->state_array[i].phyclk_mhz = max_phyclk_mhz; 595 p->out_states->state_array[i].dtbclk_mhz = max_dtbclk_mhz; 596 597 /* Dependent states. */ 598 p->out_states->state_array[i].dram_speed_mts = p->in_states->state_array[i].dram_speed_mts; 599 p->out_states->state_array[i].fabricclk_mhz = p->in_states->state_array[i].fabricclk_mhz; 600 p->out_states->state_array[i].socclk_mhz = p->in_states->state_array[i].socclk_mhz;
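The revert above restores the derived DSC clock, one third of the maximum display clock, instead of the per-state DPM dscclk value; as a purely illustrative number, a hypothetical max_dispclk_mhz of 1200 would give state_array[i].dscclk_mhz = 1200 / 3.0 = 400 MHz.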
+5 -1
drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
··· 3033 dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst); 3034 3035 phyd32clk = get_phyd32clk_src(link); 3036 - dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk); 3037 } else { 3038 if (dccg->funcs->enable_symclk_se) 3039 dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst,
··· 3033 dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst); 3034 3035 phyd32clk = get_phyd32clk_src(link); 3036 + if (link->cur_link_settings.link_rate == LINK_RATE_UNKNOWN) { 3037 + dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst); 3038 + } else { 3039 + dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk); 3040 + } 3041 } else { 3042 if (dccg->funcs->enable_symclk_se) 3043 dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst,
+5 -2
drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
··· 936 if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) { 937 if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { 938 dccg->funcs->set_dpstreamclk(dccg, DPREFCLK, tg->inst, dp_hpo_inst); 939 - 940 - dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk); 941 } else { 942 dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst, 943 link_enc->transmitter - TRANSMITTER_UNIPHY_A);
··· 936 if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) { 937 if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { 938 dccg->funcs->set_dpstreamclk(dccg, DPREFCLK, tg->inst, dp_hpo_inst); 939 + if (link->cur_link_settings.link_rate == LINK_RATE_UNKNOWN) { 940 + dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst); 941 + } else { 942 + dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk); 943 + } 944 } else { 945 dccg->funcs->enable_symclk_se(dccg, stream_enc->stream_enc_inst, 946 link_enc->transmitter - TRANSMITTER_UNIPHY_A);
+114
drivers/gpu/drm/amd/include/kgd_pp_interface.h
··· 341 #define MAX_CLKS 4 342 #define NUM_VCN 4 343 #define NUM_JPEG_ENG 32 344 #define MAX_XCC 8 345 #define NUM_XCP 8 346 struct seq_file; ··· 375 uint64_t gfx_busy_acc[MAX_XCC]; 376 /* Total App Clock Counter Accumulated */ 377 uint64_t gfx_below_host_limit_acc[MAX_XCC]; 378 }; 379 380 struct amd_pm_funcs { ··· 1100 1101 /* XCP metrics stats */ 1102 struct amdgpu_xcp_metrics_v1_1 xcp_stats[NUM_XCP]; 1103 1104 /* PCIE other end recovery counter */ 1105 uint32_t pcie_lc_perf_other_end_recovery;
··· 341 #define MAX_CLKS 4 342 #define NUM_VCN 4 343 #define NUM_JPEG_ENG 32 344 + #define NUM_JPEG_ENG_V1 40 345 #define MAX_XCC 8 346 #define NUM_XCP 8 347 struct seq_file; ··· 374 uint64_t gfx_busy_acc[MAX_XCC]; 375 /* Total App Clock Counter Accumulated */ 376 uint64_t gfx_below_host_limit_acc[MAX_XCC]; 377 + }; 378 + 379 + struct amdgpu_xcp_metrics_v1_2 { 380 + /* Utilization Instantaneous (%) */ 381 + uint32_t gfx_busy_inst[MAX_XCC]; 382 + uint16_t jpeg_busy[NUM_JPEG_ENG_V1]; 383 + uint16_t vcn_busy[NUM_VCN]; 384 + /* Utilization Accumulated (%) */ 385 + uint64_t gfx_busy_acc[MAX_XCC]; 386 + /* Total App Clock Counter Accumulated */ 387 + uint64_t gfx_below_host_limit_ppt_acc[MAX_XCC]; 388 + uint64_t gfx_below_host_limit_thm_acc[MAX_XCC]; 389 + uint64_t gfx_low_utilization_acc[MAX_XCC]; 390 + uint64_t gfx_below_host_limit_total_acc[MAX_XCC]; 391 }; 392 393 struct amd_pm_funcs { ··· 1085 1086 /* XCP metrics stats */ 1087 struct amdgpu_xcp_metrics_v1_1 xcp_stats[NUM_XCP]; 1088 + 1089 + /* PCIE other end recovery counter */ 1090 + uint32_t pcie_lc_perf_other_end_recovery; 1091 + }; 1092 + 1093 + struct gpu_metrics_v1_8 { 1094 + struct metrics_table_header common_header; 1095 + 1096 + /* Temperature (Celsius) */ 1097 + uint16_t temperature_hotspot; 1098 + uint16_t temperature_mem; 1099 + uint16_t temperature_vrsoc; 1100 + 1101 + /* Power (Watts) */ 1102 + uint16_t curr_socket_power; 1103 + 1104 + /* Utilization (%) */ 1105 + uint16_t average_gfx_activity; 1106 + uint16_t average_umc_activity; // memory controller 1107 + 1108 + /* VRAM max bandwidthi (in GB/sec) at max memory clock */ 1109 + uint64_t mem_max_bandwidth; 1110 + 1111 + /* Energy (15.259uJ (2^-16) units) */ 1112 + uint64_t energy_accumulator; 1113 + 1114 + /* Driver attached timestamp (in ns) */ 1115 + uint64_t system_clock_counter; 1116 + 1117 + /* Accumulation cycle counter */ 1118 + uint32_t accumulation_counter; 1119 + 1120 + /* Accumulated throttler residencies */ 1121 + uint32_t prochot_residency_acc; 1122 + uint32_t ppt_residency_acc; 1123 + uint32_t socket_thm_residency_acc; 1124 + uint32_t vr_thm_residency_acc; 1125 + uint32_t hbm_thm_residency_acc; 1126 + 1127 + /* Clock Lock Status. 
Each bit corresponds to clock instance */ 1128 + uint32_t gfxclk_lock_status; 1129 + 1130 + /* Link width (number of lanes) and speed (in 0.1 GT/s) */ 1131 + uint16_t pcie_link_width; 1132 + uint16_t pcie_link_speed; 1133 + 1134 + /* XGMI bus width and bitrate (in Gbps) */ 1135 + uint16_t xgmi_link_width; 1136 + uint16_t xgmi_link_speed; 1137 + 1138 + /* Utilization Accumulated (%) */ 1139 + uint32_t gfx_activity_acc; 1140 + uint32_t mem_activity_acc; 1141 + 1142 + /*PCIE accumulated bandwidth (GB/sec) */ 1143 + uint64_t pcie_bandwidth_acc; 1144 + 1145 + /*PCIE instantaneous bandwidth (GB/sec) */ 1146 + uint64_t pcie_bandwidth_inst; 1147 + 1148 + /* PCIE L0 to recovery state transition accumulated count */ 1149 + uint64_t pcie_l0_to_recov_count_acc; 1150 + 1151 + /* PCIE replay accumulated count */ 1152 + uint64_t pcie_replay_count_acc; 1153 + 1154 + /* PCIE replay rollover accumulated count */ 1155 + uint64_t pcie_replay_rover_count_acc; 1156 + 1157 + /* PCIE NAK sent accumulated count */ 1158 + uint32_t pcie_nak_sent_count_acc; 1159 + 1160 + /* PCIE NAK received accumulated count */ 1161 + uint32_t pcie_nak_rcvd_count_acc; 1162 + 1163 + /* XGMI accumulated data transfer size(KiloBytes) */ 1164 + uint64_t xgmi_read_data_acc[NUM_XGMI_LINKS]; 1165 + uint64_t xgmi_write_data_acc[NUM_XGMI_LINKS]; 1166 + 1167 + /* XGMI link status(active/inactive) */ 1168 + uint16_t xgmi_link_status[NUM_XGMI_LINKS]; 1169 + 1170 + uint16_t padding; 1171 + 1172 + /* PMFW attached timestamp (10ns resolution) */ 1173 + uint64_t firmware_timestamp; 1174 + 1175 + /* Current clocks (Mhz) */ 1176 + uint16_t current_gfxclk[MAX_GFX_CLKS]; 1177 + uint16_t current_socclk[MAX_CLKS]; 1178 + uint16_t current_vclk0[MAX_CLKS]; 1179 + uint16_t current_dclk0[MAX_CLKS]; 1180 + uint16_t current_uclk; 1181 + 1182 + /* Number of current partition */ 1183 + uint16_t num_partition; 1184 + 1185 + /* XCP metrics stats */ 1186 + struct amdgpu_xcp_metrics_v1_2 xcp_stats[NUM_XCP]; 1187 1188 /* PCIE other end recovery counter */ 1189 uint32_t pcie_lc_perf_other_end_recovery;
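For context, and as an assumption about how the table is normally consumed rather than something stated in the patch: amdgpu exposes these packed structures to userspace through the binary gpu_metrics sysfs attribute, and a reader is expected to check metrics_table_header before interpreting the payload as gpu_metrics_v1_8 (format revision 1, content revision 8). A hedged userspace sketch:

/* Hypothetical reader; the sysfs path and the v1.8 revision check are
 * assumptions for illustration, not driver code.
 */
#include <stdio.h>
#include <stdint.h>

struct metrics_table_header {
	uint16_t structure_size;
	uint8_t  format_revision;	/* 1 for the v1_x family */
	uint8_t  content_revision;	/* 8 for gpu_metrics_v1_8 */
};

int main(void)
{
	FILE *f = fopen("/sys/class/drm/card0/device/gpu_metrics", "rb");
	struct metrics_table_header hdr;

	if (!f)
		return 1;
	if (fread(&hdr, sizeof(hdr), 1, f) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	printf("size=%u rev=%u.%u\n", hdr.structure_size,
	       hdr.format_revision, hdr.content_revision);
	return !(hdr.format_revision == 1 && hdr.content_revision == 8);
}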
+2 -2
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
··· 267 if (hwmgr->thermal_controller.fanInfo.bNoFan || 268 (hwmgr->thermal_controller.fanInfo. 269 ucTachometerPulsesPerRevolution == 0) || 270 - speed == 0 || 271 (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) || 272 (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) 273 - return 0; 274 275 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) 276 smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
··· 267 if (hwmgr->thermal_controller.fanInfo.bNoFan || 268 (hwmgr->thermal_controller.fanInfo. 269 ucTachometerPulsesPerRevolution == 0) || 270 + (!speed || speed > UINT_MAX/8) || 271 (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) || 272 (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) 273 + return -EINVAL; 274 275 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) 276 smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
+2 -2
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
··· 307 int result = 0; 308 309 if (hwmgr->thermal_controller.fanInfo.bNoFan || 310 - speed == 0 || 311 (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) || 312 (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) 313 - return -1; 314 315 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) 316 result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
··· 307 int result = 0; 308 309 if (hwmgr->thermal_controller.fanInfo.bNoFan || 310 + (!speed || speed > UINT_MAX/8) || 311 (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) || 312 (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) 313 + return -EINVAL; 314 315 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) 316 result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
+1 -1
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
··· 191 uint32_t tach_period, crystal_clock_freq; 192 int result = 0; 193 194 - if (!speed) 195 return -EINVAL; 196 197 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) {
··· 191 uint32_t tach_period, crystal_clock_freq; 192 int result = 0; 193 194 + if (!speed || speed > UINT_MAX/8) 195 return -EINVAL; 196 197 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) {
+5 -2
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
··· 127 VOLTAGE_GUARDBAND_COUNT 128 } GFX_GUARDBAND_e; 129 130 - #define SMU_METRICS_TABLE_VERSION 0xF 131 132 // Unified metrics table for smu_v13_0_6 133 typedef struct __attribute__((packed, aligned(4))) { ··· 241 uint32_t PCIeOtherEndRecoveryAcc; // The Pcie counter itself is accumulated 242 243 //Total App Clock Counter 244 - uint64_t GfxclkBelowHostLimitAcc[8]; 245 } MetricsTableV0_t; 246 247 // Metrics table for smu_v13_0_6 APUS
··· 127 VOLTAGE_GUARDBAND_COUNT 128 } GFX_GUARDBAND_e; 129 130 + #define SMU_METRICS_TABLE_VERSION 0x10 131 132 // Unified metrics table for smu_v13_0_6 133 typedef struct __attribute__((packed, aligned(4))) { ··· 241 uint32_t PCIeOtherEndRecoveryAcc; // The Pcie counter itself is accumulated 242 243 //Total App Clock Counter 244 + uint64_t GfxclkBelowHostLimitPptAcc[8]; 245 + uint64_t GfxclkBelowHostLimitThmAcc[8]; 246 + uint64_t GfxclkBelowHostLimitTotalAcc[8]; 247 + uint64_t GfxclkLowUtilizationAcc[8]; 248 } MetricsTableV0_t; 249 250 // Metrics table for smu_v13_0_6 APUS
+3
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
··· 1267 uint32_t crystal_clock_freq = 2500; 1268 uint32_t tach_period; 1269 1270 tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); 1271 WREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT, 1272 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT),
··· 1267 uint32_t crystal_clock_freq = 2500; 1268 uint32_t tach_period; 1269 1270 + if (!speed || speed > UINT_MAX/8) 1271 + return -EINVAL; 1272 + 1273 tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); 1274 WREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT, 1275 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT),
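The common thread in these fan fixes is the new speed > UINT_MAX/8 bound: the tachometer period is derived by dividing by 8 * speed, and in 32-bit arithmetic a large bogus RPM value can make that product wrap, in the worst case all the way to zero. A small standalone illustration with a hypothetical out-of-range value:

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

int main(void)
{
	uint32_t crystal_clock_freq = 2500;	/* as in arcturus_ppt.c above */
	uint32_t speed = 0x20000000;		/* bogus RPM from userspace */
	uint32_t denom = 8 * speed;		/* wraps to 0 in 32-bit math */

	printf("8 * speed = %u\n", denom);	/* prints 0 */

	if (!speed || speed > UINT_MAX / 8)	/* the guard added by the patch */
		printf("rejected with -EINVAL\n");
	else
		printf("tach_period = %u\n",
		       60 * crystal_clock_freq * 10000 / denom);
	return 0;
}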
+1 -1
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
··· 1226 uint32_t tach_period; 1227 int ret; 1228 1229 - if (!speed) 1230 return -EINVAL; 1231 1232 ret = smu_v13_0_auto_fan_control(smu, 0);
··· 1226 uint32_t tach_period; 1227 int ret; 1228 1229 + if (!speed || speed > UINT_MAX/8) 1230 return -EINVAL; 1231 1232 ret = smu_v13_0_auto_fan_control(smu, 0);
-15
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
··· 109 SMU_CAP(OTHER_END_METRICS), 110 SMU_CAP(SET_UCLK_MAX), 111 SMU_CAP(PCIE_METRICS), 112 - SMU_CAP(HST_LIMIT_METRICS), 113 SMU_CAP(MCA_DEBUG_MODE), 114 SMU_CAP(PER_INST_METRICS), 115 SMU_CAP(CTF_LIMIT), ··· 324 325 if (fw_ver >= 0x05550E00) 326 smu_v13_0_6_cap_set(smu, SMU_CAP(OTHER_END_METRICS)); 327 - if (fw_ver >= 0x05551000) 328 - smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS)); 329 if (fw_ver >= 0x05550B00) 330 smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS)); 331 if (fw_ver >= 0x5551200) ··· 339 SMU_CAP(RMA_MSG), 340 SMU_CAP(ACA_SYND), 341 SMU_CAP(OTHER_END_METRICS), 342 - SMU_CAP(HST_LIMIT_METRICS), 343 SMU_CAP(PER_INST_METRICS) }; 344 uint32_t fw_ver = smu->smc_fw_version; 345 ··· 383 smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG)); 384 smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND)); 385 386 - if (fw_ver >= 0x04556F00) 387 - smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS)); 388 if (fw_ver >= 0x04556A00) 389 smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS)); 390 } else { ··· 402 smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG)); 403 if (fw_ver < 0x00555600) 404 smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND)); 405 - if (pgm == 0 && fw_ver >= 0x557900) 406 - smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS)); 407 } 408 if (((pgm == 7) && (fw_ver >= 0x7550700)) || 409 ((pgm == 0) && (fw_ver >= 0x00557900)) || ··· 2666 gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] = 2667 SMUQ10_ROUND(GET_GPU_METRIC_FIELD(GfxBusyAcc, 2668 version)[inst]); 2669 - 2670 - if (smu_v13_0_6_cap_supported( 2671 - smu, SMU_CAP(HST_LIMIT_METRICS))) 2672 - gpu_metrics->xcp_stats[i].gfx_below_host_limit_acc[idx] = 2673 - SMUQ10_ROUND(GET_GPU_METRIC_FIELD 2674 - (GfxclkBelowHostLimitAcc, version) 2675 - [inst]); 2676 idx++; 2677 } 2678 }
··· 109 SMU_CAP(OTHER_END_METRICS), 110 SMU_CAP(SET_UCLK_MAX), 111 SMU_CAP(PCIE_METRICS), 112 SMU_CAP(MCA_DEBUG_MODE), 113 SMU_CAP(PER_INST_METRICS), 114 SMU_CAP(CTF_LIMIT), ··· 325 326 if (fw_ver >= 0x05550E00) 327 smu_v13_0_6_cap_set(smu, SMU_CAP(OTHER_END_METRICS)); 328 if (fw_ver >= 0x05550B00) 329 smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS)); 330 if (fw_ver >= 0x5551200) ··· 342 SMU_CAP(RMA_MSG), 343 SMU_CAP(ACA_SYND), 344 SMU_CAP(OTHER_END_METRICS), 345 SMU_CAP(PER_INST_METRICS) }; 346 uint32_t fw_ver = smu->smc_fw_version; 347 ··· 387 smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG)); 388 smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND)); 389 390 if (fw_ver >= 0x04556A00) 391 smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS)); 392 } else { ··· 408 smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG)); 409 if (fw_ver < 0x00555600) 410 smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND)); 411 } 412 if (((pgm == 7) && (fw_ver >= 0x7550700)) || 413 ((pgm == 0) && (fw_ver >= 0x00557900)) || ··· 2674 gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] = 2675 SMUQ10_ROUND(GET_GPU_METRIC_FIELD(GfxBusyAcc, 2676 version)[inst]); 2677 idx++; 2678 } 2679 }
+54 -1
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
··· 79 #define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8 80 #define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9 81 #define PP_OD_FEATURE_FAN_MINIMUM_PWM 10 82 83 static struct cmn2asic_msg_mapping smu_v14_0_2_message_map[SMU_MSG_MAX_COUNT] = { 84 MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), ··· 1053 od_min_setting = overdrive_lowerlimits->FanMinimumPwm; 1054 od_max_setting = overdrive_upperlimits->FanMinimumPwm; 1055 break; 1056 default: 1057 od_min_setting = od_max_setting = INT_MAX; 1058 break; ··· 1332 &min_value, 1333 &max_value); 1334 size += sysfs_emit_at(buf, size, "MINIMUM_PWM: %u %u\n", 1335 min_value, max_value); 1336 break; 1337 ··· 2293 OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE | 2294 OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET | 2295 OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE | 2296 - OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET; 2297 } 2298 2299 static int smu_v14_0_2_get_overdrive_table(struct smu_context *smu, ··· 2374 user_od_table_bak.OverDriveTable.FanTargetTemperature; 2375 user_od_table->OverDriveTable.FanMinimumPwm = 2376 user_od_table_bak.OverDriveTable.FanMinimumPwm; 2377 } 2378 2379 smu_v14_0_2_set_supported_od_feature_mask(smu); ··· 2422 } 2423 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; 2424 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); 2425 break; 2426 case PP_OD_EDIT_ACOUSTIC_LIMIT: 2427 od_table->OverDriveTable.AcousticLimitRpmThreshold = ··· 2708 od_table->OverDriveTable.FanMinimumPwm = input[0]; 2709 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; 2710 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); 2711 break; 2712 2713 case PP_OD_RESTORE_DEFAULT_TABLE:
··· 79 #define PP_OD_FEATURE_FAN_ACOUSTIC_TARGET 8 80 #define PP_OD_FEATURE_FAN_TARGET_TEMPERATURE 9 81 #define PP_OD_FEATURE_FAN_MINIMUM_PWM 10 82 + #define PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE 11 83 84 static struct cmn2asic_msg_mapping smu_v14_0_2_message_map[SMU_MSG_MAX_COUNT] = { 85 MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), ··· 1052 od_min_setting = overdrive_lowerlimits->FanMinimumPwm; 1053 od_max_setting = overdrive_upperlimits->FanMinimumPwm; 1054 break; 1055 + case PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE: 1056 + od_min_setting = overdrive_lowerlimits->FanZeroRpmEnable; 1057 + od_max_setting = overdrive_upperlimits->FanZeroRpmEnable; 1058 + break; 1059 default: 1060 od_min_setting = od_max_setting = INT_MAX; 1061 break; ··· 1327 &min_value, 1328 &max_value); 1329 size += sysfs_emit_at(buf, size, "MINIMUM_PWM: %u %u\n", 1330 + min_value, max_value); 1331 + break; 1332 + 1333 + case SMU_OD_FAN_ZERO_RPM_ENABLE: 1334 + if (!smu_v14_0_2_is_od_feature_supported(smu, 1335 + PP_OD_FEATURE_ZERO_FAN_BIT)) 1336 + break; 1337 + 1338 + size += sysfs_emit_at(buf, size, "FAN_ZERO_RPM_ENABLE:\n"); 1339 + size += sysfs_emit_at(buf, size, "%d\n", 1340 + (int)od_table->OverDriveTable.FanZeroRpmEnable); 1341 + 1342 + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); 1343 + smu_v14_0_2_get_od_setting_limits(smu, 1344 + PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE, 1345 + &min_value, 1346 + &max_value); 1347 + size += sysfs_emit_at(buf, size, "ZERO_RPM_ENABLE: %u %u\n", 1348 min_value, max_value); 1349 break; 1350 ··· 2270 OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE | 2271 OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET | 2272 OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE | 2273 + OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET | 2274 + OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE | 2275 + OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET; 2276 } 2277 2278 static int smu_v14_0_2_get_overdrive_table(struct smu_context *smu, ··· 2349 user_od_table_bak.OverDriveTable.FanTargetTemperature; 2350 user_od_table->OverDriveTable.FanMinimumPwm = 2351 user_od_table_bak.OverDriveTable.FanMinimumPwm; 2352 + user_od_table->OverDriveTable.FanZeroRpmEnable = 2353 + user_od_table_bak.OverDriveTable.FanZeroRpmEnable; 2354 } 2355 2356 smu_v14_0_2_set_supported_od_feature_mask(smu); ··· 2395 } 2396 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; 2397 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); 2398 + break; 2399 + case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE: 2400 + od_table->OverDriveTable.FanZeroRpmEnable = 2401 + boot_overdrive_table->OverDriveTable.FanZeroRpmEnable; 2402 + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); 2403 break; 2404 case PP_OD_EDIT_ACOUSTIC_LIMIT: 2405 od_table->OverDriveTable.AcousticLimitRpmThreshold = ··· 2676 od_table->OverDriveTable.FanMinimumPwm = input[0]; 2677 od_table->OverDriveTable.FanMode = FAN_MODE_AUTO; 2678 od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_FAN_CURVE_BIT); 2679 + break; 2680 + 2681 + case PP_OD_EDIT_FAN_ZERO_RPM_ENABLE: 2682 + if (!smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_ZERO_FAN_BIT)) { 2683 + dev_warn(adev->dev, "Zero RPM setting not supported!\n"); 2684 + return -ENOTSUPP; 2685 + } 2686 + 2687 + smu_v14_0_2_get_od_setting_limits(smu, 2688 + PP_OD_FEATURE_FAN_ZERO_RPM_ENABLE, 2689 + &minimum, 2690 + &maximum); 2691 + if (input[0] < minimum || 2692 + input[0] > maximum) { 2693 + dev_info(adev->dev, "zero RPM enable setting(%ld) must be within [%d, %d]!\n", 2694 + input[0], minimum, maximum); 2695 + return -EINVAL; 2696 + } 2697 
+ 2698 + od_table->OverDriveTable.FanZeroRpmEnable = input[0]; 2699 + od_table->OverDriveTable.FeatureCtrlMask |= BIT(PP_OD_FEATURE_ZERO_FAN_BIT); 2700 break; 2701 2702 case PP_OD_RESTORE_DEFAULT_TABLE:
+3
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
··· 1083 case METRICS_VERSION(1, 7): 1084 structure_size = sizeof(struct gpu_metrics_v1_7); 1085 break; 1086 case METRICS_VERSION(2, 0): 1087 structure_size = sizeof(struct gpu_metrics_v2_0); 1088 break;
··· 1083 case METRICS_VERSION(1, 7): 1084 structure_size = sizeof(struct gpu_metrics_v1_7); 1085 break; 1086 + case METRICS_VERSION(1, 8): 1087 + structure_size = sizeof(struct gpu_metrics_v1_8); 1088 + break; 1089 case METRICS_VERSION(2, 0): 1090 structure_size = sizeof(struct gpu_metrics_v2_0); 1091 break;
+7 -6
drivers/gpu/drm/bridge/Kconfig
··· 91 Support for i.MX8MP DPI-to-LVDS on-SoC encoder. 92 93 config DRM_I2C_NXP_TDA998X 94 - tristate "NXP Semiconductors TDA998X HDMI encoder" 95 - default m if DRM_TILCDC 96 - select CEC_CORE if CEC_NOTIFIER 97 - select SND_SOC_HDMI_CODEC if SND_SOC 98 - help 99 - Support for NXP Semiconductors TDA998X HDMI encoders. 100 101 config DRM_ITE_IT6263 102 tristate "ITE IT6263 LVDS/HDMI bridge"
··· 91 Support for i.MX8MP DPI-to-LVDS on-SoC encoder. 92 93 config DRM_I2C_NXP_TDA998X 94 + tristate "NXP Semiconductors TDA998X HDMI encoder" 95 + default m if DRM_TILCDC 96 + select CEC_CORE if CEC_NOTIFIER 97 + select DRM_KMS_HELPER 98 + select SND_SOC_HDMI_CODEC if SND_SOC 99 + help 100 + Support for NXP Semiconductors TDA998X HDMI encoders. 101 102 config DRM_ITE_IT6263 103 tristate "ITE IT6263 LVDS/HDMI bridge"
+2
drivers/gpu/drm/i915/display/intel_fbdev.h
··· 6 #ifndef __INTEL_FBDEV_H__ 7 #define __INTEL_FBDEV_H__ 8 9 struct drm_fb_helper; 10 struct drm_fb_helper_surface_size; 11 struct drm_i915_private;
··· 6 #ifndef __INTEL_FBDEV_H__ 7 #define __INTEL_FBDEV_H__ 8 9 + #include <linux/types.h> 10 + 11 struct drm_fb_helper; 12 struct drm_fb_helper_surface_size; 13 struct drm_i915_private;
+4 -1
drivers/gpu/drm/i915/display/skl_watermark.c
··· 2314 static int 2315 dsc_prefill_latency(const struct intel_crtc_state *crtc_state) 2316 { 2317 const struct intel_crtc_scaler_state *scaler_state = 2318 &crtc_state->scaler_state; 2319 int linetime = DIV_ROUND_UP(1000 * crtc_state->hw.adjusted_mode.htotal, ··· 2324 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ? 2 : 1; 2325 u32 dsc_prefill_latency = 0; 2326 2327 - if (!crtc_state->dsc.compression_enable || !num_scaler_users) 2328 return dsc_prefill_latency; 2329 2330 dsc_prefill_latency = DIV_ROUND_UP(15 * linetime * chroma_downscaling_factor, 10);
··· 2314 static int 2315 dsc_prefill_latency(const struct intel_crtc_state *crtc_state) 2316 { 2317 + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2318 const struct intel_crtc_scaler_state *scaler_state = 2319 &crtc_state->scaler_state; 2320 int linetime = DIV_ROUND_UP(1000 * crtc_state->hw.adjusted_mode.htotal, ··· 2323 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ? 2 : 1; 2324 u32 dsc_prefill_latency = 0; 2325 2326 + if (!crtc_state->dsc.compression_enable || 2327 + !num_scaler_users || 2328 + num_scaler_users > crtc->num_scalers) 2329 return dsc_prefill_latency; 2330 2331 dsc_prefill_latency = DIV_ROUND_UP(15 * linetime * chroma_downscaling_factor, 10);
+1 -1
drivers/gpu/drm/xe/Kconfig
··· 53 config DRM_XE_DISPLAY 54 bool "Enable display support" 55 depends on DRM_XE && DRM_XE=m && HAS_IOPORT 56 - select FB_IOMEM_HELPERS 57 select I2C 58 select I2C_ALGOBIT 59 default y
··· 53 config DRM_XE_DISPLAY 54 bool "Enable display support" 55 depends on DRM_XE && DRM_XE=m && HAS_IOPORT 56 + select FB_IOMEM_HELPERS if DRM_FBDEV_EMULATION 57 select I2C 58 select I2C_ALGOBIT 59 default y
+4
drivers/gpu/drm/xe/regs/xe_engine_regs.h
··· 130 #define RING_EXECLIST_STATUS_LO(base) XE_REG((base) + 0x234) 131 #define RING_EXECLIST_STATUS_HI(base) XE_REG((base) + 0x234 + 4) 132 133 #define RING_CONTEXT_CONTROL(base) XE_REG((base) + 0x244, XE_REG_OPTION_MASKED) 134 #define CTX_CTRL_PXP_ENABLE REG_BIT(10) 135 #define CTX_CTRL_OAC_CONTEXT_ENABLE REG_BIT(8)
··· 130 #define RING_EXECLIST_STATUS_LO(base) XE_REG((base) + 0x234) 131 #define RING_EXECLIST_STATUS_HI(base) XE_REG((base) + 0x234 + 4) 132 133 + #define RING_IDLEDLY(base) XE_REG((base) + 0x23c) 134 + #define INHIBIT_SWITCH_UNTIL_PREEMPTED REG_BIT(31) 135 + #define IDLE_DELAY REG_GENMASK(20, 0) 136 + 137 #define RING_CONTEXT_CONTROL(base) XE_REG((base) + 0x244, XE_REG_OPTION_MASKED) 138 #define CTX_CTRL_PXP_ENABLE REG_BIT(10) 139 #define CTX_CTRL_OAC_CONTEXT_ENABLE REG_BIT(8)
+15 -2
drivers/gpu/drm/xe/xe_device.c
··· 53 #include "xe_pxp.h" 54 #include "xe_query.h" 55 #include "xe_shrinker.h" 56 #include "xe_sriov.h" 57 #include "xe_tile.h" 58 #include "xe_ttm_stolen_mgr.h" ··· 706 sriov_update_device_info(xe); 707 708 err = xe_pcode_probe_early(xe); 709 - if (err) 710 - return err; 711 712 err = wait_for_lmem_ready(xe); 713 if (err)
··· 53 #include "xe_pxp.h" 54 #include "xe_query.h" 55 #include "xe_shrinker.h" 56 + #include "xe_survivability_mode.h" 57 #include "xe_sriov.h" 58 #include "xe_tile.h" 59 #include "xe_ttm_stolen_mgr.h" ··· 705 sriov_update_device_info(xe); 706 707 err = xe_pcode_probe_early(xe); 708 + if (err) { 709 + int save_err = err; 710 + 711 + /* 712 + * Try to leave device in survivability mode if device is 713 + * possible, but still return the previous error for error 714 + * propagation 715 + */ 716 + err = xe_survivability_mode_enable(xe); 717 + if (err) 718 + return err; 719 + 720 + return save_err; 721 + } 722 723 err = wait_for_lmem_ready(xe); 724 if (err)
+1 -7
drivers/gpu/drm/xe/xe_eu_stall.c
··· 222 goto exit_free; 223 } 224 225 - ret = devm_add_action_or_reset(xe->drm.dev, xe_eu_stall_fini, gt); 226 - if (ret) 227 - goto exit_destroy; 228 - 229 - return 0; 230 - exit_destroy: 231 - destroy_workqueue(gt->eu_stall->buf_ptr_poll_wq); 232 exit_free: 233 mutex_destroy(&gt->eu_stall->stream_lock); 234 kfree(gt->eu_stall);
··· 222 goto exit_free; 223 } 224 225 + return devm_add_action_or_reset(xe->drm.dev, xe_eu_stall_fini, gt); 226 exit_free: 227 mutex_destroy(&gt->eu_stall->stream_lock); 228 kfree(gt->eu_stall);
+38 -16
drivers/gpu/drm/xe/xe_gt_clock.c
··· 16 #include "xe_macros.h" 17 #include "xe_mmio.h" 18 19 - static u32 get_crystal_clock_freq(u32 rpm_config_reg) 20 { 21 - const u32 f19_2_mhz = 19200000; 22 - const u32 f24_mhz = 24000000; 23 - const u32 f25_mhz = 25000000; 24 - const u32 f38_4_mhz = 38400000; 25 u32 crystal_clock = REG_FIELD_GET(RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK, 26 rpm_config_reg); 27 28 switch (crystal_clock) { 29 case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ: 30 - return f24_mhz; 31 case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ: 32 - return f19_2_mhz; 33 case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ: 34 - return f38_4_mhz; 35 case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ: 36 - return f25_mhz; 37 default: 38 - XE_WARN_ON("NOT_POSSIBLE"); 39 - return 0; 40 } 41 } 42 43 - int xe_gt_clock_init(struct xe_gt *gt) 44 { 45 - u32 c0 = xe_mmio_read32(&gt->mmio, RPM_CONFIG0); 46 - u32 freq = 0; 47 - 48 /* 49 * CTC_MODE[0] = 1 is definitely not supported for Xe2 and later 50 * platforms. In theory it could be a valid setting for pre-Xe2 ··· 69 */ 70 if (xe_mmio_read32(&gt->mmio, CTC_MODE) & CTC_SOURCE_DIVIDE_LOGIC) 71 xe_gt_warn(gt, "CTC_MODE[0] is set; this is unexpected and undocumented\n"); 72 73 - freq = get_crystal_clock_freq(c0); 74 75 /* 76 * Now figure out how the command stream's timestamp
··· 16 #include "xe_macros.h" 17 #include "xe_mmio.h" 18 19 + #define f19_2_mhz 19200000 20 + #define f24_mhz 24000000 21 + #define f25_mhz 25000000 22 + #define f38_4_mhz 38400000 23 + #define ts_base_83 83333 24 + #define ts_base_52 52083 25 + #define ts_base_80 80000 26 + 27 + static void read_crystal_clock(struct xe_gt *gt, u32 rpm_config_reg, u32 *freq, 28 + u32 *timestamp_base) 29 { 30 u32 crystal_clock = REG_FIELD_GET(RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK, 31 rpm_config_reg); 32 33 switch (crystal_clock) { 34 case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ: 35 + *freq = f24_mhz; 36 + *timestamp_base = ts_base_83; 37 + return; 38 case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ: 39 + *freq = f19_2_mhz; 40 + *timestamp_base = ts_base_52; 41 + return; 42 case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ: 43 + *freq = f38_4_mhz; 44 + *timestamp_base = ts_base_52; 45 + return; 46 case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ: 47 + *freq = f25_mhz; 48 + *timestamp_base = ts_base_80; 49 + return; 50 default: 51 + xe_gt_warn(gt, "Invalid crystal clock frequency: %u", crystal_clock); 52 + *freq = 0; 53 + *timestamp_base = 0; 54 + return; 55 } 56 } 57 58 + static void check_ctc_mode(struct xe_gt *gt) 59 { 60 /* 61 * CTC_MODE[0] = 1 is definitely not supported for Xe2 and later 62 * platforms. In theory it could be a valid setting for pre-Xe2 ··· 57 */ 58 if (xe_mmio_read32(&gt->mmio, CTC_MODE) & CTC_SOURCE_DIVIDE_LOGIC) 59 xe_gt_warn(gt, "CTC_MODE[0] is set; this is unexpected and undocumented\n"); 60 + } 61 62 + int xe_gt_clock_init(struct xe_gt *gt) 63 + { 64 + u32 freq; 65 + u32 c0; 66 + 67 + if (!IS_SRIOV_VF(gt_to_xe(gt))) 68 + check_ctc_mode(gt); 69 + 70 + c0 = xe_mmio_read32(&gt->mmio, RPM_CONFIG0); 71 + read_crystal_clock(gt, c0, &freq, &gt->info.timestamp_base); 72 73 /* 74 * Now figure out how the command stream's timestamp
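One observation on the new ts_base_* constants, offered as a reading of the values rather than anything stated in the patch: they look like timestamp periods in picoseconds (10^12 / 19,200,000 ≈ 52083, 10^12 / 12,500,000 = 80000, 10^12 / 12,000,000 ≈ 83333), and the adjust_idledly() change in xe_hw_engine.c below consumes them as idledly_units_ps = 8 * gt->info.timestamp_base when comparing RING_IDLEDLY against RING_PWRCTX_MAXCNT.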
+2
drivers/gpu/drm/xe/xe_gt_types.h
··· 121 enum xe_gt_type type; 122 /** @info.reference_clock: clock frequency */ 123 u32 reference_clock; 124 /** 125 * @info.engine_mask: mask of engines present on GT. Some of 126 * them may be reserved in runtime and not available for user.
··· 121 enum xe_gt_type type; 122 /** @info.reference_clock: clock frequency */ 123 u32 reference_clock; 124 + /** @info.timestamp_base: GT timestamp base */ 125 + u32 timestamp_base; 126 /** 127 * @info.engine_mask: mask of engines present on GT. Some of 128 * them may be reserved in runtime and not available for user.
+33
drivers/gpu/drm/xe/xe_hw_engine.c
··· 8 #include <linux/nospec.h> 9 10 #include <drm/drm_managed.h> 11 #include <uapi/drm/xe_drm.h> 12 13 #include "regs/xe_engine_regs.h" 14 #include "regs/xe_gt_regs.h" ··· 23 #include "xe_gsc.h" 24 #include "xe_gt.h" 25 #include "xe_gt_ccs_mode.h" 26 #include "xe_gt_printk.h" 27 #include "xe_gt_mcr.h" 28 #include "xe_gt_topology.h" ··· 567 xe_reg_whitelist_process_engine(hwe); 568 } 569 570 static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe, 571 enum xe_hw_engine_id id) 572 { ··· 633 /* We reserve the highest BCS instance for USM */ 634 if (xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY) 635 gt->usm.reserved_bcs_instance = hwe->instance; 636 637 return devm_add_action_or_reset(xe->drm.dev, hw_engine_fini, hwe); 638
··· 8 #include <linux/nospec.h> 9 10 #include <drm/drm_managed.h> 11 + #include <drm/drm_print.h> 12 #include <uapi/drm/xe_drm.h> 13 + #include <generated/xe_wa_oob.h> 14 15 #include "regs/xe_engine_regs.h" 16 #include "regs/xe_gt_regs.h" ··· 21 #include "xe_gsc.h" 22 #include "xe_gt.h" 23 #include "xe_gt_ccs_mode.h" 24 + #include "xe_gt_clock.h" 25 #include "xe_gt_printk.h" 26 #include "xe_gt_mcr.h" 27 #include "xe_gt_topology.h" ··· 564 xe_reg_whitelist_process_engine(hwe); 565 } 566 567 + static void adjust_idledly(struct xe_hw_engine *hwe) 568 + { 569 + struct xe_gt *gt = hwe->gt; 570 + u32 idledly, maxcnt; 571 + u32 idledly_units_ps = 8 * gt->info.timestamp_base; 572 + u32 maxcnt_units_ns = 640; 573 + bool inhibit_switch = 0; 574 + 575 + if (!IS_SRIOV_VF(gt_to_xe(hwe->gt)) && XE_WA(gt, 16023105232)) { 576 + idledly = xe_mmio_read32(&gt->mmio, RING_IDLEDLY(hwe->mmio_base)); 577 + maxcnt = xe_mmio_read32(&gt->mmio, RING_PWRCTX_MAXCNT(hwe->mmio_base)); 578 + 579 + inhibit_switch = idledly & INHIBIT_SWITCH_UNTIL_PREEMPTED; 580 + idledly = REG_FIELD_GET(IDLE_DELAY, idledly); 581 + idledly = DIV_ROUND_CLOSEST(idledly * idledly_units_ps, 1000); 582 + maxcnt = REG_FIELD_GET(IDLE_WAIT_TIME, maxcnt); 583 + maxcnt *= maxcnt_units_ns; 584 + 585 + if (xe_gt_WARN_ON(gt, idledly >= maxcnt || inhibit_switch)) { 586 + idledly = DIV_ROUND_CLOSEST(((maxcnt - 1) * maxcnt_units_ns), 587 + idledly_units_ps); 588 + idledly = DIV_ROUND_CLOSEST(idledly, 1000); 589 + xe_mmio_write32(&gt->mmio, RING_IDLEDLY(hwe->mmio_base), idledly); 590 + } 591 + } 592 + } 593 + 594 static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe, 595 enum xe_hw_engine_id id) 596 { ··· 603 /* We reserve the highest BCS instance for USM */ 604 if (xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY) 605 gt->usm.reserved_bcs_instance = hwe->instance; 606 + 607 + /* Ensure IDLEDLY is lower than MAXCNT */ 608 + adjust_idledly(hwe); 609 610 return devm_add_action_or_reset(xe->drm.dev, hw_engine_fini, hwe); 611
+7 -9
drivers/gpu/drm/xe/xe_pci.c
··· 803 return err; 804 805 err = xe_device_probe_early(xe); 806 - 807 - /* 808 - * In Boot Survivability mode, no drm card is exposed and driver is 809 - * loaded with bare minimum to allow for firmware to be flashed through 810 - * mei. If early probe fails, check if survivability mode is flagged by 811 - * HW to be enabled. In that case enable it and return success. 812 - */ 813 if (err) { 814 - if (xe_survivability_mode_required(xe) && 815 - xe_survivability_mode_enable(xe)) 816 return 0; 817 818 return err;
··· 803 return err; 804 805 err = xe_device_probe_early(xe); 806 if (err) { 807 + /* 808 + * In Boot Survivability mode, no drm card is exposed and driver 809 + * is loaded with bare minimum to allow for firmware to be 810 + * flashed through mei. If early probe failed, but it managed to 811 + * enable survivability mode, return success. 812 + */ 813 + if (xe_survivability_mode_is_enabled(xe)) 814 return 0; 815 816 return err;
+22 -9
drivers/gpu/drm/xe/xe_survivability_mode.c
··· 155 if (ret) 156 return ret; 157 158 ret = xe_heci_gsc_init(xe); 159 - if (ret) 160 return ret; 161 162 xe_vsec_init(xe); 163 164 - survivability->mode = true; 165 dev_err(dev, "In Survivability Mode\n"); 166 167 return 0; ··· 186 return xe->survivability.mode; 187 } 188 189 - /** 190 - * xe_survivability_mode_required - checks if survivability mode is required 191 - * @xe: xe device instance 192 * 193 - * This function reads the boot status from Pcode 194 * 195 - * Return: true if boot status indicates failure, false otherwise 196 */ 197 - bool xe_survivability_mode_required(struct xe_device *xe) 198 { 199 struct xe_survivability *survivability = &xe->survivability; 200 struct xe_mmio *mmio = xe_root_tile_mmio(xe); ··· 217 * 218 * Initialize survivability information and enable survivability mode 219 * 220 - * Return: 0 for success, negative error code otherwise. 221 */ 222 int xe_survivability_mode_enable(struct xe_device *xe) 223 { 224 struct xe_survivability *survivability = &xe->survivability; 225 struct xe_survivability_info *info; 226 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); 227 228 survivability->size = MAX_SCRATCH_MMIO; 229
··· 155 if (ret) 156 return ret; 157 158 + /* Make sure xe_heci_gsc_init() knows about survivability mode */ 159 + survivability->mode = true; 160 + 161 ret = xe_heci_gsc_init(xe); 162 + if (ret) { 163 + /* 164 + * But if it fails, device can't enter survivability 165 + * so move it back for correct error handling 166 + */ 167 + survivability->mode = false; 168 return ret; 169 + } 170 171 xe_vsec_init(xe); 172 173 dev_err(dev, "In Survivability Mode\n"); 174 175 return 0; ··· 178 return xe->survivability.mode; 179 } 180 181 + /* 182 + * survivability_mode_requested - check if it's possible to enable 183 + * survivability mode and that was requested by firmware 184 * 185 + * This function reads the boot status from Pcode. 186 * 187 + * Return: true if platform support is available and boot status indicates 188 + * failure, false otherwise. 189 */ 190 + static bool survivability_mode_requested(struct xe_device *xe) 191 { 192 struct xe_survivability *survivability = &xe->survivability; 193 struct xe_mmio *mmio = xe_root_tile_mmio(xe); ··· 208 * 209 * Initialize survivability information and enable survivability mode 210 * 211 + * Return: 0 if survivability mode is enabled or not requested; negative error 212 + * code otherwise. 213 */ 214 int xe_survivability_mode_enable(struct xe_device *xe) 215 { 216 struct xe_survivability *survivability = &xe->survivability; 217 struct xe_survivability_info *info; 218 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); 219 + 220 + if (!survivability_mode_requested(xe)) 221 + return 0; 222 223 survivability->size = MAX_SCRATCH_MMIO; 224
-1
drivers/gpu/drm/xe/xe_survivability_mode.h
··· 12 13 int xe_survivability_mode_enable(struct xe_device *xe); 14 bool xe_survivability_mode_is_enabled(struct xe_device *xe); 15 - bool xe_survivability_mode_required(struct xe_device *xe); 16 17 #endif /* _XE_SURVIVABILITY_MODE_H_ */
··· 12 13 int xe_survivability_mode_enable(struct xe_device *xe); 14 bool xe_survivability_mode_is_enabled(struct xe_device *xe); 15 16 #endif /* _XE_SURVIVABILITY_MODE_H_ */
+6
drivers/gpu/drm/xe/xe_wa.c
··· 622 FUNC(xe_rtp_match_first_render_or_compute)), 623 XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, RES_CHK_SPR_DIS)) 624 }, 625 }; 626 627 static const struct xe_rtp_entry_sr lrc_was[] = {
··· 622 FUNC(xe_rtp_match_first_render_or_compute)), 623 XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, RES_CHK_SPR_DIS)) 624 }, 625 + { XE_RTP_NAME("16023105232"), 626 + XE_RTP_RULES(MEDIA_VERSION_RANGE(1301, 3000), OR, 627 + GRAPHICS_VERSION_RANGE(2001, 3001)), 628 + XE_RTP_ACTIONS(SET(RING_PSMI_CTL(0), RC_SEMA_IDLE_MSG_DISABLE, 629 + XE_RTP_ACTION_FLAG(ENGINE_BASE))) 630 + }, 631 }; 632 633 static const struct xe_rtp_entry_sr lrc_was[] = {
+2
drivers/gpu/drm/xe/xe_wa_oob.rules
··· 53 GRAPHICS_VERSION_RANGE(1270, 1274) 54 1508761755 GRAPHICS_VERSION(1255) 55 GRAPHICS_VERSION(1260), GRAPHICS_STEP(A0, B0)
··· 53 GRAPHICS_VERSION_RANGE(1270, 1274) 54 1508761755 GRAPHICS_VERSION(1255) 55 GRAPHICS_VERSION(1260), GRAPHICS_STEP(A0, B0) 56 + 16023105232 GRAPHICS_VERSION_RANGE(2001, 3001) 57 + MEDIA_VERSION_RANGE(1301, 3000)