Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'amd-drm-fixes-6.10-2024-06-26' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-6.10-2024-06-26:

amdgpu:
- SMU 14.x fix
- vram info parsing fix
- mode1 reset fix
- LTTPR fix
- Virtual display fix
- Avoid spurious error in PSP init

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240626221408.2019633-1-alexander.deucher@amd.com

+156 -15
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
··· 400 400 mem_channel_number = vram_info->v30.channel_num; 401 401 mem_channel_width = vram_info->v30.channel_width; 402 402 if (vram_width) 403 - *vram_width = mem_channel_number * (1 << mem_channel_width); 403 + *vram_width = mem_channel_number * 16; 404 404 break; 405 405 default: 406 406 return -EINVAL;
+5 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 5220 5220 5221 5221 dev_info(adev->dev, "GPU mode1 reset\n"); 5222 5222 5223 + /* Cache the state before bus master disable. The saved config space 5224 + * values are used in other cases like restore after mode-2 reset. 5225 + */ 5226 + amdgpu_device_cache_pci_state(adev->pdev); 5227 + 5223 5228 /* disable BM */ 5224 5229 pci_clear_master(adev->pdev); 5225 - 5226 - amdgpu_device_cache_pci_state(adev->pdev); 5227 5230 5228 5231 if (amdgpu_dpm_is_mode1_reset_supported(adev)) { 5229 5232 dev_info(adev->dev, "GPU smu mode1 reset\n");
+21 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
··· 640 640 } 641 641 } 642 642 643 + static bool psp_err_warn(struct psp_context *psp) 644 + { 645 + struct psp_gfx_cmd_resp *cmd = psp->cmd_buf_mem; 646 + 647 + /* This response indicates reg list is already loaded */ 648 + if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) && 649 + cmd->cmd_id == GFX_CMD_ID_LOAD_IP_FW && 650 + cmd->cmd.cmd_load_ip_fw.fw_type == GFX_FW_TYPE_REG_LIST && 651 + cmd->resp.status == TEE_ERROR_CANCEL) 652 + return false; 653 + 654 + return true; 655 + } 656 + 643 657 static int 644 658 psp_cmd_submit_buf(struct psp_context *psp, 645 659 struct amdgpu_firmware_info *ucode, ··· 713 699 dev_warn(psp->adev->dev, 714 700 "failed to load ucode %s(0x%X) ", 715 701 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id); 716 - dev_warn(psp->adev->dev, 717 - "psp gfx command %s(0x%X) failed and response status is (0x%X)\n", 718 - psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id, 719 - psp->cmd_buf_mem->resp.status); 702 + if (psp_err_warn(psp)) 703 + dev_warn( 704 + psp->adev->dev, 705 + "psp gfx command %s(0x%X) failed and response status is (0x%X)\n", 706 + psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), 707 + psp->cmd_buf_mem->cmd_id, 708 + psp->cmd_buf_mem->resp.status); 720 709 /* If any firmware (including CAP) load fails under SRIOV, it should 721 710 * return failure to stop the VF from initializing. 722 711 * Also return failure in case of timeout
+16 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
··· 3 3 #include <drm/drm_atomic_helper.h> 4 4 #include <drm/drm_edid.h> 5 5 #include <drm/drm_simple_kms_helper.h> 6 + #include <drm/drm_gem_framebuffer_helper.h> 6 7 #include <drm/drm_vblank.h> 7 8 8 9 #include "amdgpu.h" ··· 315 314 return 0; 316 315 } 317 316 afb = to_amdgpu_framebuffer(new_state->fb); 318 - obj = new_state->fb->obj[0]; 317 + 318 + obj = drm_gem_fb_get_obj(new_state->fb, 0); 319 + if (!obj) { 320 + DRM_ERROR("Failed to get obj from framebuffer\n"); 321 + return -EINVAL; 322 + } 323 + 319 324 rbo = gem_to_amdgpu_bo(obj); 320 325 adev = amdgpu_ttm_adev(rbo->tbo.bdev); 321 326 ··· 375 368 struct drm_plane_state *old_state) 376 369 { 377 370 struct amdgpu_bo *rbo; 371 + struct drm_gem_object *obj; 378 372 int r; 379 373 380 374 if (!old_state->fb) 381 375 return; 382 376 383 - rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]); 377 + obj = drm_gem_fb_get_obj(old_state->fb, 0); 378 + if (!obj) { 379 + DRM_ERROR("Failed to get obj from framebuffer\n"); 380 + return; 381 + } 382 + 383 + rbo = gem_to_amdgpu_bo(obj); 384 384 r = amdgpu_bo_reserve(rbo, false); 385 385 if (unlikely(r)) { 386 386 DRM_ERROR("failed to reserve rbo before unpin\n");
+3 -2
drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
··· 464 464 #define PSP_ERR_UNKNOWN_COMMAND 0x00000100 465 465 466 466 enum tee_error_code { 467 - TEE_SUCCESS = 0x00000000, 468 - TEE_ERROR_NOT_SUPPORTED = 0xFFFF000A, 467 + TEE_SUCCESS = 0x00000000, 468 + TEE_ERROR_CANCEL = 0xFFFF0002, 469 + TEE_ERROR_NOT_SUPPORTED = 0xFFFF000A, 469 470 }; 470 471 471 472 #endif /* _PSP_TEE_GFX_IF_H_ */
+5
drivers/gpu/drm/amd/display/include/dpcd_defs.h
··· 177 177 #define DP_SINK_PR_PIXEL_DEVIATION_PER_LINE 0x379 178 178 #define DP_SINK_PR_MAX_NUMBER_OF_DEVIATION_LINE 0x37A 179 179 180 + /* Remove once drm_dp_helper.h is updated upstream */ 181 + #ifndef DP_TOTAL_LTTPR_CNT 182 + #define DP_TOTAL_LTTPR_CNT 0xF000A /* 2.1 */ 183 + #endif 184 + 180 185 #endif /* __DAL_DPCD_DEFS_H__ */
+13
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
··· 324 324 return ret; 325 325 } 326 326 327 + static int smu_set_mall_enable(struct smu_context *smu) 328 + { 329 + int ret = 0; 330 + 331 + if (!smu->ppt_funcs->set_mall_enable) 332 + return 0; 333 + 334 + ret = smu->ppt_funcs->set_mall_enable(smu); 335 + 336 + return ret; 337 + } 338 + 327 339 /** 328 340 * smu_dpm_set_power_gate - power gate/ungate the specific IP block 329 341 * ··· 1803 1791 smu_dpm_set_jpeg_enable(smu, true); 1804 1792 smu_dpm_set_vpe_enable(smu, true); 1805 1793 smu_dpm_set_umsch_mm_enable(smu, true); 1794 + smu_set_mall_enable(smu); 1806 1795 smu_set_gfx_cgpg(smu, true); 1807 1796 } 1808 1797
+5
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
··· 1395 1395 int (*dpm_set_umsch_mm_enable)(struct smu_context *smu, bool enable); 1396 1396 1397 1397 /** 1398 + * @set_mall_enable: Init MALL power gating control. 1399 + */ 1400 + int (*set_mall_enable)(struct smu_context *smu); 1401 + 1402 + /** 1398 1403 * @notify_rlc_state: Notify RLC power state to SMU. 1399 1404 */ 1400 1405 int (*notify_rlc_state)(struct smu_context *smu, bool en);
+2 -2
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
··· 106 106 #define PPSMC_MSG_DisableLSdma 0x35 ///< Disable LSDMA 107 107 #define PPSMC_MSG_SetSoftMaxVpe 0x36 ///< 108 108 #define PPSMC_MSG_SetSoftMinVpe 0x37 ///< 109 - #define PPSMC_MSG_AllocMALLCache 0x38 ///< Allocating MALL Cache 110 - #define PPSMC_MSG_ReleaseMALLCache 0x39 ///< Releasing MALL Cache 109 + #define PPSMC_MSG_MALLPowerController 0x38 ///< Set MALL control 110 + #define PPSMC_MSG_MALLPowerState 0x39 ///< Enter/Exit MALL PG 111 111 #define PPSMC_Message_Count 0x3A ///< Total number of PPSMC messages 112 112 /** @}*/ 113 113
+3 -1
drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
··· 272 272 __SMU_DUMMY_MAP(SetSoftMinVpe), \ 273 273 __SMU_DUMMY_MAP(GetMetricsVersion), \ 274 274 __SMU_DUMMY_MAP(EnableUCLKShadow), \ 275 - __SMU_DUMMY_MAP(RmaDueToBadPageThreshold), 275 + __SMU_DUMMY_MAP(RmaDueToBadPageThreshold), \ 276 + __SMU_DUMMY_MAP(MALLPowerController), \ 277 + __SMU_DUMMY_MAP(MALLPowerState), 276 278 277 279 #undef __SMU_DUMMY_MAP 278 280 #define __SMU_DUMMY_MAP(type) SMU_MSG_##type
+73
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
··· 52 52 #define mmMP1_SMN_C2PMSG_90 0x029a 53 53 #define mmMP1_SMN_C2PMSG_90_BASE_IDX 0 54 54 55 + /* MALLPowerController message arguments (Defines for the Cache mode control) */ 56 + #define SMU_MALL_PMFW_CONTROL 0 57 + #define SMU_MALL_DRIVER_CONTROL 1 58 + 59 + /* 60 + * MALLPowerState message arguments 61 + * (Defines for the Allocate/Release Cache mode if in driver mode) 62 + */ 63 + #define SMU_MALL_EXIT_PG 0 64 + #define SMU_MALL_ENTER_PG 1 65 + 66 + #define SMU_MALL_PG_CONFIG_DEFAULT SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON 67 + 55 68 #define FEATURE_MASK(feature) (1ULL << feature) 56 69 #define SMC_DPM_FEATURE ( \ 57 70 FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \ ··· 78 65 FEATURE_MASK(FEATURE_IPU_DPM_BIT) | \ 79 66 FEATURE_MASK(FEATURE_GFX_DPM_BIT) | \ 80 67 FEATURE_MASK(FEATURE_VPE_DPM_BIT)) 68 + 69 + enum smu_mall_pg_config { 70 + SMU_MALL_PG_CONFIG_PMFW_CONTROL = 0, 71 + SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON = 1, 72 + SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_OFF = 2, 73 + }; 81 74 82 75 static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] = { 83 76 MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), ··· 132 113 MSG_MAP(PowerDownUmsch, PPSMC_MSG_PowerDownUmsch, 1), 133 114 MSG_MAP(SetSoftMaxVpe, PPSMC_MSG_SetSoftMaxVpe, 1), 134 115 MSG_MAP(SetSoftMinVpe, PPSMC_MSG_SetSoftMinVpe, 1), 116 + MSG_MAP(MALLPowerController, PPSMC_MSG_MALLPowerController, 1), 117 + MSG_MAP(MALLPowerState, PPSMC_MSG_MALLPowerState, 1), 135 118 }; 136 119 static struct cmn2asic_mapping smu_v14_0_0_feature_mask_map[SMU_FEATURE_COUNT] = { ··· 1444 1423 return 0; 1445 1424 } 1446 1425 1426 + static int smu_v14_0_1_init_mall_power_gating(struct smu_context *smu, enum smu_mall_pg_config pg_config) 1427 + { 1428 + struct amdgpu_device *adev = smu->adev; 1429 + int ret = 0; 1430 + 1431 + if (pg_config == SMU_MALL_PG_CONFIG_PMFW_CONTROL) { 1432 + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerController, 1433 + SMU_MALL_PMFW_CONTROL, NULL); 1434 + if (ret) { 1435 + dev_err(adev->dev, "Init MALL PMFW CONTROL Failure\n"); 1436 + return ret; 1437 + } 1438 + } else { 1439 + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerController, 1440 + SMU_MALL_DRIVER_CONTROL, NULL); 1441 + if (ret) { 1442 + dev_err(adev->dev, "Init MALL Driver CONTROL Failure\n"); 1443 + return ret; 1444 + } 1445 + 1446 + if (pg_config == SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON) { 1447 + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerState, 1448 + SMU_MALL_EXIT_PG, NULL); 1449 + if (ret) { 1450 + dev_err(adev->dev, "EXIT MALL PG Failure\n"); 1451 + return ret; 1452 + } 1453 + } else if (pg_config == SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_OFF) { 1454 + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_MALLPowerState, 1455 + SMU_MALL_ENTER_PG, NULL); 1456 + if (ret) { 1457 + dev_err(adev->dev, "Enter MALL PG Failure\n"); 1458 + return ret; 1459 + } 1460 + } 1461 + } 1462 + 1463 + return ret; 1464 + } 1465 + 1466 + static int smu_v14_0_common_set_mall_enable(struct smu_context *smu) 1467 + { 1468 + enum smu_mall_pg_config pg_config = SMU_MALL_PG_CONFIG_DEFAULT; 1469 + int ret = 0; 1470 + 1471 + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) 1472 + ret = smu_v14_0_1_init_mall_power_gating(smu, pg_config); 1473 + 1474 + return ret; 1475 + } 1476 + 1447 1477 static const struct pptable_funcs smu_v14_0_0_ppt_funcs = { 1448 1478 .check_fw_status = smu_v14_0_check_fw_status, 1449 1479 .check_fw_version = smu_v14_0_check_fw_version, ··· 1526 1454 .dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable, 1527 1455 .dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable, 1528 1456 .get_dpm_clock_table = smu_v14_0_common_get_dpm_table, 1457 + .set_mall_enable = smu_v14_0_common_set_mall_enable, 1529 1458 }; 1530 1459 static void smu_v14_0_0_set_smu_mailbox_registers(struct smu_context *smu)