Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'amd-drm-fixes-5.11-2021-01-21' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-5.11-2021-01-21:

amdgpu:
- Green Sardine fixes
- Vangogh fixes
- Renoir fixes
- Misc display fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210121160129.3981-1-alexander.deucher@amd.com

+98 -38
-1
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 81 81 MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin"); 82 82 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin"); 83 83 MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin"); 84 - MODULE_FIRMWARE("amdgpu/green_sardine_gpu_info.bin"); 85 84 86 85 #define AMDGPU_RESUME_MS 2000 87 86
+3 -1
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
··· 119 119 #define mmVGT_ESGS_RING_SIZE_Vangogh_BASE_IDX 1 120 120 #define mmSPI_CONFIG_CNTL_Vangogh 0x2440 121 121 #define mmSPI_CONFIG_CNTL_Vangogh_BASE_IDX 1 122 + #define mmGCR_GENERAL_CNTL_Vangogh 0x1580 123 + #define mmGCR_GENERAL_CNTL_Vangogh_BASE_IDX 0 122 124 123 125 #define mmCP_HYP_PFP_UCODE_ADDR 0x5814 124 126 #define mmCP_HYP_PFP_UCODE_ADDR_BASE_IDX 1 ··· 3246 3244 SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000), 3247 3245 SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000), 3248 3246 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000142), 3249 - SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500), 3247 + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Vangogh, 0x1ff1ffff, 0x00000500), 3250 3248 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x000000e4), 3251 3249 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210), 3252 3250 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210),
+59 -21
drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
··· 491 491 { 492 492 uint32_t def, data, def1, data1; 493 493 494 - def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG); 494 + def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL); 495 495 def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2); 496 496 497 497 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) { 498 - data |= MM_ATC_L2_MISC_CG__ENABLE_MASK; 499 - 498 + data &= ~MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK; 500 499 data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK | 501 500 DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK | 502 501 DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK | ··· 504 505 DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK); 505 506 506 507 } else { 507 - data &= ~MM_ATC_L2_MISC_CG__ENABLE_MASK; 508 - 508 + data |= MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK; 509 509 data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK | 510 510 DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK | 511 511 DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK | ··· 514 516 } 515 517 516 518 if (def != data) 517 - WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data); 519 + WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data); 518 520 if (def1 != data1) 519 521 WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1); 520 522 } ··· 523 525 mmhub_v2_3_update_medium_grain_light_sleep(struct amdgpu_device *adev, 524 526 bool enable) 525 527 { 526 - uint32_t def, data; 528 + uint32_t def, data, def1, data1, def2, data2; 527 529 528 - def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG); 530 + def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL); 531 + def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL); 532 + def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL); 529 533 530 - if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) 531 - data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK; 532 - else 533 - data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK; 534 + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) { 535 + data &= ~MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK;
536 + data1 &= !(DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK | 537 + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK | 538 + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK | 539 + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK | 540 + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK); 541 + data2 &= !(DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK | 542 + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK | 543 + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK | 544 + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK | 545 + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK); 546 + } else { 547 + data |= MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK; 548 + data1 |= (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK | 549 + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK | 550 + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK | 551 + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK | 552 + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK); 553 + data2 |= (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK | 554 + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK | 555 + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK | 556 + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK | 557 + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK); 558 + } 534 559 535 560 if (def != data) 536 - WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data); 561 + WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data); 562 + if (def1 != data1) 563 + WREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL, data1); 564 + if (def2 != data2) 565 + WREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL, data2); 537 566 } 538 567 539 568 static int mmhub_v2_3_set_clockgating(struct amdgpu_device *adev, ··· 579 554 580 555 static void mmhub_v2_3_get_clockgating(struct amdgpu_device *adev, u32 *flags) 581 556 { 582 - int data, data1; 557 + int data, data1, data2, data3; 583 558 584 559 if (amdgpu_sriov_vf(adev)) 585 560 *flags = 0; 586 561 587 - data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG); 588 - data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
562 + data = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2); 563 + data1 = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL); 564 + data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL); 565 + data3 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL); 589 566 590 567 /* AMD_CG_SUPPORT_MC_MGCG */ 591 - if ((data & MM_ATC_L2_MISC_CG__ENABLE_MASK) && 592 - !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK | 568 + if (!(data & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK | 593 569 DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK | 594 570 DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK | 595 571 DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK | 596 572 DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK | 597 - DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK))) 598 - *flags |= AMD_CG_SUPPORT_MC_MGCG; 573 + DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)) 574 + && !(data1 & MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK)) { 575 + *flags |= AMD_CG_SUPPORT_MC_MGCG; 576 + } 599 577 600 578 /* AMD_CG_SUPPORT_MC_LS */ 601 - if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK) 579 + if (!(data1 & MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK) 580 + && !(data2 & (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK | 581 + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK | 582 + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK | 583 + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK | 584 + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK)) 585 + && !(data3 & (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK | 586 + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK | 587 + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK | 588 + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK | 589 + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK))) 602 590 *flags |= AMD_CG_SUPPORT_MC_LS; 603 591 } 604 592
+4 -2
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
··· 251 251 struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu; 252 252 bool force_reset = false; 253 253 bool update_uclk = false; 254 + bool p_state_change_support; 254 255 255 256 if (dc->work_arounds.skip_clock_update || !clk_mgr->smu_present) 256 257 return; ··· 292 291 clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz; 293 292 294 293 clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support; 295 - if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) { 296 - clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support; 294 + p_state_change_support = new_clocks->p_state_change_support || (display_count == 0); 295 + if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) { 296 + clk_mgr_base->clks.p_state_change_support = p_state_change_support; 297 297 298 298 /* to disable P-State switching, set UCLK min = max */ 299 299 if (!clk_mgr_base->clks.p_state_change_support)
+14 -4
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
··· 647 647 if (REG(DC_IP_REQUEST_CNTL)) { 648 648 REG_SET(DC_IP_REQUEST_CNTL, 0, 649 649 IP_REQUEST_EN, 1); 650 - hws->funcs.dpp_pg_control(hws, plane_id, true); 651 - hws->funcs.hubp_pg_control(hws, plane_id, true); 650 + 651 + if (hws->funcs.dpp_pg_control) 652 + hws->funcs.dpp_pg_control(hws, plane_id, true); 653 + 654 + if (hws->funcs.hubp_pg_control) 655 + hws->funcs.hubp_pg_control(hws, plane_id, true); 656 + 652 657 REG_SET(DC_IP_REQUEST_CNTL, 0, 653 658 IP_REQUEST_EN, 0); 654 659 DC_LOG_DEBUG( ··· 1087 1082 if (REG(DC_IP_REQUEST_CNTL)) { 1088 1083 REG_SET(DC_IP_REQUEST_CNTL, 0, 1089 1084 IP_REQUEST_EN, 1); 1090 - hws->funcs.dpp_pg_control(hws, dpp->inst, false); 1091 - hws->funcs.hubp_pg_control(hws, hubp->inst, false); 1085 + 1086 + if (hws->funcs.dpp_pg_control) 1087 + hws->funcs.dpp_pg_control(hws, dpp->inst, false); 1088 + 1089 + if (hws->funcs.hubp_pg_control) 1090 + hws->funcs.hubp_pg_control(hws, hubp->inst, false); 1091 + 1092 1092 dpp->funcs->dpp_reset(dpp); 1093 1093 REG_SET(DC_IP_REQUEST_CNTL, 0, 1094 1094 IP_REQUEST_EN, 0);
+7 -2
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
··· 1062 1062 if (REG(DC_IP_REQUEST_CNTL)) { 1063 1063 REG_SET(DC_IP_REQUEST_CNTL, 0, 1064 1064 IP_REQUEST_EN, 1); 1065 - dcn20_dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true); 1066 - dcn20_hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true); 1065 + 1066 + if (hws->funcs.dpp_pg_control) 1067 + hws->funcs.dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true); 1068 + 1069 + if (hws->funcs.hubp_pg_control) 1070 + hws->funcs.hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true); 1071 + 1067 1072 REG_SET(DC_IP_REQUEST_CNTL, 0, 1068 1073 IP_REQUEST_EN, 0); 1069 1074 DC_LOG_DEBUG(
+4 -3
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
··· 2517 2517 * if this primary pipe has a bottom pipe in prev. state 2518 2518 * and if the bottom pipe is still available (which it should be), 2519 2519 * pick that pipe as secondary 2520 - * Same logic applies for ODM pipes. Since mpo is not allowed with odm 2521 - * check in else case. 2520 + * Same logic applies for ODM pipes 2522 2521 */ 2523 2522 if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe) { 2524 2523 preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe->pipe_idx; ··· 2525 2526 secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx]; 2526 2527 secondary_pipe->pipe_idx = preferred_pipe_idx; 2527 2528 } 2528 - } else if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) { 2529 + } 2530 + if (secondary_pipe == NULL && 2531 + dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) { 2529 2532 preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe->pipe_idx; 2530 2533 if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) { 2531 2534 secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
+1 -1
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
··· 296 296 .num_banks = 8, 297 297 .num_chans = 4, 298 298 .vmm_page_size_bytes = 4096, 299 - .dram_clock_change_latency_us = 23.84, 299 + .dram_clock_change_latency_us = 11.72, 300 300 .return_bus_width_bytes = 64, 301 301 .dispclk_dppclk_vco_speed_mhz = 3600, 302 302 .xfc_bus_transport_time_us = 4,
+1 -1
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
··· 1121 1121 static int renoir_gfx_state_change_set(struct smu_context *smu, uint32_t state) 1122 1122 { 1123 1123 1124 - return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GpuChangeState, state, NULL); 1124 + return 0; 1125 1125 } 1126 1126 1127 1127 static const struct pptable_funcs renoir_ppt_funcs = {