Merge tag 'amd-drm-fixes-6.12-2024-11-14' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-6.12-2024-11-14:

amdgpu:
- PSR fix
- Panel replay fixes
- DML fix
- vblank power fix
- Fix video caps
- SMU 14.0 fix
- GPUVM fix
- MES 12 fix
- APU carve out fix
- DC vbios fix
- NBIO fix

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20241114143401.448210-1-alexander.deucher@amd.com

Changed files
+117 -96
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -161,7 +161,8 @@
		 * When GTT is just an alternative to VRAM make sure that we
		 * only use it as fallback and still try to fill up VRAM first.
		 */
-		if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)
+		if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
+		    !(adev->flags & AMD_IS_APU))
			places[c].flags |= TTM_PL_FLAG_FALLBACK;
		c++;
	}
+8 -5
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1124,14 +1124,18 @@
				uint64_t *flags)
 {
	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	bool is_vram = bo->tbo.resource->mem_type == TTM_PL_VRAM;
-	bool coherent = bo->flags & (AMDGPU_GEM_CREATE_COHERENT | AMDGPU_GEM_CREATE_EXT_COHERENT);
+	bool is_vram = bo->tbo.resource &&
+		       bo->tbo.resource->mem_type == TTM_PL_VRAM;
+	bool coherent = bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
+				     AMDGPU_GEM_CREATE_EXT_COHERENT);
	bool ext_coherent = bo->flags & AMDGPU_GEM_CREATE_EXT_COHERENT;
	bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
	struct amdgpu_vm *vm = mapping->bo_va->base.vm;
	unsigned int mtype_local, mtype;
	bool snoop = false;
	bool is_local;
+
+	dma_resv_assert_held(bo->tbo.base.resv);

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 1):
@@ -1255,9 +1251,8 @@
		*flags &= ~AMDGPU_PTE_VALID;
	}

-	if (bo && bo->tbo.resource)
-		gmc_v9_0_get_coherence_flags(adev, mapping->bo_va->base.bo,
-					     mapping, flags);
+	if ((*flags & AMDGPU_PTE_VALID) && bo)
+		gmc_v9_0_get_coherence_flags(adev, bo, mapping, flags);
 }

 static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
+1 -1
drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -550,7 +550,7 @@
	mes_set_hw_res_1_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_set_hw_res_1_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC_1;
	mes_set_hw_res_1_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
-	mes_set_hw_res_1_pkt.mes_kiq_unmap_timeout = 100;
+	mes_set_hw_res_1_pkt.mes_kiq_unmap_timeout = 0xa;

	return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
			&mes_set_hw_res_1_pkt, sizeof(mes_set_hw_res_1_pkt),
+6
drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
@@ -247,6 +247,12 @@
	if (def != data)
		WREG32_SOC15(NBIO, 0, regBIF0_PCIE_MST_CTRL_3, data);

+	switch (adev->ip_versions[NBIO_HWIP][0]) {
+	case IP_VERSION(7, 7, 0):
+		data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4) & ~BIT(23);
+		WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4, data);
+		break;
+	}
 }

 static void nbio_v7_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
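
The new hunk is a read-modify-write on a strap register: read the current value, clear one bit with `& ~BIT(23)`, and write the whole word back so every other field is preserved. A minimal standalone sketch of the idiom, with a plain variable standing in for the RREG32_SOC15/WREG32_SOC15 MMIO accessors:

    #include <stdint.h>

    #define BIT(n) (1U << (n))

    static uint32_t strap4;  /* stand-in for regRCC_DEV0_EPF5_STRAP4 */

    static void clear_strap_bit23(void)
    {
        uint32_t data = strap4 & ~BIT(23);  /* read, clear only bit 23 */

        strap4 = data;  /* write back; the other 31 bits are untouched */
    }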
+6 -6
drivers/gpu/drm/amd/amdgpu/nv.c
@@ -67,8 +67,8 @@

 /* Navi */
 static const struct amdgpu_video_codec_info nv_video_codecs_encode_array[] = {
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 0)},
 };

 static const struct amdgpu_video_codecs nv_video_codecs_encode = {
@@ -94,8 +94,8 @@

 /* Sienna Cichlid */
 static const struct amdgpu_video_codec_info sc_video_codecs_encode_array[] = {
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)},
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
 };

 static const struct amdgpu_video_codecs sc_video_codecs_encode = {
@@ -136,8 +136,8 @@

 /* SRIOV Sienna Cichlid, not const since data is controlled by host */
 static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] = {
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)},
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
 };

 static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] = {
+2 -2
drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -90,8 +90,8 @@
 /* Vega, Raven, Arcturus */
 static const struct amdgpu_video_codec_info vega_video_codecs_encode_array[] =
 {
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 0)},
 };

 static const struct amdgpu_video_codecs vega_video_codecs_encode =
+6 -6
drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -49,13 +49,13 @@

 /* SOC21 */
 static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] = {
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
 };

 static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] = {
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
 };

@@ -96,14 +96,14 @@

 /* SRIOV SOC21, not const since data is controlled by host */
 static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn0[] = {
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
 };

 static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn1[] = {
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
 };

 static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn0 = {
+1 -1
drivers/gpu/drm/amd/amdgpu/soc24.c
@@ -48,7 +48,7 @@
 static const struct amd_ip_funcs soc24_common_ip_funcs;

 static const struct amdgpu_video_codec_info vcn_5_0_0_video_codecs_encode_array_vcn0[] = {
-	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
 };
+4 -4
drivers/gpu/drm/amd/amdgpu/vi.c
@@ -136,15 +136,15 @@
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
		.max_width = 4096,
-		.max_height = 2304,
-		.max_pixels_per_frame = 4096 * 2304,
+		.max_height = 4096,
+		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 0,
	},
	{
		.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
		.max_width = 4096,
-		.max_height = 2304,
-		.max_pixels_per_frame = 4096 * 2304,
+		.max_height = 4096,
+		.max_pixels_per_frame = 4096 * 4096,
		.max_level = 0,
	},
 };
+60 -57
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -6762,7 +6762,7 @@
		if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22)
			tf = TRANSFER_FUNC_GAMMA_22;
		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
-		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
+		aconnector->sr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;

	}
 finish:
@@ -8875,6 +8875,56 @@
	}
 }

+static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach,
+					  const struct dm_crtc_state *acrtc_state,
+					  const u64 current_ts)
+{
+	struct psr_settings *psr = &acrtc_state->stream->link->psr_settings;
+	struct replay_settings *pr = &acrtc_state->stream->link->replay_settings;
+	struct amdgpu_dm_connector *aconn =
+		(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
+
+	if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
+		if (pr->config.replay_supported && !pr->replay_feature_enabled)
+			amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn);
+		else if (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED &&
+			 !psr->psr_feature_enabled)
+			if (!aconn->disallow_edp_enter_psr)
+				amdgpu_dm_link_setup_psr(acrtc_state->stream);
+	}
+
+	/* Decrement skip count when SR is enabled and we're doing fast updates. */
+	if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
+	    (psr->psr_feature_enabled || pr->config.replay_supported)) {
+		if (aconn->sr_skip_count > 0)
+			aconn->sr_skip_count--;
+
+		/* Allow SR when skip count is 0. */
+		acrtc_attach->dm_irq_params.allow_sr_entry = !aconn->sr_skip_count;
+
+		/*
+		 * If sink supports PSR SU/Panel Replay, there is no need to rely on
+		 * a vblank event disable request to enable PSR/RP. PSR SU/RP
+		 * can be enabled immediately once OS demonstrates an
+		 * adequate number of fast atomic commits to notify KMD
+		 * of update events. See `vblank_control_worker()`.
+		 */
+		if (acrtc_attach->dm_irq_params.allow_sr_entry &&
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+		    !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
+#endif
+		    (current_ts - psr->psr_dirty_rects_change_timestamp_ns) > 500000000) {
+			if (pr->replay_feature_enabled && !pr->replay_allow_active)
+				amdgpu_dm_replay_enable(acrtc_state->stream, true);
+			if (psr->psr_version >= DC_PSR_VERSION_SU_1 &&
+			    !psr->psr_allow_active && !aconn->disallow_edp_enter_psr)
+				amdgpu_dm_psr_enable(acrtc_state->stream);
+		}
+	} else {
+		acrtc_attach->dm_irq_params.allow_sr_entry = false;
+	}
+}
+
 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
				    struct drm_device *dev,
				    struct amdgpu_display_manager *dm,
@@ -9078,7 +9028,7 @@
			 * during the PSR-SU was disabled.
			 */
			if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
-			    acrtc_attach->dm_irq_params.allow_psr_entry &&
+			    acrtc_attach->dm_irq_params.allow_sr_entry &&
 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
			    !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
 #endif
@@ -9253,9 +9203,12 @@
		bundle->stream_update.abm_level = &acrtc_state->abm_level;

		mutex_lock(&dm->dc_lock);
-		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
-				acrtc_state->stream->link->psr_settings.psr_allow_active)
-			amdgpu_dm_psr_disable(acrtc_state->stream);
+		if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
+			if (acrtc_state->stream->link->replay_settings.replay_allow_active)
+				amdgpu_dm_replay_disable(acrtc_state->stream);
+			if (acrtc_state->stream->link->psr_settings.psr_allow_active)
+				amdgpu_dm_psr_disable(acrtc_state->stream);
+		}
		mutex_unlock(&dm->dc_lock);

		/*
@@ -9299,57 +9246,7 @@
		dm_update_pflip_irq_state(drm_to_adev(dev),
					  acrtc_attach);

-		if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
-			if (acrtc_state->stream->link->replay_settings.config.replay_supported &&
-					!acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
-				struct amdgpu_dm_connector *aconn =
-					(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
-				amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn);
-			} else if (acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
-					!acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
-
-				struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *)
-					acrtc_state->stream->dm_stream_context;
-
-				if (!aconn->disallow_edp_enter_psr)
-					amdgpu_dm_link_setup_psr(acrtc_state->stream);
-			}
-		}
-
-		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
-		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
-		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
-			struct amdgpu_dm_connector *aconn =
-				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
-
-			if (aconn->psr_skip_count > 0)
-				aconn->psr_skip_count--;
-
-			/* Allow PSR when skip count is 0. */
-			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
-
-			/*
-			 * If sink supports PSR SU, there is no need to rely on
-			 * a vblank event disable request to enable PSR. PSR SU
-			 * can be enabled immediately once OS demonstrates an
-			 * adequate number of fast atomic commits to notify KMD
-			 * of update events. See `vblank_control_worker()`.
-			 */
-			if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
-			    acrtc_attach->dm_irq_params.allow_psr_entry &&
-#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
-			    !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
-#endif
-			    !acrtc_state->stream->link->psr_settings.psr_allow_active &&
-			    !aconn->disallow_edp_enter_psr &&
-			    (timestamp_ns -
-			    acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) >
-			    500000000)
-				amdgpu_dm_psr_enable(acrtc_state->stream);
-		} else {
-			acrtc_attach->dm_irq_params.allow_psr_entry = false;
-		}
-
+		amdgpu_dm_enable_self_refresh(acrtc_attach, acrtc_state, timestamp_ns);
		mutex_unlock(&dm->dc_lock);
	}

@@ -12083,7 +12080,7 @@
			break;
		}

-		while (j < EDID_LENGTH) {
+		while (j < EDID_LENGTH - sizeof(struct amd_vsdb_block)) {
			struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j];
			unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]);

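
The last hunk above is an out-of-bounds read fix: the loop casts `&edid_ext[j]` to a fixed-size `struct amd_vsdb_block`, so it has to stop while a whole block still fits inside the 128-byte extension, not merely while `j` is in range. A minimal standalone sketch of the pattern with a hypothetical record type (the equivalent `j + sizeof(...) <= len` form also avoids unsigned underflow on very short buffers):

    #include <stddef.h>
    #include <stdint.h>

    #define EXT_LENGTH 128          /* stand-in for EDID_LENGTH */

    struct record {                 /* stand-in for struct amd_vsdb_block */
        uint8_t ieee_id[3];
        uint8_t payload[5];
    };

    static const struct record *find_record(const uint8_t *ext, size_t j)
    {
        /* Bound so that a whole record fits; with the old `j < EXT_LENGTH`
         * the final iterations would read past the end of the buffer. */
        while (j + sizeof(struct record) <= EXT_LENGTH) {
            const struct record *r = (const struct record *)&ext[j];

            if (r->ieee_id[0] == 0x1a)      /* match some vendor ID */
                return r;
            j++;
        }
        return NULL;
    }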
+1 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -727,7 +727,7 @@
	/* Cached display modes */
	struct drm_display_mode freesync_vid_base;

-	int psr_skip_count;
+	int sr_skip_count;
	bool disallow_edp_enter_psr;

	/* Record progress status of mst*/
+2 -3
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -266,11 +266,10 @@
	 * where the SU region is the full hactive*vactive region. See
	 * fill_dc_dirty_rects().
	 */
-	if (vblank_work->stream && vblank_work->stream->link) {
+	if (vblank_work->stream && vblank_work->stream->link && vblank_work->acrtc) {
		amdgpu_dm_crtc_set_panel_sr_feature(
			vblank_work, vblank_work->enable,
-			vblank_work->acrtc->dm_irq_params.allow_psr_entry ||
-			vblank_work->stream->link->replay_settings.replay_feature_enabled);
+			vblank_work->acrtc->dm_irq_params.allow_sr_entry);
	}

	if (dm->active_vblank_irq_count == 0) {
+1 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
@@ -33,7 +33,7 @@
	struct mod_vrr_params vrr_params;
	struct dc_stream_state *stream;
	int active_planes;
-	bool allow_psr_entry;
+	bool allow_sr_entry;
	struct mod_freesync_config freesync_config;

 #ifdef CONFIG_DEBUG_FS
+2 -4
drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -3122,14 +3122,12 @@
	struct dc_vram_info *info)
 {
	struct bios_parser *bp = BP_FROM_DCB(dcb);
-	static enum bp_result result = BP_RESULT_BADBIOSTABLE;
+	enum bp_result result = BP_RESULT_BADBIOSTABLE;
	struct atom_common_table_header *header;
	struct atom_data_revision revision;

	// vram info moved to umc_info for DCN4x
-	if (dcb->ctx->dce_version >= DCN_VERSION_4_01 &&
-	    dcb->ctx->dce_version < DCN_VERSION_MAX &&
-	    info && DATA_TABLES(umc_info)) {
+	if (info && DATA_TABLES(umc_info)) {
		header = GET_IMAGE(struct atom_common_table_header,
				   DATA_TABLES(umc_info));

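
The one-word change from `static enum bp_result` to `enum bp_result` is the core fix here: a `static` local is initialized once, at first use, and keeps its last value across calls, so a result set on one invocation leaks into the next one as stale status. A minimal standalone sketch of the failure mode, with hypothetical parse helpers rather than the bios parser itself:

    #include <stdio.h>

    enum bp_result { BP_RESULT_OK, BP_RESULT_BADBIOSTABLE };

    static enum bp_result parse_buggy(int table_present)
    {
        /* Initialized once; a success here is remembered forever. */
        static enum bp_result result = BP_RESULT_BADBIOSTABLE;

        if (table_present)
            result = BP_RESULT_OK;
        return result;
    }

    static enum bp_result parse_fixed(int table_present)
    {
        /* Re-initialized on every call, as the fix does. */
        enum bp_result result = BP_RESULT_BADBIOSTABLE;

        if (table_present)
            result = BP_RESULT_OK;
        return result;
    }

    int main(void)
    {
        /* buggy prints 0 0: the second call wrongly reports the old OK */
        printf("%d %d\n", parse_buggy(1), parse_buggy(0));
        /* fixed prints 0 1: the bad table is reported again */
        printf("%d %d\n", parse_fixed(1), parse_fixed(0));
        return 0;
    }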
+3
drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -265,6 +265,9 @@
	dc_state_copy_internal(new_state, src_state);

 #ifdef CONFIG_DRM_AMD_DC_FP
+	new_state->bw_ctx.dml2 = NULL;
+	new_state->bw_ctx.dml2_dc_power_source = NULL;
+
	if (src_state->bw_ctx.dml2 &&
	    !dml2_create_copy(&new_state->bw_ctx.dml2, src_state->bw_ctx.dml2)) {
		dc_state_release(new_state);
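
The two NULL stores matter because `dc_state_copy_internal()` makes a shallow copy: until `dml2_create_copy()` has run, `new_state->bw_ctx.dml2` still aliases the source state's pointer, so the `dc_state_release(new_state)` error path could operate on memory the copy never owned. A minimal sketch of the pattern with hypothetical types:

    #include <stdlib.h>
    #include <string.h>

    struct ctx { int *table; size_t n; };

    static int ctx_copy(struct ctx *dst, const struct ctx *src)
    {
        *dst = *src;        /* shallow copy: dst->table aliases src->table */
        dst->table = NULL;  /* the fix: never keep a borrowed pointer */

        if (src->n) {
            dst->table = malloc(src->n * sizeof(*dst->table));
            if (!dst->table)
                return -1;  /* safe: cleanup of dst frees NULL, not src's table */
            memcpy(dst->table, src->table, src->n * sizeof(*dst->table));
        }
        return 0;
    }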
+9 -2
drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
@@ -8,6 +8,7 @@
 #include "dml2_pmo_dcn4_fams2.h"

 static const double MIN_VACTIVE_MARGIN_PCT = 0.25; // We need more than non-zero margin because DET buffer granularity can alter vactive latency hiding
+static const double MIN_BLANK_STUTTER_FACTOR = 3.0;

 static const struct dml2_pmo_pstate_strategy base_strategy_list_1_display[] = {
	// VActive Preferred
@@ -2140,6 +2139,7 @@
	struct dml2_pmo_instance *pmo = in_out->instance;
	bool stutter_period_meets_z8_eco = true;
	bool z8_stutter_optimization_too_expensive = false;
+	bool stutter_optimization_too_expensive = false;
	double line_time_us, vblank_nom_time_us;

	unsigned int i;
@@ -2162,8 +2160,13 @@
		line_time_us = (double)in_out->base_display_config->display_config.stream_descriptors[i].timing.h_total / (in_out->base_display_config->display_config.stream_descriptors[i].timing.pixel_clock_khz * 1000) * 1000000;
		vblank_nom_time_us = line_time_us * in_out->base_display_config->display_config.stream_descriptors[i].timing.vblank_nom;

-		if (vblank_nom_time_us < pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us) {
+		if (vblank_nom_time_us < pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us * MIN_BLANK_STUTTER_FACTOR) {
			z8_stutter_optimization_too_expensive = true;
+			break;
+		}
+
+		if (vblank_nom_time_us < pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us * MIN_BLANK_STUTTER_FACTOR) {
+			stutter_optimization_too_expensive = true;
			break;
		}
	}
@@ -2186,7 +2179,7 @@
		pmo->scratch.pmo_dcn4.z8_vblank_optimizable = false;
	}

-	if (pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us > 0) {
+	if (!stutter_optimization_too_expensive && pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us > 0) {
		pmo->scratch.pmo_dcn4.optimal_vblank_reserved_time_for_stutter_us[pmo->scratch.pmo_dcn4.num_stutter_candidates] = (unsigned int)pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us;
		pmo->scratch.pmo_dcn4.num_stutter_candidates++;
	}
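
A worked example with hypothetical numbers: with `z8_stutter_exit_latency_us` at 100, a stream whose `vblank_nom_time_us` is 250 used to keep the z8 optimization (the old test, 250 < 100, is false), but with the 3.0x `MIN_BLANK_STUTTER_FACTOR` the threshold becomes 300, so 250 < 300 now marks z8 stutter as too expensive. The same 3x margin gates the regular stutter candidate against `stutter_enter_plus_exit_latency_us`.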
+3 -2
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -1132,7 +1132,7 @@
 static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
					enum smu_clk_type clk_type, char *buf)
 {
-	int i, size = 0, ret = 0;
+	int i, idx, ret = 0, size = 0;
	uint32_t cur_value = 0, value = 0, count = 0;
	uint32_t min, max;

@@ -1168,7 +1168,8 @@
			break;

		for (i = 0; i < count; i++) {
-			ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, i, &value);
+			idx = (clk_type == SMU_MCLK) ? (count - i - 1) : i;
+			ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, idx, &value);
			if (ret)
				break;

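
The `idx` remap reverses the enumeration order for memory clocks only, so the printed levels come out in the order userspace expects even though the firmware table stores MCLK entries the other way around (that direction is an assumption here; the diff only shows the reversal). A minimal sketch with a hypothetical level table:

    #include <stdio.h>

    int main(void)
    {
        const unsigned int mclk_mhz[] = { 2800, 1600, 800 };  /* table order */
        const unsigned int count = 3;
        int is_mclk = 1;

        for (unsigned int i = 0; i < count; i++) {
            unsigned int idx = is_mclk ? (count - i - 1) : i;

            printf("%u: %uMhz\n", i, mclk_mhz[idx]);  /* 800, 1600, 2800 */
        }
        return 0;
    }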