Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'amd-drm-fixes-6.13-2025-01-15' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-6.13-2025-01-15:

amdgpu:
- SMU 13 fix
- DP MST fixes
- DCN 3.5 fix
- PSR fixes
- eDP fix
- VRR fix
- Enforce isolation fixes
- GFX 12 fix
- PSP 14.x fix

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250115151602.210704-1-alexander.deucher@amd.com

+157 -64
+3 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
··· 715 715 void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle) 716 716 { 717 717 enum amd_powergating_state state = idle ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE; 718 - if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11 && 719 - ((adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK) <= 64)) { 718 + if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11 && 719 + ((adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK) <= 64)) || 720 + (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 12)) { 720 721 pr_debug("GFXOFF is %s\n", idle ? "enabled" : "disabled"); 721 722 amdgpu_gfx_off_ctrl(adev, idle); 722 723 } else if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 9) &&
+4
drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
··· 122 122 if (adev->flags & AMD_IS_APU) 123 123 return 0; 124 124 125 + if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(14, 0, 2) || 126 + amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(14, 0, 3)) 127 + return 0; 128 + 125 129 if (adev->asic_type >= CHIP_SIENNA_CICHLID) 126 130 return 1; 127 131
+10 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
··· 1989 1989 { 1990 1990 struct amdgpu_device *adev = ring->adev; 1991 1991 u32 idx; 1992 + bool sched_work = false; 1992 1993 1993 1994 if (!adev->gfx.enable_cleaner_shader) 1994 1995 return; ··· 2008 2007 mutex_lock(&adev->enforce_isolation_mutex); 2009 2008 if (adev->enforce_isolation[idx]) { 2010 2009 if (adev->kfd.init_complete) 2011 - amdgpu_gfx_kfd_sch_ctrl(adev, idx, false); 2010 + sched_work = true; 2012 2011 } 2013 2012 mutex_unlock(&adev->enforce_isolation_mutex); 2013 + 2014 + if (sched_work) 2015 + amdgpu_gfx_kfd_sch_ctrl(adev, idx, false); 2014 2016 } 2015 2017 2016 2018 void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring) 2017 2019 { 2018 2020 struct amdgpu_device *adev = ring->adev; 2019 2021 u32 idx; 2022 + bool sched_work = false; 2020 2023 2021 2024 if (!adev->gfx.enable_cleaner_shader) 2022 2025 return; ··· 2036 2031 mutex_lock(&adev->enforce_isolation_mutex); 2037 2032 if (adev->enforce_isolation[idx]) { 2038 2033 if (adev->kfd.init_complete) 2039 - amdgpu_gfx_kfd_sch_ctrl(adev, idx, true); 2034 + sched_work = true; 2040 2035 } 2041 2036 mutex_unlock(&adev->enforce_isolation_mutex); 2037 + 2038 + if (sched_work) 2039 + amdgpu_gfx_kfd_sch_ctrl(adev, idx, true); 2042 2040 } 2043 2041 2044 2042 /*
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
··· 193 193 need_ctx_switch = ring->current_ctx != fence_ctx; 194 194 if (ring->funcs->emit_pipeline_sync && job && 195 195 ((tmp = amdgpu_sync_get_fence(&job->explicit_sync)) || 196 - (amdgpu_sriov_vf(adev) && need_ctx_switch) || 197 - amdgpu_vm_need_pipeline_sync(ring, job))) { 196 + need_ctx_switch || amdgpu_vm_need_pipeline_sync(ring, job))) { 197 + 198 198 need_pipe_sync = true; 199 199 200 200 if (tmp)
+30 -11
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 8896 8896 struct replay_settings *pr = &acrtc_state->stream->link->replay_settings; 8897 8897 struct amdgpu_dm_connector *aconn = 8898 8898 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context; 8899 + bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state); 8899 8900 8900 8901 if (acrtc_state->update_type > UPDATE_TYPE_FAST) { 8901 8902 if (pr->config.replay_supported && !pr->replay_feature_enabled) ··· 8923 8922 * adequate number of fast atomic commits to notify KMD 8924 8923 * of update events. See `vblank_control_worker()`. 8925 8924 */ 8926 - if (acrtc_attach->dm_irq_params.allow_sr_entry && 8925 + if (!vrr_active && 8926 + acrtc_attach->dm_irq_params.allow_sr_entry && 8927 8927 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY 8928 8928 !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && 8929 8929 #endif 8930 8930 (current_ts - psr->psr_dirty_rects_change_timestamp_ns) > 500000000) { 8931 8931 if (pr->replay_feature_enabled && !pr->replay_allow_active) 8932 8932 amdgpu_dm_replay_enable(acrtc_state->stream, true); 8933 - if (psr->psr_version >= DC_PSR_VERSION_SU_1 && 8933 + if (psr->psr_version == DC_PSR_VERSION_SU_1 && 8934 8934 !psr->psr_allow_active && !aconn->disallow_edp_enter_psr) 8935 8935 amdgpu_dm_psr_enable(acrtc_state->stream); 8936 8936 } ··· 9102 9100 acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns = 9103 9101 timestamp_ns; 9104 9102 if (acrtc_state->stream->link->psr_settings.psr_allow_active) 9105 - amdgpu_dm_psr_disable(acrtc_state->stream); 9103 + amdgpu_dm_psr_disable(acrtc_state->stream, true); 9106 9104 mutex_unlock(&dm->dc_lock); 9107 9105 } 9108 9106 } ··· 9268 9266 bundle->stream_update.abm_level = &acrtc_state->abm_level; 9269 9267 9270 9268 mutex_lock(&dm->dc_lock); 9271 - if (acrtc_state->update_type > UPDATE_TYPE_FAST) { 9269 + if ((acrtc_state->update_type > UPDATE_TYPE_FAST) || vrr_active) { 9272 9270 if (acrtc_state->stream->link->replay_settings.replay_allow_active) 9273 9271 
amdgpu_dm_replay_disable(acrtc_state->stream); 9274 9272 if (acrtc_state->stream->link->psr_settings.psr_allow_active) 9275 - amdgpu_dm_psr_disable(acrtc_state->stream); 9273 + amdgpu_dm_psr_disable(acrtc_state->stream, true); 9276 9274 } 9277 9275 mutex_unlock(&dm->dc_lock); 9278 9276 ··· 11381 11379 return 0; 11382 11380 } 11383 11381 11382 + static bool amdgpu_dm_crtc_mem_type_changed(struct drm_device *dev, 11383 + struct drm_atomic_state *state, 11384 + struct drm_crtc_state *crtc_state) 11385 + { 11386 + struct drm_plane *plane; 11387 + struct drm_plane_state *new_plane_state, *old_plane_state; 11388 + 11389 + drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) { 11390 + new_plane_state = drm_atomic_get_plane_state(state, plane); 11391 + old_plane_state = drm_atomic_get_plane_state(state, plane); 11392 + 11393 + if (old_plane_state->fb && new_plane_state->fb && 11394 + get_mem_type(old_plane_state->fb) != get_mem_type(new_plane_state->fb)) 11395 + return true; 11396 + } 11397 + 11398 + return false; 11399 + } 11400 + 11384 11401 /** 11385 11402 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM. 11386 11403 * ··· 11597 11576 11598 11577 /* Remove exiting planes if they are modified */ 11599 11578 for_each_oldnew_plane_in_descending_zpos(state, plane, old_plane_state, new_plane_state) { 11600 - if (old_plane_state->fb && new_plane_state->fb && 11601 - get_mem_type(old_plane_state->fb) != 11602 - get_mem_type(new_plane_state->fb)) 11603 - lock_and_validation_needed = true; 11604 11579 11605 11580 ret = dm_update_plane_state(dc, state, plane, 11606 11581 old_plane_state, ··· 11891 11874 11892 11875 /* 11893 11876 * Only allow async flips for fast updates that don't change 11894 - * the FB pitch, the DCC state, rotation, etc. 11877 + * the FB pitch, the DCC state, rotation, mem_type, etc. 
11895 11878 */ 11896 - if (new_crtc_state->async_flip && lock_and_validation_needed) { 11879 + if (new_crtc_state->async_flip && 11880 + (lock_and_validation_needed || 11881 + amdgpu_dm_crtc_mem_type_changed(dev, state, new_crtc_state))) { 11897 11882 drm_dbg_atomic(crtc->dev, 11898 11883 "[CRTC:%d:%s] async flips are only supported for fast updates\n", 11899 11884 crtc->base.id, crtc->name);
+2
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
··· 697 697 struct drm_dp_mst_port *mst_output_port; 698 698 struct amdgpu_dm_connector *mst_root; 699 699 struct drm_dp_aux *dsc_aux; 700 + uint32_t mst_local_bw; 701 + uint16_t vc_full_pbn; 700 702 struct mutex handle_mst_msg_ready; 701 703 702 704 /* TODO see if we can merge with ddc_bus or make a dm_connector */
+16 -9
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
··· 30 30 #include "amdgpu_dm.h" 31 31 #include "dc.h" 32 32 #include "amdgpu_securedisplay.h" 33 + #include "amdgpu_dm_psr.h" 33 34 34 35 static const char *const pipe_crc_sources[] = { 35 36 "none", ··· 225 224 226 225 mutex_lock(&adev->dm.dc_lock); 227 226 227 + /* For PSR1, check that the panel has exited PSR */ 228 + if (stream_state->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1) 229 + amdgpu_dm_psr_wait_disable(stream_state); 230 + 228 231 /* Enable or disable CRTC CRC generation */ 229 232 if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) { 230 233 if (!dc_stream_configure_crc(stream_state->ctx->dc, ··· 362 357 363 358 } 364 359 360 + /* 361 + * Reading the CRC requires the vblank interrupt handler to be 362 + * enabled. Keep a reference until CRC capture stops. 363 + */ 364 + enabled = amdgpu_dm_is_valid_crc_source(cur_crc_src); 365 + if (!enabled && enable) { 366 + ret = drm_crtc_vblank_get(crtc); 367 + if (ret) 368 + goto cleanup; 369 + } 370 + 365 371 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 366 372 /* Reset secure_display when we change crc source from debugfs */ 367 373 amdgpu_dm_set_crc_window_default(crtc, crtc_state->stream); ··· 383 367 goto cleanup; 384 368 } 385 369 386 - /* 387 - * Reading the CRC requires the vblank interrupt handler to be 388 - * enabled. Keep a reference until CRC capture stops. 389 - */ 390 - enabled = amdgpu_dm_is_valid_crc_source(cur_crc_src); 391 370 if (!enabled && enable) { 392 - ret = drm_crtc_vblank_get(crtc); 393 - if (ret) 394 - goto cleanup; 395 - 396 371 if (dm_is_crc_source_dprx(source)) { 397 372 if (drm_dp_start_crc(aux, crtc)) { 398 373 DRM_DEBUG_DRIVER("dp start crc failed\n");
+2 -2
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
··· 93 93 return rc; 94 94 } 95 95 96 - bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state) 96 + bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state) 97 97 { 98 98 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE || 99 99 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; ··· 142 142 amdgpu_dm_replay_enable(vblank_work->stream, true); 143 143 } else if (vblank_enabled) { 144 144 if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && is_sr_active) 145 - amdgpu_dm_psr_disable(vblank_work->stream); 145 + amdgpu_dm_psr_disable(vblank_work->stream, false); 146 146 } else if (link->psr_settings.psr_feature_enabled && 147 147 allow_sr_entry && !is_sr_active && !is_crc_window_active) { 148 148
+1 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
··· 37 37 38 38 bool amdgpu_dm_crtc_vrr_active_irq(struct amdgpu_crtc *acrtc); 39 39 40 - bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state); 40 + bool amdgpu_dm_crtc_vrr_active(const struct dm_crtc_state *dm_state); 41 41 42 42 int amdgpu_dm_crtc_enable_vblank(struct drm_crtc *crtc); 43 43
+1 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
··· 3606 3606 /* PSR may write to OTG CRC window control register, 3607 3607 * so close it before starting secure_display. 3608 3608 */ 3609 - amdgpu_dm_psr_disable(acrtc->dm_irq_params.stream); 3609 + amdgpu_dm_psr_disable(acrtc->dm_irq_params.stream, true); 3610 3610 3611 3611 spin_lock_irq(&adev_to_drm(adev)->event_lock); 3612 3612
+34 -14
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
··· 155 155 return 0; 156 156 } 157 157 158 + 159 + static inline void 160 + amdgpu_dm_mst_reset_mst_connector_setting(struct amdgpu_dm_connector *aconnector) 161 + { 162 + aconnector->drm_edid = NULL; 163 + aconnector->dsc_aux = NULL; 164 + aconnector->mst_output_port->passthrough_aux = NULL; 165 + aconnector->mst_local_bw = 0; 166 + aconnector->vc_full_pbn = 0; 167 + } 168 + 158 169 static void 159 170 amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector) 160 171 { ··· 193 182 194 183 dc_sink_release(dc_sink); 195 184 aconnector->dc_sink = NULL; 196 - aconnector->drm_edid = NULL; 197 - aconnector->dsc_aux = NULL; 198 - port->passthrough_aux = NULL; 185 + amdgpu_dm_mst_reset_mst_connector_setting(aconnector); 199 186 } 200 187 201 188 aconnector->mst_status = MST_STATUS_DEFAULT; ··· 513 504 514 505 dc_sink_release(aconnector->dc_sink); 515 506 aconnector->dc_sink = NULL; 516 - aconnector->drm_edid = NULL; 517 - aconnector->dsc_aux = NULL; 518 - port->passthrough_aux = NULL; 507 + amdgpu_dm_mst_reset_mst_connector_setting(aconnector); 519 508 520 509 amdgpu_dm_set_mst_status(&aconnector->mst_status, 521 510 MST_REMOTE_EDID | MST_ALLOCATE_NEW_PAYLOAD | MST_CLEAR_ALLOCATED_PAYLOAD, ··· 1826 1819 struct drm_dp_mst_port *immediate_upstream_port = NULL; 1827 1820 uint32_t end_link_bw = 0; 1828 1821 1829 - /*Get last DP link BW capability*/ 1830 - if (dp_get_link_current_set_bw(&aconnector->mst_output_port->aux, &end_link_bw)) { 1831 - if (stream_kbps > end_link_bw) { 1822 + /*Get last DP link BW capability. Mode shall be supported by Legacy peer*/ 1823 + if (aconnector->mst_output_port->pdt != DP_PEER_DEVICE_DP_LEGACY_CONV && 1824 + aconnector->mst_output_port->pdt != DP_PEER_DEVICE_NONE) { 1825 + if (aconnector->vc_full_pbn != aconnector->mst_output_port->full_pbn) { 1826 + dp_get_link_current_set_bw(&aconnector->mst_output_port->aux, &end_link_bw); 1827 + aconnector->vc_full_pbn = aconnector->mst_output_port->full_pbn; 1828 + aconnector->mst_local_bw = end_link_bw; 1829 + } else { 1830 + end_link_bw = aconnector->mst_local_bw; 1831 + } 1832 + 1833 + if (end_link_bw > 0 && stream_kbps > end_link_bw) { 1832 1834 DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link." 1833 1835 "Mode required bw can't fit into last link\n"); 1834 1836 return DC_FAIL_BANDWIDTH_VALIDATE; ··· 1851 1835 if (immediate_upstream_port) { 1852 1836 virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn); 1853 1837 virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps); 1854 - if (bw_range.min_kbps > virtual_channel_bw_in_kbps) { 1855 - DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link." 1856 - "Max dsc compression can't fit into MST available bw\n"); 1857 - return DC_FAIL_BANDWIDTH_VALIDATE; 1858 - } 1838 + } else { 1839 + /* For topology LCT 1 case - only one mstb*/ 1840 + virtual_channel_bw_in_kbps = root_link_bw_in_kbps; 1841 + } 1842 + 1843 + if (bw_range.min_kbps > virtual_channel_bw_in_kbps) { 1844 + DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link." 1845 + "Max dsc compression can't fit into MST available bw\n"); 1846 + return DC_FAIL_BANDWIDTH_VALIDATE; 1859 1847 } 1860 1848 } 1861 1849
+32 -3
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
··· 201 201 * 202 202 * Return: true if success 203 203 */ 204 - bool amdgpu_dm_psr_disable(struct dc_stream_state *stream) 204 + bool amdgpu_dm_psr_disable(struct dc_stream_state *stream, bool wait) 205 205 { 206 - unsigned int power_opt = 0; 207 206 bool psr_enable = false; 208 207 209 208 DRM_DEBUG_DRIVER("Disabling psr...\n"); 210 209 211 - return dc_link_set_psr_allow_active(stream->link, &psr_enable, true, false, &power_opt); 210 + return dc_link_set_psr_allow_active(stream->link, &psr_enable, wait, false, NULL); 212 211 } 213 212 214 213 /* ··· 249 250 } 250 251 251 252 return allow_active; 253 + } 254 + 255 + /** 256 + * amdgpu_dm_psr_wait_disable() - Wait for eDP panel to exit PSR 257 + * @stream: stream state attached to the eDP link 258 + * 259 + * Waits for a max of 500ms for the eDP panel to exit PSR. 260 + * 261 + * Return: true if panel exited PSR, false otherwise. 262 + */ 263 + bool amdgpu_dm_psr_wait_disable(struct dc_stream_state *stream) 264 + { 265 + enum dc_psr_state psr_state = PSR_STATE0; 266 + struct dc_link *link = stream->link; 267 + int retry_count; 268 + 269 + if (link == NULL) 270 + return false; 271 + 272 + for (retry_count = 0; retry_count <= 1000; retry_count++) { 273 + dc_link_get_psr_state(link, &psr_state); 274 + if (psr_state == PSR_STATE0) 275 + break; 276 + udelay(500); 277 + } 278 + 279 + if (retry_count == 1000) 280 + return false; 281 + 282 + return true; 252 283 }
+2 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
··· 34 34 void amdgpu_dm_set_psr_caps(struct dc_link *link); 35 35 void amdgpu_dm_psr_enable(struct dc_stream_state *stream); 36 36 bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream); 37 - bool amdgpu_dm_psr_disable(struct dc_stream_state *stream); 37 + bool amdgpu_dm_psr_disable(struct dc_stream_state *stream, bool wait); 38 38 bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm); 39 39 bool amdgpu_dm_psr_is_active_allowed(struct amdgpu_display_manager *dm); 40 + bool amdgpu_dm_psr_wait_disable(struct dc_stream_state *stream); 40 41 41 42 #endif /* AMDGPU_DM_AMDGPU_DM_PSR_H_ */
+2 -1
drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
··· 63 63 64 64 bool should_use_dmub_lock(struct dc_link *link) 65 65 { 66 - if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) 66 + if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 || 67 + link->psr_settings.psr_version == DC_PSR_VERSION_1) 67 68 return true; 68 69 69 70 if (link->replay_settings.replay_feature_enabled)
+2 -2
drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
··· 195 195 .dcn_downspread_percent = 0.5, 196 196 .gpuvm_min_page_size_bytes = 4096, 197 197 .hostvm_min_page_size_bytes = 4096, 198 - .do_urgent_latency_adjustment = 1, 198 + .do_urgent_latency_adjustment = 0, 199 199 .urgent_latency_adjustment_fabric_clock_component_us = 0, 200 - .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000, 200 + .urgent_latency_adjustment_fabric_clock_reference_mhz = 0, 201 201 }; 202 202 203 203 void dcn35_build_wm_range_table_fpu(struct clk_mgr *clk_mgr)
+2 -2
drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
··· 579 579 { 580 580 *BytePerPixelDETY = 0; 581 581 *BytePerPixelDETC = 0; 582 - *BytePerPixelY = 0; 583 - *BytePerPixelC = 0; 582 + *BytePerPixelY = 1; 583 + *BytePerPixelC = 1; 584 584 585 585 if (SourcePixelFormat == dml2_444_64) { 586 586 *BytePerPixelDETY = 8;
+6 -6
drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
··· 1398 1398 1399 1399 link_hwss->disable_link_output(link, link_res, signal); 1400 1400 link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF; 1401 - 1402 - if (signal == SIGNAL_TYPE_EDP && 1403 - link->dc->hwss.edp_power_control && 1404 - !link->skip_implict_edp_power_control) 1405 - link->dc->hwss.edp_power_control(link, false); 1406 - else if (dmcu != NULL && dmcu->funcs->unlock_phy) 1401 + /* 1402 + * Add the logic to extract BOTH power up and power down sequences 1403 + * from enable/disable link output and only call edp panel control 1404 + * in enable_link_dp and disable_link_dp once. 1405 + */ 1406 + if (dmcu != NULL && dmcu->funcs->unlock_phy) 1407 1407 dmcu->funcs->unlock_phy(dmcu); 1408 1408 1409 1409 dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
+6 -5
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
··· 2643 2643 &backend_workload_mask); 2644 2644 2645 2645 /* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */ 2646 - if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) && 2647 - ((smu->adev->pm.fw_version == 0x004e6601) || 2648 - (smu->adev->pm.fw_version >= 0x004e7300))) || 2649 - (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) && 2650 - smu->adev->pm.fw_version >= 0x00504500)) { 2646 + if ((workload_mask & (1 << PP_SMC_POWER_PROFILE_COMPUTE)) && 2647 + ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) && 2648 + ((smu->adev->pm.fw_version == 0x004e6601) || 2649 + (smu->adev->pm.fw_version >= 0x004e7300))) || 2650 + (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) && 2651 + smu->adev->pm.fw_version >= 0x00504500))) { 2651 2652 workload_type = smu_cmn_to_asic_specific_index(smu, 2652 2653 CMN2ASIC_MAPPING_WORKLOAD, 2653 2654 PP_SMC_POWER_PROFILE_POWERSAVING);