Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amd/display: Add debug prints for IPS testing

[WHY]
To log commit states, transitions in/out of the allow and idle states,
and the name of the caller that triggered each transition.

[HOW]
Add a new DC_LOG_IPS logging helper and wrap the idle optimization
entry points in macros that pass the caller's function name through.

Reviewed-by: Duncan Ma <duncan.ma@amd.com>
Acked-by: Alex Hung <alex.hung@amd.com>
Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Nicholas Kazlauskas; committed by Alex Deucher.
460ea5b3 2f72e02f

+86 -5
+7 -3
drivers/gpu/drm/amd/display/dc/core/dc.c
··· 4874 4874 return true; 4875 4875 } 4876 4876 4877 - void dc_allow_idle_optimizations(struct dc *dc, bool allow) 4877 + void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const *caller_name) 4878 4878 { 4879 4879 if (dc->debug.disable_idle_power_optimizations) 4880 4880 return; 4881 + 4882 + if (allow != dc->idle_optimizations_allowed) 4883 + DC_LOG_IPS("%s: allow_idle old=%d new=%d (caller=%s)\n", __func__, 4884 + dc->idle_optimizations_allowed, allow, caller_name); 4881 4885 4882 4886 if (dc->caps.ips_support && (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL)) 4883 4887 return; ··· 4897 4893 dc->idle_optimizations_allowed = allow; 4898 4894 } 4899 4895 4900 - void dc_exit_ips_for_hw_access(struct dc *dc) 4896 + void dc_exit_ips_for_hw_access_internal(struct dc *dc, const char *caller_name) 4901 4897 { 4902 4898 if (dc->caps.ips_support) 4903 - dc_allow_idle_optimizations(dc, false); 4899 + dc_allow_idle_optimizations_internal(dc, false, caller_name); 4904 4900 } 4905 4901 4906 4902 bool dc_dmub_is_ips_idle_state(struct dc *dc)
+5 -2
drivers/gpu/drm/amd/display/dc/dc.h
··· 2339 2339 bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane, 2340 2340 struct dc_cursor_attributes *cursor_attr); 2341 2341 2342 - void dc_allow_idle_optimizations(struct dc *dc, bool allow); 2343 - void dc_exit_ips_for_hw_access(struct dc *dc); 2342 + #define dc_allow_idle_optimizations(dc, allow) dc_allow_idle_optimizations_internal(dc, allow, __func__) 2343 + #define dc_exit_ips_for_hw_access(dc) dc_exit_ips_for_hw_access_internal(dc, __func__) 2344 + 2345 + void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, const char *caller_name); 2346 + void dc_exit_ips_for_hw_access_internal(struct dc *dc, const char *caller_name); 2344 2347 bool dc_dmub_is_ips_idle_state(struct dc *dc); 2345 2348 2346 2349 /* set min and max memory clock to lowest and highest DPM level, respectively */
+73
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
··· 910 910 return; 911 911 } 912 912 913 + DC_LOG_ERROR("%s: DMCUB error - collecting diagnostic data\n", __func__); 914 + 913 915 if (!dc_dmub_srv_get_diagnostic_data(dc_dmub_srv, &diag_data)) { 914 916 DC_LOG_ERROR("%s: dc_dmub_srv_get_diagnostic_data failed.", __func__); 915 917 return; ··· 1203 1201 1204 1202 static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) 1205 1203 { 1204 + volatile const struct dmub_shared_state_ips_fw *ips_fw; 1206 1205 struct dc_dmub_srv *dc_dmub_srv; 1207 1206 union dmub_rb_cmd cmd = {0}; 1208 1207 ··· 1214 1211 return; 1215 1212 1216 1213 dc_dmub_srv = dc->ctx->dmub_srv; 1214 + ips_fw = &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw; 1217 1215 1218 1216 memset(&cmd, 0, sizeof(cmd)); 1219 1217 cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT; ··· 1229 1225 volatile struct dmub_shared_state_ips_driver *ips_driver = 1230 1226 &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver; 1231 1227 union dmub_shared_state_ips_driver_signals new_signals; 1228 + 1229 + DC_LOG_IPS( 1230 + "%s wait idle (ips1_commit=%d ips2_commit=%d)", 1231 + __func__, 1232 + ips_fw->signals.bits.ips1_commit, 1233 + ips_fw->signals.bits.ips2_commit); 1232 1234 1233 1235 dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); 1234 1236 ··· 1259 1249 1260 1250 ips_driver->signals = new_signals; 1261 1251 } 1252 + 1253 + DC_LOG_IPS( 1254 + "%s send allow_idle=%d (ips1_commit=%d ips2_commit=%d)", 1255 + __func__, 1256 + allow_idle, 1257 + ips_fw->signals.bits.ips1_commit, 1258 + ips_fw->signals.bits.ips2_commit); 1262 1259 1263 1260 /* NOTE: This does not use the "wake" interface since this is part of the wake path. */ 1264 1261 /* We also do not perform a wait since DMCUB could enter idle after the notification. 
*/ ··· 1293 1276 1294 1277 ips_driver->signals.all = 0; 1295 1278 1279 + DC_LOG_IPS( 1280 + "%s check (allow_ips1=%d allow_ips2=%d) (ips1_commit=%d ips2_commit=%d)", 1281 + __func__, 1282 + ips_driver->signals.bits.allow_ips1, 1283 + ips_driver->signals.bits.allow_ips2, 1284 + ips_fw->signals.bits.ips1_commit, 1285 + ips_fw->signals.bits.ips2_commit); 1286 + 1296 1287 if (prev_driver_signals.bits.allow_ips2) { 1288 + DC_LOG_IPS( 1289 + "wait IPS2 eval (ips1_commit=%d ips2_commit=%d)", 1290 + ips_fw->signals.bits.ips1_commit, 1291 + ips_fw->signals.bits.ips2_commit); 1292 + 1297 1293 udelay(dc->debug.ips2_eval_delay_us); 1298 1294 1299 1295 if (ips_fw->signals.bits.ips2_commit) { 1296 + DC_LOG_IPS( 1297 + "exit IPS2 #1 (ips1_commit=%d ips2_commit=%d)", 1298 + ips_fw->signals.bits.ips1_commit, 1299 + ips_fw->signals.bits.ips2_commit); 1300 + 1300 1301 // Tell PMFW to exit low power state 1301 1302 dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); 1303 + 1304 + DC_LOG_IPS( 1305 + "wait IPS2 entry delay (ips1_commit=%d ips2_commit=%d)", 1306 + ips_fw->signals.bits.ips1_commit, 1307 + ips_fw->signals.bits.ips2_commit); 1302 1308 1303 1309 // Wait for IPS2 entry upper bound 1304 1310 udelay(dc->debug.ips2_entry_delay_us); 1305 1311 1312 + DC_LOG_IPS( 1313 + "exit IPS2 #2 (ips1_commit=%d ips2_commit=%d)", 1314 + ips_fw->signals.bits.ips1_commit, 1315 + ips_fw->signals.bits.ips2_commit); 1316 + 1306 1317 dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); 1318 + 1319 + DC_LOG_IPS( 1320 + "wait IPS2 commit clear (ips1_commit=%d ips2_commit=%d)", 1321 + ips_fw->signals.bits.ips1_commit, 1322 + ips_fw->signals.bits.ips2_commit); 1307 1323 1308 1324 while (ips_fw->signals.bits.ips2_commit) 1309 1325 udelay(1); 1310 1326 1327 + DC_LOG_IPS( 1328 + "wait hw_pwr_up (ips1_commit=%d ips2_commit=%d)", 1329 + ips_fw->signals.bits.ips1_commit, 1330 + ips_fw->signals.bits.ips2_commit); 1331 + 1311 1332 if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true)) 1312 1333 ASSERT(0); 
1334 + 1335 + DC_LOG_IPS( 1336 + "resync inbox1 (ips1_commit=%d ips2_commit=%d)", 1337 + ips_fw->signals.bits.ips1_commit, 1338 + ips_fw->signals.bits.ips2_commit); 1313 1339 1314 1340 dmub_srv_sync_inbox1(dc->ctx->dmub_srv->dmub); 1315 1341 } ··· 1360 1300 1361 1301 dc_dmub_srv_notify_idle(dc, false); 1362 1302 if (prev_driver_signals.bits.allow_ips1) { 1303 + DC_LOG_IPS( 1304 + "wait for IPS1 commit clear (ips1_commit=%d ips2_commit=%d)", 1305 + ips_fw->signals.bits.ips1_commit, 1306 + ips_fw->signals.bits.ips2_commit); 1307 + 1363 1308 while (ips_fw->signals.bits.ips1_commit) 1364 1309 udelay(1); 1365 1310 1311 + DC_LOG_IPS( 1312 + "wait for IPS1 commit clear done (ips1_commit=%d ips2_commit=%d)", 1313 + ips_fw->signals.bits.ips1_commit, 1314 + ips_fw->signals.bits.ips2_commit); 1366 1315 } 1367 1316 } 1368 1317 1369 1318 if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true)) 1370 1319 ASSERT(0); 1320 + 1321 + DC_LOG_IPS("%s exited", __func__); 1371 1322 } 1372 1323 1373 1324 void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state powerState) ··· 1405 1334 1406 1335 if (dc_dmub_srv->idle_allowed == allow_idle) 1407 1336 return; 1337 + 1338 + DC_LOG_IPS("%s state change: old=%d new=%d", __func__, dc_dmub_srv->idle_allowed, allow_idle); 1408 1339 1409 1340 /* 1410 1341 * Entering a low power state requires a driver notification.
+1
drivers/gpu/drm/amd/display/include/logger_types.h
··· 64 64 #define DC_LOG_DWB(...) drm_dbg((DC_LOGGER)->dev, __VA_ARGS__) 65 65 #define DC_LOG_DP2(...) drm_dbg_dp((DC_LOGGER)->dev, __VA_ARGS__) 66 66 #define DC_LOG_AUTO_DPM_TEST(...) pr_debug("[AutoDPMTest]: "__VA_ARGS__) 67 + #define DC_LOG_IPS(...) pr_debug("[IPS]: "__VA_ARGS__) 67 68 68 69 struct dc_log_buffer_ctx { 69 70 char *buf;