Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/xe/pm: Add trace for pm functions

Add traces for the xe pm functions for better debuggability.

v2: Fix indentation and add trace for xe_pm_runtime_get_ioctl

Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240717125950.9952-1-nirmoy.das@intel.com
Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>

+60
+8
drivers/gpu/drm/xe/xe_pm.c
··· 20 20 #include "xe_guc.h" 21 21 #include "xe_irq.h" 22 22 #include "xe_pcode.h" 23 + #include "xe_trace.h" 23 24 #include "xe_wa.h" 24 25 25 26 /** ··· 88 87 int err; 89 88 90 89 drm_dbg(&xe->drm, "Suspending device\n"); 90 + trace_xe_pm_suspend(xe, __builtin_return_address(0)); 91 91 92 92 for_each_gt(gt, xe, id) 93 93 xe_gt_suspend_prepare(gt); ··· 133 131 int err; 134 132 135 133 drm_dbg(&xe->drm, "Resuming device\n"); 134 + trace_xe_pm_resume(xe, __builtin_return_address(0)); 136 135 137 136 for_each_tile(tile, xe, id) 138 137 xe_wa_apply_tile_workarounds(tile); ··· 329 326 u8 id; 330 327 int err = 0; 331 328 329 + trace_xe_pm_runtime_suspend(xe, __builtin_return_address(0)); 332 330 /* Disable access_ongoing asserts and prevent recursive pm calls */ 333 331 xe_pm_write_callback_task(xe, current); 334 332 ··· 403 399 u8 id; 404 400 int err = 0; 405 401 402 + trace_xe_pm_runtime_resume(xe, __builtin_return_address(0)); 406 403 /* Disable access_ongoing asserts and prevent recursive pm calls */ 407 404 xe_pm_write_callback_task(xe, current); 408 405 ··· 468 463 */ 469 464 void xe_pm_runtime_get(struct xe_device *xe) 470 465 { 466 + trace_xe_pm_runtime_get(xe, __builtin_return_address(0)); 471 467 pm_runtime_get_noresume(xe->drm.dev); 472 468 473 469 if (xe_pm_read_callback_task(xe) == current) ··· 484 478 */ 485 479 void xe_pm_runtime_put(struct xe_device *xe) 486 480 { 481 + trace_xe_pm_runtime_put(xe, __builtin_return_address(0)); 487 482 if (xe_pm_read_callback_task(xe) == current) { 488 483 pm_runtime_put_noidle(xe->drm.dev); 489 484 } else { ··· 502 495 */ 503 496 int xe_pm_runtime_get_ioctl(struct xe_device *xe) 504 497 { 498 + trace_xe_pm_runtime_get_ioctl(xe, __builtin_return_address(0)); 505 499 if (WARN_ON(xe_pm_read_callback_task(xe) == current)) 506 500 return -ELOOP; 507 501
+52
drivers/gpu/drm/xe/xe_trace.h
··· 369 369 (u32)(__entry->val >> 32)) 370 370 ); 371 371 372 + DECLARE_EVENT_CLASS(xe_pm_runtime, 373 + TP_PROTO(struct xe_device *xe, void *caller), 374 + TP_ARGS(xe, caller), 375 + 376 + TP_STRUCT__entry( 377 + __string(dev, __dev_name_xe(xe)) 378 + __field(void *, caller) 379 + ), 380 + 381 + TP_fast_assign( 382 + __assign_str(dev); 383 + __entry->caller = caller; 384 + ), 385 + 386 + TP_printk("dev=%s caller_function=%pS", __get_str(dev), __entry->caller) 387 + ); 388 + 389 + DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_get, 390 + TP_PROTO(struct xe_device *xe, void *caller), 391 + TP_ARGS(xe, caller) 392 + ); 393 + 394 + DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_put, 395 + TP_PROTO(struct xe_device *xe, void *caller), 396 + TP_ARGS(xe, caller) 397 + ); 398 + 399 + DEFINE_EVENT(xe_pm_runtime, xe_pm_resume, 400 + TP_PROTO(struct xe_device *xe, void *caller), 401 + TP_ARGS(xe, caller) 402 + ); 403 + 404 + DEFINE_EVENT(xe_pm_runtime, xe_pm_suspend, 405 + TP_PROTO(struct xe_device *xe, void *caller), 406 + TP_ARGS(xe, caller) 407 + ); 408 + 409 + DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_resume, 410 + TP_PROTO(struct xe_device *xe, void *caller), 411 + TP_ARGS(xe, caller) 412 + ); 413 + 414 + DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_suspend, 415 + TP_PROTO(struct xe_device *xe, void *caller), 416 + TP_ARGS(xe, caller) 417 + ); 418 + 419 + DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_get_ioctl, 420 + TP_PROTO(struct xe_device *xe, void *caller), 421 + TP_ARGS(xe, caller) 422 + ); 423 + 372 424 #endif 373 425 374 426 /* This part must be outside protection */