Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/xe/guc: Prefer GT oriented logs in submit code

For better diagnostics, use xe_gt_err() instead of drm_err().

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20241107194741.2167-3-michal.wajdeczko@intel.com

+12 -13
drivers/gpu/drm/xe/xe_guc_submit.c
··· 763 763 struct xe_exec_queue *q) 764 764 { 765 765 MAKE_SCHED_CONTEXT_ACTION(q, DISABLE); 766 - struct xe_device *xe = guc_to_xe(guc); 767 766 int ret; 768 767 769 768 set_min_preemption_timeout(guc, q); ··· 772 773 if (!ret) { 773 774 struct xe_gpu_scheduler *sched = &q->guc->sched; 774 775 775 - drm_warn(&xe->drm, "Pending enable failed to respond"); 776 + xe_gt_warn(q->gt, "Pending enable failed to respond\n"); 776 777 xe_sched_submission_start(sched); 777 778 xe_gt_reset_async(q->gt); 778 779 xe_sched_tdr_queue_imm(sched); ··· 816 817 */ 817 818 void xe_guc_submit_wedge(struct xe_guc *guc) 818 819 { 819 - struct xe_device *xe = guc_to_xe(guc); 820 + struct xe_gt *gt = guc_to_gt(guc); 820 821 struct xe_exec_queue *q; 821 822 unsigned long index; 822 823 int err; ··· 826 827 err = devm_add_action_or_reset(guc_to_xe(guc)->drm.dev, 827 828 guc_submit_wedged_fini, guc); 828 829 if (err) { 829 - drm_err(&xe->drm, "Failed to register xe_guc_submit clean-up on wedged.mode=2. Although device is wedged.\n"); 830 + xe_gt_err(gt, "Failed to register clean-up on wedged.mode=2; " 831 + "Although device is wedged.\n"); 830 832 return; 831 833 } 832 834 ··· 859 859 container_of(w, struct xe_guc_exec_queue, lr_tdr); 860 860 struct xe_exec_queue *q = ge->q; 861 861 struct xe_guc *guc = exec_queue_to_guc(q); 862 - struct xe_device *xe = guc_to_xe(guc); 863 862 struct xe_gpu_scheduler *sched = &ge->sched; 864 863 bool wedged; 865 864 ··· 896 897 !exec_queue_pending_disable(q) || 897 898 xe_guc_read_stopped(guc), HZ * 5); 898 899 if (!ret) { 899 - drm_warn(&xe->drm, "Schedule disable failed to respond"); 900 + xe_gt_warn(q->gt, "Schedule disable failed to respond\n"); 900 901 xe_sched_submission_start(sched); 901 902 xe_gt_reset_async(q->gt); 902 903 return; ··· 1800 1801 static struct xe_exec_queue * 1801 1802 g2h_exec_queue_lookup(struct xe_guc *guc, u32 guc_id) 1802 1803 { 1803 - struct xe_device *xe = guc_to_xe(guc); 1804 + struct xe_gt *gt = guc_to_gt(guc); 1804 1805 
struct xe_exec_queue *q; 1805 1806 1806 1807 if (unlikely(guc_id >= GUC_ID_MAX)) { 1807 - drm_err(&xe->drm, "Invalid guc_id %u", guc_id); 1808 + xe_gt_err(gt, "Invalid guc_id %u\n", guc_id); 1808 1809 return NULL; 1809 1810 } 1810 1811 1811 1812 q = xa_load(&guc->submission_state.exec_queue_lookup, guc_id); 1812 1813 if (unlikely(!q)) { 1813 - drm_err(&xe->drm, "Not engine present for guc_id %u", guc_id); 1814 + xe_gt_err(gt, "Not engine present for guc_id %u\n", guc_id); 1814 1815 return NULL; 1815 1816 } 1816 1817 ··· 2038 2039 2039 2040 int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len) 2040 2041 { 2041 - struct xe_device *xe = guc_to_xe(guc); 2042 + struct xe_gt *gt = guc_to_gt(guc); 2042 2043 u8 guc_class, instance; 2043 2044 u32 reason; 2044 2045 ··· 2050 2051 reason = msg[2]; 2051 2052 2052 2053 /* Unexpected failure of a hardware feature, log an actual error */ 2053 - drm_err(&xe->drm, "GuC engine reset request failed on %d:%d because 0x%08X", 2054 - guc_class, instance, reason); 2054 + xe_gt_err(gt, "GuC engine reset request failed on %d:%d because 0x%08X", 2055 + guc_class, instance, reason); 2055 2056 2056 - xe_gt_reset_async(guc_to_gt(guc)); 2057 + xe_gt_reset_async(gt); 2057 2058 2058 2059 return 0; 2059 2060 }