Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/xe/guc: Prefer GT oriented asserts in submit code

For better diagnostics, use xe_gt_assert() instead of xe_assert().

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20241107194741.2167-2-michal.wajdeczko@intel.com

+21 -31
drivers/gpu/drm/xe/xe_guc_submit.c
··· 412 412 static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q) 413 413 { 414 414 struct exec_queue_policy policy; 415 - struct xe_device *xe = guc_to_xe(guc); 416 415 enum xe_exec_queue_priority prio = q->sched_props.priority; 417 416 u32 timeslice_us = q->sched_props.timeslice_us; 418 417 u32 preempt_timeout_us = q->sched_props.preempt_timeout_us; 419 418 420 - xe_assert(xe, exec_queue_registered(q)); 419 + xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q)); 421 420 422 421 __guc_exec_queue_policy_start_klv(&policy, q->guc->id); 423 422 __guc_exec_queue_policy_add_priority(&policy, xe_exec_queue_prio_to_guc[prio]); ··· 450 451 struct guc_ctxt_registration_info *info) 451 452 { 452 453 #define MAX_MLRC_REG_SIZE (13 + XE_HW_ENGINE_MAX_INSTANCE * 2) 453 - struct xe_device *xe = guc_to_xe(guc); 454 454 u32 action[MAX_MLRC_REG_SIZE]; 455 455 int len = 0; 456 456 int i; 457 457 458 - xe_assert(xe, xe_exec_queue_is_parallel(q)); 458 + xe_gt_assert(guc_to_gt(guc), xe_exec_queue_is_parallel(q)); 459 459 460 460 action[len++] = XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC; 461 461 action[len++] = info->flags; ··· 477 479 action[len++] = upper_32_bits(xe_lrc_descriptor(lrc)); 478 480 } 479 481 480 - xe_assert(xe, len <= MAX_MLRC_REG_SIZE); 482 + xe_gt_assert(guc_to_gt(guc), len <= MAX_MLRC_REG_SIZE); 481 483 #undef MAX_MLRC_REG_SIZE 482 484 483 485 xe_guc_ct_send(&guc->ct, action, len, 0, 0); ··· 511 513 struct xe_lrc *lrc = q->lrc[0]; 512 514 struct guc_ctxt_registration_info info; 513 515 514 - xe_assert(xe, !exec_queue_registered(q)); 516 + xe_gt_assert(guc_to_gt(guc), !exec_queue_registered(q)); 515 517 516 518 memset(&info, 0, sizeof(info)); 517 519 info.context_idx = q->guc->id; ··· 601 603 if (wq_wait_for_space(q, wq_space_until_wrap(q))) 602 604 return -ENODEV; 603 605 604 - xe_assert(xe, FIELD_FIT(WQ_LEN_MASK, len_dw)); 606 + xe_gt_assert(guc_to_gt(guc), FIELD_FIT(WQ_LEN_MASK, len_dw)); 605 607 606 608 parallel_write(xe, map, wq[q->guc->wqi_tail 
/ sizeof(u32)], 607 609 FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) | ··· 641 643 wqi[i++] = lrc->ring.tail / sizeof(u64); 642 644 } 643 645 644 - xe_assert(xe, i == wqi_size / sizeof(u32)); 646 + xe_gt_assert(guc_to_gt(guc), i == wqi_size / sizeof(u32)); 645 647 646 648 iosys_map_incr(&map, offsetof(struct guc_submit_parallel_scratch, 647 649 wq[q->guc->wqi_tail / sizeof(u32)])); 648 650 xe_map_memcpy_to(xe, &map, 0, wqi, wqi_size); 649 651 q->guc->wqi_tail += wqi_size; 650 - xe_assert(xe, q->guc->wqi_tail <= WQ_SIZE); 652 + xe_gt_assert(guc_to_gt(guc), q->guc->wqi_tail <= WQ_SIZE); 651 653 652 654 xe_device_wmb(xe); 653 655 ··· 659 661 static void submit_exec_queue(struct xe_exec_queue *q) 660 662 { 661 663 struct xe_guc *guc = exec_queue_to_guc(q); 662 - struct xe_device *xe = guc_to_xe(guc); 663 664 struct xe_lrc *lrc = q->lrc[0]; 664 665 u32 action[3]; 665 666 u32 g2h_len = 0; ··· 666 669 int len = 0; 667 670 bool extra_submit = false; 668 671 669 - xe_assert(xe, exec_queue_registered(q)); 672 + xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q)); 670 673 671 674 if (xe_exec_queue_is_parallel(q)) 672 675 wq_item_append(q); ··· 713 716 struct xe_sched_job *job = to_xe_sched_job(drm_job); 714 717 struct xe_exec_queue *q = job->q; 715 718 struct xe_guc *guc = exec_queue_to_guc(q); 716 - struct xe_device *xe = guc_to_xe(guc); 717 719 struct dma_fence *fence = NULL; 718 720 bool lr = xe_exec_queue_is_lr(q); 719 721 720 - xe_assert(xe, !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) || 721 - exec_queue_banned(q) || exec_queue_suspended(q)); 722 + xe_gt_assert(guc_to_gt(guc), !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) || 723 + exec_queue_banned(q) || exec_queue_suspended(q)); 722 724 723 725 trace_xe_sched_job_run(job); 724 726 ··· 863 867 struct xe_gpu_scheduler *sched = &ge->sched; 864 868 bool wedged; 865 869 866 - xe_assert(xe, xe_exec_queue_is_lr(q)); 870 + xe_gt_assert(guc_to_gt(guc), xe_exec_queue_is_lr(q)); 867 871 
trace_xe_exec_queue_lr_cleanup(q); 868 872 869 873 wedged = guc_submit_hint_wedged(exec_queue_to_guc(q)); ··· 1270 1274 { 1271 1275 struct xe_exec_queue *q = msg->private_data; 1272 1276 struct xe_guc *guc = exec_queue_to_guc(q); 1273 - struct xe_device *xe = guc_to_xe(guc); 1274 1277 1275 - xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_PERMANENT)); 1278 + xe_gt_assert(guc_to_gt(guc), !(q->flags & EXEC_QUEUE_FLAG_PERMANENT)); 1276 1279 trace_xe_exec_queue_cleanup_entity(q); 1277 1280 1278 1281 if (exec_queue_registered(q)) ··· 1307 1312 static void suspend_fence_signal(struct xe_exec_queue *q) 1308 1313 { 1309 1314 struct xe_guc *guc = exec_queue_to_guc(q); 1310 - struct xe_device *xe = guc_to_xe(guc); 1311 1315 1312 - xe_assert(xe, exec_queue_suspended(q) || exec_queue_killed(q) || 1313 - xe_guc_read_stopped(guc)); 1314 - xe_assert(xe, q->guc->suspend_pending); 1316 + xe_gt_assert(guc_to_gt(guc), exec_queue_suspended(q) || exec_queue_killed(q) || 1317 + xe_guc_read_stopped(guc)); 1318 + xe_gt_assert(guc_to_gt(guc), q->guc->suspend_pending); 1315 1319 1316 1320 __suspend_fence_signal(q); 1317 1321 } ··· 1406 1412 { 1407 1413 struct xe_gpu_scheduler *sched; 1408 1414 struct xe_guc *guc = exec_queue_to_guc(q); 1409 - struct xe_device *xe = guc_to_xe(guc); 1410 1415 struct xe_guc_exec_queue *ge; 1411 1416 long timeout; 1412 1417 int err, i; 1413 1418 1414 - xe_assert(xe, xe_device_uc_enabled(guc_to_xe(guc))); 1419 + xe_gt_assert(guc_to_gt(guc), xe_device_uc_enabled(guc_to_xe(guc))); 1415 1420 1416 1421 ge = kzalloc(sizeof(*ge), GFP_KERNEL); 1417 1422 if (!ge) ··· 1623 1630 struct xe_gpu_scheduler *sched = &q->guc->sched; 1624 1631 struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_RESUME; 1625 1632 struct xe_guc *guc = exec_queue_to_guc(q); 1626 - struct xe_device *xe = guc_to_xe(guc); 1627 1633 1628 - xe_assert(xe, !q->guc->suspend_pending); 1634 + xe_gt_assert(guc_to_gt(guc), !q->guc->suspend_pending); 1629 1635 1630 1636 xe_sched_msg_lock(sched); 1631 1637 
guc_exec_queue_try_add_msg(q, msg, RESUME); ··· 1736 1744 { 1737 1745 struct xe_exec_queue *q; 1738 1746 unsigned long index; 1739 - struct xe_device *xe = guc_to_xe(guc); 1740 1747 1741 - xe_assert(xe, xe_guc_read_stopped(guc) == 1); 1748 + xe_gt_assert(guc_to_gt(guc), xe_guc_read_stopped(guc) == 1); 1742 1749 1743 1750 mutex_lock(&guc->submission_state.lock); 1744 1751 ··· 1779 1788 { 1780 1789 struct xe_exec_queue *q; 1781 1790 unsigned long index; 1782 - struct xe_device *xe = guc_to_xe(guc); 1783 1791 1784 - xe_assert(xe, xe_guc_read_stopped(guc) == 1); 1792 + xe_gt_assert(guc_to_gt(guc), xe_guc_read_stopped(guc) == 1); 1785 1793 1786 1794 mutex_lock(&guc->submission_state.lock); 1787 1795 atomic_dec(&guc->submission_state.stopped); ··· 1815 1825 return NULL; 1816 1826 } 1817 1827 1818 - xe_assert(xe, guc_id >= q->guc->id); 1819 - xe_assert(xe, guc_id < (q->guc->id + q->width)); 1828 + xe_gt_assert(guc_to_gt(guc), guc_id >= q->guc->id); 1829 + xe_gt_assert(guc_to_gt(guc), guc_id < (q->guc->id + q->width)); 1820 1830 1821 1831 return q; 1822 1832 }