Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Revert "drm/xe/vf: Rebase exec queue parallel commands during migration recovery"

This reverts commit ba180a362128cb71d16c3f0ce6645448011d2607.

Due to a change in the VF migration recovery design, this code
is no longer needed.

v3:
- Add commit message (Michal / Lucas)

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20251002233824.203417-2-michal.wajdeczko@intel.com

-62
-8
drivers/gpu/drm/xe/abi/guc_actions_abi.h
··· 196 196 XE_GUC_REGISTER_CONTEXT_MULTI_LRC_MSG_MIN_LEN = 11, 197 197 }; 198 198 199 - enum xe_guc_context_wq_item_offsets { 200 - XE_GUC_CONTEXT_WQ_HEADER_DATA_0_TYPE_LEN = 0, 201 - XE_GUC_CONTEXT_WQ_EL_INFO_DATA_1_CTX_DESC_LOW, 202 - XE_GUC_CONTEXT_WQ_EL_INFO_DATA_2_GUCCTX_RINGTAIL_FREEZEPOCS, 203 - XE_GUC_CONTEXT_WQ_EL_INFO_DATA_3_WI_FENCE_ID, 204 - XE_GUC_CONTEXT_WQ_EL_CHILD_LIST_DATA_4_RINGTAIL, 205 - }; 206 - 207 199 enum xe_guc_report_status { 208 200 XE_GUC_REPORT_STATUS_UNKNOWN = 0x0, 209 201 XE_GUC_REPORT_STATUS_ACKED = 0x1,
-54
drivers/gpu/drm/xe/xe_guc_submit.c
··· 735 735 if (wq_wait_for_space(q, wqi_size)) 736 736 return; 737 737 738 - xe_gt_assert(guc_to_gt(guc), i == XE_GUC_CONTEXT_WQ_HEADER_DATA_0_TYPE_LEN); 739 738 wqi[i++] = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) | 740 739 FIELD_PREP(WQ_LEN_MASK, len_dw); 741 - xe_gt_assert(guc_to_gt(guc), i == XE_GUC_CONTEXT_WQ_EL_INFO_DATA_1_CTX_DESC_LOW); 742 740 wqi[i++] = xe_lrc_descriptor(q->lrc[0]); 743 - xe_gt_assert(guc_to_gt(guc), i == 744 - XE_GUC_CONTEXT_WQ_EL_INFO_DATA_2_GUCCTX_RINGTAIL_FREEZEPOCS); 745 741 wqi[i++] = FIELD_PREP(WQ_GUC_ID_MASK, q->guc->id) | 746 742 FIELD_PREP(WQ_RING_TAIL_MASK, q->lrc[0]->ring.tail / sizeof(u64)); 747 - xe_gt_assert(guc_to_gt(guc), i == XE_GUC_CONTEXT_WQ_EL_INFO_DATA_3_WI_FENCE_ID); 748 743 wqi[i++] = 0; 749 - xe_gt_assert(guc_to_gt(guc), i == XE_GUC_CONTEXT_WQ_EL_CHILD_LIST_DATA_4_RINGTAIL); 750 744 for (j = 1; j < q->width; ++j) { 751 745 struct xe_lrc *lrc = q->lrc[j]; 752 746 ··· 759 765 760 766 map = xe_lrc_parallel_map(q->lrc[0]); 761 767 parallel_write(xe, map, wq_desc.tail, q->guc->wqi_tail); 762 - } 763 - 764 - static int wq_items_rebase(struct xe_exec_queue *q) 765 - { 766 - struct xe_guc *guc = exec_queue_to_guc(q); 767 - struct xe_device *xe = guc_to_xe(guc); 768 - struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]); 769 - int i = q->guc->wqi_head; 770 - 771 - /* the ring starts after a header struct */ 772 - iosys_map_incr(&map, offsetof(struct guc_submit_parallel_scratch, wq[0])); 773 - 774 - while ((i % WQ_SIZE) != (q->guc->wqi_tail % WQ_SIZE)) { 775 - u32 len_dw, type, val; 776 - 777 - if (drm_WARN_ON_ONCE(&xe->drm, i < 0 || i > 2 * WQ_SIZE)) 778 - break; 779 - 780 - val = xe_map_rd_ring_u32(xe, &map, i / sizeof(u32) + 781 - XE_GUC_CONTEXT_WQ_HEADER_DATA_0_TYPE_LEN, 782 - WQ_SIZE / sizeof(u32)); 783 - len_dw = FIELD_GET(WQ_LEN_MASK, val); 784 - type = FIELD_GET(WQ_TYPE_MASK, val); 785 - 786 - if (drm_WARN_ON_ONCE(&xe->drm, len_dw >= WQ_SIZE / sizeof(u32))) 787 - break; 788 - 789 - if (type == WQ_TYPE_MULTI_LRC) 
{ 790 - val = xe_lrc_descriptor(q->lrc[0]); 791 - xe_map_wr_ring_u32(xe, &map, i / sizeof(u32) + 792 - XE_GUC_CONTEXT_WQ_EL_INFO_DATA_1_CTX_DESC_LOW, 793 - WQ_SIZE / sizeof(u32), val); 794 - } else if (drm_WARN_ON_ONCE(&xe->drm, type != WQ_TYPE_NOOP)) { 795 - break; 796 - } 797 - 798 - i += (len_dw + 1) * sizeof(u32); 799 - } 800 - 801 - if ((i % WQ_SIZE) != (q->guc->wqi_tail % WQ_SIZE)) { 802 - xe_gt_err(q->gt, "Exec queue fixups incomplete - wqi parse failed\n"); 803 - return -EBADMSG; 804 - } 805 - return 0; 806 768 } 807 769 808 770 #define RESUME_PENDING ~0x0ull ··· 2617 2667 mutex_lock(&guc->submission_state.lock); 2618 2668 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { 2619 2669 err = xe_exec_queue_contexts_hwsp_rebase(q, scratch); 2620 - if (err) 2621 - break; 2622 - if (xe_exec_queue_is_parallel(q)) 2623 - err = wq_items_rebase(q); 2624 2670 if (err) 2625 2671 break; 2626 2672 }