Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/xe/exec_queue: Prepare last fence for hw engine group resume context

Ensure we can safely take a ref of the exec queue's last fence from the
context of resuming jobs from the hw engine group. The locking requirements
differ from the general case, hence the introduction of this new function.

v2: Add kernel doc, rework the code to prevent code duplication

v3: Fix kernel doc, remove now unnecessary lockdep variants (Matt Brost)

v4: Remove new put function (Matt Brost)

Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240809155156.1955925-7-francois.dugast@intel.com

Authored by Francois Dugast; committed by Matthew Brost.
0d92cd89 7f0d7bee

Total: +33 -2
drivers/gpu/drm/xe/xe_exec_queue.c: +31 -2
drivers/gpu/drm/xe/xe_exec_queue.c
··· 837 837 static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q, 838 838 struct xe_vm *vm) 839 839 { 840 - if (q->flags & EXEC_QUEUE_FLAG_VM) 840 + if (q->flags & EXEC_QUEUE_FLAG_VM) { 841 841 lockdep_assert_held(&vm->lock); 842 - else 842 + } else { 843 843 xe_vm_assert_held(vm); 844 + lockdep_assert_held(&q->hwe->hw_engine_group->mode_sem); 845 + } 844 846 } 845 847 846 848 /** ··· 890 888 if (q->last_fence && 891 889 test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags)) 892 890 xe_exec_queue_last_fence_put(q, vm); 891 + 892 + fence = q->last_fence ? q->last_fence : dma_fence_get_stub(); 893 + dma_fence_get(fence); 894 + return fence; 895 + } 896 + 897 + /** 898 + * xe_exec_queue_last_fence_get_for_resume() - Get last fence 899 + * @q: The exec queue 900 + * @vm: The VM the engine does a bind or exec for 901 + * 902 + * Get last fence, takes a ref. Only safe to be called in the context of 903 + * resuming the hw engine group's long-running exec queue, when the group 904 + * semaphore is held. 905 + * 906 + * Returns: last fence if not signaled, dma fence stub if signaled 907 + */ 908 + struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *q, 909 + struct xe_vm *vm) 910 + { 911 + struct dma_fence *fence; 912 + 913 + lockdep_assert_held_write(&q->hwe->hw_engine_group->mode_sem); 914 + 915 + if (q->last_fence && 916 + test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags)) 917 + xe_exec_queue_last_fence_put_unlocked(q); 893 918 894 919 fence = q->last_fence ? q->last_fence : dma_fence_get_stub(); 895 920 dma_fence_get(fence);
+2
drivers/gpu/drm/xe/xe_exec_queue.h
··· 77 77 void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *e); 78 78 struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *e, 79 79 struct xe_vm *vm); 80 + struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *e, 81 + struct xe_vm *vm); 80 82 void xe_exec_queue_last_fence_set(struct xe_exec_queue *e, struct xe_vm *vm, 81 83 struct dma_fence *fence); 82 84 int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q,