Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/xe: Expose user fence from xe_sync_entry

By allowing a reference to the user fence to be taken, we can
control its lifetime outside of sync entries.

This is needed to allow a vma to track the associated
user fence that was provided with the bind ioctl.

v2: xe_user_fence can be kept opaque (Jani, Matt)
v3: indent fix (Matt)

Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240215181152.450082-2-mika.kuoppala@linux.intel.com
(cherry picked from commit 977e5b82e0901480bc201342d39f855fc0a2ef47)
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>

authored by

Mika Kuoppala and committed by
Thomas Hellström
86b3cd6d 4ca5c829

+53 -11
+48 -10
drivers/gpu/drm/xe/xe_sync.c
··· 19 19 #include "xe_macros.h" 20 20 #include "xe_sched_job_types.h" 21 21 22 - struct user_fence { 22 + struct xe_user_fence { 23 23 struct xe_device *xe; 24 24 struct kref refcount; 25 25 struct dma_fence_cb cb; ··· 27 27 struct mm_struct *mm; 28 28 u64 __user *addr; 29 29 u64 value; 30 + int signalled; 30 31 }; 31 32 32 33 static void user_fence_destroy(struct kref *kref) 33 34 { 34 - struct user_fence *ufence = container_of(kref, struct user_fence, 35 + struct xe_user_fence *ufence = container_of(kref, struct xe_user_fence, 35 36 refcount); 36 37 37 38 mmdrop(ufence->mm); 38 39 kfree(ufence); 39 40 } 40 41 41 - static void user_fence_get(struct user_fence *ufence) 42 + static void user_fence_get(struct xe_user_fence *ufence) 42 43 { 43 44 kref_get(&ufence->refcount); 44 45 } 45 46 46 - static void user_fence_put(struct user_fence *ufence) 47 + static void user_fence_put(struct xe_user_fence *ufence) 47 48 { 48 49 kref_put(&ufence->refcount, user_fence_destroy); 49 50 } 50 51 51 - static struct user_fence *user_fence_create(struct xe_device *xe, u64 addr, 52 - u64 value) 52 + static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr, 53 + u64 value) 53 54 { 54 - struct user_fence *ufence; 55 + struct xe_user_fence *ufence; 55 56 56 57 ufence = kmalloc(sizeof(*ufence), GFP_KERNEL); 57 58 if (!ufence) ··· 70 69 71 70 static void user_fence_worker(struct work_struct *w) 72 71 { 73 - struct user_fence *ufence = container_of(w, struct user_fence, worker); 72 + struct xe_user_fence *ufence = container_of(w, struct xe_user_fence, worker); 74 73 75 74 if (mmget_not_zero(ufence->mm)) { 76 75 kthread_use_mm(ufence->mm); ··· 81 80 } 82 81 83 82 wake_up_all(&ufence->xe->ufence_wq); 83 + WRITE_ONCE(ufence->signalled, 1); 84 84 user_fence_put(ufence); 85 85 } 86 86 87 - static void kick_ufence(struct user_fence *ufence, struct dma_fence *fence) 87 + static void kick_ufence(struct xe_user_fence *ufence, struct dma_fence *fence) 88 88 { 89 89 
INIT_WORK(&ufence->worker, user_fence_worker); 90 90 queue_work(ufence->xe->ordered_wq, &ufence->worker); ··· 94 92 95 93 static void user_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb) 96 94 { 97 - struct user_fence *ufence = container_of(cb, struct user_fence, cb); 95 + struct xe_user_fence *ufence = container_of(cb, struct xe_user_fence, cb); 98 96 99 97 kick_ufence(ufence, fence); 100 98 } ··· 341 339 kfree(cf); 342 340 343 341 return ERR_PTR(-ENOMEM); 342 + } 343 + 344 + /** 345 + * xe_sync_ufence_get() - Get user fence from sync 346 + * @sync: input sync 347 + * 348 + * Get a user fence reference from sync. 349 + * 350 + * Return: xe_user_fence pointer with reference 351 + */ 352 + struct xe_user_fence *xe_sync_ufence_get(struct xe_sync_entry *sync) 353 + { 354 + user_fence_get(sync->ufence); 355 + 356 + return sync->ufence; 357 + } 358 + 359 + /** 360 + * xe_sync_ufence_put() - Put user fence reference 361 + * @ufence: user fence reference 362 + * 363 + */ 364 + void xe_sync_ufence_put(struct xe_user_fence *ufence) 365 + { 366 + user_fence_put(ufence); 367 + } 368 + 369 + /** 370 + * xe_sync_ufence_get_status() - Get user fence status 371 + * @ufence: user fence 372 + * 373 + * Return: 1 if signalled, 0 not signalled, <0 on error 374 + */ 375 + int xe_sync_ufence_get_status(struct xe_user_fence *ufence) 376 + { 377 + return READ_ONCE(ufence->signalled); 344 378 }
+4
drivers/gpu/drm/xe/xe_sync.h
··· 38 38 return !!sync->ufence; 39 39 } 40 40 41 + struct xe_user_fence *xe_sync_ufence_get(struct xe_sync_entry *sync); 42 + void xe_sync_ufence_put(struct xe_user_fence *ufence); 43 + int xe_sync_ufence_get_status(struct xe_user_fence *ufence); 44 + 41 45 #endif
+1 -1
drivers/gpu/drm/xe/xe_sync_types.h
··· 18 18 struct drm_syncobj *syncobj; 19 19 struct dma_fence *fence; 20 20 struct dma_fence_chain *chain_fence; 21 - struct user_fence *ufence; 21 + struct xe_user_fence *ufence; 22 22 u64 addr; 23 23 u64 timeline_value; 24 24 u32 type;