Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/xe/uapi: Remove support for persistent exec_queues

Persistent exec_queues delay explicit destruction of exec_queues
until they are done executing, but destruction on process exit
is still immediate. It turns out no UMD is relying on this
functionality, so remove it. If a use-case turns up in the
future, it can be re-added.

Persistent exec_queues were never used for long-running (LR) VMs.

v2:
- Don't add an "UNUSED" define for the missing property
(Lucas, Rodrigo)
v3:
- Remove the remaining struct xe_exec_queue::persistent state
(Niranjana, Lucas)

Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: David Airlie <airlied@gmail.com>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Lucas De Marchi <lucas.demarchi@intel.com>
Cc: Francois Dugast <francois.dugast@intel.com>
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Acked-by: José Roberto de Souza <jose.souza@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240209113444.8396-1-thomas.hellstrom@linux.intel.com
(cherry picked from commit f1a9abc0cf311375695bede1590364864c05976d)
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>

+5 -94
-39
drivers/gpu/drm/xe/xe_device.c
··· 83 83 return 0; 84 84 } 85 85 86 - static void device_kill_persistent_exec_queues(struct xe_device *xe, 87 - struct xe_file *xef); 88 - 89 86 static void xe_file_close(struct drm_device *dev, struct drm_file *file) 90 87 { 91 88 struct xe_device *xe = to_xe_device(dev); ··· 99 102 mutex_unlock(&xef->exec_queue.lock); 100 103 xa_destroy(&xef->exec_queue.xa); 101 104 mutex_destroy(&xef->exec_queue.lock); 102 - device_kill_persistent_exec_queues(xe, xef); 103 - 104 105 mutex_lock(&xef->vm.lock); 105 106 xa_for_each(&xef->vm.xa, idx, vm) 106 107 xe_vm_close_and_put(vm); ··· 249 254 if (err >= 0) 250 255 xa_erase(&xe->usm.asid_to_vm, asid); 251 256 } 252 - 253 - drmm_mutex_init(&xe->drm, &xe->persistent_engines.lock); 254 - INIT_LIST_HEAD(&xe->persistent_engines.list); 255 257 256 258 spin_lock_init(&xe->pinned.lock); 257 259 INIT_LIST_HEAD(&xe->pinned.kernel_bo_present); ··· 560 568 561 569 void xe_device_shutdown(struct xe_device *xe) 562 570 { 563 - } 564 - 565 - void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q) 566 - { 567 - mutex_lock(&xe->persistent_engines.lock); 568 - list_add_tail(&q->persistent.link, &xe->persistent_engines.list); 569 - mutex_unlock(&xe->persistent_engines.lock); 570 - } 571 - 572 - void xe_device_remove_persistent_exec_queues(struct xe_device *xe, 573 - struct xe_exec_queue *q) 574 - { 575 - mutex_lock(&xe->persistent_engines.lock); 576 - if (!list_empty(&q->persistent.link)) 577 - list_del(&q->persistent.link); 578 - mutex_unlock(&xe->persistent_engines.lock); 579 - } 580 - 581 - static void device_kill_persistent_exec_queues(struct xe_device *xe, 582 - struct xe_file *xef) 583 - { 584 - struct xe_exec_queue *q, *next; 585 - 586 - mutex_lock(&xe->persistent_engines.lock); 587 - list_for_each_entry_safe(q, next, &xe->persistent_engines.list, 588 - persistent.link) 589 - if (q->persistent.xef == xef) { 590 - xe_exec_queue_kill(q); 591 - list_del_init(&q->persistent.link); 592 - } 593 - 
mutex_unlock(&xe->persistent_engines.lock); 594 571 } 595 572 596 573 void xe_device_wmb(struct xe_device *xe)
-4
drivers/gpu/drm/xe/xe_device.h
··· 42 42 void xe_device_remove(struct xe_device *xe); 43 43 void xe_device_shutdown(struct xe_device *xe); 44 44 45 - void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q); 46 - void xe_device_remove_persistent_exec_queues(struct xe_device *xe, 47 - struct xe_exec_queue *q); 48 - 49 45 void xe_device_wmb(struct xe_device *xe); 50 46 51 47 static inline struct xe_file *to_xe_file(const struct drm_file *file)
-8
drivers/gpu/drm/xe/xe_device_types.h
··· 341 341 struct mutex lock; 342 342 } usm; 343 343 344 - /** @persistent_engines: engines that are closed but still running */ 345 - struct { 346 - /** @lock: protects persistent engines */ 347 - struct mutex lock; 348 - /** @list: list of persistent engines */ 349 - struct list_head list; 350 - } persistent_engines; 351 - 352 344 /** @pinned: pinned BO state */ 353 345 struct { 354 346 /** @lock: protected pinned BO list state */
+5 -28
drivers/gpu/drm/xe/xe_exec_queue.c
··· 60 60 q->fence_irq = &gt->fence_irq[hwe->class]; 61 61 q->ring_ops = gt->ring_ops[hwe->class]; 62 62 q->ops = gt->exec_queue_ops; 63 - INIT_LIST_HEAD(&q->persistent.link); 64 63 INIT_LIST_HEAD(&q->compute.link); 65 64 INIT_LIST_HEAD(&q->multi_gt_link); 66 65 ··· 325 326 return q->ops->set_preempt_timeout(q, value); 326 327 } 327 328 328 - static int exec_queue_set_persistence(struct xe_device *xe, struct xe_exec_queue *q, 329 - u64 value, bool create) 330 - { 331 - if (XE_IOCTL_DBG(xe, !create)) 332 - return -EINVAL; 333 - 334 - if (XE_IOCTL_DBG(xe, xe_vm_in_preempt_fence_mode(q->vm))) 335 - return -EINVAL; 336 - 337 - if (value) 338 - q->flags |= EXEC_QUEUE_FLAG_PERSISTENT; 339 - else 340 - q->flags &= ~EXEC_QUEUE_FLAG_PERSISTENT; 341 - 342 - return 0; 343 - } 344 - 345 329 static int exec_queue_set_job_timeout(struct xe_device *xe, struct xe_exec_queue *q, 346 330 u64 value, bool create) 347 331 { ··· 396 414 [DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority, 397 415 [DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice, 398 416 [DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT] = exec_queue_set_preemption_timeout, 399 - [DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE] = exec_queue_set_persistence, 400 417 [DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT] = exec_queue_set_job_timeout, 401 418 [DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER] = exec_queue_set_acc_trigger, 402 419 [DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY] = exec_queue_set_acc_notify, ··· 422 441 return -EINVAL; 423 442 424 443 idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs)); 444 + if (!exec_queue_set_property_funcs[idx]) 445 + return -EINVAL; 446 + 425 447 return exec_queue_set_property_funcs[idx](xe, q, ext.value, create); 426 448 } 427 449 ··· 688 704 } 689 705 690 706 q = xe_exec_queue_create(xe, vm, logical_mask, 691 - args->width, hwe, 692 - xe_vm_in_lr_mode(vm) ? 
0 : 693 - EXEC_QUEUE_FLAG_PERSISTENT); 707 + args->width, hwe, 0); 694 708 up_read(&vm->lock); 695 709 xe_vm_put(vm); 696 710 if (IS_ERR(q)) ··· 709 727 if (XE_IOCTL_DBG(xe, err)) 710 728 goto kill_exec_queue; 711 729 } 712 - 713 - q->persistent.xef = xef; 714 730 715 731 mutex_lock(&xef->exec_queue.lock); 716 732 err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL); ··· 852 872 if (XE_IOCTL_DBG(xe, !q)) 853 873 return -ENOENT; 854 874 855 - if (!(q->flags & EXEC_QUEUE_FLAG_PERSISTENT)) 856 - xe_exec_queue_kill(q); 857 - else 858 - xe_device_add_persistent_exec_queues(xe, q); 875 + xe_exec_queue_kill(q); 859 876 860 877 trace_xe_exec_queue_close(q); 861 878 xe_exec_queue_put(q);
-10
drivers/gpu/drm/xe/xe_exec_queue_types.h
··· 105 105 struct xe_guc_exec_queue *guc; 106 106 }; 107 107 108 - /** 109 - * @persistent: persistent exec queue state 110 - */ 111 - struct { 112 - /** @xef: file which this exec queue belongs to */ 113 - struct xe_file *xef; 114 - /** @link: link in list of persistent exec queues */ 115 - struct list_head link; 116 - } persistent; 117 - 118 108 union { 119 109 /** 120 110 * @parallel: parallel submission state
-2
drivers/gpu/drm/xe/xe_execlist.c
··· 378 378 list_del(&exl->active_link); 379 379 spin_unlock_irqrestore(&exl->port->lock, flags); 380 380 381 - if (q->flags & EXEC_QUEUE_FLAG_PERSISTENT) 382 - xe_device_remove_persistent_exec_queues(xe, q); 383 381 drm_sched_entity_fini(&exl->entity); 384 382 drm_sched_fini(&exl->sched); 385 383 kfree(exl);
-2
drivers/gpu/drm/xe/xe_guc_submit.c
··· 1028 1028 1029 1029 if (xe_exec_queue_is_lr(q)) 1030 1030 cancel_work_sync(&ge->lr_tdr); 1031 - if (q->flags & EXEC_QUEUE_FLAG_PERSISTENT) 1032 - xe_device_remove_persistent_exec_queues(gt_to_xe(q->gt), q); 1033 1031 release_guc_id(guc, q); 1034 1032 xe_sched_entity_fini(&ge->entity); 1035 1033 xe_sched_fini(&ge->sched);
-1
include/uapi/drm/xe_drm.h
··· 1046 1046 #define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0 1047 1047 #define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1 1048 1048 #define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT 2 1049 - #define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE 3 1050 1049 #define DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT 4 1051 1050 #define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER 5 1052 1051 #define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY 6