// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_exec_queue.h"

#include <linux/nospec.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/xe_drm.h>

#include "xe_device.h"
#include "xe_gt.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_hw_fence.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_migrate.h"
#include "xe_pm.h"
#include "xe_ring_ops_types.h"
#include "xe_trace.h"
#include "xe_vm.h"

enum xe_exec_queue_sched_prop {
	XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
	XE_EXEC_QUEUE_TIMESLICE = 1,
	XE_EXEC_QUEUE_PREEMPT_TIMEOUT = 2,
	XE_EXEC_QUEUE_SCHED_PROP_MAX = 3,
};

static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions, int ext_number, bool create);

static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
						   struct xe_vm *vm,
						   u32 logical_mask,
						   u16 width, struct xe_hw_engine *hwe,
						   u32 flags, u64 extensions)
{
	struct xe_exec_queue *q;
	struct xe_gt *gt = hwe->gt;
	int err;

	/* only kernel queues can be permanent */
	XE_WARN_ON((flags & EXEC_QUEUE_FLAG_PERMANENT) && !(flags & EXEC_QUEUE_FLAG_KERNEL));

	q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL);
	if (!q)
		return ERR_PTR(-ENOMEM);

	kref_init(&q->refcount);
	q->flags = flags;
	q->hwe = hwe;
	q->gt = gt;
	q->class = hwe->class;
	q->width = width;
	q->logical_mask = logical_mask;
	q->fence_irq = &gt->fence_irq[hwe->class];
	q->ring_ops = gt->ring_ops[hwe->class];
	q->ops = gt->exec_queue_ops;
	INIT_LIST_HEAD(&q->compute.link);
	INIT_LIST_HEAD(&q->multi_gt_link);

	q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
	q->sched_props.preempt_timeout_us =
				hwe->eclass->sched_props.preempt_timeout_us;
	q->sched_props.job_timeout_ms =
				hwe->eclass->sched_props.job_timeout_ms;
	if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
	    q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
	else
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;

	if (extensions) {
		/*
		 * may set q->usm, must come before xe_lrc_init();
		 * may overwrite q->sched_props, must come before q->ops->init()
		 */
		err = exec_queue_user_extensions(xe, q, extensions, 0, true);
		if (err) {
			kfree(q);
			return ERR_PTR(err);
		}
	}

	if (vm)
		q->vm = xe_vm_get(vm);

	if (xe_exec_queue_is_parallel(q)) {
		q->parallel.composite_fence_ctx = dma_fence_context_alloc(1);
		q->parallel.composite_fence_seqno = XE_FENCE_INITIAL_SEQNO;
	}

	return q;
}

static void __xe_exec_queue_free(struct xe_exec_queue *q)
{
	if (q->vm)
		xe_vm_put(q->vm);
	kfree(q);
}

static int __xe_exec_queue_init(struct xe_exec_queue *q)
{
	struct xe_device *xe = gt_to_xe(q->gt);
	int i, err;

	for (i = 0; i < q->width; ++i) {
		err = xe_lrc_init(q->lrc + i, q->hwe, q, q->vm, SZ_16K);
		if (err)
			goto err_lrc;
	}

	err = q->ops->init(q);
	if (err)
		goto err_lrc;

	/*
	 * Normally the user vm holds an rpm ref to keep the device
	 * awake, and the context holds a ref for the vm, however for
	 * some engines we use the kernel's migrate vm underneath which offers no
	 * such rpm ref, or we lack a vm. Make sure we keep a ref here, so we
	 * can perform GuC CT actions when needed. Caller is expected to have
	 * already grabbed the rpm ref outside any sensitive locks.
	 */
	if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !q->vm))
		drm_WARN_ON(&xe->drm, !xe_device_mem_access_get_if_ongoing(xe));

	return 0;

err_lrc:
	for (i = i - 1; i >= 0; --i)
		xe_lrc_finish(q->lrc + i);
	return err;
}

struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
					   u32 logical_mask, u16 width,
					   struct xe_hw_engine *hwe, u32 flags,
					   u64 extensions)
{
	struct xe_exec_queue *q;
	int err;

	q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags,
				  extensions);
	if (IS_ERR(q))
		return q;

	if (vm) {
		err = xe_vm_lock(vm, true);
		if (err)
			goto err_post_alloc;
	}

	err = __xe_exec_queue_init(q);
	if (vm)
		xe_vm_unlock(vm);
	if (err)
		goto err_post_alloc;

	return q;

err_post_alloc:
	__xe_exec_queue_free(q);
	return ERR_PTR(err);
}

struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
						 struct xe_vm *vm,
						 enum xe_engine_class class, u32 flags)
{
	struct xe_hw_engine *hwe, *hwe0 = NULL;
	enum xe_hw_engine_id id;
	u32 logical_mask = 0;

	for_each_hw_engine(hwe, gt, id) {
		if (xe_hw_engine_is_reserved(hwe))
			continue;

		if (hwe->class == class) {
			logical_mask |= BIT(hwe->logical_instance);
			if (!hwe0)
				hwe0 = hwe;
		}
	}

	if (!logical_mask)
		return ERR_PTR(-ENODEV);

	return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, 0);
}

void xe_exec_queue_destroy(struct kref *ref)
{
	struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
	struct xe_exec_queue *eq, *next;

	xe_exec_queue_last_fence_put_unlocked(q);
	if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
		list_for_each_entry_safe(eq, next, &q->multi_gt_list,
					 multi_gt_link)
			xe_exec_queue_put(eq);
	}

	q->ops->fini(q);
}

void xe_exec_queue_fini(struct xe_exec_queue *q)
{
	int i;

	for (i = 0; i < q->width; ++i)
		xe_lrc_finish(q->lrc + i);
	if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !q->vm))
		xe_device_mem_access_put(gt_to_xe(q->gt));
	__xe_exec_queue_free(q);
}

void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance)
{
	switch (q->class) {
	case XE_ENGINE_CLASS_RENDER:
		sprintf(q->name, "rcs%d", instance);
		break;
	case XE_ENGINE_CLASS_VIDEO_DECODE:
		sprintf(q->name, "vcs%d", instance);
		break;
	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
		sprintf(q->name, "vecs%d", instance);
		break;
	case XE_ENGINE_CLASS_COPY:
		sprintf(q->name, "bcs%d", instance);
		break;
	case XE_ENGINE_CLASS_COMPUTE:
		sprintf(q->name, "ccs%d", instance);
		break;
	case XE_ENGINE_CLASS_OTHER:
		sprintf(q->name, "gsccs%d", instance);
		break;
	default:
		XE_WARN_ON(q->class);
	}
}

struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id)
{
	struct xe_exec_queue *q;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_load(&xef->exec_queue.xa, id);
	if (q)
		xe_exec_queue_get(q);
	mutex_unlock(&xef->exec_queue.lock);

	return q;
}

enum xe_exec_queue_priority
xe_exec_queue_device_get_max_priority(struct xe_device *xe)
{
	return capable(CAP_SYS_NICE) ? XE_EXEC_QUEUE_PRIORITY_HIGH :
				       XE_EXEC_QUEUE_PRIORITY_NORMAL;
}

static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
				   u64 value, bool create)
{
	if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
		return -EPERM;

	if (!create)
		return q->ops->set_priority(q, value);

	q->sched_props.priority = value;
	return 0;
}

static bool xe_exec_queue_enforce_schedule_limit(void)
{
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
	return true;
#else
	return !capable(CAP_SYS_NICE);
#endif
}

static void
xe_exec_queue_get_prop_minmax(struct xe_hw_engine_class_intf *eclass,
			      enum xe_exec_queue_sched_prop prop,
			      u32 *min, u32 *max)
{
	switch (prop) {
	case XE_EXEC_QUEUE_JOB_TIMEOUT:
		*min = eclass->sched_props.job_timeout_min;
		*max = eclass->sched_props.job_timeout_max;
		break;
	case XE_EXEC_QUEUE_TIMESLICE:
		*min = eclass->sched_props.timeslice_min;
		*max = eclass->sched_props.timeslice_max;
		break;
	case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
		*min = eclass->sched_props.preempt_timeout_min;
		*max = eclass->sched_props.preempt_timeout_max;
		break;
	default:
		break;
	}
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
	if (capable(CAP_SYS_NICE)) {
		switch (prop) {
		case XE_EXEC_QUEUE_JOB_TIMEOUT:
			*min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
			*max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
			break;
		case XE_EXEC_QUEUE_TIMESLICE:
			*min = XE_HW_ENGINE_TIMESLICE_MIN;
			*max = XE_HW_ENGINE_TIMESLICE_MAX;
			break;
		case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
			*min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
			*max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;
			break;
		default:
			break;
		}
	}
#endif
}

static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,
				    u64 value, bool create)
{
	u32 min = 0, max = 0;

	xe_exec_queue_get_prop_minmax(q->hwe->eclass,
				      XE_EXEC_QUEUE_TIMESLICE, &min, &max);

	if (xe_exec_queue_enforce_schedule_limit() &&
	    !xe_hw_engine_timeout_in_range(value, min, max))
		return -EINVAL;

	if (!create)
		return q->ops->set_timeslice(q, value);

	q->sched_props.timeslice_us = value;
	return 0;
}

typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
					     struct xe_exec_queue *q,
					     u64 value, bool create);

static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
};

static int exec_queue_user_ext_set_property(struct xe_device *xe,
					    struct xe_exec_queue *q,
					    u64 extension,
					    bool create)
{
	u64 __user *address = u64_to_user_ptr(extension);
	struct drm_xe_ext_set_property ext;
	int err;
	u32 idx;

	err = __copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.property >=
			 ARRAY_SIZE(exec_queue_set_property_funcs)) ||
	    XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE))
		return -EINVAL;

	idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
	if (!exec_queue_set_property_funcs[idx])
		return -EINVAL;

	return exec_queue_set_property_funcs[idx](xe, q, ext.value, create);
}

typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
					       struct xe_exec_queue *q,
					       u64 extension,
					       bool create);

static const xe_exec_queue_set_property_fn exec_queue_user_extension_funcs[] = {
	[DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
};

#define MAX_USER_EXTENSIONS	16
static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions, int ext_number, bool create)
{
	u64 __user *address = u64_to_user_ptr(extensions);
	struct drm_xe_user_extension ext;
	int err;
	u32 idx;

	if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
		return -E2BIG;

	err = __copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.name >=
			 ARRAY_SIZE(exec_queue_user_extension_funcs)))
		return -EINVAL;

	idx = array_index_nospec(ext.name,
				 ARRAY_SIZE(exec_queue_user_extension_funcs));
	err = exec_queue_user_extension_funcs[idx](xe, q, extensions, create);
	if (XE_IOCTL_DBG(xe, err))
		return err;

	if (ext.next_extension)
		return exec_queue_user_extensions(xe, q, ext.next_extension,
						  ++ext_number, create);

	return 0;
}

static const enum xe_engine_class user_to_xe_engine_class[] = {
	[DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
	[DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
	[DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
	[DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
	[DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
};

static struct xe_hw_engine *
find_hw_engine(struct xe_device *xe,
	       struct drm_xe_engine_class_instance eci)
{
	u32 idx;

	if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
		return NULL;

	if (eci.gt_id >= xe->info.gt_count)
		return NULL;

	idx = array_index_nospec(eci.engine_class,
				 ARRAY_SIZE(user_to_xe_engine_class));

	return xe_gt_hw_engine(xe_device_get_gt(xe, eci.gt_id),
			       user_to_xe_engine_class[idx],
			       eci.engine_instance, true);
}

static u32 bind_exec_queue_logical_mask(struct xe_device *xe, struct xe_gt *gt,
					struct drm_xe_engine_class_instance *eci,
					u16 width, u16 num_placements)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	u32 logical_mask = 0;

	if (XE_IOCTL_DBG(xe, width != 1))
		return 0;
	if (XE_IOCTL_DBG(xe, num_placements != 1))
		return 0;
	if (XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
		return 0;

	eci[0].engine_class = DRM_XE_ENGINE_CLASS_COPY;

	for_each_hw_engine(hwe, gt, id) {
		if (xe_hw_engine_is_reserved(hwe))
			continue;

		if (hwe->class ==
		    user_to_xe_engine_class[DRM_XE_ENGINE_CLASS_COPY])
			logical_mask |= BIT(hwe->logical_instance);
	}

	return logical_mask;
}

static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt,
				      struct drm_xe_engine_class_instance *eci,
				      u16 width, u16 num_placements)
{
	int len = width * num_placements;
	int i, j, n;
	u16 class;
	u16 gt_id;
	u32 return_mask = 0, prev_mask;

	if (XE_IOCTL_DBG(xe, !xe_device_uc_enabled(xe) &&
			 len > 1))
		return 0;

	for (i = 0; i < width; ++i) {
		u32 current_mask = 0;

		for (j = 0; j < num_placements; ++j) {
			struct xe_hw_engine *hwe;

			n = j * width + i;

			hwe = find_hw_engine(xe, eci[n]);
			if (XE_IOCTL_DBG(xe, !hwe))
				return 0;

			if (XE_IOCTL_DBG(xe, xe_hw_engine_is_reserved(hwe)))
				return 0;

			if (XE_IOCTL_DBG(xe, n && eci[n].gt_id != gt_id) ||
			    XE_IOCTL_DBG(xe, n && eci[n].engine_class != class))
				return 0;

			class = eci[n].engine_class;
			gt_id = eci[n].gt_id;

			if (width == 1 || !i)
				return_mask |= BIT(eci[n].engine_instance);
			current_mask |= BIT(eci[n].engine_instance);
		}

		/* Parallel submissions must be logically contiguous */
		if (i && XE_IOCTL_DBG(xe, current_mask != prev_mask << 1))
			return 0;

		prev_mask = current_mask;
	}

	return return_mask;
}

int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_create *args = data;
	struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE];
	struct drm_xe_engine_class_instance __user *user_eci =
		u64_to_user_ptr(args->instances);
	struct xe_hw_engine *hwe;
	struct xe_vm *vm, *migrate_vm;
	struct xe_gt *gt;
	struct xe_exec_queue *q = NULL;
	u32 logical_mask;
	u32 id;
	u32 len;
	int err;

	if (XE_IOCTL_DBG(xe, args->flags) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	len = args->width * args->num_placements;
	if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
		return -EINVAL;

	err = __copy_from_user(eci, user_eci,
			       sizeof(struct drm_xe_engine_class_instance) *
			       len);
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count))
		return -EINVAL;

	if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
		for_each_gt(gt, xe, id) {
			struct xe_exec_queue *new;
			u32 flags;

			if (xe_gt_is_media_type(gt))
				continue;

			eci[0].gt_id = gt->info.id;
			logical_mask = bind_exec_queue_logical_mask(xe, gt, eci,
								    args->width,
								    args->num_placements);
			if (XE_IOCTL_DBG(xe, !logical_mask))
				return -EINVAL;

			hwe = find_hw_engine(xe, eci[0]);
			if (XE_IOCTL_DBG(xe, !hwe))
				return -EINVAL;

			/* The migration vm doesn't hold rpm ref */
			xe_device_mem_access_get(xe);

			flags = EXEC_QUEUE_FLAG_VM | (id ? EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD : 0);

			migrate_vm = xe_migrate_get_vm(gt_to_tile(gt)->migrate);
			new = xe_exec_queue_create(xe, migrate_vm, logical_mask,
						   args->width, hwe, flags,
						   args->extensions);

			xe_device_mem_access_put(xe); /* now held by engine */

			xe_vm_put(migrate_vm);
			if (IS_ERR(new)) {
				err = PTR_ERR(new);
				if (q)
					goto put_exec_queue;
				return err;
			}
			if (id == 0)
				q = new;
			else
				list_add_tail(&new->multi_gt_list,
					      &q->multi_gt_link);
		}
	} else {
		gt = xe_device_get_gt(xe, eci[0].gt_id);
		logical_mask = calc_validate_logical_mask(xe, gt, eci,
							  args->width,
							  args->num_placements);
		if (XE_IOCTL_DBG(xe, !logical_mask))
			return -EINVAL;

		hwe = find_hw_engine(xe, eci[0]);
		if (XE_IOCTL_DBG(xe, !hwe))
			return -EINVAL;

		vm = xe_vm_lookup(xef, args->vm_id);
		if (XE_IOCTL_DBG(xe, !vm))
			return -ENOENT;

		err = down_read_interruptible(&vm->lock);
		if (err) {
			xe_vm_put(vm);
			return err;
		}

		if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
			up_read(&vm->lock);
			xe_vm_put(vm);
			return -ENOENT;
		}

		q = xe_exec_queue_create(xe, vm, logical_mask,
					 args->width, hwe, 0,
					 args->extensions);
		up_read(&vm->lock);
		xe_vm_put(vm);
		if (IS_ERR(q))
			return PTR_ERR(q);

		if (xe_vm_in_preempt_fence_mode(vm)) {
			q->compute.context = dma_fence_context_alloc(1);
			spin_lock_init(&q->compute.lock);

			err = xe_vm_add_compute_exec_queue(vm, q);
			if (XE_IOCTL_DBG(xe, err))
				goto put_exec_queue;
		}
	}

	mutex_lock(&xef->exec_queue.lock);
	err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
	mutex_unlock(&xef->exec_queue.lock);
	if (err)
		goto kill_exec_queue;

	args->exec_queue_id = id;

	return 0;

kill_exec_queue:
	xe_exec_queue_kill(q);
put_exec_queue:
	xe_exec_queue_put(q);
	return err;
}

int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_get_property *args = data;
	struct xe_exec_queue *q;
	int ret;

	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	switch (args->property) {
	case DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN:
		args->value = !!(q->flags & EXEC_QUEUE_FLAG_BANNED);
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	xe_exec_queue_put(q);

	return ret;
}

/**
 * xe_exec_queue_is_lr() - Whether an exec_queue is long-running
 * @q: The exec_queue
 *
 * Return: True if the exec_queue is long-running, false otherwise.
 */
bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
{
	return q->vm && xe_vm_in_lr_mode(q->vm) &&
		!(q->flags & EXEC_QUEUE_FLAG_VM);
}

static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q)
{
	return q->lrc->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc) - 1;
}

/**
 * xe_exec_queue_ring_full() - Whether an exec_queue's ring is full
 * @q: The exec_queue
 *
 * Return: True if the exec_queue's ring is full, false otherwise.
 */
bool xe_exec_queue_ring_full(struct xe_exec_queue *q)
{
	struct xe_lrc *lrc = q->lrc;
	s32 max_job = lrc->ring.size / MAX_JOB_SIZE_BYTES;

	return xe_exec_queue_num_job_inflight(q) >= max_job;
}

/**
 * xe_exec_queue_is_idle() - Whether an exec_queue is idle.
 * @q: The exec_queue
 *
 * FIXME: Need to determine what to use as the short-lived
 * timeline lock for the exec_queues, so that the return value
 * of this function becomes more than just an advisory
 * snapshot in time. The timeline lock must protect the
 * seqno from racing submissions on the same exec_queue.
 * Typically vm->resv, but user-created timeline locks use the migrate vm
 * and never grab the migrate vm->resv, so we have a race there.
 *
 * Return: True if the exec_queue is idle, false otherwise.
 */
bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
{
	if (xe_exec_queue_is_parallel(q)) {
		int i;

		for (i = 0; i < q->width; ++i) {
			if (xe_lrc_seqno(&q->lrc[i]) !=
			    q->lrc[i].fence_ctx.next_seqno - 1)
				return false;
		}

		return true;
	}

	return xe_lrc_seqno(&q->lrc[0]) ==
		q->lrc[0].fence_ctx.next_seqno - 1;
}

void xe_exec_queue_kill(struct xe_exec_queue *q)
{
	struct xe_exec_queue *eq = q, *next;

	list_for_each_entry_safe(eq, next, &eq->multi_gt_list,
				 multi_gt_link) {
		q->ops->kill(eq);
		xe_vm_remove_compute_exec_queue(q->vm, eq);
	}

	q->ops->kill(q);
	xe_vm_remove_compute_exec_queue(q->vm, q);
}

int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_destroy *args = data;
	struct xe_exec_queue *q;

	if (XE_IOCTL_DBG(xe, args->pad) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
	mutex_unlock(&xef->exec_queue.lock);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	xe_exec_queue_kill(q);

	trace_xe_exec_queue_close(q);
	xe_exec_queue_put(q);

	return 0;
}

static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
						    struct xe_vm *vm)
{
	if (q->flags & EXEC_QUEUE_FLAG_VM)
		lockdep_assert_held(&vm->lock);
	else
		xe_vm_assert_held(vm);
}

/**
 * xe_exec_queue_last_fence_put() - Drop ref to last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 */
void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	if (q->last_fence) {
		dma_fence_put(q->last_fence);
		q->last_fence = NULL;
	}
}

/**
 * xe_exec_queue_last_fence_put_unlocked() - Drop ref to last fence unlocked
 * @q: The exec queue
 *
 * Only safe to be called from xe_exec_queue_destroy().
 */
void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
{
	if (q->last_fence) {
		dma_fence_put(q->last_fence);
		q->last_fence = NULL;
	}
}

/**
 * xe_exec_queue_last_fence_get() - Get last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Get last fence, takes a ref
 *
 * Returns: last fence if not signaled, dma fence stub if signaled
 */
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
					       struct xe_vm *vm)
{
	struct dma_fence *fence;

	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	if (q->last_fence &&
	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
		xe_exec_queue_last_fence_put(q, vm);

	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
	dma_fence_get(fence);
	return fence;
}

/**
 * xe_exec_queue_last_fence_set() - Set last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 * @fence: The fence
 *
 * Set the last fence for the engine. Increases the reference count for @fence;
 * when closing the engine, xe_exec_queue_last_fence_put() should be called.
 */
void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
				  struct dma_fence *fence)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	xe_exec_queue_last_fence_put(q, vm);
	q->last_fence = dma_fence_get(fence);
}