
drm/xe/pt: unify xe_pt_svm_pre_commit with userptr

We now use the same notifier lock for SVM and userptr; with that in
place we can combine xe_pt_userptr_pre_commit and xe_pt_svm_pre_commit
into a single pre-commit hook.
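[Editor's note: the shared lock referred to here is the gpusvm notifier
lock. The hunks below substitute the xe_svm_notifier_lock/unlock
wrappers for the open-coded down_read()/up_read() on
vm->svm.gpusvm.notifier_lock, which is what makes the two pre-commit
paths interchangeable. A minimal sketch of that equivalence, assuming
the wrappers are thin inlines over the same rwsem (the exact in-tree
definitions may differ):

/*
 * Sketch only: models xe_svm_notifier_lock/unlock as thin wrappers
 * over the rwsem the userptr path used to take directly. The diff
 * below replaces the open-coded calls with these wrappers.
 */
static inline void xe_svm_notifier_lock(struct xe_vm *vm)
{
        down_read(&vm->svm.gpusvm.notifier_lock);
}

static inline void xe_svm_notifier_unlock(struct xe_vm *vm)
{
        up_read(&vm->svm.gpusvm.notifier_lock);
}]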

v2: (Matt B)
- Re-use xe_svm_notifier_lock/unlock for userptr.
- Combine svm/userptr handling further down into op_check_svm_userptr.
v3:
- Only hide the ops if we lack DRM_GPUSVM, since we also need them for
  userptr (a condensed sketch of the resulting guard follows below).
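
[Editor's note: the v3 point is the usual compile-guard pattern,
condensed here from the xe_pt.c hunk further down. The combined ops
table must exist whenever CONFIG_DRM_GPUSVM is available, since userptr
now depends on it too; without it the table degenerates to an empty
placeholder so the ops selection still compiles (and is never
selected in that configuration):

/* Condensed from the diff below: one table for SVM and userptr. */
#if IS_ENABLED(CONFIG_DRM_GPUSVM)
static const struct xe_migrate_pt_update_ops svm_userptr_migrate_ops = {
        .populate = xe_vm_populate_pgtable,
        .clear = xe_migrate_clear_pgtable_callback,
        .pre_commit = xe_pt_svm_userptr_pre_commit,
};
#else
/* Empty stub: never selected, but keeps references compiling. */
static const struct xe_migrate_pt_update_ops svm_userptr_migrate_ops;
#endif]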

Suggested-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20250828142430.615826-18-matthew.auld@intel.com

2 files changed, 47 insertions(+), 78 deletions(-)

drivers/gpu/drm/xe/xe_pt.c (+47 -76)
···
 					pt_update_ops, rftree);
 }
 
+#if IS_ENABLED(CONFIG_DRM_GPUSVM)
 #ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT
 
 static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
···
 	return 0;
 }
 
-static int op_check_userptr(struct xe_vm *vm, struct xe_vma_op *op,
-			    struct xe_vm_pgtable_update_ops *pt_update)
+static int op_check_svm_userptr(struct xe_vm *vm, struct xe_vma_op *op,
+				struct xe_vm_pgtable_update_ops *pt_update)
 {
 	int err = 0;
 
···
 	case DRM_GPUVA_OP_UNMAP:
 		break;
 	case DRM_GPUVA_OP_PREFETCH:
-		err = vma_check_userptr(vm, gpuva_to_vma(op->base.prefetch.va),
-					pt_update);
+		if (xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.prefetch.va))) {
+			struct xe_svm_range *range = op->map_range.range;
+			unsigned long i;
+
+			xe_assert(vm->xe,
+				  xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.prefetch.va)));
+			xa_for_each(&op->prefetch_range.range, i, range) {
+				xe_svm_range_debug(range, "PRE-COMMIT");
+
+				if (!xe_svm_range_pages_valid(range)) {
+					xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
+					return -ENODATA;
+				}
+			}
+		} else {
+			err = vma_check_userptr(vm, gpuva_to_vma(op->base.prefetch.va), pt_update);
+		}
 		break;
+#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
+	case DRM_GPUVA_OP_DRIVER:
+		if (op->subop == XE_VMA_SUBOP_MAP_RANGE) {
+			struct xe_svm_range *range = op->map_range.range;
+
+			xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));
+
+			xe_svm_range_debug(range, "PRE-COMMIT");
+
+			if (!xe_svm_range_pages_valid(range)) {
+				xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
+				return -EAGAIN;
+			}
+		}
+		break;
+#endif
 	default:
 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
 	}
···
 	return err;
 }
 
-static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
+static int xe_pt_svm_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
 {
 	struct xe_vm *vm = pt_update->vops->vm;
 	struct xe_vma_ops *vops = pt_update->vops;
···
 	if (err)
 		return err;
 
-	down_read(&vm->svm.gpusvm.notifier_lock);
+	xe_svm_notifier_lock(vm);
 
 	list_for_each_entry(op, &vops->list, link) {
-		err = op_check_userptr(vm, op, pt_update_ops);
+		err = op_check_svm_userptr(vm, op, pt_update_ops);
 		if (err) {
-			up_read(&vm->svm.gpusvm.notifier_lock);
+			xe_svm_notifier_unlock(vm);
 			break;
 		}
 	}
 
 	return err;
-}
-
-#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
-static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
-{
-	struct xe_vm *vm = pt_update->vops->vm;
-	struct xe_vma_ops *vops = pt_update->vops;
-	struct xe_vma_op *op;
-	unsigned long i;
-	int err;
-
-	err = xe_pt_pre_commit(pt_update);
-	if (err)
-		return err;
-
-	xe_svm_notifier_lock(vm);
-
-	list_for_each_entry(op, &vops->list, link) {
-		struct xe_svm_range *range = NULL;
-
-		if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
-			continue;
-
-		if (op->base.op == DRM_GPUVA_OP_PREFETCH) {
-			xe_assert(vm->xe,
-				  xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.prefetch.va)));
-			xa_for_each(&op->prefetch_range.range, i, range) {
-				xe_svm_range_debug(range, "PRE-COMMIT");
-
-				if (!xe_svm_range_pages_valid(range)) {
-					xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
-					xe_svm_notifier_unlock(vm);
-					return -ENODATA;
-				}
-			}
-		} else {
-			xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));
-			xe_assert(vm->xe, op->subop == XE_VMA_SUBOP_MAP_RANGE);
-			range = op->map_range.range;
-
-			xe_svm_range_debug(range, "PRE-COMMIT");
-
-			if (!xe_svm_range_pages_valid(range)) {
-				xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
-				xe_svm_notifier_unlock(vm);
-				return -EAGAIN;
-			}
-		}
-	}
-
-	return 0;
 }
 #endif
···
 					   xe_vma_start(vma),
 					   xe_vma_end(vma));
 	++pt_update_ops->current_op;
-	pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
+	pt_update_ops->needs_svm_lock |= xe_vma_is_userptr(vma);
 
 	/*
 	 * If rebind, we have to invalidate TLB on !LR vms to invalidate
···
 	xe_pt_update_ops_rfence_interval(pt_update_ops, xe_vma_start(vma),
 					 xe_vma_end(vma));
 	++pt_update_ops->current_op;
-	pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
+	pt_update_ops->needs_svm_lock |= xe_vma_is_userptr(vma);
 	pt_update_ops->needs_invalidation = true;
 
 	xe_pt_commit_prepare_unbind(vma, pt_op->entries, pt_op->num_entries);
···
 	.pre_commit = xe_pt_pre_commit,
 };
 
-static const struct xe_migrate_pt_update_ops userptr_migrate_ops = {
+#if IS_ENABLED(CONFIG_DRM_GPUSVM)
+static const struct xe_migrate_pt_update_ops svm_userptr_migrate_ops = {
 	.populate = xe_vm_populate_pgtable,
 	.clear = xe_migrate_clear_pgtable_callback,
-	.pre_commit = xe_pt_userptr_pre_commit,
-};
-
-#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
-static const struct xe_migrate_pt_update_ops svm_migrate_ops = {
-	.populate = xe_vm_populate_pgtable,
-	.clear = xe_migrate_clear_pgtable_callback,
-	.pre_commit = xe_pt_svm_pre_commit,
+	.pre_commit = xe_pt_svm_userptr_pre_commit,
 };
 #else
-static const struct xe_migrate_pt_update_ops svm_migrate_ops;
+static const struct xe_migrate_pt_update_ops svm_userptr_migrate_ops;
 #endif
 
 static struct xe_dep_scheduler *to_dep_scheduler(struct xe_exec_queue *q,
···
 	int err = 0, i;
 	struct xe_migrate_pt_update update = {
 		.ops = pt_update_ops->needs_svm_lock ?
-			&svm_migrate_ops :
-			pt_update_ops->needs_userptr_lock ?
-			&userptr_migrate_ops :
+			&svm_userptr_migrate_ops :
 			&migrate_ops,
 		.vops = vops,
 		.tile_id = tile->id,
···
 
 	if (pt_update_ops->needs_svm_lock)
 		xe_svm_notifier_unlock(vm);
-	if (pt_update_ops->needs_userptr_lock)
-		up_read(&vm->svm.gpusvm.notifier_lock);
 
 	xe_tlb_inval_job_put(mjob);
 	xe_tlb_inval_job_put(ijob);
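
[Editor's note: a related simplification visible in the hunks above is
that there is now a single selector flag. Userptr binds piggyback on
needs_svm_lock, and that one flag alone picks between the combined
table and the plain one. Condensed from the hunks above, surrounding
code omitted:

/* Bind/unbind prepare: userptr now sets the SVM flag. */
pt_update_ops->needs_svm_lock |= xe_vma_is_userptr(vma);

/* Ops selection: a two-way choice instead of the old three-way. */
update.ops = pt_update_ops->needs_svm_lock ?
	&svm_userptr_migrate_ops :	/* SVM and/or userptr present */
	&migrate_ops;			/* plain BO binds only */

This is also why needs_userptr_lock can be dropped from
xe_vm_pgtable_update_ops in the xe_pt_types.h hunk below.]
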
drivers/gpu/drm/xe/xe_pt_types.h (+0 -2)
···
 	u32 current_op;
 	/** @needs_svm_lock: Needs SVM lock */
 	bool needs_svm_lock;
-	/** @needs_userptr_lock: Needs userptr lock */
-	bool needs_userptr_lock;
 	/** @needs_invalidation: Needs invalidation */
 	bool needs_invalidation;
 	/**