Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

gpuvm: remove gem.gpuva.lock_dep_map

Since all users of gem.gpuva.lock_dep_map now rely on the mutex directly
in gpuva, we may remove it. Whether the mutex is used is now tracked by
a flag in gpuvm rather than by whether lock_dep_map is null.

Note that a GEM object may not be pushed to multiple gpuvms that
disagree on the value of this new flag. But that's okay because a single
driver should use the same locking scheme everywhere, and a GEM object
is driver specific (when a GEM is exported with prime, a new GEM object
instance is created from the backing dma-buf).

The flag is present even with CONFIG_LOCKDEP=n because the intent is
that the flag will also cause vm_bo cleanup to become deferred. However,
that will happen in a follow-up patch.

Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Signed-off-by: Alice Ryhl <aliceryhl@google.com>
Link: https://lore.kernel.org/r/20250827-gpuva-mutex-in-gem-v3-3-bd89f5a82c0d@google.com
[ Use lockdep_is_held() instead of lock_is_held(). - Danilo ]
Signed-off-by: Danilo Krummrich <dakr@kernel.org>

Authored by Alice Ryhl and committed by Danilo Krummrich
3c8d31b8 69013f52

+59 -48
+14 -16
drivers/gpu/drm/drm_gpuvm.c
··· 497 497 * DRM GPUVM also does not take care of the locking of the backing 498 498 * &drm_gem_object buffers GPU VA lists and &drm_gpuvm_bo abstractions by 499 499 * itself; drivers are responsible to enforce mutual exclusion using either the 500 - * GEMs dma_resv lock or alternatively a driver specific external lock. For the 501 - * latter see also drm_gem_gpuva_set_lock(). 500 + * GEMs dma_resv lock or the GEMs gpuva.lock mutex. 502 501 * 503 502 * However, DRM GPUVM contains lockdep checks to ensure callers of its API hold 504 503 * the corresponding lock whenever the &drm_gem_objects GPU VA list is accessed ··· 1581 1582 drm_gpuvm_bo_list_del(vm_bo, extobj, lock); 1582 1583 drm_gpuvm_bo_list_del(vm_bo, evict, lock); 1583 1584 1584 - drm_gem_gpuva_assert_lock_held(obj); 1585 + drm_gem_gpuva_assert_lock_held(gpuvm, obj); 1585 1586 list_del(&vm_bo->list.entry.gem); 1586 1587 1587 1588 if (ops && ops->vm_bo_free) ··· 1602 1603 * If the reference count drops to zero, the &gpuvm_bo is destroyed, which 1603 1604 * includes removing it from the GEMs gpuva list. Hence, if a call to this 1604 1605 * function can potentially let the reference count drop to zero the caller must 1605 - * hold the dma-resv or driver specific GEM gpuva lock. 1606 + * hold the lock that the GEM uses for its gpuva list (either the GEM's 1607 + * dma-resv or gpuva.lock mutex). 1606 1608 * 1607 1609 * This function may only be called from non-atomic context. 
1608 1610 * ··· 1627 1627 { 1628 1628 struct drm_gpuvm_bo *vm_bo; 1629 1629 1630 - drm_gem_gpuva_assert_lock_held(obj); 1630 + drm_gem_gpuva_assert_lock_held(gpuvm, obj); 1631 1631 drm_gem_for_each_gpuvm_bo(vm_bo, obj) 1632 1632 if (vm_bo->vm == gpuvm) 1633 1633 return vm_bo; ··· 1686 1686 if (!vm_bo) 1687 1687 return ERR_PTR(-ENOMEM); 1688 1688 1689 - drm_gem_gpuva_assert_lock_held(obj); 1689 + drm_gem_gpuva_assert_lock_held(gpuvm, obj); 1690 1690 list_add_tail(&vm_bo->list.entry.gem, &obj->gpuva.list); 1691 1691 1692 1692 return vm_bo; ··· 1722 1722 return vm_bo; 1723 1723 } 1724 1724 1725 - drm_gem_gpuva_assert_lock_held(obj); 1725 + drm_gem_gpuva_assert_lock_held(gpuvm, obj); 1726 1726 list_add_tail(&__vm_bo->list.entry.gem, &obj->gpuva.list); 1727 1727 1728 1728 return __vm_bo; ··· 1894 1894 * reference of the latter is taken. 1895 1895 * 1896 1896 * This function expects the caller to protect the GEM's GPUVA list against 1897 - * concurrent access using either the GEMs dma_resv lock or a driver specific 1898 - * lock set through drm_gem_gpuva_set_lock(). 1897 + * concurrent access using either the GEM's dma-resv or gpuva.lock mutex. 1899 1898 */ 1900 1899 void 1901 1900 drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo) ··· 1909 1910 1910 1911 va->vm_bo = drm_gpuvm_bo_get(vm_bo); 1911 1912 1912 - drm_gem_gpuva_assert_lock_held(obj); 1913 + drm_gem_gpuva_assert_lock_held(gpuvm, obj); 1913 1914 list_add_tail(&va->gem.entry, &vm_bo->list.gpuva); 1914 1915 } 1915 1916 EXPORT_SYMBOL_GPL(drm_gpuva_link); ··· 1929 1930 * the latter is dropped. 1930 1931 * 1931 1932 * This function expects the caller to protect the GEM's GPUVA list against 1932 - * concurrent access using either the GEMs dma_resv lock or a driver specific 1933 - * lock set through drm_gem_gpuva_set_lock(). 1933 + * concurrent access using either the GEM's dma-resv or gpuva.lock mutex. 
1934 1934 */ 1935 1935 void 1936 1936 drm_gpuva_unlink(struct drm_gpuva *va) ··· 1940 1942 if (unlikely(!obj)) 1941 1943 return; 1942 1944 1943 - drm_gem_gpuva_assert_lock_held(obj); 1945 + drm_gem_gpuva_assert_lock_held(va->vm, obj); 1944 1946 list_del_init(&va->gem.entry); 1945 1947 1946 1948 va->vm_bo = NULL; ··· 2941 2943 * After the caller finished processing the returned &drm_gpuva_ops, they must 2942 2944 * be freed with &drm_gpuva_ops_free. 2943 2945 * 2944 - * It is the callers responsibility to protect the GEMs GPUVA list against 2945 - * concurrent access using the GEMs dma_resv lock. 2946 + * This function expects the caller to protect the GEM's GPUVA list against 2947 + * concurrent access using either the GEM's dma-resv or gpuva.lock mutex. 2946 2948 * 2947 2949 * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure 2948 2950 */ ··· 2954 2956 struct drm_gpuva *va; 2955 2957 int ret; 2956 2958 2957 - drm_gem_gpuva_assert_lock_held(vm_bo->obj); 2959 + drm_gem_gpuva_assert_lock_held(vm_bo->vm, vm_bo->obj); 2958 2960 2959 2961 ops = kzalloc(sizeof(*ops), GFP_KERNEL); 2960 2962 if (!ops)
-1
drivers/gpu/drm/panthor/panthor_gem.c
··· 245 245 246 246 obj->base.base.funcs = &panthor_gem_funcs; 247 247 obj->base.map_wc = !ptdev->coherent; 248 - drm_gem_gpuva_set_lock(&obj->base.base, &obj->base.base.gpuva.lock); 249 248 mutex_init(&obj->label.lock); 250 249 251 250 panthor_gem_debugfs_bo_init(obj);
+3 -2
drivers/gpu/drm/panthor/panthor_mmu.c
··· 2420 2420 * to be handled the same way user VMAs are. 2421 2421 */ 2422 2422 drm_gpuvm_init(&vm->base, for_mcu ? "panthor-MCU-VM" : "panthor-GPU-VM", 2423 - DRM_GPUVM_RESV_PROTECTED, &ptdev->base, dummy_gem, 2424 - min_va, va_range, 0, 0, &panthor_gpuvm_ops); 2423 + DRM_GPUVM_RESV_PROTECTED | DRM_GPUVM_IMMEDIATE_MODE, 2424 + &ptdev->base, dummy_gem, min_va, va_range, 0, 0, 2425 + &panthor_gpuvm_ops); 2425 2426 drm_gem_object_put(dummy_gem); 2426 2427 return vm; 2427 2428
+15 -26
include/drm/drm_gem.h
··· 399 399 400 400 /** 401 401 * @gpuva: Fields used by GPUVM to manage mappings pointing to this GEM object. 402 + * 403 + * When DRM_GPUVM_IMMEDIATE_MODE is set, this list is protected by the 404 + * mutex. Otherwise, the list is protected by the GEMs &dma_resv lock. 405 + * 406 + * Note that all entries in this list must agree on whether 407 + * DRM_GPUVM_IMMEDIATE_MODE is set. 402 408 */ 403 409 struct { 404 410 /** ··· 418 412 419 413 /** 420 414 * @gpuva.lock: lock protecting access to &drm_gem_object.gpuva.list 421 - * when the resv lock can't be used. 415 + * when DRM_GPUVM_IMMEDIATE_MODE is used. 422 416 * 423 - * Should only be used when the VM is being modified in a fence 424 - * signalling path, otherwise you should use &drm_gem_object.resv to 425 - * protect accesses to &drm_gem_object.gpuva.list. 417 + * Only used when DRM_GPUVM_IMMEDIATE_MODE is set. It should be 418 + * safe to take this mutex during the fence signalling path, so 419 + * do not allocate memory while holding this lock. Otherwise, 420 + * the &dma_resv lock should be used. 426 421 */ 427 422 struct mutex lock; 428 - 429 - #ifdef CONFIG_LOCKDEP 430 - struct lockdep_map *lock_dep_map; 431 - #endif 432 423 } gpuva; 433 424 434 425 /** ··· 610 607 } 611 608 612 609 #ifdef CONFIG_LOCKDEP 613 - /** 614 - * drm_gem_gpuva_set_lock() - Set the lock protecting accesses to the gpuva list. 615 - * @obj: the &drm_gem_object 616 - * @lock: the lock used to protect the gpuva list. The locking primitive 617 - * must contain a dep_map field. 618 - * 619 - * Call this if you're not proctecting access to the gpuva list with the 620 - * dma-resv lock, but with a custom lock. 621 - */ 622 - #define drm_gem_gpuva_set_lock(obj, lock) \ 623 - if (!WARN((obj)->gpuva.lock_dep_map, \ 624 - "GEM GPUVA lock should be set only once.")) \ 625 - (obj)->gpuva.lock_dep_map = &(lock)->dep_map 626 - #define drm_gem_gpuva_assert_lock_held(obj) \ 627 - lockdep_assert((obj)->gpuva.lock_dep_map ? 
\ 628 - lock_is_held((obj)->gpuva.lock_dep_map) : \ 610 + #define drm_gem_gpuva_assert_lock_held(gpuvm, obj) \ 611 + lockdep_assert(drm_gpuvm_immediate_mode(gpuvm) ? \ 612 + lockdep_is_held(&(obj)->gpuva.lock) : \ 629 613 dma_resv_held((obj)->resv)) 630 614 #else 631 - #define drm_gem_gpuva_set_lock(obj, lock) do {} while (0) 632 - #define drm_gem_gpuva_assert_lock_held(obj) do {} while (0) 615 + #define drm_gem_gpuva_assert_lock_held(gpuvm, obj) do {} while (0) 633 616 #endif 634 617 635 618 /**
+27 -3
include/drm/drm_gpuvm.h
··· 197 197 DRM_GPUVM_RESV_PROTECTED = BIT(0), 198 198 199 199 /** 200 + * @DRM_GPUVM_IMMEDIATE_MODE: use the locking scheme for GEMs designed 201 + * for modifying the GPUVM during the fence signalling path 202 + * 203 + * When set, gpuva.lock is used to protect gpuva.list in all GEM 204 + * objects associated with this GPUVM. Otherwise, the GEMs dma-resv is 205 + * used. 206 + */ 207 + DRM_GPUVM_IMMEDIATE_MODE = BIT(1), 208 + 209 + /** 200 210 * @DRM_GPUVM_USERBITS: user defined bits 201 211 */ 202 - DRM_GPUVM_USERBITS = BIT(1), 212 + DRM_GPUVM_USERBITS = BIT(2), 203 213 }; 204 214 205 215 /** ··· 377 367 drm_gpuvm_resv_protected(struct drm_gpuvm *gpuvm) 378 368 { 379 369 return gpuvm->flags & DRM_GPUVM_RESV_PROTECTED; 370 + } 371 + 372 + /** 373 + * drm_gpuvm_immediate_mode() - indicates whether &DRM_GPUVM_IMMEDIATE_MODE is 374 + * set 375 + * @gpuvm: the &drm_gpuvm 376 + * 377 + * Returns: true if &DRM_GPUVM_IMMEDIATE_MODE is set, false otherwise. 378 + */ 379 + static inline bool 380 + drm_gpuvm_immediate_mode(struct drm_gpuvm *gpuvm) 381 + { 382 + return gpuvm->flags & DRM_GPUVM_IMMEDIATE_MODE; 380 383 } 381 384 382 385 /** ··· 765 742 { 766 743 struct drm_gpuvm_bo *vm_bo; 767 744 768 - drm_gem_gpuva_assert_lock_held(obj); 769 - drm_gem_for_each_gpuvm_bo(vm_bo, obj) 745 + drm_gem_for_each_gpuvm_bo(vm_bo, obj) { 746 + drm_gem_gpuva_assert_lock_held(vm_bo->vm, obj); 770 747 drm_gpuvm_bo_evict(vm_bo, evict); 748 + } 771 749 } 772 750 773 751 void drm_gpuvm_bo_extobj_add(struct drm_gpuvm_bo *vm_bo);