Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

panthor: use drm_gem_object.gpuva.lock instead of gpuva_list_lock

Now that drm_gem_object has a dedicated mutex for the gpuva list that is
intended to be used in cases that must be fence signalling safe, use it
in Panthor.

Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Signed-off-by: Alice Ryhl <aliceryhl@google.com>
Link: https://lore.kernel.org/r/20250827-gpuva-mutex-in-gem-v3-2-bd89f5a82c0d@google.com
Signed-off-by: Danilo Krummrich <dakr@kernel.org>

Authored by Alice Ryhl; committed by Danilo Krummrich.
69013f52 e7fa80e2

+9 -23
+1 -3
drivers/gpu/drm/panthor/panthor_gem.c
··· 74 74 mutex_destroy(&bo->label.lock); 75 75 76 76 drm_gem_free_mmap_offset(&bo->base.base); 77 - mutex_destroy(&bo->gpuva_list_lock); 78 77 drm_gem_shmem_free(&bo->base); 79 78 drm_gem_object_put(vm_root_gem); 80 79 } ··· 245 246 246 247 obj->base.base.funcs = &panthor_gem_funcs; 247 248 obj->base.map_wc = !ptdev->coherent; 248 - mutex_init(&obj->gpuva_list_lock); 249 - drm_gem_gpuva_set_lock(&obj->base.base, &obj->gpuva_list_lock); 249 + drm_gem_gpuva_set_lock(&obj->base.base, &obj->base.base.gpuva.lock); 250 250 mutex_init(&obj->label.lock); 251 251 252 252 panthor_gem_debugfs_bo_init(obj);
-12
drivers/gpu/drm/panthor/panthor_gem.h
··· 79 79 */ 80 80 struct drm_gem_object *exclusive_vm_root_gem; 81 81 82 - /** 83 - * @gpuva_list_lock: Custom GPUVA lock. 84 - * 85 - * Used to protect insertion of drm_gpuva elements to the 86 - * drm_gem_object.gpuva.list list. 87 - * 88 - * We can't use the GEM resv for that, because drm_gpuva_link() is 89 - * called in a dma-signaling path, where we're not allowed to take 90 - * resv locks. 91 - */ 92 - struct mutex gpuva_list_lock; 93 - 94 82 /** @flags: Combination of drm_panthor_bo_flags flags. */ 95 83 u32 flags; 96 84
+8 -8
drivers/gpu/drm/panthor/panthor_mmu.c
··· 1107 1107 * GEM vm_bo list. 1108 1108 */ 1109 1109 dma_resv_lock(drm_gpuvm_resv(vm), NULL); 1110 - mutex_lock(&bo->gpuva_list_lock); 1110 + mutex_lock(&bo->base.base.gpuva.lock); 1111 1111 unpin = drm_gpuvm_bo_put(vm_bo); 1112 - mutex_unlock(&bo->gpuva_list_lock); 1112 + mutex_unlock(&bo->base.base.gpuva.lock); 1113 1113 dma_resv_unlock(drm_gpuvm_resv(vm)); 1114 1114 1115 1115 /* If the vm_bo object was destroyed, release the pin reference that ··· 1282 1282 * calling this function. 1283 1283 */ 1284 1284 dma_resv_lock(panthor_vm_resv(vm), NULL); 1285 - mutex_lock(&bo->gpuva_list_lock); 1285 + mutex_lock(&bo->base.base.gpuva.lock); 1286 1286 op_ctx->map.vm_bo = drm_gpuvm_bo_obtain_prealloc(preallocated_vm_bo); 1287 - mutex_unlock(&bo->gpuva_list_lock); 1287 + mutex_unlock(&bo->base.base.gpuva.lock); 1288 1288 dma_resv_unlock(panthor_vm_resv(vm)); 1289 1289 1290 1290 /* If the a vm_bo for this <VM,BO> combination exists, it already ··· 2036 2036 { 2037 2037 struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj); 2038 2038 2039 - mutex_lock(&bo->gpuva_list_lock); 2039 + mutex_lock(&bo->base.base.gpuva.lock); 2040 2040 drm_gpuva_link(&vma->base, vm_bo); 2041 2041 drm_WARN_ON(&vm->ptdev->base, drm_gpuvm_bo_put(vm_bo)); 2042 - mutex_unlock(&bo->gpuva_list_lock); 2042 + mutex_unlock(&bo->base.base.gpuva.lock); 2043 2043 } 2044 2044 2045 2045 static void panthor_vma_unlink(struct panthor_vm *vm, ··· 2048 2048 struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj); 2049 2049 struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_get(vma->base.vm_bo); 2050 2050 2051 - mutex_lock(&bo->gpuva_list_lock); 2051 + mutex_lock(&bo->base.base.gpuva.lock); 2052 2052 drm_gpuva_unlink(&vma->base); 2053 - mutex_unlock(&bo->gpuva_list_lock); 2053 + mutex_unlock(&bo->base.base.gpuva.lock); 2054 2054 2055 2055 /* drm_gpuva_unlink() release the vm_bo, but we manually retained it 2056 2056 * when entering this function, so we can implement deferred VMA