Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/gem: Acquire reservation lock in drm_gem_{pin/unpin}()

Acquire the buffer object's reservation lock in drm_gem_pin() and
remove the locking from the drivers' GEM callbacks where necessary. Same for
unpin().

DRM drivers and memory managers modified by this patch will now have
correct dma-buf locking semantics: the caller is responsible for
holding the reservation lock when calling the pin or unpin callback.

DRM drivers and memory managers that are not modified will now be
protected against concurrent invocation of their pin and unpin callbacks.

PRIME does not implement struct dma_buf_ops.pin, which requires
the caller to hold the reservation lock. It does implement struct
dma_buf_ops.attach, which requires the callee to acquire the
reservation lock. The PRIME code uses drm_gem_pin(), so locks
are now taken as specified. Same for unpin and detach.

The patch harmonizes GEM pin and unpin to have non-interruptible
reservation locking across all drivers, as is already the case for
vmap and vunmap. This affects gem-shmem, gem-vram, loongson, qxl and
radeon.

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Zack Rusin <zack.rusin@broadcom.com>
Reviewed-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com> # virtio-gpu
Link: https://patchwork.freedesktop.org/patch/msgid/20240227113853.8464-10-tzimmermann@suse.de

+35 -97
+20 -2
drivers/gpu/drm/drm_gem.c
··· 1161 1161 obj->funcs->print_info(p, indent, obj); 1162 1162 } 1163 1163 1164 - int drm_gem_pin(struct drm_gem_object *obj) 1164 + int drm_gem_pin_locked(struct drm_gem_object *obj) 1165 1165 { 1166 1166 if (obj->funcs->pin) 1167 1167 return obj->funcs->pin(obj); ··· 1169 1169 return 0; 1170 1170 } 1171 1171 1172 - void drm_gem_unpin(struct drm_gem_object *obj) 1172 + void drm_gem_unpin_locked(struct drm_gem_object *obj) 1173 1173 { 1174 1174 if (obj->funcs->unpin) 1175 1175 obj->funcs->unpin(obj); 1176 + } 1177 + 1178 + int drm_gem_pin(struct drm_gem_object *obj) 1179 + { 1180 + int ret; 1181 + 1182 + dma_resv_lock(obj->resv, NULL); 1183 + ret = drm_gem_pin_locked(obj); 1184 + dma_resv_unlock(obj->resv); 1185 + 1186 + return ret; 1187 + } 1188 + 1189 + void drm_gem_unpin(struct drm_gem_object *obj) 1190 + { 1191 + dma_resv_lock(obj->resv, NULL); 1192 + drm_gem_unpin_locked(obj); 1193 + dma_resv_unlock(obj->resv); 1176 1194 } 1177 1195 1178 1196 int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
+1 -14
drivers/gpu/drm/drm_gem_vram_helper.c
··· 774 774 static int drm_gem_vram_object_pin(struct drm_gem_object *gem) 775 775 { 776 776 struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem); 777 - int ret; 778 - 779 - ret = ttm_bo_reserve(&gbo->bo, true, false, NULL); 780 - if (ret) 781 - return ret; 782 777 783 778 /* 784 779 * Fbdev console emulation is the use case of these PRIME ··· 784 789 * the buffer to be pinned to VRAM, implement a callback that 785 790 * sets the flags accordingly. 786 791 */ 787 - ret = drm_gem_vram_pin_locked(gbo, 0); 788 - ttm_bo_unreserve(&gbo->bo); 789 - 790 - return ret; 792 + return drm_gem_vram_pin_locked(gbo, 0); 791 793 } 792 794 793 795 /** ··· 795 803 static void drm_gem_vram_object_unpin(struct drm_gem_object *gem) 796 804 { 797 805 struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem); 798 - int ret; 799 806 800 - ret = ttm_bo_reserve(&gbo->bo, true, false, NULL); 801 - if (ret) 802 - return; 803 807 drm_gem_vram_unpin_locked(gbo); 804 - ttm_bo_unreserve(&gbo->bo); 805 808 } 806 809 807 810 /**
+2
drivers/gpu/drm/drm_internal.h
··· 173 173 void drm_gem_print_info(struct drm_printer *p, unsigned int indent, 174 174 const struct drm_gem_object *obj); 175 175 176 + int drm_gem_pin_locked(struct drm_gem_object *obj); 177 + void drm_gem_unpin_locked(struct drm_gem_object *obj); 176 178 int drm_gem_pin(struct drm_gem_object *obj); 177 179 void drm_gem_unpin(struct drm_gem_object *obj); 178 180 int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map);
+2 -11
drivers/gpu/drm/loongson/lsdc_gem.c
··· 19 19 struct lsdc_bo *lbo = gem_to_lsdc_bo(obj); 20 20 int ret; 21 21 22 - ret = lsdc_bo_reserve(lbo); 23 - if (unlikely(ret)) 24 - return ret; 22 + dma_resv_assert_held(obj->resv); 25 23 26 24 ret = lsdc_bo_pin(lbo, LSDC_GEM_DOMAIN_GTT, NULL); 27 25 if (likely(ret == 0)) 28 26 lbo->sharing_count++; 29 - 30 - lsdc_bo_unreserve(lbo); 31 27 32 28 return ret; 33 29 } ··· 31 35 static void lsdc_gem_prime_unpin(struct drm_gem_object *obj) 32 36 { 33 37 struct lsdc_bo *lbo = gem_to_lsdc_bo(obj); 34 - int ret; 35 38 36 - ret = lsdc_bo_reserve(lbo); 37 - if (unlikely(ret)) 38 - return; 39 + dma_resv_assert_held(obj->resv); 39 40 40 41 lsdc_bo_unpin(lbo); 41 42 if (lbo->sharing_count) 42 43 lbo->sharing_count--; 43 - 44 - lsdc_bo_unreserve(lbo); 45 44 } 46 45 47 46 static struct sg_table *lsdc_gem_prime_get_sg_table(struct drm_gem_object *obj)
-4
drivers/gpu/drm/msm/msm_gem_prime.c
··· 53 53 if (obj->import_attach) 54 54 return 0; 55 55 56 - msm_gem_lock(obj); 57 56 pages = msm_gem_pin_pages_locked(obj); 58 57 if (IS_ERR(pages)) 59 58 ret = PTR_ERR(pages); 60 - msm_gem_unlock(obj); 61 59 62 60 return ret; 63 61 } ··· 65 67 if (obj->import_attach) 66 68 return; 67 69 68 - msm_gem_lock(obj); 69 70 msm_gem_unpin_pages_locked(obj); 70 - msm_gem_unlock(obj); 71 71 }
-11
drivers/gpu/drm/nouveau/nouveau_prime.c
··· 86 86 int nouveau_gem_prime_pin(struct drm_gem_object *obj) 87 87 { 88 88 struct nouveau_bo *nvbo = nouveau_gem_object(obj); 89 - struct ttm_buffer_object *bo = &nvbo->bo; 90 89 int ret; 91 90 92 - ret = ttm_bo_reserve(bo, false, false, NULL); 93 - if (ret) 94 - return -EINVAL; 95 91 /* pin buffer into GTT */ 96 92 ret = nouveau_bo_pin_locked(nvbo, NOUVEAU_GEM_DOMAIN_GART, false); 97 93 if (ret) 98 94 ret = -EINVAL; 99 - ttm_bo_unreserve(bo); 100 95 101 96 return ret; 102 97 } ··· 99 104 void nouveau_gem_prime_unpin(struct drm_gem_object *obj) 100 105 { 101 106 struct nouveau_bo *nvbo = nouveau_gem_object(obj); 102 - struct ttm_buffer_object *bo = &nvbo->bo; 103 - int ret; 104 107 105 - ret = ttm_bo_reserve(bo, false, false, NULL); 106 - if (ret) 107 - return; 108 108 nouveau_bo_unpin_locked(nvbo); 109 - ttm_bo_unreserve(bo); 110 109 } 111 110 112 111 struct dma_buf *nouveau_gem_prime_export(struct drm_gem_object *gobj,
+1 -13
drivers/gpu/drm/qxl/qxl_prime.c
··· 31 31 int qxl_gem_prime_pin(struct drm_gem_object *obj) 32 32 { 33 33 struct qxl_bo *bo = gem_to_qxl_bo(obj); 34 - int r; 35 34 36 - r = qxl_bo_reserve(bo); 37 - if (r) 38 - return r; 39 - r = qxl_bo_pin_locked(bo); 40 - qxl_bo_unreserve(bo); 41 - 42 - return r; 35 + return qxl_bo_pin_locked(bo); 43 36 } 44 37 45 38 void qxl_gem_prime_unpin(struct drm_gem_object *obj) 46 39 { 47 40 struct qxl_bo *bo = gem_to_qxl_bo(obj); 48 - int r; 49 41 50 - r = qxl_bo_reserve(bo); 51 - if (r) 52 - return; 53 42 qxl_bo_unpin_locked(bo); 54 - qxl_bo_unreserve(bo); 55 43 } 56 44 57 45 struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj)
-11
drivers/gpu/drm/radeon/radeon_prime.c
··· 73 73 struct radeon_bo *bo = gem_to_radeon_bo(obj); 74 74 int ret = 0; 75 75 76 - ret = radeon_bo_reserve(bo, false); 77 - if (unlikely(ret != 0)) 78 - return ret; 79 - 80 76 /* pin buffer into GTT */ 81 77 ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL); 82 78 if (likely(ret == 0)) 83 79 bo->prime_shared_count++; 84 80 85 - radeon_bo_unreserve(bo); 86 81 return ret; 87 82 } 88 83 89 84 void radeon_gem_prime_unpin(struct drm_gem_object *obj) 90 85 { 91 86 struct radeon_bo *bo = gem_to_radeon_bo(obj); 92 - int ret = 0; 93 - 94 - ret = radeon_bo_reserve(bo, false); 95 - if (unlikely(ret != 0)) 96 - return; 97 87 98 88 radeon_bo_unpin(bo); 99 89 if (bo->prime_shared_count) 100 90 bo->prime_shared_count--; 101 - radeon_bo_unreserve(bo); 102 91 } 103 92 104 93
+8 -21
drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
··· 48 48 { 49 49 } 50 50 51 - static int vmw_gem_pin_private(struct drm_gem_object *obj, bool do_pin) 52 - { 53 - struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj); 54 - struct vmw_bo *vbo = to_vmw_bo(obj); 55 - int ret; 56 - 57 - ret = ttm_bo_reserve(bo, false, false, NULL); 58 - if (unlikely(ret != 0)) 59 - goto err; 60 - 61 - vmw_bo_pin_reserved(vbo, do_pin); 62 - 63 - ttm_bo_unreserve(bo); 64 - 65 - err: 66 - return ret; 67 - } 68 - 69 - 70 51 static int vmw_gem_object_pin(struct drm_gem_object *obj) 71 52 { 72 - return vmw_gem_pin_private(obj, true); 53 + struct vmw_bo *vbo = to_vmw_bo(obj); 54 + 55 + vmw_bo_pin_reserved(vbo, true); 56 + 57 + return 0; 73 58 } 74 59 75 60 static void vmw_gem_object_unpin(struct drm_gem_object *obj) 76 61 { 77 - vmw_gem_pin_private(obj, false); 62 + struct vmw_bo *vbo = to_vmw_bo(obj); 63 + 64 + vmw_bo_pin_reserved(vbo, false); 78 65 } 79 66 80 67 static struct sg_table *vmw_gem_object_get_sg_table(struct drm_gem_object *obj)
+1 -10
include/drm/drm_gem_shmem_helper.h
··· 175 175 static inline int drm_gem_shmem_object_pin(struct drm_gem_object *obj) 176 176 { 177 177 struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); 178 - int ret; 179 178 180 - ret = dma_resv_lock_interruptible(shmem->base.resv, NULL); 181 - if (ret) 182 - return ret; 183 - ret = drm_gem_shmem_pin_locked(shmem); 184 - dma_resv_unlock(shmem->base.resv); 185 - 186 - return ret; 179 + return drm_gem_shmem_pin_locked(shmem); 187 180 } 188 181 189 182 /** ··· 190 197 { 191 198 struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); 192 199 193 - dma_resv_lock(shmem->base.resv, NULL); 194 200 drm_gem_shmem_unpin_locked(shmem); 195 - dma_resv_unlock(shmem->base.resv); 196 201 } 197 202 198 203 /**