Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/gem: Take reservation lock for vmap/vunmap operations

The new common dma-buf locking convention will require buffer importers
to hold the reservation lock around mapping operations. Make the DRM GEM
core take the lock around the vmapping operations and update DRM drivers
to use the locked functions for the cases where the DRM core now holds the lock.
This patch prepares the DRM core and drivers for the common dynamic
dma-buf locking convention.

Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221017172229.42269-4-dmitry.osipenko@collabora.com

+54 -33
+2 -2
drivers/gpu/drm/drm_client.c
··· 323 323 * fd_install step out of the driver backend hooks, to make that 324 324 * final step optional for internal users. 325 325 */ 326 - ret = drm_gem_vmap(buffer->gem, map); 326 + ret = drm_gem_vmap_unlocked(buffer->gem, map); 327 327 if (ret) 328 328 return ret; 329 329 ··· 345 345 { 346 346 struct iosys_map *map = &buffer->map; 347 347 348 - drm_gem_vunmap(buffer->gem, map); 348 + drm_gem_vunmap_unlocked(buffer->gem, map); 349 349 } 350 350 EXPORT_SYMBOL(drm_client_buffer_vunmap); 351 351
+24
drivers/gpu/drm/drm_gem.c
··· 1171 1171 { 1172 1172 int ret; 1173 1173 1174 + dma_resv_assert_held(obj->resv); 1175 + 1174 1176 if (!obj->funcs->vmap) 1175 1177 return -EOPNOTSUPP; 1176 1178 ··· 1188 1186 1189 1187 void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map) 1190 1188 { 1189 + dma_resv_assert_held(obj->resv); 1190 + 1191 1191 if (iosys_map_is_null(map)) 1192 1192 return; 1193 1193 ··· 1200 1196 iosys_map_clear(map); 1201 1197 } 1202 1198 EXPORT_SYMBOL(drm_gem_vunmap); 1199 + 1200 + int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map) 1201 + { 1202 + int ret; 1203 + 1204 + dma_resv_lock(obj->resv, NULL); 1205 + ret = drm_gem_vmap(obj, map); 1206 + dma_resv_unlock(obj->resv); 1207 + 1208 + return ret; 1209 + } 1210 + EXPORT_SYMBOL(drm_gem_vmap_unlocked); 1211 + 1212 + void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map) 1213 + { 1214 + dma_resv_lock(obj->resv, NULL); 1215 + drm_gem_vunmap(obj, map); 1216 + dma_resv_unlock(obj->resv); 1217 + } 1218 + EXPORT_SYMBOL(drm_gem_vunmap_unlocked); 1203 1219 1204 1220 /** 1205 1221 * drm_gem_lock_reservations - Sets up the ww context and acquires
+3 -3
drivers/gpu/drm/drm_gem_dma_helper.c
··· 230 230 231 231 if (gem_obj->import_attach) { 232 232 if (dma_obj->vaddr) 233 - dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map); 233 + dma_buf_vunmap_unlocked(gem_obj->import_attach->dmabuf, &map); 234 234 drm_prime_gem_destroy(gem_obj, dma_obj->sgt); 235 235 } else if (dma_obj->vaddr) { 236 236 if (dma_obj->map_noncoherent) ··· 581 581 struct iosys_map map; 582 582 int ret; 583 583 584 - ret = dma_buf_vmap(attach->dmabuf, &map); 584 + ret = dma_buf_vmap_unlocked(attach->dmabuf, &map); 585 585 if (ret) { 586 586 DRM_ERROR("Failed to vmap PRIME buffer\n"); 587 587 return ERR_PTR(ret); ··· 589 589 590 590 obj = drm_gem_dma_prime_import_sg_table(dev, attach, sgt); 591 591 if (IS_ERR(obj)) { 592 - dma_buf_vunmap(attach->dmabuf, &map); 592 + dma_buf_vunmap_unlocked(attach->dmabuf, &map); 593 593 return obj; 594 594 } 595 595
+3 -3
drivers/gpu/drm/drm_gem_framebuffer_helper.c
··· 354 354 ret = -EINVAL; 355 355 goto err_drm_gem_vunmap; 356 356 } 357 - ret = drm_gem_vmap(obj, &map[i]); 357 + ret = drm_gem_vmap_unlocked(obj, &map[i]); 358 358 if (ret) 359 359 goto err_drm_gem_vunmap; 360 360 } ··· 376 376 obj = drm_gem_fb_get_obj(fb, i); 377 377 if (!obj) 378 378 continue; 379 - drm_gem_vunmap(obj, &map[i]); 379 + drm_gem_vunmap_unlocked(obj, &map[i]); 380 380 } 381 381 return ret; 382 382 } ··· 403 403 continue; 404 404 if (iosys_map_is_null(&map[i])) 405 405 continue; 406 - drm_gem_vunmap(obj, &map[i]); 406 + drm_gem_vunmap_unlocked(obj, &map[i]); 407 407 } 408 408 } 409 409 EXPORT_SYMBOL(drm_gem_fb_vunmap);
+1 -8
drivers/gpu/drm/drm_gem_ttm_helper.c
··· 64 64 struct iosys_map *map) 65 65 { 66 66 struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem); 67 - int ret; 68 67 69 - dma_resv_lock(gem->resv, NULL); 70 - ret = ttm_bo_vmap(bo, map); 71 - dma_resv_unlock(gem->resv); 72 - 73 - return ret; 68 + return ttm_bo_vmap(bo, map); 74 69 } 75 70 EXPORT_SYMBOL(drm_gem_ttm_vmap); 76 71 ··· 82 87 { 83 88 struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem); 84 89 85 - dma_resv_lock(gem->resv, NULL); 86 90 ttm_bo_vunmap(bo, map); 87 - dma_resv_unlock(gem->resv); 88 91 } 89 92 EXPORT_SYMBOL(drm_gem_ttm_vunmap); 90 93
+2 -2
drivers/gpu/drm/lima/lima_sched.c
··· 371 371 } else { 372 372 buffer_chunk->size = lima_bo_size(bo); 373 373 374 - ret = drm_gem_shmem_vmap(&bo->base, &map); 374 + ret = drm_gem_vmap_unlocked(&bo->base.base, &map); 375 375 if (ret) { 376 376 kvfree(et); 377 377 goto out; ··· 379 379 380 380 memcpy(buffer_chunk + 1, map.vaddr, buffer_chunk->size); 381 381 382 - drm_gem_shmem_vunmap(&bo->base, &map); 382 + drm_gem_vunmap_unlocked(&bo->base.base, &map); 383 383 } 384 384 385 385 buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size;
+2 -2
drivers/gpu/drm/panfrost/panfrost_dump.c
··· 209 209 goto dump_header; 210 210 } 211 211 212 - ret = drm_gem_shmem_vmap(&bo->base, &map); 212 + ret = drm_gem_vmap_unlocked(&bo->base.base, &map); 213 213 if (ret) { 214 214 dev_err(pfdev->dev, "Panfrost Dump: couldn't map Buffer Object\n"); 215 215 iter.hdr->bomap.valid = 0; ··· 236 236 vaddr = map.vaddr; 237 237 memcpy(iter.data, vaddr, bo->base.base.size); 238 238 239 - drm_gem_shmem_vunmap(&bo->base, &map); 239 + drm_gem_vunmap_unlocked(&bo->base.base, &map); 240 240 241 241 iter.hdr->bomap.valid = cpu_to_le32(1); 242 242
+3 -3
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
··· 106 106 goto err_close_bo; 107 107 } 108 108 109 - ret = drm_gem_shmem_vmap(bo, &map); 109 + ret = drm_gem_vmap_unlocked(&bo->base, &map); 110 110 if (ret) 111 111 goto err_put_mapping; 112 112 perfcnt->buf = map.vaddr; ··· 165 165 return 0; 166 166 167 167 err_vunmap: 168 - drm_gem_shmem_vunmap(bo, &map); 168 + drm_gem_vunmap_unlocked(&bo->base, &map); 169 169 err_put_mapping: 170 170 panfrost_gem_mapping_put(perfcnt->mapping); 171 171 err_close_bo: ··· 195 195 GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF)); 196 196 197 197 perfcnt->user = NULL; 198 - drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base, &map); 198 + drm_gem_vunmap_unlocked(&perfcnt->mapping->obj->base.base, &map); 199 199 perfcnt->buf = NULL; 200 200 panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv); 201 201 panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu);
+9 -8
drivers/gpu/drm/qxl/qxl_object.c
··· 168 168 bo->map_count++; 169 169 goto out; 170 170 } 171 - r = ttm_bo_vmap(&bo->tbo, &bo->map); 171 + 172 + r = __qxl_bo_pin(bo); 172 173 if (r) 173 174 return r; 175 + 176 + r = ttm_bo_vmap(&bo->tbo, &bo->map); 177 + if (r) { 178 + __qxl_bo_unpin(bo); 179 + return r; 180 + } 174 181 bo->map_count = 1; 175 182 176 183 /* TODO: Remove kptr in favor of map everywhere. */ ··· 198 191 r = qxl_bo_reserve(bo); 199 192 if (r) 200 193 return r; 201 - 202 - r = __qxl_bo_pin(bo); 203 - if (r) { 204 - qxl_bo_unreserve(bo); 205 - return r; 206 - } 207 194 208 195 r = qxl_bo_vmap_locked(bo, map); 209 196 qxl_bo_unreserve(bo); ··· 248 247 return; 249 248 bo->kptr = NULL; 250 249 ttm_bo_vunmap(&bo->tbo, &bo->map); 250 + __qxl_bo_unpin(bo); 251 251 } 252 252 253 253 int qxl_bo_vunmap(struct qxl_bo *bo) ··· 260 258 return r; 261 259 262 260 qxl_bo_vunmap_locked(bo); 263 - __qxl_bo_unpin(bo); 264 261 qxl_bo_unreserve(bo); 265 262 return 0; 266 263 }
+2 -2
drivers/gpu/drm/qxl/qxl_prime.c
··· 59 59 struct qxl_bo *bo = gem_to_qxl_bo(obj); 60 60 int ret; 61 61 62 - ret = qxl_bo_vmap(bo, map); 62 + ret = qxl_bo_vmap_locked(bo, map); 63 63 if (ret < 0) 64 64 return ret; 65 65 ··· 71 71 { 72 72 struct qxl_bo *bo = gem_to_qxl_bo(obj); 73 73 74 - qxl_bo_vunmap(bo); 74 + qxl_bo_vunmap_locked(bo); 75 75 }
+3
include/drm/drm_gem.h
··· 408 408 void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, 409 409 bool dirty, bool accessed); 410 410 411 + int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map); 412 + void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map); 413 + 411 414 int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, 412 415 int count, struct drm_gem_object ***objs_out); 413 416 struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle);