Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/gem-vram: Do not pin buffer objects for vmap

Pin and vmap are distinct operations. Do not perform a pin as part
of the vmap call. This used to be necessary to keep the fbdev buffer
in place while it is being updated. Fbdev emulation has meanwhile
been fixed to lock the buffer correctly. Same for vunmap.

For refactoring the code, remove the pin calls from the helper's
vmap implementation in drm_gem_vram_vmap() and inline the call to
drm_gem_vram_kmap_locked(). This gives a vmap helper that only
maps the buffer object's memory pages without pinning or locking.
Do a similar refactoring for vunmap.

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com> # virtio-gpu
Acked-by: Zack Rusin <zack.rusin@broadcom.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240227113853.8464-13-tzimmermann@suse.de

+34 -60
+34 -60
drivers/gpu/drm/drm_gem_vram_helper.c
··· 368 368 } 369 369 EXPORT_SYMBOL(drm_gem_vram_unpin); 370 370 371 - static int drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo, 372 - struct iosys_map *map) 373 - { 374 - int ret; 375 - 376 - if (gbo->vmap_use_count > 0) 377 - goto out; 378 - 379 - /* 380 - * VRAM helpers unmap the BO only on demand. So the previous 381 - * page mapping might still be around. Only vmap if the there's 382 - * no mapping present. 383 - */ 384 - if (iosys_map_is_null(&gbo->map)) { 385 - ret = ttm_bo_vmap(&gbo->bo, &gbo->map); 386 - if (ret) 387 - return ret; 388 - } 389 - 390 - out: 391 - ++gbo->vmap_use_count; 392 - *map = gbo->map; 393 - 394 - return 0; 395 - } 396 - 397 - static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo, 398 - struct iosys_map *map) 399 - { 400 - struct drm_device *dev = gbo->bo.base.dev; 401 - 402 - if (drm_WARN_ON_ONCE(dev, !gbo->vmap_use_count)) 403 - return; 404 - 405 - if (drm_WARN_ON_ONCE(dev, !iosys_map_is_equal(&gbo->map, map))) 406 - return; /* BUG: map not mapped from this BO */ 407 - 408 - if (--gbo->vmap_use_count > 0) 409 - return; 410 - 411 - /* 412 - * Permanently mapping and unmapping buffers adds overhead from 413 - * updating the page tables and creates debugging output. Therefore, 414 - * we delay the actual unmap operation until the BO gets evicted 415 - * from memory. See drm_gem_vram_bo_driver_move_notify(). 416 - */ 417 - } 418 - 419 371 /** 420 372 * drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address 421 373 * space ··· 390 438 391 439 dma_resv_assert_held(gbo->bo.base.resv); 392 440 393 - ret = drm_gem_vram_pin_locked(gbo, 0); 394 - if (ret) 395 - return ret; 396 - ret = drm_gem_vram_kmap_locked(gbo, map); 397 - if (ret) 398 - goto err_drm_gem_vram_unpin_locked; 441 + if (gbo->vmap_use_count > 0) 442 + goto out; 443 + 444 + /* 445 + * VRAM helpers unmap the BO only on demand. So the previous 446 + * page mapping might still be around. Only vmap if the there's 447 + * no mapping present. 448 + */ 449 + if (iosys_map_is_null(&gbo->map)) { 450 + ret = ttm_bo_vmap(&gbo->bo, &gbo->map); 451 + if (ret) 452 + return ret; 453 + } 454 + 455 + out: 456 + ++gbo->vmap_use_count; 457 + *map = gbo->map; 399 458 400 459 return 0; 401 - 402 - err_drm_gem_vram_unpin_locked: 403 - drm_gem_vram_unpin_locked(gbo); 404 - return ret; 405 460 } 406 461 EXPORT_SYMBOL(drm_gem_vram_vmap); 407 462