Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/vmwgfx: Switch to exclusively using GEM references

Currently we use a combination of TTM and GEM reference counting which is
cumbersome. TTM references are used for kernel-internal BOs and operations
like validation. Simply switching the ttm_bo_(get|put) calls to their
GEM equivalents is insufficient, as not all BOs are GEM BOs, so we must set
the GEM vtable for all BOs even if they are not exposed to userspace.

Suggested-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Ian Forbes <ian.forbes@broadcom.com>
Reviewed-by: Zack Rusin <zack.rusin@broadcom.com>
Signed-off-by: Zack Rusin <zack.rusin@broadcom.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250131200321.193939-1-ian.forbes@broadcom.com

Authored by Ian Forbes; committed by Zack Rusin
e95635d7 f42c09e6

+18 -38
+2 -2
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
··· 36 36 { 37 37 struct vmw_resource *res; 38 38 39 - WARN_ON(vbo->tbo.base.funcs && 40 - kref_read(&vbo->tbo.base.refcount) != 0); 39 + WARN_ON(kref_read(&vbo->tbo.base.refcount) != 0); 41 40 vmw_bo_unmap(vbo); 42 41 43 42 xa_destroy(&vbo->detached_resources); ··· 468 469 if (unlikely(ret != 0)) 469 470 goto out_error; 470 471 472 + (*p_bo)->tbo.base.funcs = &vmw_gem_object_funcs; 471 473 return ret; 472 474 out_error: 473 475 *p_bo = NULL;
+2 -2
drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
··· 204 204 205 205 *buf = NULL; 206 206 if (tmp_buf) 207 - ttm_bo_put(&tmp_buf->tbo); 207 + drm_gem_object_put(&tmp_buf->tbo.base); 208 208 } 209 209 210 210 static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf) 211 211 { 212 - ttm_bo_get(&buf->tbo); 212 + drm_gem_object_get(&buf->tbo.base); 213 213 return buf; 214 214 } 215 215
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
··· 432 432 * for the new COTable. Initially pin the buffer object to make sure 433 433 * we can use tryreserve without failure. 434 434 */ 435 - ret = vmw_gem_object_create(dev_priv, &bo_params, &buf); 435 + ret = vmw_bo_create(dev_priv, &bo_params, &buf); 436 436 if (ret) { 437 437 DRM_ERROR("Failed initializing new cotable MOB.\n"); 438 438 goto out_done;
+1 -3
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
··· 822 822 * GEM related functionality - vmwgfx_gem.c 823 823 */ 824 824 struct vmw_bo_params; 825 - int vmw_gem_object_create(struct vmw_private *vmw, 826 - struct vmw_bo_params *params, 827 - struct vmw_bo **p_vbo); 825 + extern const struct drm_gem_object_funcs vmw_gem_object_funcs; 828 826 extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv, 829 827 struct drm_file *filp, 830 828 uint32_t size,
+2 -16
drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
··· 140 140 .close = ttm_bo_vm_close, 141 141 }; 142 142 143 - static const struct drm_gem_object_funcs vmw_gem_object_funcs = { 143 + const struct drm_gem_object_funcs vmw_gem_object_funcs = { 144 144 .free = vmw_gem_object_free, 145 145 .open = vmw_gem_object_open, 146 146 .close = vmw_gem_object_close, ··· 153 153 .mmap = vmw_gem_mmap, 154 154 .vm_ops = &vmw_vm_ops, 155 155 }; 156 - 157 - int vmw_gem_object_create(struct vmw_private *vmw, 158 - struct vmw_bo_params *params, 159 - struct vmw_bo **p_vbo) 160 - { 161 - int ret = vmw_bo_create(vmw, params, p_vbo); 162 - 163 - if (ret != 0) 164 - goto out_no_bo; 165 - 166 - (*p_vbo)->tbo.base.funcs = &vmw_gem_object_funcs; 167 - out_no_bo: 168 - return ret; 169 - } 170 156 171 157 int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv, 172 158 struct drm_file *filp, ··· 169 183 .pin = false 170 184 }; 171 185 172 - ret = vmw_gem_object_create(dev_priv, &params, p_vbo); 186 + ret = vmw_bo_create(dev_priv, &params, p_vbo); 173 187 if (ret != 0) 174 188 goto out_no_bo; 175 189
+1 -2
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
··· 282 282 } 283 283 284 284 vmw_bo_unpin_unlocked(&batch->otable_bo->tbo); 285 - ttm_bo_put(&batch->otable_bo->tbo); 286 - batch->otable_bo = NULL; 285 + vmw_bo_unreference(&batch->otable_bo); 287 286 return ret; 288 287 } 289 288
+4 -4
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
··· 347 347 return 0; 348 348 } 349 349 350 - ret = vmw_gem_object_create(res->dev_priv, &bo_params, &gbo); 350 + ret = vmw_bo_create(res->dev_priv, &bo_params, &gbo); 351 351 if (unlikely(ret != 0)) 352 352 goto out_no_bo; 353 353 ··· 531 531 } 532 532 533 533 INIT_LIST_HEAD(&val_list); 534 - ttm_bo_get(&res->guest_memory_bo->tbo); 535 534 val_buf->bo = &res->guest_memory_bo->tbo; 536 535 val_buf->num_shared = 0; 536 + drm_gem_object_get(&val_buf->bo->base); 537 537 list_add_tail(&val_buf->head, &val_list); 538 538 ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL); 539 539 if (unlikely(ret != 0)) ··· 557 557 out_no_validate: 558 558 ttm_eu_backoff_reservation(ticket, &val_list); 559 559 out_no_reserve: 560 - ttm_bo_put(val_buf->bo); 560 + drm_gem_object_put(&val_buf->bo->base); 561 561 val_buf->bo = NULL; 562 562 if (guest_memory_dirty) 563 563 vmw_user_bo_unref(&res->guest_memory_bo); ··· 619 619 INIT_LIST_HEAD(&val_list); 620 620 list_add_tail(&val_buf->head, &val_list); 621 621 ttm_eu_backoff_reservation(ticket, &val_list); 622 - ttm_bo_put(val_buf->bo); 622 + drm_gem_object_put(&val_buf->bo->base); 623 623 val_buf->bo = NULL; 624 624 } 625 625
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
··· 445 445 * resume the overlays, this is preferred to failing to alloc. 446 446 */ 447 447 vmw_overlay_pause_all(dev_priv); 448 - ret = vmw_gem_object_create(dev_priv, &bo_params, &vps->uo.buffer); 448 + ret = vmw_bo_create(dev_priv, &bo_params, &vps->uo.buffer); 449 449 vmw_overlay_resume_all(dev_priv); 450 450 if (ret) 451 451 return ret;
+1 -3
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
··· 830 830 .pin = false 831 831 }; 832 832 833 - ret = vmw_gem_object_create(dev_priv, 834 - &params, 835 - &res->guest_memory_bo); 833 + ret = vmw_bo_create(dev_priv, &params, &res->guest_memory_bo); 836 834 if (unlikely(ret != 0)) { 837 835 vmw_resource_unreference(&res); 838 836 goto out_unlock;
+3 -4
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
··· 262 262 bo_node->hash.key); 263 263 } 264 264 val_buf = &bo_node->base; 265 - val_buf->bo = ttm_bo_get_unless_zero(&vbo->tbo); 266 - if (!val_buf->bo) 267 - return -ESRCH; 265 + vmw_bo_reference(vbo); 266 + val_buf->bo = &vbo->tbo; 268 267 val_buf->num_shared = 0; 269 268 list_add_tail(&val_buf->head, &ctx->bo_list); 270 269 } ··· 655 656 struct vmw_validation_res_node *val; 656 657 657 658 list_for_each_entry(entry, &ctx->bo_list, base.head) { 658 - ttm_bo_put(entry->base.bo); 659 + drm_gem_object_put(&entry->base.bo->base); 659 660 entry->base.bo = NULL; 660 661 } 661 662