Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/vmwgfx: Implement DRIVER_GEM

This is initial change adding support for DRIVER_GEM to vmwgfx. vmwgfx
was written before GEM and has always used TTM. Over the years the
TTM buffers started inheriting from GEM objects but vmwgfx never
implemented GEM making it quite awkward. We were directly setting
variables in GEM objects to not make DRM crash.

This change brings vmwgfx inline with other DRM drivers and allows us
to use a lot of DRM helpers which have depended on drivers with GEM
support.

Due to historical reasons vmwgfx splits the idea of a buffer and surface
which makes it a little tricky since either one can be used in most
of our ioctl's which take user space handles. For now our BO's are
GEM objects and our surfaces are opaque objects which are backed by
GEM objects. In the future I'd like to combine those into a single
BO but we don't want to break any of our existing ioctl's so it will
take time to do it in a non-destructive way.

Signed-off-by: Zack Rusin <zackr@vmware.com>
Reviewed-by: Martin Krastev <krastevm@vmware.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211206172620.3139754-5-zack@kde.org

+588 -745
+1
drivers/gpu/drm/vmwgfx/Kconfig
··· 4 4 depends on DRM && PCI && MMU 5 5 depends on X86 || ARM64 6 6 select DRM_TTM 7 + select DRM_TTM_HELPER 7 8 select MAPPING_DIRTY_HELPERS 8 9 # Only needed for the transitional use of drm_crtc_init - can be removed 9 10 # again once vmwgfx sets up the primary plane itself.
+2 -1
drivers/gpu/drm/vmwgfx/Makefile
··· 9 9 vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \ 10 10 vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \ 11 11 vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \ 12 - vmwgfx_devcaps.o ttm_object.o vmwgfx_system_manager.o 12 + vmwgfx_devcaps.o ttm_object.o vmwgfx_system_manager.o \ 13 + vmwgfx_gem.o 13 14 14 15 vmwgfx-$(CONFIG_DRM_FBDEV_EMULATION) += vmwgfx_fb.o 15 16 vmwgfx-$(CONFIG_TRANSPARENT_HUGEPAGE) += vmwgfx_thp.o
+28 -86
drivers/gpu/drm/vmwgfx/ttm_object.c
··· 50 50 #include <linux/atomic.h> 51 51 #include <linux/module.h> 52 52 #include "ttm_object.h" 53 + #include "vmwgfx_drv.h" 53 54 54 55 MODULE_IMPORT_NS(DMA_BUF); 55 56 ··· 74 73 struct ttm_object_device *tdev; 75 74 spinlock_t lock; 76 75 struct list_head ref_list; 77 - struct vmwgfx_open_hash ref_hash[TTM_REF_NUM]; 76 + struct vmwgfx_open_hash ref_hash; 78 77 struct kref refcount; 79 78 }; 80 79 ··· 125 124 struct vmwgfx_hash_item hash; 126 125 struct list_head head; 127 126 struct kref kref; 128 - enum ttm_ref_type ref_type; 129 127 struct ttm_base_object *obj; 130 128 struct ttm_object_file *tfile; 131 129 }; ··· 160 160 struct ttm_base_object *base, 161 161 bool shareable, 162 162 enum ttm_object_type object_type, 163 - void (*refcount_release) (struct ttm_base_object **), 164 - void (*ref_obj_release) (struct ttm_base_object *, 165 - enum ttm_ref_type ref_type)) 163 + void (*refcount_release) (struct ttm_base_object **)) 166 164 { 167 165 struct ttm_object_device *tdev = tfile->tdev; 168 166 int ret; ··· 168 170 base->shareable = shareable; 169 171 base->tfile = ttm_object_file_ref(tfile); 170 172 base->refcount_release = refcount_release; 171 - base->ref_obj_release = ref_obj_release; 172 173 base->object_type = object_type; 173 174 kref_init(&base->refcount); 174 175 idr_preload(GFP_KERNEL); ··· 179 182 return ret; 180 183 181 184 base->handle = ret; 182 - ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false); 185 + ret = ttm_ref_object_add(tfile, base, NULL, false); 183 186 if (unlikely(ret != 0)) 184 187 goto out_err1; 185 188 ··· 243 246 ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint32_t key) 244 247 { 245 248 struct vmwgfx_hash_item *hash; 246 - struct vmwgfx_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE]; 249 + struct vmwgfx_open_hash *ht = &tfile->ref_hash; 247 250 int ret; 248 251 249 252 rcu_read_lock(); ··· 263 266 { 264 267 struct ttm_base_object *base = NULL; 265 268 struct vmwgfx_hash_item *hash; 266 - struct 
vmwgfx_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE]; 269 + struct vmwgfx_open_hash *ht = &tfile->ref_hash; 267 270 int ret; 268 271 269 272 rcu_read_lock(); ··· 294 297 return base; 295 298 } 296 299 297 - /** 298 - * ttm_ref_object_exists - Check whether a caller has a valid ref object 299 - * (has opened) a base object. 300 - * 301 - * @tfile: Pointer to a struct ttm_object_file identifying the caller. 302 - * @base: Pointer to a struct base object. 303 - * 304 - * Checks wether the caller identified by @tfile has put a valid USAGE 305 - * reference object on the base object identified by @base. 306 - */ 307 - bool ttm_ref_object_exists(struct ttm_object_file *tfile, 308 - struct ttm_base_object *base) 309 - { 310 - struct vmwgfx_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE]; 311 - struct vmwgfx_hash_item *hash; 312 - struct ttm_ref_object *ref; 313 - 314 - rcu_read_lock(); 315 - if (unlikely(vmwgfx_ht_find_item_rcu(ht, base->handle, &hash) != 0)) 316 - goto out_false; 317 - 318 - /* 319 - * Verify that the ref object is really pointing to our base object. 320 - * Our base object could actually be dead, and the ref object pointing 321 - * to another base object with the same handle. 322 - */ 323 - ref = drm_hash_entry(hash, struct ttm_ref_object, hash); 324 - if (unlikely(base != ref->obj)) 325 - goto out_false; 326 - 327 - /* 328 - * Verify that the ref->obj pointer was actually valid! 
329 - */ 330 - rmb(); 331 - if (unlikely(kref_read(&ref->kref) == 0)) 332 - goto out_false; 333 - 334 - rcu_read_unlock(); 335 - return true; 336 - 337 - out_false: 338 - rcu_read_unlock(); 339 - return false; 340 - } 341 - 342 300 int ttm_ref_object_add(struct ttm_object_file *tfile, 343 301 struct ttm_base_object *base, 344 - enum ttm_ref_type ref_type, bool *existed, 302 + bool *existed, 345 303 bool require_existed) 346 304 { 347 - struct vmwgfx_open_hash *ht = &tfile->ref_hash[ref_type]; 305 + struct vmwgfx_open_hash *ht = &tfile->ref_hash; 348 306 struct ttm_ref_object *ref; 349 307 struct vmwgfx_hash_item *hash; 350 308 int ret = -EINVAL; ··· 334 382 ref->hash.key = base->handle; 335 383 ref->obj = base; 336 384 ref->tfile = tfile; 337 - ref->ref_type = ref_type; 338 385 kref_init(&ref->kref); 339 386 340 387 spin_lock(&tfile->lock); ··· 362 411 { 363 412 struct ttm_ref_object *ref = 364 413 container_of(kref, struct ttm_ref_object, kref); 365 - struct ttm_base_object *base = ref->obj; 366 414 struct ttm_object_file *tfile = ref->tfile; 367 415 struct vmwgfx_open_hash *ht; 368 416 369 - ht = &tfile->ref_hash[ref->ref_type]; 417 + ht = &tfile->ref_hash; 370 418 (void)vmwgfx_ht_remove_item_rcu(ht, &ref->hash); 371 419 list_del(&ref->head); 372 420 spin_unlock(&tfile->lock); 373 - 374 - if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release) 375 - base->ref_obj_release(base, ref->ref_type); 376 421 377 422 ttm_base_object_unref(&ref->obj); 378 423 kfree_rcu(ref, rcu_head); ··· 376 429 } 377 430 378 431 int ttm_ref_object_base_unref(struct ttm_object_file *tfile, 379 - unsigned long key, enum ttm_ref_type ref_type) 432 + unsigned long key) 380 433 { 381 - struct vmwgfx_open_hash *ht = &tfile->ref_hash[ref_type]; 434 + struct vmwgfx_open_hash *ht = &tfile->ref_hash; 382 435 struct ttm_ref_object *ref; 383 436 struct vmwgfx_hash_item *hash; 384 437 int ret; ··· 399 452 { 400 453 struct ttm_ref_object *ref; 401 454 struct list_head *list; 402 - unsigned int i; 
403 455 struct ttm_object_file *tfile = *p_tfile; 404 456 405 457 *p_tfile = NULL; ··· 416 470 } 417 471 418 472 spin_unlock(&tfile->lock); 419 - for (i = 0; i < TTM_REF_NUM; ++i) 420 - vmwgfx_ht_remove(&tfile->ref_hash[i]); 473 + vmwgfx_ht_remove(&tfile->ref_hash); 421 474 422 475 ttm_object_file_unref(&tfile); 423 476 } ··· 425 480 unsigned int hash_order) 426 481 { 427 482 struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL); 428 - unsigned int i; 429 - unsigned int j = 0; 430 483 int ret; 431 484 432 485 if (unlikely(tfile == NULL)) ··· 435 492 kref_init(&tfile->refcount); 436 493 INIT_LIST_HEAD(&tfile->ref_list); 437 494 438 - for (i = 0; i < TTM_REF_NUM; ++i) { 439 - ret = vmwgfx_ht_create(&tfile->ref_hash[i], hash_order); 440 - if (ret) { 441 - j = i; 442 - goto out_err; 443 - } 444 - } 495 + ret = vmwgfx_ht_create(&tfile->ref_hash, hash_order); 496 + if (ret) 497 + goto out_err; 445 498 446 499 return tfile; 447 500 out_err: 448 - for (i = 0; i < j; ++i) 449 - vmwgfx_ht_remove(&tfile->ref_hash[i]); 501 + vmwgfx_ht_remove(&tfile->ref_hash); 450 502 451 503 kfree(tfile); 452 504 ··· 464 526 if (ret != 0) 465 527 goto out_no_object_hash; 466 528 467 - idr_init_base(&tdev->idr, 1); 529 + /* 530 + * Our base is at VMWGFX_NUM_MOB + 1 because we want to create 531 + * a seperate namespace for GEM handles (which are 532 + * 1..VMWGFX_NUM_MOB) and the surface handles. Some ioctl's 533 + * can take either handle as an argument so we want to 534 + * easily be able to tell whether the handle refers to a 535 + * GEM buffer or a surface. 
536 + */ 537 + idr_init_base(&tdev->idr, VMWGFX_NUM_MOB + 1); 468 538 tdev->ops = *ops; 469 539 tdev->dmabuf_release = tdev->ops.release; 470 540 tdev->ops.release = ttm_prime_dmabuf_release; ··· 593 647 prime = (struct ttm_prime_object *) dma_buf->priv; 594 648 base = &prime->base; 595 649 *handle = base->handle; 596 - ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false); 650 + ret = ttm_ref_object_add(tfile, base, NULL, false); 597 651 598 652 dma_buf_put(dma_buf); 599 653 ··· 687 741 * @shareable: See ttm_base_object_init 688 742 * @type: See ttm_base_object_init 689 743 * @refcount_release: See ttm_base_object_init 690 - * @ref_obj_release: See ttm_base_object_init 691 744 * 692 745 * Initializes an object which is compatible with the drm_prime model 693 746 * for data sharing between processes and devices. ··· 694 749 int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size, 695 750 struct ttm_prime_object *prime, bool shareable, 696 751 enum ttm_object_type type, 697 - void (*refcount_release) (struct ttm_base_object **), 698 - void (*ref_obj_release) (struct ttm_base_object *, 699 - enum ttm_ref_type ref_type)) 752 + void (*refcount_release) (struct ttm_base_object **)) 700 753 { 701 754 mutex_init(&prime->mutex); 702 755 prime->size = PAGE_ALIGN(size); ··· 703 760 prime->refcount_release = refcount_release; 704 761 return ttm_base_object_init(tfile, &prime->base, shareable, 705 762 ttm_prime_type, 706 - ttm_prime_refcount_release, 707 - ref_obj_release); 763 + ttm_prime_refcount_release); 708 764 }
+4 -40
drivers/gpu/drm/vmwgfx/ttm_object.h
··· 45 45 #include "vmwgfx_hashtab.h" 46 46 47 47 /** 48 - * enum ttm_ref_type 49 - * 50 - * Describes what type of reference a ref object holds. 51 - * 52 - * TTM_REF_USAGE is a simple refcount on a base object. 53 - * 54 - * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a 55 - * buffer object. 56 - * 57 - * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a 58 - * buffer object. 59 - * 60 - */ 61 - 62 - enum ttm_ref_type { 63 - TTM_REF_USAGE, 64 - TTM_REF_SYNCCPU_READ, 65 - TTM_REF_SYNCCPU_WRITE, 66 - TTM_REF_NUM 67 - }; 68 - 69 - /** 70 48 * enum ttm_object_type 71 49 * 72 50 * One entry per ttm object type. ··· 54 76 55 77 enum ttm_object_type { 56 78 ttm_fence_type, 57 - ttm_buffer_type, 58 79 ttm_lock_type, 59 80 ttm_prime_type, 60 81 ttm_driver_type0 = 256, ··· 104 127 struct ttm_object_file *tfile; 105 128 struct kref refcount; 106 129 void (*refcount_release) (struct ttm_base_object **base); 107 - void (*ref_obj_release) (struct ttm_base_object *base, 108 - enum ttm_ref_type ref_type); 109 130 u32 handle; 110 131 enum ttm_object_type object_type; 111 132 u32 shareable; ··· 152 177 bool shareable, 153 178 enum ttm_object_type type, 154 179 void (*refcount_release) (struct ttm_base_object 155 - **), 156 - void (*ref_obj_release) (struct ttm_base_object 157 - *, 158 - enum ttm_ref_type 159 - ref_type)); 180 + **)); 160 181 161 182 /** 162 183 * ttm_base_object_lookup ··· 216 245 */ 217 246 extern int ttm_ref_object_add(struct ttm_object_file *tfile, 218 247 struct ttm_base_object *base, 219 - enum ttm_ref_type ref_type, bool *existed, 248 + bool *existed, 220 249 bool require_existed); 221 - 222 - extern bool ttm_ref_object_exists(struct ttm_object_file *tfile, 223 - struct ttm_base_object *base); 224 250 225 251 /** 226 252 * ttm_ref_object_base_unref ··· 231 263 * will be unreferenced. 
232 264 */ 233 265 extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile, 234 - unsigned long key, 235 - enum ttm_ref_type ref_type); 266 + unsigned long key); 236 267 237 268 /** 238 269 * ttm_object_file_init - initialize a struct ttm_object file ··· 295 328 bool shareable, 296 329 enum ttm_object_type type, 297 330 void (*refcount_release) 298 - (struct ttm_base_object **), 299 - void (*ref_obj_release) 300 - (struct ttm_base_object *, 301 - enum ttm_ref_type ref_type)); 331 + (struct ttm_base_object **)); 302 332 303 333 static inline enum ttm_object_type 304 334 ttm_base_object_type(struct ttm_base_object *base)
+110 -407
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
··· 33 33 34 34 35 35 /** 36 - * struct vmw_user_buffer_object - User-space-visible buffer object 37 - * 38 - * @prime: The prime object providing user visibility. 39 - * @vbo: The struct vmw_buffer_object 40 - */ 41 - struct vmw_user_buffer_object { 42 - struct ttm_prime_object prime; 43 - struct vmw_buffer_object vbo; 44 - }; 45 - 46 - 47 - /** 48 36 * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct 49 37 * vmw_buffer_object. 50 38 * ··· 44 56 vmw_buffer_object(struct ttm_buffer_object *bo) 45 57 { 46 58 return container_of(bo, struct vmw_buffer_object, base); 47 - } 48 - 49 - 50 - /** 51 - * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct 52 - * vmw_user_buffer_object. 53 - * 54 - * @bo: Pointer to the TTM buffer object. 55 - * Return: Pointer to the struct vmw_buffer_object embedding the TTM buffer 56 - * object. 57 - */ 58 - static struct vmw_user_buffer_object * 59 - vmw_user_buffer_object(struct ttm_buffer_object *bo) 60 - { 61 - struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo); 62 - 63 - return container_of(vmw_bo, struct vmw_user_buffer_object, vbo); 64 59 } 65 60 66 61 ··· 374 403 WARN_ON(vmw_bo->dirty); 375 404 WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree)); 376 405 vmw_bo_unmap(vmw_bo); 377 - dma_resv_fini(&bo->base._resv); 406 + drm_gem_object_release(&bo->base); 378 407 kfree(vmw_bo); 379 - } 380 - 381 - 382 - /** 383 - * vmw_user_bo_destroy - vmw buffer object destructor 384 - * 385 - * @bo: Pointer to the embedded struct ttm_buffer_object 386 - */ 387 - static void vmw_user_bo_destroy(struct ttm_buffer_object *bo) 388 - { 389 - struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo); 390 - struct vmw_buffer_object *vbo = &vmw_user_bo->vbo; 391 - 392 - WARN_ON(vbo->dirty); 393 - WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree)); 394 - vmw_bo_unmap(vbo); 395 - ttm_prime_object_kfree(vmw_user_bo, prime); 396 408 } 397 409 398 410 /** ··· 397 443 .no_wait_gpu = false 398 444 }; 399 445 struct 
ttm_buffer_object *bo; 446 + struct drm_device *vdev = &dev_priv->drm; 400 447 int ret; 401 448 402 449 bo = kzalloc(sizeof(*bo), GFP_KERNEL); 403 450 if (unlikely(!bo)) 404 451 return -ENOMEM; 405 452 406 - bo->base.size = size; 407 - dma_resv_init(&bo->base._resv); 408 - drm_vma_node_reset(&bo->base.vma_node); 453 + size = ALIGN(size, PAGE_SIZE); 454 + 455 + drm_gem_private_object_init(vdev, &bo->base, size); 409 456 410 457 ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size, 411 458 ttm_bo_type_kernel, placement, 0, ··· 422 467 423 468 error_free: 424 469 kfree(bo); 470 + return ret; 471 + } 472 + 473 + int vmw_bo_create(struct vmw_private *vmw, 474 + size_t size, struct ttm_placement *placement, 475 + bool interruptible, bool pin, 476 + void (*bo_free)(struct ttm_buffer_object *bo), 477 + struct vmw_buffer_object **p_bo) 478 + { 479 + int ret; 480 + 481 + *p_bo = kmalloc(sizeof(**p_bo), GFP_KERNEL); 482 + if (unlikely(!*p_bo)) { 483 + DRM_ERROR("Failed to allocate a buffer.\n"); 484 + return -ENOMEM; 485 + } 486 + 487 + ret = vmw_bo_init(vmw, *p_bo, size, 488 + placement, interruptible, pin, 489 + bo_free); 490 + if (unlikely(ret != 0)) 491 + goto out_error; 492 + 493 + return ret; 494 + out_error: 495 + kfree(*p_bo); 496 + *p_bo = NULL; 425 497 return ret; 426 498 } 427 499 ··· 477 495 .no_wait_gpu = false 478 496 }; 479 497 struct ttm_device *bdev = &dev_priv->bdev; 498 + struct drm_device *vdev = &dev_priv->drm; 480 499 int ret; 481 - bool user = (bo_free == &vmw_user_bo_destroy); 482 500 483 - WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free))); 501 + WARN_ON_ONCE(!bo_free); 484 502 memset(vmw_bo, 0, sizeof(*vmw_bo)); 485 503 BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3); 486 504 vmw_bo->base.priority = 3; 487 505 vmw_bo->res_tree = RB_ROOT; 488 506 489 - 490 - vmw_bo->base.base.size = size; 491 - dma_resv_init(&vmw_bo->base.base._resv); 492 - drm_vma_node_reset(&vmw_bo->base.base.vma_node); 507 + size = ALIGN(size, PAGE_SIZE); 508 + 
drm_gem_private_object_init(vdev, &vmw_bo->base.base, size); 493 509 494 510 ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size, 495 - ttm_bo_type_device, placement, 511 + ttm_bo_type_device, 512 + placement, 496 513 0, &ctx, NULL, NULL, bo_free); 497 514 if (unlikely(ret)) { 498 515 return ret; ··· 500 519 if (pin) 501 520 ttm_bo_pin(&vmw_bo->base); 502 521 ttm_bo_unreserve(&vmw_bo->base); 522 + 503 523 return 0; 504 524 } 505 525 506 - 507 526 /** 508 - * vmw_user_bo_release - TTM reference base object release callback for 509 - * vmw user buffer objects 510 - * 511 - * @p_base: The TTM base object pointer about to be unreferenced. 512 - * 513 - * Clears the TTM base object pointer and drops the reference the 514 - * base object has on the underlying struct vmw_buffer_object. 515 - */ 516 - static void vmw_user_bo_release(struct ttm_base_object **p_base) 517 - { 518 - struct vmw_user_buffer_object *vmw_user_bo; 519 - struct ttm_base_object *base = *p_base; 520 - 521 - *p_base = NULL; 522 - 523 - if (unlikely(base == NULL)) 524 - return; 525 - 526 - vmw_user_bo = container_of(base, struct vmw_user_buffer_object, 527 - prime.base); 528 - ttm_bo_put(&vmw_user_bo->vbo.base); 529 - } 530 - 531 - 532 - /** 533 - * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback 534 - * for vmw user buffer objects 535 - * 536 - * @base: Pointer to the TTM base object 537 - * @ref_type: Reference type of the reference reaching zero. 538 - * 539 - * Called when user-space drops its last synccpu reference on the buffer 540 - * object, Either explicitly or as part of a cleanup file close. 
541 - */ 542 - static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base, 543 - enum ttm_ref_type ref_type) 544 - { 545 - struct vmw_user_buffer_object *user_bo; 546 - 547 - user_bo = container_of(base, struct vmw_user_buffer_object, prime.base); 548 - 549 - switch (ref_type) { 550 - case TTM_REF_SYNCCPU_WRITE: 551 - atomic_dec(&user_bo->vbo.cpu_writers); 552 - break; 553 - default: 554 - WARN_ONCE(true, "Undefined buffer object reference release.\n"); 555 - } 556 - } 557 - 558 - 559 - /** 560 - * vmw_user_bo_alloc - Allocate a user buffer object 561 - * 562 - * @dev_priv: Pointer to a struct device private. 563 - * @tfile: Pointer to a struct ttm_object_file on which to register the user 564 - * object. 565 - * @size: Size of the buffer object. 566 - * @shareable: Boolean whether the buffer is shareable with other open files. 567 - * @handle: Pointer to where the handle value should be assigned. 568 - * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer 569 - * should be assigned. 570 - * @p_base: The TTM base object pointer about to be allocated. 571 - * Return: Zero on success, negative error code on error. 572 - */ 573 - int vmw_user_bo_alloc(struct vmw_private *dev_priv, 574 - struct ttm_object_file *tfile, 575 - uint32_t size, 576 - bool shareable, 577 - uint32_t *handle, 578 - struct vmw_buffer_object **p_vbo, 579 - struct ttm_base_object **p_base) 580 - { 581 - struct vmw_user_buffer_object *user_bo; 582 - int ret; 583 - 584 - user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL); 585 - if (unlikely(!user_bo)) { 586 - DRM_ERROR("Failed to allocate a buffer.\n"); 587 - return -ENOMEM; 588 - } 589 - 590 - ret = vmw_bo_init(dev_priv, &user_bo->vbo, size, 591 - (dev_priv->has_mob) ? 
592 - &vmw_sys_placement : 593 - &vmw_vram_sys_placement, true, false, 594 - &vmw_user_bo_destroy); 595 - if (unlikely(ret != 0)) 596 - return ret; 597 - 598 - ttm_bo_get(&user_bo->vbo.base); 599 - ret = ttm_prime_object_init(tfile, 600 - size, 601 - &user_bo->prime, 602 - shareable, 603 - ttm_buffer_type, 604 - &vmw_user_bo_release, 605 - &vmw_user_bo_ref_obj_release); 606 - if (unlikely(ret != 0)) { 607 - ttm_bo_put(&user_bo->vbo.base); 608 - goto out_no_base_object; 609 - } 610 - 611 - *p_vbo = &user_bo->vbo; 612 - if (p_base) { 613 - *p_base = &user_bo->prime.base; 614 - kref_get(&(*p_base)->refcount); 615 - } 616 - *handle = user_bo->prime.base.handle; 617 - 618 - out_no_base_object: 619 - return ret; 620 - } 621 - 622 - 623 - /** 624 - * vmw_user_bo_verify_access - verify access permissions on this 625 - * buffer object. 626 - * 627 - * @bo: Pointer to the buffer object being accessed 628 - * @tfile: Identifying the caller. 629 - */ 630 - int vmw_user_bo_verify_access(struct ttm_buffer_object *bo, 631 - struct ttm_object_file *tfile) 632 - { 633 - struct vmw_user_buffer_object *vmw_user_bo; 634 - 635 - if (unlikely(bo->destroy != vmw_user_bo_destroy)) 636 - return -EPERM; 637 - 638 - vmw_user_bo = vmw_user_buffer_object(bo); 639 - 640 - /* Check that the caller has opened the object. */ 641 - if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base))) 642 - return 0; 643 - 644 - DRM_ERROR("Could not grant buffer access.\n"); 645 - return -EPERM; 646 - } 647 - 648 - 649 - /** 650 - * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu 527 + * vmw_user_bo_synccpu_grab - Grab a struct vmw_buffer_object for cpu 651 528 * access, idling previous GPU operations on the buffer and optionally 652 529 * blocking it for further command submissions. 653 530 * 654 - * @user_bo: Pointer to the buffer object being grabbed for CPU access 655 - * @tfile: Identifying the caller. 
531 + * @vmw_bo: Pointer to the buffer object being grabbed for CPU access 656 532 * @flags: Flags indicating how the grab should be performed. 657 533 * Return: Zero on success, Negative error code on error. In particular, 658 534 * -EBUSY will be returned if a dontblock operation is requested and the ··· 518 680 * 519 681 * A blocking grab will be automatically released when @tfile is closed. 520 682 */ 521 - static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo, 522 - struct ttm_object_file *tfile, 683 + static int vmw_user_bo_synccpu_grab(struct vmw_buffer_object *vmw_bo, 523 684 uint32_t flags) 524 685 { 525 686 bool nonblock = !!(flags & drm_vmw_synccpu_dontblock); 526 - struct ttm_buffer_object *bo = &user_bo->vbo.base; 527 - bool existed; 687 + struct ttm_buffer_object *bo = &vmw_bo->base; 528 688 int ret; 529 689 530 690 if (flags & drm_vmw_synccpu_allow_cs) { ··· 544 708 545 709 ret = ttm_bo_wait(bo, true, nonblock); 546 710 if (likely(ret == 0)) 547 - atomic_inc(&user_bo->vbo.cpu_writers); 711 + atomic_inc(&vmw_bo->cpu_writers); 548 712 549 713 ttm_bo_unreserve(bo); 550 714 if (unlikely(ret != 0)) 551 715 return ret; 552 - 553 - ret = ttm_ref_object_add(tfile, &user_bo->prime.base, 554 - TTM_REF_SYNCCPU_WRITE, &existed, false); 555 - if (ret != 0 || existed) 556 - atomic_dec(&user_bo->vbo.cpu_writers); 557 716 558 717 return ret; 559 718 } ··· 557 726 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access, 558 727 * and unblock command submission on the buffer if blocked. 559 728 * 729 + * @filp: Identifying the caller. 560 730 * @handle: Handle identifying the buffer object. 561 - * @tfile: Identifying the caller. 562 731 * @flags: Flags indicating the type of release. 
563 732 */ 564 - static int vmw_user_bo_synccpu_release(uint32_t handle, 565 - struct ttm_object_file *tfile, 566 - uint32_t flags) 733 + static int vmw_user_bo_synccpu_release(struct drm_file *filp, 734 + uint32_t handle, 735 + uint32_t flags) 567 736 { 568 - if (!(flags & drm_vmw_synccpu_allow_cs)) 569 - return ttm_ref_object_base_unref(tfile, handle, 570 - TTM_REF_SYNCCPU_WRITE); 737 + struct vmw_buffer_object *vmw_bo; 738 + int ret = vmw_user_bo_lookup(filp, handle, &vmw_bo); 571 739 572 - return 0; 740 + if (!(flags & drm_vmw_synccpu_allow_cs)) { 741 + atomic_dec(&vmw_bo->cpu_writers); 742 + } 743 + ttm_bo_put(&vmw_bo->base); 744 + 745 + return ret; 573 746 } 574 747 575 748 ··· 595 760 struct drm_vmw_synccpu_arg *arg = 596 761 (struct drm_vmw_synccpu_arg *) data; 597 762 struct vmw_buffer_object *vbo; 598 - struct vmw_user_buffer_object *user_bo; 599 - struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 600 - struct ttm_base_object *buffer_base; 601 763 int ret; 602 764 603 765 if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0 ··· 607 775 608 776 switch (arg->op) { 609 777 case drm_vmw_synccpu_grab: 610 - ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo, 611 - &buffer_base); 778 + ret = vmw_user_bo_lookup(file_priv, arg->handle, &vbo); 612 779 if (unlikely(ret != 0)) 613 780 return ret; 614 781 615 - user_bo = container_of(vbo, struct vmw_user_buffer_object, 616 - vbo); 617 - ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags); 782 + ret = vmw_user_bo_synccpu_grab(vbo, arg->flags); 618 783 vmw_bo_unreference(&vbo); 619 - ttm_base_object_unref(&buffer_base); 620 784 if (unlikely(ret != 0 && ret != -ERESTARTSYS && 621 785 ret != -EBUSY)) { 622 786 DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n", ··· 621 793 } 622 794 break; 623 795 case drm_vmw_synccpu_release: 624 - ret = vmw_user_bo_synccpu_release(arg->handle, tfile, 796 + ret = vmw_user_bo_synccpu_release(file_priv, 797 + arg->handle, 625 798 arg->flags); 626 799 if 
(unlikely(ret != 0)) { 627 800 DRM_ERROR("Failed synccpu release on handle 0x%08x.\n", ··· 637 808 638 809 return 0; 639 810 } 640 - 641 - 642 - /** 643 - * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object 644 - * allocation functionality. 645 - * 646 - * @dev: Identifies the drm device. 647 - * @data: Pointer to the ioctl argument. 648 - * @file_priv: Identifies the caller. 649 - * Return: Zero on success, negative error code on error. 650 - * 651 - * This function checks the ioctl arguments for validity and allocates a 652 - * struct vmw_user_buffer_object bo. 653 - */ 654 - int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data, 655 - struct drm_file *file_priv) 656 - { 657 - struct vmw_private *dev_priv = vmw_priv(dev); 658 - union drm_vmw_alloc_dmabuf_arg *arg = 659 - (union drm_vmw_alloc_dmabuf_arg *)data; 660 - struct drm_vmw_alloc_dmabuf_req *req = &arg->req; 661 - struct drm_vmw_dmabuf_rep *rep = &arg->rep; 662 - struct vmw_buffer_object *vbo; 663 - uint32_t handle; 664 - int ret; 665 - 666 - ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, 667 - req->size, false, &handle, &vbo, 668 - NULL); 669 - if (unlikely(ret != 0)) 670 - goto out_no_bo; 671 - 672 - rep->handle = handle; 673 - rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node); 674 - rep->cur_gmr_id = handle; 675 - rep->cur_gmr_offset = 0; 676 - 677 - vmw_bo_unreference(&vbo); 678 - 679 - out_no_bo: 680 - 681 - return ret; 682 - } 683 - 684 811 685 812 /** 686 813 * vmw_bo_unref_ioctl - Generic handle close ioctl. ··· 655 870 struct drm_vmw_unref_dmabuf_arg *arg = 656 871 (struct drm_vmw_unref_dmabuf_arg *)data; 657 872 658 - return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, 659 - arg->handle, 660 - TTM_REF_USAGE); 873 + drm_gem_handle_delete(file_priv, arg->handle); 874 + return 0; 661 875 } 662 876 663 877 664 878 /** 665 879 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle. 
666 880 * 667 - * @tfile: The TTM object file the handle is registered with. 881 + * @filp: The file the handle is registered with. 668 882 * @handle: The user buffer object handle 669 883 * @out: Pointer to a where a pointer to the embedded 670 884 * struct vmw_buffer_object should be placed. 671 - * @p_base: Pointer to where a pointer to the TTM base object should be 672 - * placed, or NULL if no such pointer is required. 673 885 * Return: Zero on success, Negative error code on error. 674 886 * 675 - * Both the output base object pointer and the vmw buffer object pointer 676 - * will be refcounted. 887 + * The vmw buffer object pointer will be refcounted. 677 888 */ 678 - int vmw_user_bo_lookup(struct ttm_object_file *tfile, 679 - uint32_t handle, struct vmw_buffer_object **out, 680 - struct ttm_base_object **p_base) 889 + int vmw_user_bo_lookup(struct drm_file *filp, 890 + uint32_t handle, 891 + struct vmw_buffer_object **out) 681 892 { 682 - struct vmw_user_buffer_object *vmw_user_bo; 683 - struct ttm_base_object *base; 893 + struct drm_gem_object *gobj; 684 894 685 - base = ttm_base_object_lookup(tfile, handle); 686 - if (unlikely(base == NULL)) { 895 + gobj = drm_gem_object_lookup(filp, handle); 896 + if (!gobj) { 687 897 DRM_ERROR("Invalid buffer object handle 0x%08lx.\n", 688 898 (unsigned long)handle); 689 899 return -ESRCH; 690 900 } 691 901 692 - if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) { 693 - ttm_base_object_unref(&base); 694 - DRM_ERROR("Invalid buffer object handle 0x%08lx.\n", 695 - (unsigned long)handle); 696 - return -EINVAL; 697 - } 698 - 699 - vmw_user_bo = container_of(base, struct vmw_user_buffer_object, 700 - prime.base); 701 - ttm_bo_get(&vmw_user_bo->vbo.base); 702 - if (p_base) 703 - *p_base = base; 704 - else 705 - ttm_base_object_unref(&base); 706 - *out = &vmw_user_bo->vbo; 902 + *out = gem_to_vmw_bo(gobj); 903 + ttm_bo_get(&(*out)->base); 904 + drm_gem_object_put(gobj); 707 905 708 906 return 0; 709 907 } 710 908 
711 909 /** 712 910 * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference 713 - * @tfile: The TTM object file the handle is registered with. 911 + * @filp: The TTM object file the handle is registered with. 714 912 * @handle: The user buffer object handle. 715 913 * 716 - * This function looks up a struct vmw_user_bo and returns a pointer to the 914 + * This function looks up a struct vmw_bo and returns a pointer to the 717 915 * struct vmw_buffer_object it derives from without refcounting the pointer. 718 916 * The returned pointer is only valid until vmw_user_bo_noref_release() is 719 917 * called, and the object pointed to by the returned pointer may be doomed. ··· 709 941 * error pointer on failure. 710 942 */ 711 943 struct vmw_buffer_object * 712 - vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle) 944 + vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle) 713 945 { 714 - struct vmw_user_buffer_object *vmw_user_bo; 715 - struct ttm_base_object *base; 946 + struct vmw_buffer_object *vmw_bo; 947 + struct ttm_buffer_object *bo; 948 + struct drm_gem_object *gobj = drm_gem_object_lookup(filp, handle); 716 949 717 - base = ttm_base_object_noref_lookup(tfile, handle); 718 - if (!base) { 950 + if (!gobj) { 719 951 DRM_ERROR("Invalid buffer object handle 0x%08lx.\n", 720 952 (unsigned long)handle); 721 953 return ERR_PTR(-ESRCH); 722 954 } 955 + vmw_bo = gem_to_vmw_bo(gobj); 956 + bo = ttm_bo_get_unless_zero(&vmw_bo->base); 957 + vmw_bo = vmw_buffer_object(bo); 958 + drm_gem_object_put(gobj); 723 959 724 - if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) { 725 - ttm_base_object_noref_release(); 726 - DRM_ERROR("Invalid buffer object handle 0x%08lx.\n", 727 - (unsigned long)handle); 728 - return ERR_PTR(-EINVAL); 729 - } 730 - 731 - vmw_user_bo = container_of(base, struct vmw_user_buffer_object, 732 - prime.base); 733 - return &vmw_user_bo->vbo; 734 - } 735 - 736 - /** 737 - * vmw_user_bo_reference - Open a 
handle to a vmw user buffer object. 738 - * 739 - * @tfile: The TTM object file to register the handle with. 740 - * @vbo: The embedded vmw buffer object. 741 - * @handle: Pointer to where the new handle should be placed. 742 - * Return: Zero on success, Negative error code on error. 743 - */ 744 - int vmw_user_bo_reference(struct ttm_object_file *tfile, 745 - struct vmw_buffer_object *vbo, 746 - uint32_t *handle) 747 - { 748 - struct vmw_user_buffer_object *user_bo; 749 - 750 - if (vbo->base.destroy != vmw_user_bo_destroy) 751 - return -EINVAL; 752 - 753 - user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo); 754 - 755 - *handle = user_bo->prime.base.handle; 756 - return ttm_ref_object_add(tfile, &user_bo->prime.base, 757 - TTM_REF_USAGE, NULL, false); 960 + return vmw_bo; 758 961 } 759 962 760 963 ··· 779 1040 int ret; 780 1041 781 1042 args->pitch = args->width * ((args->bpp + 7) / 8); 782 - args->size = args->pitch * args->height; 1043 + args->size = ALIGN(args->pitch * args->height, PAGE_SIZE); 783 1044 784 - ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile, 785 - args->size, false, &args->handle, 786 - &vbo, NULL); 787 - if (unlikely(ret != 0)) 788 - goto out_no_bo; 1045 + ret = vmw_gem_object_create_with_handle(dev_priv, file_priv, 1046 + args->size, &args->handle, 1047 + &vbo); 789 1048 790 - vmw_bo_unreference(&vbo); 791 - out_no_bo: 792 1049 return ret; 793 1050 } 794 - 795 - 796 - /** 797 - * vmw_dumb_map_offset - Return the address space offset of a dumb buffer 798 - * 799 - * @file_priv: Pointer to a struct drm_file identifying the caller. 800 - * @dev: Pointer to the drm device. 801 - * @handle: Handle identifying the dumb buffer. 802 - * @offset: The address space offset returned. 803 - * Return: Zero on success, negative error code on failure. 804 - * 805 - * This is a driver callback for the core drm dumb_map_offset functionality. 
806 - */ 807 - int vmw_dumb_map_offset(struct drm_file *file_priv, 808 - struct drm_device *dev, uint32_t handle, 809 - uint64_t *offset) 810 - { 811 - struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 812 - struct vmw_buffer_object *out_buf; 813 - int ret; 814 - 815 - ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL); 816 - if (ret != 0) 817 - return -EINVAL; 818 - 819 - *offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node); 820 - vmw_bo_unreference(&out_buf); 821 - return 0; 822 - } 823 - 824 - 825 - /** 826 - * vmw_dumb_destroy - Destroy a dumb boffer 827 - * 828 - * @file_priv: Pointer to a struct drm_file identifying the caller. 829 - * @dev: Pointer to the drm device. 830 - * @handle: Handle identifying the dumb buffer. 831 - * Return: Zero on success, negative error code on failure. 832 - * 833 - * This is a driver callback for the core drm dumb_destroy functionality. 834 - */ 835 - int vmw_dumb_destroy(struct drm_file *file_priv, 836 - struct drm_device *dev, 837 - uint32_t handle) 838 - { 839 - return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, 840 - handle, TTM_REF_USAGE); 841 - } 842 - 843 1051 844 1052 /** 845 1053 * vmw_bo_swap_notify - swapout notify callback. ··· 796 1110 void vmw_bo_swap_notify(struct ttm_buffer_object *bo) 797 1111 { 798 1112 /* Is @bo embedded in a struct vmw_buffer_object? */ 799 - if (bo->destroy != vmw_bo_bo_free && 800 - bo->destroy != vmw_user_bo_destroy) 1113 + if (vmw_bo_is_vmw_bo(bo)) 801 1114 return; 802 1115 803 1116 /* Kill any cached kernel maps before swapout */ ··· 820 1135 struct vmw_buffer_object *vbo; 821 1136 822 1137 /* Make sure @bo is embedded in a struct vmw_buffer_object? 
*/ 823 - if (bo->destroy != vmw_bo_bo_free && 824 - bo->destroy != vmw_user_bo_destroy) 1138 + if (vmw_bo_is_vmw_bo(bo)) 825 1139 return; 826 1140 827 1141 vbo = container_of(bo, struct vmw_buffer_object, base); ··· 840 1156 */ 841 1157 if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB) 842 1158 vmw_resource_unbind_list(vbo); 1159 + } 1160 + 1161 + /** 1162 + * vmw_bo_is_vmw_bo - check if the buffer object is a &vmw_buffer_object 1163 + * @bo: buffer object to be checked 1164 + * 1165 + * Uses destroy function associated with the object to determine if this is 1166 + * a &vmw_buffer_object. 1167 + * 1168 + * Returns: 1169 + * true if the object is of &vmw_buffer_object type, false if not. 1170 + */ 1171 + bool vmw_bo_is_vmw_bo(struct ttm_buffer_object *bo) 1172 + { 1173 + if (bo->destroy == &vmw_bo_bo_free || 1174 + bo->destroy == &vmw_gem_destroy) 1175 + return true; 1176 + 1177 + return false; 843 1178 }
+2 -2
drivers/gpu/drm/vmwgfx/vmwgfx_context.c
··· 715 715 struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; 716 716 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 717 717 718 - return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE); 718 + return ttm_ref_object_base_unref(tfile, arg->cid); 719 719 } 720 720 721 721 static int vmw_context_define(struct drm_device *dev, void *data, ··· 754 754 755 755 tmp = vmw_resource_reference(&ctx->res); 756 756 ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT, 757 - &vmw_user_context_base_release, NULL); 757 + &vmw_user_context_base_release); 758 758 759 759 if (unlikely(ret != 0)) { 760 760 vmw_resource_unreference(&tmp);
+2 -6
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
··· 407 407 * for the new COTable. Initially pin the buffer object to make sure 408 408 * we can use tryreserve without failure. 409 409 */ 410 - buf = kzalloc(sizeof(*buf), GFP_KERNEL); 411 - if (!buf) 412 - return -ENOMEM; 413 - 414 - ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_placement, 415 - true, true, vmw_bo_bo_free); 410 + ret = vmw_bo_create(dev_priv, new_size, &vmw_mob_placement, 411 + true, true, vmw_bo_bo_free, &buf); 416 412 if (ret) { 417 413 DRM_ERROR("Failed initializing new cotable MOB.\n"); 418 414 return ret;
+9 -11
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
··· 34 34 #include <drm/drm_drv.h> 35 35 #include <drm/drm_ioctl.h> 36 36 #include <drm/drm_sysfs.h> 37 + #include <drm/drm_gem_ttm_helper.h> 37 38 #include <drm/ttm/ttm_bo_driver.h> 38 39 #include <drm/ttm/ttm_range_manager.h> 39 40 #include <drm/ttm/ttm_placement.h> ··· 163 162 static const struct drm_ioctl_desc vmw_ioctls[] = { 164 163 DRM_IOCTL_DEF_DRV(VMW_GET_PARAM, vmw_getparam_ioctl, 165 164 DRM_RENDER_ALLOW), 166 - DRM_IOCTL_DEF_DRV(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl, 165 + DRM_IOCTL_DEF_DRV(VMW_ALLOC_DMABUF, vmw_gem_object_create_ioctl, 167 166 DRM_RENDER_ALLOW), 168 167 DRM_IOCTL_DEF_DRV(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl, 169 168 DRM_RENDER_ALLOW), ··· 397 396 * immediately succeed. This is because we're the only 398 397 * user of the bo currently. 399 398 */ 400 - vbo = kzalloc(sizeof(*vbo), GFP_KERNEL); 401 - if (!vbo) 402 - return -ENOMEM; 403 - 404 - ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE, 405 - &vmw_sys_placement, false, true, 406 - &vmw_bo_bo_free); 399 + ret = vmw_bo_create(dev_priv, PAGE_SIZE, 400 + &vmw_sys_placement, false, true, 401 + &vmw_bo_bo_free, &vbo); 407 402 if (unlikely(ret != 0)) 408 403 return ret; 409 404 ··· 1575 1578 1576 1579 static const struct drm_driver driver = { 1577 1580 .driver_features = 1578 - DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC, 1581 + DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC | DRIVER_GEM, 1579 1582 .ioctls = vmw_ioctls, 1580 1583 .num_ioctls = ARRAY_SIZE(vmw_ioctls), 1581 1584 .master_set = vmw_master_set, ··· 1584 1587 .postclose = vmw_postclose, 1585 1588 1586 1589 .dumb_create = vmw_dumb_create, 1587 - .dumb_map_offset = vmw_dumb_map_offset, 1588 - .dumb_destroy = vmw_dumb_destroy, 1590 + .dumb_map_offset = drm_gem_ttm_dumb_map_offset, 1589 1591 1590 1592 .prime_fd_to_handle = vmw_prime_fd_to_handle, 1591 1593 .prime_handle_to_fd = vmw_prime_handle_to_fd, ··· 1637 1641 ret = drm_dev_register(&vmw->drm, 0); 1638 1642 if (ret) 1639 1643 goto out_unload; 1644 + 1645 + 
vmw_debugfs_gem_init(vmw); 1640 1646 1641 1647 return 0; 1642 1648 out_unload:
+43 -35
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
··· 361 361 dma_addr_t (*dma_address)(struct vmw_piter *); 362 362 }; 363 363 364 + 365 + struct vmw_ttm_tt { 366 + struct ttm_tt dma_ttm; 367 + struct vmw_private *dev_priv; 368 + int gmr_id; 369 + struct vmw_mob *mob; 370 + int mem_type; 371 + struct sg_table sgt; 372 + struct vmw_sg_table vsgt; 373 + bool mapped; 374 + bool bound; 375 + }; 376 + 364 377 /* 365 378 * enum vmw_display_unit_type - Describes the display unit 366 379 */ ··· 424 411 bool res_ht_initialized; 425 412 bool kernel; 426 413 struct vmw_fpriv *fp; 414 + struct drm_file *filp; 427 415 uint32_t *cmd_bounce; 428 416 uint32_t cmd_bounce_size; 429 417 struct vmw_buffer_object *cur_query_bo; ··· 657 643 #endif 658 644 }; 659 645 646 + static inline struct vmw_buffer_object *gem_to_vmw_bo(struct drm_gem_object *gobj) 647 + { 648 + return container_of((gobj), struct vmw_buffer_object, base.base); 649 + } 650 + 660 651 static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res) 661 652 { 662 653 return container_of(res, struct vmw_surface, res); ··· 784 765 bool no_backup); 785 766 extern bool vmw_resource_needs_backup(const struct vmw_resource *res); 786 767 extern int vmw_user_lookup_handle(struct vmw_private *dev_priv, 787 - struct ttm_object_file *tfile, 768 + struct drm_file *filp, 788 769 uint32_t handle, 789 770 struct vmw_surface **out_surf, 790 771 struct vmw_buffer_object **out_buf); ··· 850 831 /** 851 832 * Buffer object helper functions - vmwgfx_bo.c 852 833 */ 834 + extern bool vmw_bo_is_vmw_bo(struct ttm_buffer_object *bo); 853 835 extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv, 854 836 struct vmw_buffer_object *bo, 855 837 struct ttm_placement *placement, ··· 875 855 unsigned long size, 876 856 struct ttm_placement *placement, 877 857 struct ttm_buffer_object **p_bo); 858 + extern int vmw_bo_create(struct vmw_private *dev_priv, 859 + size_t size, struct ttm_placement *placement, 860 + bool interruptible, bool pin, 861 + void (*bo_free)(struct 
ttm_buffer_object *bo), 862 + struct vmw_buffer_object **p_bo); 878 863 extern int vmw_bo_init(struct vmw_private *dev_priv, 879 864 struct vmw_buffer_object *vmw_bo, 880 865 size_t size, struct ttm_placement *placement, 881 866 bool interruptible, bool pin, 882 867 void (*bo_free)(struct ttm_buffer_object *bo)); 883 - extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo, 884 - struct ttm_object_file *tfile); 885 - extern int vmw_user_bo_alloc(struct vmw_private *dev_priv, 886 - struct ttm_object_file *tfile, 887 - uint32_t size, 888 - bool shareable, 889 - uint32_t *handle, 890 - struct vmw_buffer_object **p_dma_buf, 891 - struct ttm_base_object **p_base); 892 - extern int vmw_user_bo_reference(struct ttm_object_file *tfile, 893 - struct vmw_buffer_object *dma_buf, 894 - uint32_t *handle); 895 - extern int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data, 896 - struct drm_file *file_priv); 897 868 extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data, 898 869 struct drm_file *file_priv); 899 870 extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data, 900 871 struct drm_file *file_priv); 901 - extern int vmw_user_bo_lookup(struct ttm_object_file *tfile, 902 - uint32_t id, struct vmw_buffer_object **out, 903 - struct ttm_base_object **base); 872 + extern int vmw_user_bo_lookup(struct drm_file *filp, 873 + uint32_t handle, 874 + struct vmw_buffer_object **out); 904 875 extern void vmw_bo_fence_single(struct ttm_buffer_object *bo, 905 876 struct vmw_fence_obj *fence); 906 877 extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo); ··· 900 889 struct ttm_resource *mem); 901 890 extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo); 902 891 extern struct vmw_buffer_object * 903 - vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle); 904 - 905 - /** 906 - * vmw_user_bo_noref_release - release a buffer object pointer looked up 907 - * without reference 908 - */ 909 - static inline void 
vmw_user_bo_noref_release(void) 910 - { 911 - ttm_base_object_noref_release(); 912 - } 892 + vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle); 913 893 914 894 /** 915 895 * vmw_bo_adjust_prio - Adjust the buffer object eviction priority ··· 950 948 if (--vbo->res_prios[prio] == 0) 951 949 vmw_bo_prio_adjust(vbo); 952 950 } 951 + 952 + /** 953 + * GEM related functionality - vmwgfx_gem.c 954 + */ 955 + extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv, 956 + struct drm_file *filp, 957 + uint32_t size, 958 + uint32_t *handle, 959 + struct vmw_buffer_object **p_vbo); 960 + extern int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data, 961 + struct drm_file *filp); 962 + extern void vmw_gem_destroy(struct ttm_buffer_object *bo); 963 + extern void vmw_debugfs_gem_init(struct vmw_private *vdev); 953 964 954 965 /** 955 966 * Misc Ioctl functionality - vmwgfx_ioctl.c ··· 1227 1212 int vmw_dumb_create(struct drm_file *file_priv, 1228 1213 struct drm_device *dev, 1229 1214 struct drm_mode_create_dumb *args); 1230 - 1231 - int vmw_dumb_map_offset(struct drm_file *file_priv, 1232 - struct drm_device *dev, uint32_t handle, 1233 - uint64_t *offset); 1234 - int vmw_dumb_destroy(struct drm_file *file_priv, 1235 - struct drm_device *dev, 1236 - uint32_t handle); 1237 1215 extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible); 1238 1216 extern void vmw_resource_unpin(struct vmw_resource *res); 1239 1217 extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);
+8 -10
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
··· 1171 1171 int ret; 1172 1172 1173 1173 vmw_validation_preload_bo(sw_context->ctx); 1174 - vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle); 1175 - if (IS_ERR(vmw_bo)) { 1174 + vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle); 1175 + if (IS_ERR_OR_NULL(vmw_bo)) { 1176 1176 VMW_DEBUG_USER("Could not find or use MOB buffer.\n"); 1177 1177 return PTR_ERR(vmw_bo); 1178 1178 } 1179 - 1180 1179 ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false); 1181 - vmw_user_bo_noref_release(); 1180 + ttm_bo_put(&vmw_bo->base); 1182 1181 if (unlikely(ret != 0)) 1183 1182 return ret; 1184 1183 ··· 1225 1226 int ret; 1226 1227 1227 1228 vmw_validation_preload_bo(sw_context->ctx); 1228 - vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle); 1229 - if (IS_ERR(vmw_bo)) { 1229 + vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle); 1230 + if (IS_ERR_OR_NULL(vmw_bo)) { 1230 1231 VMW_DEBUG_USER("Could not find or use GMR region.\n"); 1231 1232 return PTR_ERR(vmw_bo); 1232 1233 } 1233 - 1234 1234 ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false); 1235 - vmw_user_bo_noref_release(); 1235 + ttm_bo_put(&vmw_bo->base); 1236 1236 if (unlikely(ret != 0)) 1237 1237 return ret; 1238 1238 ··· 3867 3869 fence_rep.fd = -1; 3868 3870 } 3869 3871 3870 - ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle, 3871 - TTM_REF_USAGE); 3872 + ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle); 3872 3873 VMW_DEBUG_USER("Fence copy error. Syncing.\n"); 3873 3874 (void) vmw_fence_obj_wait(fence, false, false, 3874 3875 VMW_FENCE_WAIT_TIMEOUT); ··· 4096 4099 sw_context->kernel = true; 4097 4100 } 4098 4101 4102 + sw_context->filp = file_priv; 4099 4103 sw_context->fp = vmw_fpriv(file_priv); 4100 4104 INIT_LIST_HEAD(&sw_context->ctx_list); 4101 4105 sw_context->cur_query_bo = dev_priv->pinned_bo;
+3 -10
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
··· 394 394 struct vmw_buffer_object *vmw_bo; 395 395 int ret; 396 396 397 - vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL); 398 - if (!vmw_bo) { 399 - ret = -ENOMEM; 400 - goto err_unlock; 401 - } 402 - 403 - ret = vmw_bo_init(vmw_priv, vmw_bo, size, 397 + ret = vmw_bo_create(vmw_priv, size, 404 398 &vmw_sys_placement, 405 399 false, false, 406 - &vmw_bo_bo_free); 400 + &vmw_bo_bo_free, &vmw_bo); 407 401 if (unlikely(ret != 0)) 408 - goto err_unlock; /* init frees the buffer on failure */ 402 + return ret; 409 403 410 404 *out = vmw_bo; 411 405 412 - err_unlock: 413 406 return ret; 414 407 } 415 408
+6 -7
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
··· 596 596 * vmw_user_fence_base_release. 597 597 */ 598 598 tmp = vmw_fence_obj_reference(&ufence->fence); 599 + 599 600 ret = ttm_base_object_init(tfile, &ufence->base, false, 600 601 VMW_RES_FENCE, 601 - &vmw_user_fence_base_release, NULL); 602 + &vmw_user_fence_base_release); 602 603 603 604 604 605 if (unlikely(ret != 0)) { ··· 802 801 */ 803 802 804 803 if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF)) 805 - return ttm_ref_object_base_unref(tfile, arg->handle, 806 - TTM_REF_USAGE); 804 + return ttm_ref_object_base_unref(tfile, arg->handle); 807 805 return ret; 808 806 } 809 807 ··· 844 844 (struct drm_vmw_fence_arg *) data; 845 845 846 846 return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, 847 - arg->handle, 848 - TTM_REF_USAGE); 847 + arg->handle); 849 848 } 850 849 851 850 /** ··· 1090 1091 1091 1092 if (user_fence_rep != NULL) { 1092 1093 ret = ttm_ref_object_add(vmw_fp->tfile, base, 1093 - TTM_REF_USAGE, NULL, false); 1094 + NULL, false); 1094 1095 if (unlikely(ret != 0)) { 1095 1096 DRM_ERROR("Failed to reference a fence " 1096 1097 "object.\n"); ··· 1133 1134 return 0; 1134 1135 out_no_create: 1135 1136 if (user_fence_rep != NULL) 1136 - ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE); 1137 + ttm_ref_object_base_unref(tfile, handle); 1137 1138 out_no_ref_obj: 1138 1139 vmw_fence_obj_unreference(&fence); 1139 1140 return ret;
+294
drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 OR MIT */ 2 + /* 3 + * Copyright 2021 VMware, Inc. 4 + * 5 + * Permission is hereby granted, free of charge, to any person 6 + * obtaining a copy of this software and associated documentation 7 + * files (the "Software"), to deal in the Software without 8 + * restriction, including without limitation the rights to use, copy, 9 + * modify, merge, publish, distribute, sublicense, and/or sell copies 10 + * of the Software, and to permit persons to whom the Software is 11 + * furnished to do so, subject to the following conditions: 12 + * 13 + * The above copyright notice and this permission notice shall be 14 + * included in all copies or substantial portions of the Software. 15 + * 16 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 17 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 18 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 19 + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 20 + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 21 + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 22 + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 23 + * SOFTWARE. 24 + * 25 + */ 26 + 27 + #include "vmwgfx_drv.h" 28 + 29 + #include "drm/drm_prime.h" 30 + #include "drm/drm_gem_ttm_helper.h" 31 + 32 + /** 33 + * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct 34 + * vmw_buffer_object. 35 + * 36 + * @bo: Pointer to the TTM buffer object. 37 + * Return: Pointer to the struct vmw_buffer_object embedding the 38 + * TTM buffer object. 
39 + */ 40 + static struct vmw_buffer_object * 41 + vmw_buffer_object(struct ttm_buffer_object *bo) 42 + { 43 + return container_of(bo, struct vmw_buffer_object, base); 44 + } 45 + 46 + static void vmw_gem_object_free(struct drm_gem_object *gobj) 47 + { 48 + struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gobj); 49 + if (bo) { 50 + ttm_bo_put(bo); 51 + } 52 + } 53 + 54 + static int vmw_gem_object_open(struct drm_gem_object *obj, 55 + struct drm_file *file_priv) 56 + { 57 + return 0; 58 + } 59 + 60 + static void vmw_gem_object_close(struct drm_gem_object *obj, 61 + struct drm_file *file_priv) 62 + { 63 + } 64 + 65 + static int vmw_gem_pin_private(struct drm_gem_object *obj, bool do_pin) 66 + { 67 + struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj); 68 + struct vmw_buffer_object *vbo = vmw_buffer_object(bo); 69 + int ret; 70 + 71 + ret = ttm_bo_reserve(bo, false, false, NULL); 72 + if (unlikely(ret != 0)) 73 + goto err; 74 + 75 + vmw_bo_pin_reserved(vbo, do_pin); 76 + 77 + ttm_bo_unreserve(bo); 78 + 79 + err: 80 + return ret; 81 + } 82 + 83 + 84 + static int vmw_gem_object_pin(struct drm_gem_object *obj) 85 + { 86 + return vmw_gem_pin_private(obj, true); 87 + } 88 + 89 + static void vmw_gem_object_unpin(struct drm_gem_object *obj) 90 + { 91 + vmw_gem_pin_private(obj, false); 92 + } 93 + 94 + static struct sg_table *vmw_gem_object_get_sg_table(struct drm_gem_object *obj) 95 + { 96 + struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj); 97 + struct vmw_ttm_tt *vmw_tt = 98 + container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm); 99 + 100 + if (vmw_tt->vsgt.sgt) 101 + return vmw_tt->vsgt.sgt; 102 + 103 + return drm_prime_pages_to_sg(obj->dev, vmw_tt->dma_ttm.pages, vmw_tt->dma_ttm.num_pages); 104 + } 105 + 106 + 107 + static const struct drm_gem_object_funcs vmw_gem_object_funcs = { 108 + .free = vmw_gem_object_free, 109 + .open = vmw_gem_object_open, 110 + .close = vmw_gem_object_close, 111 + .print_info = drm_gem_ttm_print_info, 112 + .pin = vmw_gem_object_pin, 
113 + .unpin = vmw_gem_object_unpin, 114 + .get_sg_table = vmw_gem_object_get_sg_table, 115 + .vmap = drm_gem_ttm_vmap, 116 + .vunmap = drm_gem_ttm_vunmap, 117 + .mmap = drm_gem_ttm_mmap, 118 + }; 119 + 120 + /** 121 + * vmw_gem_destroy - vmw buffer object destructor 122 + * 123 + * @bo: Pointer to the embedded struct ttm_buffer_object 124 + */ 125 + void vmw_gem_destroy(struct ttm_buffer_object *bo) 126 + { 127 + struct vmw_buffer_object *vbo = vmw_buffer_object(bo); 128 + 129 + WARN_ON(vbo->dirty); 130 + WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree)); 131 + vmw_bo_unmap(vbo); 132 + drm_gem_object_release(&vbo->base.base); 133 + kfree(vbo); 134 + } 135 + 136 + int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv, 137 + struct drm_file *filp, 138 + uint32_t size, 139 + uint32_t *handle, 140 + struct vmw_buffer_object **p_vbo) 141 + { 142 + int ret; 143 + 144 + ret = vmw_bo_create(dev_priv, size, 145 + (dev_priv->has_mob) ? 146 + &vmw_sys_placement : 147 + &vmw_vram_sys_placement, 148 + true, false, &vmw_gem_destroy, p_vbo); 149 + 150 + (*p_vbo)->base.base.funcs = &vmw_gem_object_funcs; 151 + if (ret != 0) 152 + goto out_no_bo; 153 + 154 + ret = drm_gem_handle_create(filp, &(*p_vbo)->base.base, handle); 155 + /* drop reference from allocate - handle holds it now */ 156 + drm_gem_object_put(&(*p_vbo)->base.base); 157 + out_no_bo: 158 + return ret; 159 + } 160 + 161 + 162 + int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data, 163 + struct drm_file *filp) 164 + { 165 + struct vmw_private *dev_priv = vmw_priv(dev); 166 + union drm_vmw_alloc_dmabuf_arg *arg = 167 + (union drm_vmw_alloc_dmabuf_arg *)data; 168 + struct drm_vmw_alloc_dmabuf_req *req = &arg->req; 169 + struct drm_vmw_dmabuf_rep *rep = &arg->rep; 170 + struct vmw_buffer_object *vbo; 171 + uint32_t handle; 172 + int ret; 173 + 174 + ret = vmw_gem_object_create_with_handle(dev_priv, filp, 175 + req->size, &handle, &vbo); 176 + if (ret) 177 + goto out_no_bo; 178 + 179 + rep->handle = 
handle; 180 + rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node); 181 + rep->cur_gmr_id = handle; 182 + rep->cur_gmr_offset = 0; 183 + out_no_bo: 184 + return ret; 185 + } 186 + 187 + #if defined(CONFIG_DEBUG_FS) 188 + 189 + static void vmw_bo_print_info(int id, struct vmw_buffer_object *bo, struct seq_file *m) 190 + { 191 + const char *placement; 192 + const char *type; 193 + 194 + switch (bo->base.resource->mem_type) { 195 + case TTM_PL_SYSTEM: 196 + placement = " CPU"; 197 + break; 198 + case VMW_PL_GMR: 199 + placement = " GMR"; 200 + break; 201 + case VMW_PL_MOB: 202 + placement = " MOB"; 203 + break; 204 + case VMW_PL_SYSTEM: 205 + placement = "VCPU"; 206 + break; 207 + case TTM_PL_VRAM: 208 + placement = "VRAM"; 209 + break; 210 + default: 211 + placement = "None"; 212 + break; 213 + } 214 + 215 + switch (bo->base.type) { 216 + case ttm_bo_type_device: 217 + type = "device"; 218 + break; 219 + case ttm_bo_type_kernel: 220 + type = "kernel"; 221 + break; 222 + case ttm_bo_type_sg: 223 + type = "sg "; 224 + break; 225 + default: 226 + type = "none "; 227 + break; 228 + } 229 + 230 + seq_printf(m, "\t\t0x%08x: %12ld bytes %s, type = %s", 231 + id, bo->base.base.size, placement, type); 232 + seq_printf(m, ", priority = %u, pin_count = %u, GEM refs = %d, TTM refs = %d", 233 + bo->base.priority, 234 + bo->base.pin_count, 235 + kref_read(&bo->base.base.refcount), 236 + kref_read(&bo->base.kref)); 237 + seq_puts(m, "\n"); 238 + } 239 + 240 + static int vmw_debugfs_gem_info_show(struct seq_file *m, void *unused) 241 + { 242 + struct vmw_private *vdev = (struct vmw_private *)m->private; 243 + struct drm_device *dev = &vdev->drm; 244 + struct drm_file *file; 245 + int r; 246 + 247 + r = mutex_lock_interruptible(&dev->filelist_mutex); 248 + if (r) 249 + return r; 250 + 251 + list_for_each_entry(file, &dev->filelist, lhead) { 252 + struct task_struct *task; 253 + struct drm_gem_object *gobj; 254 + int id; 255 + 256 + /* 257 + * Although we have a valid 
reference on file->pid, that does 258 + * not guarantee that the task_struct who called get_pid() is 259 + * still alive (e.g. get_pid(current) => fork() => exit()). 260 + * Therefore, we need to protect this ->comm access using RCU. 261 + */ 262 + rcu_read_lock(); 263 + task = pid_task(file->pid, PIDTYPE_PID); 264 + seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid), 265 + task ? task->comm : "<unknown>"); 266 + rcu_read_unlock(); 267 + 268 + spin_lock(&file->table_lock); 269 + idr_for_each_entry(&file->object_idr, gobj, id) { 270 + struct vmw_buffer_object *bo = gem_to_vmw_bo(gobj); 271 + 272 + vmw_bo_print_info(id, bo, m); 273 + } 274 + spin_unlock(&file->table_lock); 275 + } 276 + 277 + mutex_unlock(&dev->filelist_mutex); 278 + return 0; 279 + } 280 + 281 + DEFINE_SHOW_ATTRIBUTE(vmw_debugfs_gem_info); 282 + 283 + #endif 284 + 285 + void vmw_debugfs_gem_init(struct vmw_private *vdev) 286 + { 287 + #if defined(CONFIG_DEBUG_FS) 288 + struct drm_minor *minor = vdev->drm.primary; 289 + struct dentry *root = minor->debugfs_root; 290 + 291 + debugfs_create_file("vmwgfx_gem_info", 0444, root, vdev, 292 + &vmw_debugfs_gem_info_fops); 293 + #endif 294 + }
+3 -29
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
··· 843 843 844 844 drm_framebuffer_cleanup(framebuffer); 845 845 vmw_surface_unreference(&vfbs->surface); 846 - if (vfbs->base.user_obj) 847 - ttm_base_object_unref(&vfbs->base.user_obj); 848 846 849 847 kfree(vfbs); 850 848 } ··· 994 996 995 997 drm_framebuffer_cleanup(framebuffer); 996 998 vmw_bo_unreference(&vfbd->buffer); 997 - if (vfbd->base.user_obj) 998 - ttm_base_object_unref(&vfbd->base.user_obj); 999 999 1000 1000 kfree(vfbd); 1001 1001 } ··· 1247 1251 goto out_err1; 1248 1252 } 1249 1253 1254 + vfbd->base.base.obj[0] = &bo->base.base; 1250 1255 drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd); 1251 1256 vfbd->base.bo = true; 1252 1257 vfbd->buffer = vmw_bo_reference(bo); ··· 1365 1368 const struct drm_mode_fb_cmd2 *mode_cmd) 1366 1369 { 1367 1370 struct vmw_private *dev_priv = vmw_priv(dev); 1368 - struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 1369 1371 struct vmw_framebuffer *vfb = NULL; 1370 1372 struct vmw_surface *surface = NULL; 1371 1373 struct vmw_buffer_object *bo = NULL; 1372 - struct ttm_base_object *user_obj; 1373 1374 int ret; 1374 1375 1375 - /* 1376 - * Take a reference on the user object of the resource 1377 - * backing the kms fb. This ensures that user-space handle 1378 - * lookups on that resource will always work as long as 1379 - * it's registered with a kms framebuffer. This is important, 1380 - * since vmw_execbuf_process identifies resources in the 1381 - * command stream using user-space handles. 1382 - */ 1383 - 1384 - user_obj = ttm_base_object_lookup(tfile, mode_cmd->handles[0]); 1385 - if (unlikely(user_obj == NULL)) { 1386 - DRM_ERROR("Could not locate requested kms frame buffer.\n"); 1387 - return ERR_PTR(-ENOENT); 1388 - } 1389 - 1390 - /** 1391 - * End conditioned code. 
1392 - */ 1393 - 1394 1376 /* returns either a bo or surface */ 1395 - ret = vmw_user_lookup_handle(dev_priv, tfile, 1377 + ret = vmw_user_lookup_handle(dev_priv, file_priv, 1396 1378 mode_cmd->handles[0], 1397 1379 &surface, &bo); 1398 1380 if (ret) ··· 1404 1428 1405 1429 if (ret) { 1406 1430 DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret); 1407 - ttm_base_object_unref(&user_obj); 1408 1431 return ERR_PTR(ret); 1409 - } else 1410 - vfb->user_obj = user_obj; 1432 + } 1411 1433 1412 1434 return &vfb->base; 1413 1435 }
-1
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
··· 219 219 int (*pin)(struct vmw_framebuffer *fb); 220 220 int (*unpin)(struct vmw_framebuffer *fb); 221 221 bool bo; 222 - struct ttm_base_object *user_obj; 223 222 uint32_t user_handle; 224 223 }; 225 224
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
··· 451 451 goto out_unlock; 452 452 } 453 453 454 - ret = vmw_user_bo_lookup(tfile, arg->handle, &buf, NULL); 454 + ret = vmw_user_bo_lookup(file_priv, arg->handle, &buf); 455 455 if (ret) 456 456 goto out_unlock; 457 457
-1
drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
··· 85 85 int *prime_fd) 86 86 { 87 87 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 88 - 89 88 return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd); 90 89 }
+7 -10
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
··· 320 320 * The pointer this pointed at by out_surf and out_buf needs to be null. 321 321 */ 322 322 int vmw_user_lookup_handle(struct vmw_private *dev_priv, 323 - struct ttm_object_file *tfile, 323 + struct drm_file *filp, 324 324 uint32_t handle, 325 325 struct vmw_surface **out_surf, 326 326 struct vmw_buffer_object **out_buf) 327 327 { 328 + struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile; 328 329 struct vmw_resource *res; 329 330 int ret; 330 331 ··· 340 339 } 341 340 342 341 *out_surf = NULL; 343 - ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL); 342 + ret = vmw_user_bo_lookup(filp, handle, out_buf); 344 343 return ret; 345 344 } 346 345 ··· 363 362 return 0; 364 363 } 365 364 366 - backup = kzalloc(sizeof(*backup), GFP_KERNEL); 367 - if (unlikely(!backup)) 368 - return -ENOMEM; 369 - 370 - ret = vmw_bo_init(res->dev_priv, backup, res->backup_size, 371 - res->func->backup_placement, 372 - interruptible, false, 373 - &vmw_bo_bo_free); 365 + ret = vmw_bo_create(res->dev_priv, res->backup_size, 366 + res->func->backup_placement, 367 + interruptible, false, 368 + &vmw_bo_bo_free, &backup); 374 369 if (unlikely(ret != 0)) 375 370 goto out_no_bo; 376 371
+3 -7
drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
··· 442 442 vps->bo_size = 0; 443 443 } 444 444 445 - vps->bo = kzalloc(sizeof(*vps->bo), GFP_KERNEL); 446 - if (!vps->bo) 447 - return -ENOMEM; 448 - 449 445 vmw_svga_enable(dev_priv); 450 446 451 447 /* After we have alloced the backing store might not be able to 452 448 * resume the overlays, this is preferred to failing to alloc. 453 449 */ 454 450 vmw_overlay_pause_all(dev_priv); 455 - ret = vmw_bo_init(dev_priv, vps->bo, size, 456 - &vmw_vram_placement, 457 - false, true, &vmw_bo_bo_free); 451 + ret = vmw_bo_create(dev_priv, size, 452 + &vmw_vram_placement, 453 + false, true, &vmw_bo_bo_free, &vps->bo); 458 454 vmw_overlay_resume_all(dev_priv); 459 455 if (ret) { 460 456 vps->bo = NULL; /* vmw_bo_init frees on error */
+5 -12
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
··· 676 676 struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data; 677 677 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 678 678 679 - return ttm_ref_object_base_unref(tfile, arg->handle, 680 - TTM_REF_USAGE); 679 + return ttm_ref_object_base_unref(tfile, arg->handle); 681 680 } 682 681 683 682 static int vmw_user_shader_alloc(struct vmw_private *dev_priv, ··· 717 718 tmp = vmw_resource_reference(res); 718 719 ret = ttm_base_object_init(tfile, &ushader->base, false, 719 720 VMW_RES_SHADER, 720 - &vmw_user_shader_base_release, NULL); 721 + &vmw_user_shader_base_release); 721 722 722 723 if (unlikely(ret != 0)) { 723 724 vmw_resource_unreference(&tmp); ··· 776 777 int ret; 777 778 778 779 if (buffer_handle != SVGA3D_INVALID_ID) { 779 - ret = vmw_user_bo_lookup(tfile, buffer_handle, 780 - &buffer, NULL); 780 + ret = vmw_user_bo_lookup(file_priv, buffer_handle, &buffer); 781 781 if (unlikely(ret != 0)) { 782 782 VMW_DEBUG_USER("Couldn't find buffer for shader creation.\n"); 783 783 return ret; ··· 892 894 if (!vmw_shader_id_ok(user_key, shader_type)) 893 895 return -EINVAL; 894 896 895 - /* Allocate and pin a DMA buffer */ 896 - buf = kzalloc(sizeof(*buf), GFP_KERNEL); 897 - if (unlikely(!buf)) 898 - return -ENOMEM; 899 - 900 - ret = vmw_bo_init(dev_priv, buf, size, &vmw_sys_placement, 901 - true, true, vmw_bo_bo_free); 897 + ret = vmw_bo_create(dev_priv, size, &vmw_sys_placement, 898 + true, true, vmw_bo_bo_free, &buf); 902 899 if (unlikely(ret != 0)) 903 900 goto out; 904 901
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
··· 172 172 tmp = vmw_resource_reference(res); 173 173 ret = ttm_base_object_init(tfile, &usimple->base, false, 174 174 func->ttm_res_type, 175 - &vmw_simple_resource_base_release, NULL); 175 + &vmw_simple_resource_base_release); 176 176 177 177 if (ret) { 178 178 vmw_resource_unreference(&tmp);
+29 -39
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
··· 46 46 * @base: The TTM base object handling user-space visibility. 47 47 * @srf: The surface metadata. 48 48 * @master: Master of the creating client. Used for security check. 49 - * @backup_base: The TTM base object of the backup buffer. 50 49 */ 51 50 struct vmw_user_surface { 52 51 struct ttm_prime_object prime; 53 52 struct vmw_surface srf; 54 53 struct drm_master *master; 55 - struct ttm_base_object *backup_base; 56 54 }; 57 55 58 56 /** ··· 684 686 struct vmw_resource *res = &user_srf->srf.res; 685 687 686 688 *p_base = NULL; 687 - if (user_srf->backup_base) 688 - ttm_base_object_unref(&user_srf->backup_base); 689 689 vmw_resource_unreference(&res); 690 690 } 691 691 ··· 701 705 struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data; 702 706 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 703 707 704 - return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE); 708 + return ttm_ref_object_base_unref(tfile, arg->sid); 705 709 } 706 710 707 711 /** ··· 847 851 if (dev_priv->has_mob && req->shareable) { 848 852 uint32_t backup_handle; 849 853 850 - ret = vmw_user_bo_alloc(dev_priv, tfile, 851 - res->backup_size, 852 - true, 853 - &backup_handle, 854 - &res->backup, 855 - &user_srf->backup_base); 854 + ret = vmw_gem_object_create_with_handle(dev_priv, 855 + file_priv, 856 + res->backup_size, 857 + &backup_handle, 858 + &res->backup); 856 859 if (unlikely(ret != 0)) { 857 860 vmw_resource_unreference(&res); 858 861 goto out_unlock; 859 862 } 863 + vmw_bo_reference(res->backup); 860 864 } 861 865 862 866 tmp = vmw_resource_reference(&srf->res); 863 867 ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, 864 868 req->shareable, VMW_RES_SURFACE, 865 - &vmw_user_surface_base_release, NULL); 869 + &vmw_user_surface_base_release); 866 870 867 871 if (unlikely(ret != 0)) { 868 872 vmw_resource_unreference(&tmp); ··· 917 921 VMW_DEBUG_USER("Referenced object is not a surface.\n"); 918 922 goto out_bad_resource; 919 
923 } 920 - 921 924 if (handle_type != DRM_VMW_HANDLE_PRIME) { 922 925 bool require_exist = false; 923 926 ··· 941 946 if (unlikely(drm_is_render_client(file_priv))) 942 947 require_exist = true; 943 948 944 - ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, 945 - require_exist); 949 + ret = ttm_ref_object_add(tfile, base, NULL, require_exist); 946 950 if (unlikely(ret != 0)) { 947 951 DRM_ERROR("Could not add a reference to a surface.\n"); 948 952 goto out_bad_resource; ··· 955 961 ttm_base_object_unref(&base); 956 962 out_no_lookup: 957 963 if (handle_type == DRM_VMW_HANDLE_PRIME) 958 - (void) ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE); 964 + (void) ttm_ref_object_base_unref(tfile, handle); 959 965 960 966 return ret; 961 967 } ··· 1005 1011 if (unlikely(ret != 0)) { 1006 1012 VMW_DEBUG_USER("copy_to_user failed %p %u\n", user_sizes, 1007 1013 srf->metadata.num_sizes); 1008 - ttm_ref_object_base_unref(tfile, base->handle, TTM_REF_USAGE); 1014 + ttm_ref_object_base_unref(tfile, base->handle); 1009 1015 ret = -EFAULT; 1010 1016 } 1011 1017 ··· 1492 1498 res = &user_srf->srf.res; 1493 1499 1494 1500 if (req->base.buffer_handle != SVGA3D_INVALID_ID) { 1495 - ret = vmw_user_bo_lookup(tfile, req->base.buffer_handle, 1496 - &res->backup, 1497 - &user_srf->backup_base); 1501 + ret = vmw_user_bo_lookup(file_priv, req->base.buffer_handle, 1502 + &res->backup); 1498 1503 if (ret == 0) { 1499 1504 if (res->backup->base.base.size < res->backup_size) { 1500 1505 VMW_DEBUG_USER("Surface backup buffer too small.\n"); ··· 1506 1513 } 1507 1514 } else if (req->base.drm_surface_flags & 1508 1515 (drm_vmw_surface_flag_create_buffer | 1509 - drm_vmw_surface_flag_coherent)) 1510 - ret = vmw_user_bo_alloc(dev_priv, tfile, 1511 - res->backup_size, 1512 - req->base.drm_surface_flags & 1513 - drm_vmw_surface_flag_shareable, 1514 - &backup_handle, 1515 - &res->backup, 1516 - &user_srf->backup_base); 1516 + drm_vmw_surface_flag_coherent)) { 1517 + ret = 
vmw_gem_object_create_with_handle(dev_priv, file_priv, 1518 + res->backup_size, 1519 + &backup_handle, 1520 + &res->backup); 1521 + if (ret == 0) 1522 + vmw_bo_reference(res->backup); 1523 + 1524 + } 1517 1525 1518 1526 if (unlikely(ret != 0)) { 1519 1527 vmw_resource_unreference(&res); ··· 1546 1552 req->base.drm_surface_flags & 1547 1553 drm_vmw_surface_flag_shareable, 1548 1554 VMW_RES_SURFACE, 1549 - &vmw_user_surface_base_release, NULL); 1555 + &vmw_user_surface_base_release); 1550 1556 1551 1557 if (unlikely(ret != 0)) { 1552 1558 vmw_resource_unreference(&tmp); ··· 1566 1572 rep->buffer_size = 0; 1567 1573 rep->buffer_handle = SVGA3D_INVALID_ID; 1568 1574 } 1569 - 1570 1575 vmw_resource_unreference(&res); 1571 1576 1572 1577 out_unlock: ··· 1588 1595 struct drm_file *file_priv) 1589 1596 { 1590 1597 struct vmw_private *dev_priv = vmw_priv(dev); 1591 - struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 1592 1598 struct vmw_surface *srf; 1593 1599 struct vmw_user_surface *user_srf; 1594 1600 struct vmw_surface_metadata *metadata; 1595 1601 struct ttm_base_object *base; 1596 - uint32_t backup_handle; 1602 + u32 backup_handle; 1597 1603 int ret; 1598 1604 1599 1605 ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid, ··· 1609 1617 metadata = &srf->metadata; 1610 1618 1611 1619 mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */ 1612 - ret = vmw_user_bo_reference(tfile, srf->res.backup, &backup_handle); 1620 + ret = drm_gem_handle_create(file_priv, &srf->res.backup->base.base, 1621 + &backup_handle); 1613 1622 mutex_unlock(&dev_priv->cmdbuf_mutex); 1614 - 1615 - if (unlikely(ret != 0)) { 1616 - DRM_ERROR("Could not add a reference to a GB surface " 1617 - "backup buffer.\n"); 1618 - (void) ttm_ref_object_base_unref(tfile, base->handle, 1619 - TTM_REF_USAGE); 1623 + if (ret != 0) { 1624 + drm_err(dev, "Wasn't able to create a backing handle for surface sid = %u.\n", 1625 + req->sid); 1620 1626 goto out_bad_resource; 1621 1627 } 
1622 1628
+2 -13
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
··· 167 167 .busy_placement = &sys_placement_flags 168 168 }; 169 169 170 - struct vmw_ttm_tt { 171 - struct ttm_tt dma_ttm; 172 - struct vmw_private *dev_priv; 173 - int gmr_id; 174 - struct vmw_mob *mob; 175 - int mem_type; 176 - struct sg_table sgt; 177 - struct vmw_sg_table vsgt; 178 - bool mapped; 179 - bool bound; 180 - }; 181 - 182 170 const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt); 183 171 184 172 /** ··· 299 311 vsgt->pages = vmw_tt->dma_ttm.pages; 300 312 vsgt->num_pages = vmw_tt->dma_ttm.num_pages; 301 313 vsgt->addrs = vmw_tt->dma_ttm.dma_address; 302 - vsgt->sgt = &vmw_tt->sgt; 314 + vsgt->sgt = NULL; 303 315 304 316 switch (dev_priv->map_mode) { 305 317 case vmw_dma_map_bind: 306 318 case vmw_dma_map_populate: 319 + vsgt->sgt = &vmw_tt->sgt; 307 320 ret = sg_alloc_table_from_pages_segment( 308 321 &vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0, 309 322 (unsigned long)vsgt->num_pages << PAGE_SHIFT,
+24 -15
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
··· 27 27 28 28 #include "vmwgfx_drv.h" 29 29 30 - static struct ttm_buffer_object *vmw_bo_vm_lookup(struct ttm_device *bdev, 31 - unsigned long offset, 32 - unsigned long pages) 30 + static int vmw_bo_vm_lookup(struct ttm_device *bdev, 31 + struct drm_file *filp, 32 + unsigned long offset, 33 + unsigned long pages, 34 + struct ttm_buffer_object **p_bo) 33 35 { 34 36 struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev); 35 37 struct drm_device *drm = &dev_priv->drm; 36 38 struct drm_vma_offset_node *node; 37 - struct ttm_buffer_object *bo = NULL; 39 + int ret; 40 + 41 + *p_bo = NULL; 38 42 39 43 drm_vma_offset_lock_lookup(bdev->vma_manager); 40 44 41 45 node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages); 42 46 if (likely(node)) { 43 - bo = container_of(node, struct ttm_buffer_object, 47 + *p_bo = container_of(node, struct ttm_buffer_object, 44 48 base.vma_node); 45 - bo = ttm_bo_get_unless_zero(bo); 49 + *p_bo = ttm_bo_get_unless_zero(*p_bo); 46 50 } 47 51 48 52 drm_vma_offset_unlock_lookup(bdev->vma_manager); 49 53 50 - if (!bo) 54 + if (!*p_bo) { 51 55 drm_err(drm, "Could not find buffer object to map\n"); 56 + return -EINVAL; 57 + } 52 58 53 - return bo; 59 + if (!drm_vma_node_is_allowed(node, filp)) { 60 + ret = -EACCES; 61 + goto out_no_access; 62 + } 63 + 64 + return 0; 65 + out_no_access: 66 + ttm_bo_put(*p_bo); 67 + return ret; 54 68 } 55 69 56 70 int vmw_mmap(struct file *filp, struct vm_area_struct *vma) ··· 78 64 }; 79 65 struct drm_file *file_priv = filp->private_data; 80 66 struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev); 81 - struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 82 67 struct ttm_device *bdev = &dev_priv->bdev; 83 68 struct ttm_buffer_object *bo; 84 69 int ret; ··· 85 72 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START)) 86 73 return -EINVAL; 87 74 88 - bo = vmw_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma)); 89 - if (unlikely(!bo)) 90 - return -EINVAL; 91 - 92 - 
ret = vmw_user_bo_verify_access(bo, tfile); 75 + ret = vmw_bo_vm_lookup(bdev, file_priv, vma->vm_pgoff, vma_pages(vma), &bo); 93 76 if (unlikely(ret != 0)) 94 - goto out_unref; 77 + return ret; 95 78 96 79 ret = ttm_bo_mmap_obj(vma, bo); 97 80 if (unlikely(ret != 0))
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_va.c
··· 117 117 struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data; 118 118 119 119 return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, 120 - arg->stream_id, TTM_REF_USAGE); 120 + arg->stream_id); 121 121 } 122 122 123 123 /**