Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

drm/vmwgfx: Make vmwgfx dma buffers prime aware

Should we need to share dma buffers using prime, let's make them prime
aware.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Jakob Bornecrantz <jakob@vmware.com>

+25 -20
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -35,7 +35,7 @@
 #define VMW_RES_EVICT_ERR_COUNT 10
 
 struct vmw_user_dma_buffer {
-	struct ttm_base_object base;
+	struct ttm_prime_object prime;
 	struct vmw_dma_buffer dma;
 };
 
@@ -387,7 +387,7 @@
 {
 	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
 
-	ttm_base_object_kfree(vmw_user_bo, base);
+	ttm_prime_object_kfree(vmw_user_bo, prime);
 }
 
 static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
@@ -401,7 +401,8 @@
 	if (unlikely(base == NULL))
 		return;
 
-	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
+	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
+				   prime.base);
 	bo = &vmw_user_bo->dma.base;
 	ttm_bo_unref(&bo);
 }
@@ -443,18 +442,19 @@
 		return ret;
 
 	tmp = ttm_bo_reference(&user_bo->dma.base);
-	ret = ttm_base_object_init(tfile,
-				   &user_bo->base,
-				   shareable,
-				   ttm_buffer_type,
-				   &vmw_user_dmabuf_release, NULL);
+	ret = ttm_prime_object_init(tfile,
+				    size,
+				    &user_bo->prime,
+				    shareable,
+				    ttm_buffer_type,
+				    &vmw_user_dmabuf_release, NULL);
 	if (unlikely(ret != 0)) {
 		ttm_bo_unref(&tmp);
 		goto out_no_base_object;
 	}
 
 	*p_dma_buf = &user_bo->dma;
-	*handle = user_bo->base.hash.key;
+	*handle = user_bo->prime.base.hash.key;
 
 out_no_base_object:
 	return ret;
@@ -477,8 +475,8 @@
 		return -EPERM;
 
 	vmw_user_bo = vmw_user_dma_buffer(bo);
-	return (vmw_user_bo->base.tfile == tfile ||
-		vmw_user_bo->base.shareable) ? 0 : -EPERM;
+	return (vmw_user_bo->prime.base.tfile == tfile ||
+		vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
 }
 
 int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
@@ -540,14 +538,15 @@
 		return -ESRCH;
 	}
 
-	if (unlikely(base->object_type != ttm_buffer_type)) {
+	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
 		ttm_base_object_unref(&base);
 		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
 		       (unsigned long)handle);
 		return -EINVAL;
 	}
 
-	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
+	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
+				   prime.base);
 	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
 	ttm_base_object_unref(&base);
 	*out = &vmw_user_bo->dma;
@@ -565,6 +562,7 @@
 		return -EINVAL;
 
 	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
-	return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
+	return ttm_ref_object_add(tfile, &user_bo->prime.base,
+				  TTM_REF_USAGE, NULL);
 }
 
@@ -811,15 +807,16 @@
 		goto out_no_dmabuf;
 
 	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
-	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
-				   &vmw_user_bo->base,
-				   false,
-				   ttm_buffer_type,
-				   &vmw_user_dmabuf_release, NULL);
+	ret = ttm_prime_object_init(vmw_fpriv(file_priv)->tfile,
+				    args->size,
+				    &vmw_user_bo->prime,
+				    false,
+				    ttm_buffer_type,
+				    &vmw_user_dmabuf_release, NULL);
 	if (unlikely(ret != 0))
 		goto out_no_base_object;
 
-	args->handle = vmw_user_bo->base.hash.key;
+	args->handle = vmw_user_bo->prime.base.hash.key;
 
 out_no_base_object:
 	ttm_bo_unref(&tmp);