Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/ttm: remove use_ticket parameter from ttm_bo_reserve

Not used any more.

Reviewed-by: Sinclair Yeh <syeh@vmware.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Christian König and committed by Alex Deucher
dfd5e50e 5ee7b41a

+55 -62
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
··· 71 71 { 72 72 int r; 73 73 74 - r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0); 74 + r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL); 75 75 if (unlikely(r != 0)) { 76 76 if (r != -ERESTARTSYS) 77 77 dev_err(bo->adev->dev, "%p reserve failed\n", bo);
+1 -1
drivers/gpu/drm/ast/ast_drv.h
··· 367 367 { 368 368 int ret; 369 369 370 - ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL); 370 + ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL); 371 371 if (ret) { 372 372 if (ret != -ERESTARTSYS && ret != -EBUSY) 373 373 DRM_ERROR("reserve failed %p\n", bo);
+1 -1
drivers/gpu/drm/bochs/bochs_fbdev.c
··· 82 82 83 83 bo = gem_to_bochs_bo(gobj); 84 84 85 - ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL); 85 + ret = ttm_bo_reserve(&bo->bo, true, false, NULL); 86 86 if (ret) 87 87 return ret; 88 88
+2 -2
drivers/gpu/drm/bochs/bochs_kms.c
··· 43 43 if (old_fb) { 44 44 bochs_fb = to_bochs_framebuffer(old_fb); 45 45 bo = gem_to_bochs_bo(bochs_fb->obj); 46 - ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL); 46 + ret = ttm_bo_reserve(&bo->bo, true, false, NULL); 47 47 if (ret) { 48 48 DRM_ERROR("failed to reserve old_fb bo\n"); 49 49 } else { ··· 57 57 58 58 bochs_fb = to_bochs_framebuffer(crtc->primary->fb); 59 59 bo = gem_to_bochs_bo(bochs_fb->obj); 60 - ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL); 60 + ret = ttm_bo_reserve(&bo->bo, true, false, NULL); 61 61 if (ret) 62 62 return ret; 63 63
+1 -1
drivers/gpu/drm/cirrus/cirrus_drv.h
··· 245 245 { 246 246 int ret; 247 247 248 - ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL); 248 + ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL); 249 249 if (ret) { 250 250 if (ret != -ERESTARTSYS && ret != -EBUSY) 251 251 DRM_ERROR("reserve failed %p\n", bo);
+1 -1
drivers/gpu/drm/mgag200/mgag200_drv.h
··· 281 281 { 282 282 int ret; 283 283 284 - ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL); 284 + ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL); 285 285 if (ret) { 286 286 if (ret != -ERESTARTSYS && ret != -EBUSY) 287 287 DRM_ERROR("reserve failed %p\n", bo);
+3 -3
drivers/gpu/drm/nouveau/nouveau_bo.c
··· 312 312 bool force = false, evict = false; 313 313 int ret; 314 314 315 - ret = ttm_bo_reserve(bo, false, false, false, NULL); 315 + ret = ttm_bo_reserve(bo, false, false, NULL); 316 316 if (ret) 317 317 return ret; 318 318 ··· 385 385 struct ttm_buffer_object *bo = &nvbo->bo; 386 386 int ret, ref; 387 387 388 - ret = ttm_bo_reserve(bo, false, false, false, NULL); 388 + ret = ttm_bo_reserve(bo, false, false, NULL); 389 389 if (ret) 390 390 return ret; 391 391 ··· 420 420 { 421 421 int ret; 422 422 423 - ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL); 423 + ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL); 424 424 if (ret) 425 425 return ret; 426 426
+2 -2
drivers/gpu/drm/nouveau/nouveau_display.c
··· 739 739 } 740 740 741 741 mutex_lock(&cli->mutex); 742 - ret = ttm_bo_reserve(&new_bo->bo, true, false, false, NULL); 742 + ret = ttm_bo_reserve(&new_bo->bo, true, false, NULL); 743 743 if (ret) 744 744 goto fail_unpin; 745 745 ··· 753 753 if (new_bo != old_bo) { 754 754 ttm_bo_unreserve(&new_bo->bo); 755 755 756 - ret = ttm_bo_reserve(&old_bo->bo, true, false, false, NULL); 756 + ret = ttm_bo_reserve(&old_bo->bo, true, false, NULL); 757 757 if (ret) 758 758 goto fail_unpin; 759 759 }
+3 -3
drivers/gpu/drm/nouveau/nouveau_gem.c
··· 71 71 if (!cli->vm) 72 72 return 0; 73 73 74 - ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL); 74 + ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL); 75 75 if (ret) 76 76 return ret; 77 77 ··· 156 156 if (!cli->vm) 157 157 return; 158 158 159 - ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL); 159 + ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL); 160 160 if (ret) 161 161 return; 162 162 ··· 409 409 break; 410 410 } 411 411 412 - ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket); 412 + ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket); 413 413 if (ret) { 414 414 list_splice_tail_init(&vram_list, &op->list); 415 415 list_splice_tail_init(&gart_list, &op->list);
+2 -2
drivers/gpu/drm/qxl/qxl_object.h
··· 31 31 { 32 32 int r; 33 33 34 - r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL); 34 + r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL); 35 35 if (unlikely(r != 0)) { 36 36 if (r != -ERESTARTSYS) { 37 37 struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; ··· 67 67 { 68 68 int r; 69 69 70 - r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL); 70 + r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL); 71 71 if (unlikely(r != 0)) { 72 72 if (r != -ERESTARTSYS) { 73 73 struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+1 -1
drivers/gpu/drm/radeon/radeon_object.c
··· 832 832 { 833 833 int r; 834 834 835 - r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL); 835 + r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL); 836 836 if (unlikely(r != 0)) 837 837 return r; 838 838 if (mem_type)
+1 -1
drivers/gpu/drm/radeon/radeon_object.h
··· 65 65 { 66 66 int r; 67 67 68 - r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, NULL); 68 + r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL); 69 69 if (unlikely(r != 0)) { 70 70 if (r != -ERESTARTSYS) 71 71 dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
+8 -9
drivers/gpu/drm/ttm/ttm_bo.c
··· 452 452 int ret; 453 453 454 454 spin_lock(&glob->lru_lock); 455 - ret = __ttm_bo_reserve(bo, false, true, false, NULL); 455 + ret = __ttm_bo_reserve(bo, false, true, NULL); 456 456 457 457 if (!ret) { 458 458 if (!ttm_bo_wait(bo, false, false, true)) { ··· 526 526 return -EBUSY; 527 527 528 528 spin_lock(&glob->lru_lock); 529 - ret = __ttm_bo_reserve(bo, false, true, false, NULL); 529 + ret = __ttm_bo_reserve(bo, false, true, NULL); 530 530 531 531 /* 532 532 * We raced, and lost, someone else holds the reservation now, ··· 595 595 kref_get(&nentry->list_kref); 596 596 } 597 597 598 - ret = __ttm_bo_reserve(entry, false, true, false, NULL); 598 + ret = __ttm_bo_reserve(entry, false, true, NULL); 599 599 if (remove_all && ret) { 600 600 spin_unlock(&glob->lru_lock); 601 - ret = __ttm_bo_reserve(entry, false, false, 602 - false, NULL); 601 + ret = __ttm_bo_reserve(entry, false, false, NULL); 603 602 spin_lock(&glob->lru_lock); 604 603 } 605 604 ··· 740 741 741 742 spin_lock(&glob->lru_lock); 742 743 list_for_each_entry(bo, &man->lru, lru) { 743 - ret = __ttm_bo_reserve(bo, false, true, false, NULL); 744 + ret = __ttm_bo_reserve(bo, false, true, NULL); 744 745 if (!ret) { 745 746 if (place && (place->fpfn || place->lpfn)) { 746 747 /* Don't evict this BO if it's outside of the ··· 1622 1623 * Using ttm_bo_reserve makes sure the lru lists are updated. 
1623 1624 */ 1624 1625 1625 - ret = ttm_bo_reserve(bo, true, no_wait, false, NULL); 1626 + ret = ttm_bo_reserve(bo, true, no_wait, NULL); 1626 1627 if (unlikely(ret != 0)) 1627 1628 return ret; 1628 1629 ret = ttm_bo_wait(bo, false, true, no_wait); ··· 1655 1656 1656 1657 spin_lock(&glob->lru_lock); 1657 1658 list_for_each_entry(bo, &glob->swap_lru, swap) { 1658 - ret = __ttm_bo_reserve(bo, false, true, false, NULL); 1659 + ret = __ttm_bo_reserve(bo, false, true, NULL); 1659 1660 if (!ret) 1660 1661 break; 1661 1662 } ··· 1754 1755 return -ERESTARTSYS; 1755 1756 if (!ww_mutex_is_locked(&bo->resv->lock)) 1756 1757 goto out_unlock; 1757 - ret = __ttm_bo_reserve(bo, true, false, false, NULL); 1758 + ret = __ttm_bo_reserve(bo, true, false, NULL); 1758 1759 if (unlikely(ret != 0)) 1759 1760 goto out_unlock; 1760 1761 __ttm_bo_unreserve(bo);
+1 -1
drivers/gpu/drm/ttm/ttm_bo_vm.c
··· 108 108 * for reserve, and if it fails, retry the fault after waiting 109 109 * for the buffer to become unreserved. 110 110 */ 111 - ret = ttm_bo_reserve(bo, true, true, false, NULL); 111 + ret = ttm_bo_reserve(bo, true, true, NULL); 112 112 if (unlikely(ret != 0)) { 113 113 if (ret != -EBUSY) 114 114 return VM_FAULT_NOPAGE;
+1 -2
drivers/gpu/drm/ttm/ttm_execbuf_util.c
··· 112 112 list_for_each_entry(entry, list, head) { 113 113 struct ttm_buffer_object *bo = entry->bo; 114 114 115 - ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), true, 116 - ticket); 115 + ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket); 117 116 if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) { 118 117 __ttm_bo_unreserve(bo); 119 118
+1 -1
drivers/gpu/drm/virtio/virtgpu_drv.h
··· 400 400 { 401 401 int r; 402 402 403 - r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL); 403 + r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL); 404 404 if (unlikely(r != 0)) { 405 405 if (r != -ERESTARTSYS) { 406 406 struct virtio_gpu_device *qdev =
+1 -1
drivers/gpu/drm/virtio/virtgpu_object.c
··· 155 155 { 156 156 int r; 157 157 158 - r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL); 158 + r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL); 159 159 if (unlikely(r != 0)) 160 160 return r; 161 161 r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
··· 421 421 } 422 422 423 423 bo = &buf->base; 424 - WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, false, NULL)); 424 + WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, NULL)); 425 425 426 426 ret = ttm_bo_wait(old_bo, false, false, false); 427 427 if (unlikely(ret != 0)) {
+4 -4
drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
··· 56 56 57 57 vmw_execbuf_release_pinned_bo(dev_priv); 58 58 59 - ret = ttm_bo_reserve(bo, interruptible, false, false, NULL); 59 + ret = ttm_bo_reserve(bo, interruptible, false, NULL); 60 60 if (unlikely(ret != 0)) 61 61 goto err; 62 62 ··· 98 98 99 99 vmw_execbuf_release_pinned_bo(dev_priv); 100 100 101 - ret = ttm_bo_reserve(bo, interruptible, false, false, NULL); 101 + ret = ttm_bo_reserve(bo, interruptible, false, NULL); 102 102 if (unlikely(ret != 0)) 103 103 goto err; 104 104 ··· 174 174 return ret; 175 175 176 176 vmw_execbuf_release_pinned_bo(dev_priv); 177 - ret = ttm_bo_reserve(bo, interruptible, false, false, NULL); 177 + ret = ttm_bo_reserve(bo, interruptible, false, NULL); 178 178 if (unlikely(ret != 0)) 179 179 goto err_unlock; 180 180 ··· 225 225 if (unlikely(ret != 0)) 226 226 return ret; 227 227 228 - ret = ttm_bo_reserve(bo, interruptible, false, false, NULL); 228 + ret = ttm_bo_reserve(bo, interruptible, false, NULL); 229 229 if (unlikely(ret != 0)) 230 230 goto err; 231 231
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
··· 326 326 if (unlikely(ret != 0)) 327 327 return ret; 328 328 329 - ret = ttm_bo_reserve(&vbo->base, false, true, false, NULL); 329 + ret = ttm_bo_reserve(&vbo->base, false, true, NULL); 330 330 BUG_ON(ret != 0); 331 331 vmw_bo_pin_reserved(vbo, true); 332 332
+3 -3
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
··· 98 98 kmap_offset = 0; 99 99 kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT; 100 100 101 - ret = ttm_bo_reserve(&dmabuf->base, true, false, false, NULL); 101 + ret = ttm_bo_reserve(&dmabuf->base, true, false, NULL); 102 102 if (unlikely(ret != 0)) { 103 103 DRM_ERROR("reserve failed\n"); 104 104 return -EINVAL; ··· 318 318 kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT; 319 319 kmap_num = (64*64*4) >> PAGE_SHIFT; 320 320 321 - ret = ttm_bo_reserve(bo, true, false, false, NULL); 321 + ret = ttm_bo_reserve(bo, true, false, NULL); 322 322 if (unlikely(ret != 0)) { 323 323 DRM_ERROR("reserve failed\n"); 324 324 return; ··· 1859 1859 struct ttm_buffer_object *bo = &buf->base; 1860 1860 int ret; 1861 1861 1862 - ttm_bo_reserve(bo, false, false, interruptible, NULL); 1862 + ttm_bo_reserve(bo, false, false, NULL); 1863 1863 ret = vmw_validate_single_buffer(dev_priv, bo, interruptible, 1864 1864 validate_as_mob); 1865 1865 if (ret)
+6 -6
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
··· 222 222 if (bo) { 223 223 int ret; 224 224 225 - ret = ttm_bo_reserve(bo, false, true, false, NULL); 225 + ret = ttm_bo_reserve(bo, false, true, NULL); 226 226 BUG_ON(ret != 0); 227 227 228 228 vmw_fence_single_bo(bo, NULL); ··· 262 262 if (unlikely(ret != 0)) 263 263 goto out_no_bo; 264 264 265 - ret = ttm_bo_reserve(batch->otable_bo, false, true, false, NULL); 265 + ret = ttm_bo_reserve(batch->otable_bo, false, true, NULL); 266 266 BUG_ON(ret != 0); 267 267 ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm); 268 268 if (unlikely(ret != 0)) ··· 357 357 vmw_takedown_otable_base(dev_priv, i, 358 358 &batch->otables[i]); 359 359 360 - ret = ttm_bo_reserve(bo, false, true, false, NULL); 360 + ret = ttm_bo_reserve(bo, false, true, NULL); 361 361 BUG_ON(ret != 0); 362 362 363 363 vmw_fence_single_bo(bo, NULL); ··· 440 440 if (unlikely(ret != 0)) 441 441 return ret; 442 442 443 - ret = ttm_bo_reserve(mob->pt_bo, false, true, false, NULL); 443 + ret = ttm_bo_reserve(mob->pt_bo, false, true, NULL); 444 444 445 445 BUG_ON(ret != 0); 446 446 ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm); ··· 545 545 const struct vmw_sg_table *vsgt; 546 546 int ret; 547 547 548 - ret = ttm_bo_reserve(bo, false, true, false, NULL); 548 + ret = ttm_bo_reserve(bo, false, true, NULL); 549 549 BUG_ON(ret != 0); 550 550 551 551 vsgt = vmw_bo_sg_table(bo); ··· 595 595 struct ttm_buffer_object *bo = mob->pt_bo; 596 596 597 597 if (bo) { 598 - ret = ttm_bo_reserve(bo, false, true, false, NULL); 598 + ret = ttm_bo_reserve(bo, false, true, NULL); 599 599 /* 600 600 * Noone else should be using this buffer. 601 601 */
+3 -4
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
··· 129 129 if (res->backup) { 130 130 struct ttm_buffer_object *bo = &res->backup->base; 131 131 132 - ttm_bo_reserve(bo, false, false, false, NULL); 132 + ttm_bo_reserve(bo, false, false, NULL); 133 133 if (!list_empty(&res->mob_head) && 134 134 res->func->unbind != NULL) { 135 135 struct ttm_validate_buffer val_buf; ··· 1717 1717 if (res->backup) { 1718 1718 vbo = res->backup; 1719 1719 1720 - ttm_bo_reserve(&vbo->base, interruptible, false, false, 1721 - NULL); 1720 + ttm_bo_reserve(&vbo->base, interruptible, false, NULL); 1722 1721 if (!vbo->pin_count) { 1723 1722 ret = ttm_bo_validate 1724 1723 (&vbo->base, ··· 1772 1773 if (--res->pin_count == 0 && res->backup) { 1773 1774 struct vmw_dma_buffer *vbo = res->backup; 1774 1775 1775 - ttm_bo_reserve(&vbo->base, false, false, false, NULL); 1776 + ttm_bo_reserve(&vbo->base, false, false, NULL); 1776 1777 vmw_bo_pin_reserved(vbo, false); 1777 1778 ttm_bo_unreserve(&vbo->base); 1778 1779 }
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
··· 988 988 if (unlikely(ret != 0)) 989 989 goto out; 990 990 991 - ret = ttm_bo_reserve(&buf->base, false, true, false, NULL); 991 + ret = ttm_bo_reserve(&buf->base, false, true, NULL); 992 992 if (unlikely(ret != 0)) 993 993 goto no_reserve; 994 994
+5 -9
include/drm/ttm/ttm_bo_driver.h
··· 759 759 * @bo: A pointer to a struct ttm_buffer_object. 760 760 * @interruptible: Sleep interruptible if waiting. 761 761 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY. 762 - * @use_ticket: If @bo is already reserved, Only sleep waiting for 763 - * it to become unreserved if @ticket->stamp is older. 762 + * @ticket: ticket used to acquire the ww_mutex. 764 763 * 765 764 * Will not remove reserved buffers from the lru lists. 766 765 * Otherwise identical to ttm_bo_reserve. ··· 775 776 * be returned if @use_ticket is set to true. 776 777 */ 777 778 static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo, 778 - bool interruptible, 779 - bool no_wait, bool use_ticket, 779 + bool interruptible, bool no_wait, 780 780 struct ww_acquire_ctx *ticket) 781 781 { 782 782 int ret = 0; ··· 804 806 * @bo: A pointer to a struct ttm_buffer_object. 805 807 * @interruptible: Sleep interruptible if waiting. 806 808 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY. 807 - * @use_ticket: If @bo is already reserved, Only sleep waiting for 808 - * it to become unreserved if @ticket->stamp is older. 809 + * @ticket: ticket used to acquire the ww_mutex. 809 810 * 810 811 * Locks a buffer object for validation. (Or prevents other processes from 811 812 * locking it for validation) and removes it from lru lists, while taking ··· 843 846 * be returned if @use_ticket is set to true. 844 847 */ 845 848 static inline int ttm_bo_reserve(struct ttm_buffer_object *bo, 846 - bool interruptible, 847 - bool no_wait, bool use_ticket, 849 + bool interruptible, bool no_wait, 848 850 struct ww_acquire_ctx *ticket) 849 851 { 850 852 int ret; 851 853 852 854 WARN_ON(!atomic_read(&bo->kref.refcount)); 853 855 854 - ret = __ttm_bo_reserve(bo, interruptible, no_wait, use_ticket, ticket); 856 + ret = __ttm_bo_reserve(bo, interruptible, no_wait, ticket); 855 857 if (likely(ret == 0)) 856 858 ttm_bo_del_sub_from_lru(bo); 857 859