Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/ttm: remove lazy parameter from ttm_bo_wait

Not used any more.

Reviewed-by: Sinclair Yeh <syeh@vmware.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Christian König and committed by Alex Deucher.
8aa6d4fc dfd5e50e

+24 -24
+1 -1
drivers/gpu/drm/nouveau/nouveau_bo.c
··· 1322 1322 } 1323 1323 1324 1324 /* Fallback to software copy. */ 1325 - ret = ttm_bo_wait(bo, true, intr, no_wait_gpu); 1325 + ret = ttm_bo_wait(bo, intr, no_wait_gpu); 1326 1326 if (ret == 0) 1327 1327 ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); 1328 1328
+2 -2
drivers/gpu/drm/nouveau/nouveau_gem.c
··· 126 126 list_del(&vma->head); 127 127 128 128 if (fobj && fobj->shared_count > 1) 129 - ttm_bo_wait(&nvbo->bo, true, false, false); 129 + ttm_bo_wait(&nvbo->bo, false, false); 130 130 else if (fobj && fobj->shared_count == 1) 131 131 fence = rcu_dereference_protected(fobj->shared[0], 132 132 reservation_object_held(resv)); ··· 651 651 data |= r->vor; 652 652 } 653 653 654 - ret = ttm_bo_wait(&nvbo->bo, true, false, false); 654 + ret = ttm_bo_wait(&nvbo->bo, false, false); 655 655 if (ret) { 656 656 NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret); 657 657 break;
+1 -1
drivers/gpu/drm/qxl/qxl_cmd.c
··· 624 624 if (stall) 625 625 mutex_unlock(&qdev->surf_evict_mutex); 626 626 627 - ret = ttm_bo_wait(&surf->tbo, true, true, !stall); 627 + ret = ttm_bo_wait(&surf->tbo, true, !stall); 628 628 629 629 if (stall) 630 630 mutex_lock(&qdev->surf_evict_mutex);
+1 -1
drivers/gpu/drm/qxl/qxl_object.h
··· 79 79 if (mem_type) 80 80 *mem_type = bo->tbo.mem.mem_type; 81 81 82 - r = ttm_bo_wait(&bo->tbo, true, true, no_wait); 82 + r = ttm_bo_wait(&bo->tbo, true, no_wait); 83 83 ttm_bo_unreserve(&bo->tbo); 84 84 return r; 85 85 }
+1 -1
drivers/gpu/drm/radeon/radeon_object.c
··· 838 838 if (mem_type) 839 839 *mem_type = bo->tbo.mem.mem_type; 840 840 841 - r = ttm_bo_wait(&bo->tbo, true, true, no_wait); 841 + r = ttm_bo_wait(&bo->tbo, true, no_wait); 842 842 ttm_bo_unreserve(&bo->tbo); 843 843 return r; 844 844 }
+8 -8
drivers/gpu/drm/ttm/ttm_bo.c
··· 455 455 ret = __ttm_bo_reserve(bo, false, true, NULL); 456 456 457 457 if (!ret) { 458 - if (!ttm_bo_wait(bo, false, false, true)) { 458 + if (!ttm_bo_wait(bo, false, true)) { 459 459 put_count = ttm_bo_del_from_lru(bo); 460 460 461 461 spin_unlock(&glob->lru_lock); ··· 508 508 int put_count; 509 509 int ret; 510 510 511 - ret = ttm_bo_wait(bo, false, false, true); 511 + ret = ttm_bo_wait(bo, false, true); 512 512 513 513 if (ret && !no_wait_gpu) { 514 514 long lret; ··· 545 545 * remove sync_obj with ttm_bo_wait, the wait should be 546 546 * finished, and no new wait object should have been added. 547 547 */ 548 - ret = ttm_bo_wait(bo, false, false, true); 548 + ret = ttm_bo_wait(bo, false, true); 549 549 WARN_ON(ret); 550 550 } 551 551 ··· 684 684 struct ttm_placement placement; 685 685 int ret = 0; 686 686 687 - ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); 687 + ret = ttm_bo_wait(bo, interruptible, no_wait_gpu); 688 688 689 689 if (unlikely(ret != 0)) { 690 690 if (ret != -ERESTARTSYS) { ··· 1006 1006 * Have the driver move function wait for idle when necessary, 1007 1007 * instead of doing it here. 
1008 1008 */ 1009 - ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); 1009 + ret = ttm_bo_wait(bo, interruptible, no_wait_gpu); 1010 1010 if (ret) 1011 1011 return ret; 1012 1012 } ··· 1567 1567 EXPORT_SYMBOL(ttm_bo_unmap_virtual); 1568 1568 1569 1569 int ttm_bo_wait(struct ttm_buffer_object *bo, 1570 - bool lazy, bool interruptible, bool no_wait) 1570 + bool interruptible, bool no_wait) 1571 1571 { 1572 1572 struct reservation_object_list *fobj; 1573 1573 struct reservation_object *resv; ··· 1625 1625 ret = ttm_bo_reserve(bo, true, no_wait, NULL); 1626 1626 if (unlikely(ret != 0)) 1627 1627 return ret; 1628 - ret = ttm_bo_wait(bo, false, true, no_wait); 1628 + ret = ttm_bo_wait(bo, true, no_wait); 1629 1629 if (likely(ret == 0)) 1630 1630 atomic_inc(&bo->cpu_writers); 1631 1631 ttm_bo_unreserve(bo); ··· 1682 1682 * Wait for GPU, then move to system cached. 1683 1683 */ 1684 1684 1685 - ret = ttm_bo_wait(bo, false, false, false); 1685 + ret = ttm_bo_wait(bo, false, false); 1686 1686 1687 1687 if (unlikely(ret != 0)) 1688 1688 goto out;
+1 -1
drivers/gpu/drm/ttm/ttm_bo_util.c
··· 645 645 646 646 reservation_object_add_excl_fence(bo->resv, fence); 647 647 if (evict) { 648 - ret = ttm_bo_wait(bo, false, false, false); 648 + ret = ttm_bo_wait(bo, false, false); 649 649 if (ret) 650 650 return ret; 651 651
+3 -3
drivers/gpu/drm/ttm/ttm_bo_vm.c
··· 54 54 /* 55 55 * Quick non-stalling check for idle. 56 56 */ 57 - ret = ttm_bo_wait(bo, false, false, true); 57 + ret = ttm_bo_wait(bo, false, true); 58 58 if (likely(ret == 0)) 59 59 goto out_unlock; 60 60 ··· 68 68 goto out_unlock; 69 69 70 70 up_read(&vma->vm_mm->mmap_sem); 71 - (void) ttm_bo_wait(bo, false, true, false); 71 + (void) ttm_bo_wait(bo, true, false); 72 72 goto out_unlock; 73 73 } 74 74 75 75 /* 76 76 * Ordinary wait. 77 77 */ 78 - ret = ttm_bo_wait(bo, false, true, false); 78 + ret = ttm_bo_wait(bo, true, false); 79 79 if (unlikely(ret != 0)) 80 80 ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS : 81 81 VM_FAULT_NOPAGE;
+1 -1
drivers/gpu/drm/virtio/virtgpu_object.c
··· 158 158 r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL); 159 159 if (unlikely(r != 0)) 160 160 return r; 161 - r = ttm_bo_wait(&bo->tbo, true, true, no_wait); 161 + r = ttm_bo_wait(&bo->tbo, true, no_wait); 162 162 ttm_bo_unreserve(&bo->tbo); 163 163 return r; 164 164 }
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
··· 839 839 */ 840 840 static void vmw_swap_notify(struct ttm_buffer_object *bo) 841 841 { 842 - ttm_bo_wait(bo, false, false, false); 842 + ttm_bo_wait(bo, false, false); 843 843 } 844 844 845 845
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
··· 423 423 bo = &buf->base; 424 424 WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, NULL)); 425 425 426 - ret = ttm_bo_wait(old_bo, false, false, false); 426 + ret = ttm_bo_wait(old_bo, false, false); 427 427 if (unlikely(ret != 0)) { 428 428 DRM_ERROR("Failed waiting for cotable unbind.\n"); 429 429 goto out_wait;
+2 -2
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
··· 1512 1512 list_del_init(&res->mob_head); 1513 1513 } 1514 1514 1515 - (void) ttm_bo_wait(bo, false, false, false); 1515 + (void) ttm_bo_wait(bo, false, false); 1516 1516 } 1517 1517 } 1518 1518 ··· 1605 1605 if (fence != NULL) 1606 1606 vmw_fence_obj_unreference(&fence); 1607 1607 1608 - (void) ttm_bo_wait(bo, false, false, false); 1608 + (void) ttm_bo_wait(bo, false, false); 1609 1609 } else 1610 1610 mutex_unlock(&dev_priv->binding_mutex); 1611 1611
+1 -1
include/drm/ttm/ttm_bo_api.h
··· 314 314 * Returns -EBUSY if no_wait is true and the buffer is busy. 315 315 * Returns -ERESTARTSYS if interrupted by a signal. 316 316 */ 317 - extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, 317 + extern int ttm_bo_wait(struct ttm_buffer_object *bo, 318 318 bool interruptible, bool no_wait); 319 319 /** 320 320 * ttm_bo_validate