Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/ttm/vmwgfx: Have TTM manage the validation sequence.

Rather than having the driver supply the validation sequence, leave that
responsibility to TTM. This saves some confusion and a function argument.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>

Authored by Thomas Hellstrom; committed by Dave Airlie.
Commit 65705962, parent 95762c2b.

+9 -6
+1
drivers/gpu/drm/ttm/ttm_bo.c
@@ -1539,6 +1539,7 @@
 	bdev->dev_mapping = NULL;
 	bdev->glob = glob;
 	bdev->need_dma32 = need_dma32;
+	bdev->val_seq = 0;
 	spin_lock_init(&bdev->fence_lock);
 	mutex_lock(&glob->device_list_mutex);
 	list_add_tail(&bdev->device_list, &glob->device_list);
+4 -1
drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -126,11 +126,12 @@
  * buffers in different orders.
  */

-int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
+int ttm_eu_reserve_buffers(struct list_head *list)
 {
 	struct ttm_bo_global *glob;
 	struct ttm_validate_buffer *entry;
 	int ret;
+	uint32_t val_seq;

 	if (list_empty(list))
 		return 0;
@@ -147,6 +148,8 @@

 retry:
 	spin_lock(&glob->lru_lock);
+	val_seq = entry->bo->bdev->val_seq++;
+
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
-1
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -264,7 +264,6 @@
 	 */

 	struct vmw_sw_context ctx;
-	uint32_t val_seq;
 	struct mutex cmdbuf_mutex;

 	/**
+1 -2
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -653,8 +653,7 @@
 	ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
 	if (unlikely(ret != 0))
 		goto out_err;
-	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes,
-				     dev_priv->val_seq++);
+	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
 	if (unlikely(ret != 0))
 		goto out_err;
+2
include/drm/ttm/ttm_bo_driver.h
@@ -515,6 +515,7 @@
  * @addr_space_mm: Range manager for the device address space.
  * lru_lock: Spinlock that protects the buffer+device lru lists and
  * ddestroy lists.
+ * @val_seq: Current validation sequence.
  * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
  * If a GPU lockup has been detected, this is forced to 0.
  * @dev_mapping: A pointer to the struct address_space representing the
@@ -545,6 +544,7 @@
 	 * Protected by the global:lru lock.
 	 */
 	struct list_head ddestroy;
+	uint32_t val_seq;

 	/*
 	 * Protected by load / firstopen / lastclose /unload sync.
+1 -2
include/drm/ttm/ttm_execbuf_util.h
@@ -72,7 +72,6 @@
  * function ttm_eu_reserve_buffers
  *
  * @list: thread private list of ttm_validate_buffer structs.
- * @val_seq: A unique sequence number.
  *
  * Tries to reserve bos pointed to by the list entries for validation.
  * If the function returns 0, all buffers are marked as "unfenced",
@@ -93,7 +94,7 @@
  * has failed.
  */

-extern int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq);
+extern int ttm_eu_reserve_buffers(struct list_head *list);

 /**
  * function ttm_eu_fence_buffer_objects.