Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/ttm: optionally move duplicates to a separate list

This patch adds an optional list_head parameter to ttm_eu_reserve_buffers.
If specified, duplicates in the execbuf list are no longer reported as errors,
but are moved to this list instead.

Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Christian König and committed by
Alex Deucher
aa35071c 5e5c21ca

+26 -8
+2 -1
drivers/gpu/drm/qxl/qxl_release.c
··· 264 264 if (list_is_singular(&release->bos)) 265 265 return 0; 266 266 267 - ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos, !no_intr); 267 + ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos, 268 + !no_intr, NULL); 268 269 if (ret) 269 270 return ret; 270 271
+1 -1
drivers/gpu/drm/radeon/radeon_gem.c
··· 564 564 if (!vm_bos) 565 565 return; 566 566 567 - r = ttm_eu_reserve_buffers(&ticket, &list, true); 567 + r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL); 568 568 if (r) 569 569 goto error_free; 570 570
+1 -1
drivers/gpu/drm/radeon/radeon_object.c
··· 508 508 u64 bytes_moved = 0, initial_bytes_moved; 509 509 u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev); 510 510 511 - r = ttm_eu_reserve_buffers(ticket, head, true); 511 + r = ttm_eu_reserve_buffers(ticket, head, true, NULL); 512 512 if (unlikely(r != 0)) { 513 513 return r; 514 514 }
+9 -1
drivers/gpu/drm/ttm/ttm_execbuf_util.c
··· 93 93 */ 94 94 95 95 int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, 96 - struct list_head *list, bool intr) 96 + struct list_head *list, bool intr, 97 + struct list_head *dups) 97 98 { 98 99 struct ttm_bo_global *glob; 99 100 struct ttm_validate_buffer *entry; ··· 118 117 __ttm_bo_unreserve(bo); 119 118 120 119 ret = -EBUSY; 120 + 121 + } else if (ret == -EALREADY && dups) { 122 + struct ttm_validate_buffer *safe = entry; 123 + entry = list_prev_entry(entry, head); 124 + list_del(&safe->head); 125 + list_add(&safe->head, dups); 126 + continue; 121 127 } 122 128 123 129 if (!ret) {
+4 -2
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
··· 2487 2487 if (unlikely(ret != 0)) 2488 2488 goto out_err_nores; 2489 2489 2490 - ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, true); 2490 + ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, 2491 + true, NULL); 2491 2492 if (unlikely(ret != 0)) 2492 2493 goto out_err; 2493 2494 ··· 2678 2677 query_val.shared = false; 2679 2678 list_add_tail(&query_val.head, &validate_list); 2680 2679 2681 - ret = ttm_eu_reserve_buffers(&ticket, &validate_list, false); 2680 + ret = ttm_eu_reserve_buffers(&ticket, &validate_list, 2681 + false, NULL); 2682 2682 if (unlikely(ret != 0)) { 2683 2683 vmw_execbuf_unpin_panic(dev_priv); 2684 2684 goto out_no_reserve;
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
··· 1222 1222 val_buf->bo = ttm_bo_reference(&res->backup->base); 1223 1223 val_buf->shared = false; 1224 1224 list_add_tail(&val_buf->head, &val_list); 1225 - ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible); 1225 + ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL); 1226 1226 if (unlikely(ret != 0)) 1227 1227 goto out_no_reserve; 1228 1228
+8 -1
include/drm/ttm/ttm_execbuf_util.h
··· 68 68 * non-blocking reserves should be tried. 69 69 * @list: thread private list of ttm_validate_buffer structs. 70 70 * @intr: should the wait be interruptible 71 + * @dups: [out] optional list of duplicates. 71 72 * 72 73 * Tries to reserve bos pointed to by the list entries for validation. 73 74 * If the function returns 0, all buffers are marked as "unfenced", ··· 84 83 * calling process receives a signal while waiting. In that case, no 85 84 * buffers on the list will be reserved upon return. 86 85 * 86 + * If dups is non NULL all buffers already reserved by the current thread 87 + * (e.g. duplicates) are added to this list, otherwise -EALREADY is returned 88 + * on the first already reserved buffer and all buffers from the list are 89 + * unreserved again. 90 + * 87 91 * Buffers reserved by this function should be unreserved by 88 92 * a call to either ttm_eu_backoff_reservation() or 89 93 * ttm_eu_fence_buffer_objects() when command submission is complete or ··· 96 90 */ 97 91 98 92 extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, 99 - struct list_head *list, bool intr); 93 + struct list_head *list, bool intr, 94 + struct list_head *dups); 100 95 101 96 /** 102 97 * function ttm_eu_fence_buffer_objects.