Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dma-buf: add enum dma_resv_usage v4

This change adds the dma_resv_usage enum and allows us to specify why a
dma_resv object is queried for its containing fences.

Additional to that a dma_resv_usage_rw() helper function is added to aid
retrieving the fences for a read or write userspace submission.

This is then deployed to the different query functions of the dma_resv
object and all of their users. When the write parameter was previously
true we now use DMA_RESV_USAGE_WRITE and DMA_RESV_USAGE_READ otherwise.

v2: add KERNEL/OTHER in separate patch
v3: some kerneldoc suggestions by Daniel
v4: some more kerneldoc suggestions by Daniel, fix missing cases lost in
the rebase pointed out by Bas.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-2-christian.koenig@amd.com

+215 -126
+4 -2
drivers/dma-buf/dma-buf.c
··· 216 216 struct dma_fence *fence; 217 217 int r; 218 218 219 - dma_resv_for_each_fence(&cursor, resv, write, fence) { 219 + dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write), 220 + fence) { 220 221 dma_fence_get(fence); 221 222 r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb); 222 223 if (!r) ··· 1125 1124 long ret; 1126 1125 1127 1126 /* Wait on any implicit rendering fences */ 1128 - ret = dma_resv_wait_timeout(resv, write, true, MAX_SCHEDULE_TIMEOUT); 1127 + ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write), 1128 + true, MAX_SCHEDULE_TIMEOUT); 1129 1129 if (ret < 0) 1130 1130 return ret; 1131 1131
+17 -18
drivers/dma-buf/dma-resv.c
··· 384 384 cursor->seq = read_seqcount_begin(&cursor->obj->seq); 385 385 cursor->index = -1; 386 386 cursor->shared_count = 0; 387 - if (cursor->all_fences) { 387 + if (cursor->usage >= DMA_RESV_USAGE_READ) { 388 388 cursor->fences = dma_resv_shared_list(cursor->obj); 389 389 if (cursor->fences) 390 390 cursor->shared_count = cursor->fences->shared_count; ··· 496 496 dma_resv_assert_held(cursor->obj); 497 497 498 498 cursor->index = 0; 499 - if (cursor->all_fences) 499 + if (cursor->usage >= DMA_RESV_USAGE_READ) 500 500 cursor->fences = dma_resv_shared_list(cursor->obj); 501 501 else 502 502 cursor->fences = NULL; ··· 551 551 list = NULL; 552 552 excl = NULL; 553 553 554 - dma_resv_iter_begin(&cursor, src, true); 554 + dma_resv_iter_begin(&cursor, src, DMA_RESV_USAGE_READ); 555 555 dma_resv_for_each_fence_unlocked(&cursor, f) { 556 556 557 557 if (dma_resv_iter_is_restarted(&cursor)) { ··· 597 597 * dma_resv_get_fences - Get an object's shared and exclusive 598 598 * fences without update side lock held 599 599 * @obj: the reservation object 600 - * @write: true if we should return all fences 600 + * @usage: controls which fences to include, see enum dma_resv_usage. 601 601 * @num_fences: the number of fences returned 602 602 * @fences: the array of fence ptrs returned (array is krealloc'd to the 603 603 * required size, and must be freed by caller) ··· 605 605 * Retrieve all fences from the reservation object. 606 606 * Returns either zero or -ENOMEM. 
607 607 */ 608 - int dma_resv_get_fences(struct dma_resv *obj, bool write, 608 + int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage, 609 609 unsigned int *num_fences, struct dma_fence ***fences) 610 610 { 611 611 struct dma_resv_iter cursor; ··· 614 614 *num_fences = 0; 615 615 *fences = NULL; 616 616 617 - dma_resv_iter_begin(&cursor, obj, write); 617 + dma_resv_iter_begin(&cursor, obj, usage); 618 618 dma_resv_for_each_fence_unlocked(&cursor, fence) { 619 619 620 620 if (dma_resv_iter_is_restarted(&cursor)) { ··· 646 646 /** 647 647 * dma_resv_get_singleton - Get a single fence for all the fences 648 648 * @obj: the reservation object 649 - * @write: true if we should return all fences 649 + * @usage: controls which fences to include, see enum dma_resv_usage. 650 650 * @fence: the resulting fence 651 651 * 652 652 * Get a single fence representing all the fences inside the resv object. ··· 658 658 * 659 659 * Returns 0 on success and negative error values on failure. 660 660 */ 661 - int dma_resv_get_singleton(struct dma_resv *obj, bool write, 661 + int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage, 662 662 struct dma_fence **fence) 663 663 { 664 664 struct dma_fence_array *array; ··· 666 666 unsigned count; 667 667 int r; 668 668 669 - r = dma_resv_get_fences(obj, write, &count, &fences); 669 + r = dma_resv_get_fences(obj, usage, &count, &fences); 670 670 if (r) 671 671 return r; 672 672 ··· 700 700 * dma_resv_wait_timeout - Wait on reservation's objects 701 701 * shared and/or exclusive fences. 702 702 * @obj: the reservation object 703 - * @wait_all: if true, wait on all fences, else wait on just exclusive fence 703 + * @usage: controls which fences to include, see enum dma_resv_usage. 
704 704 * @intr: if true, do interruptible wait 705 705 * @timeout: timeout value in jiffies or zero to return immediately 706 706 * ··· 710 710 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or 711 711 * greater than zer on success. 712 712 */ 713 - long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr, 714 - unsigned long timeout) 713 + long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage, 714 + bool intr, unsigned long timeout) 715 715 { 716 716 long ret = timeout ? timeout : 1; 717 717 struct dma_resv_iter cursor; 718 718 struct dma_fence *fence; 719 719 720 - dma_resv_iter_begin(&cursor, obj, wait_all); 720 + dma_resv_iter_begin(&cursor, obj, usage); 721 721 dma_resv_for_each_fence_unlocked(&cursor, fence) { 722 722 723 723 ret = dma_fence_wait_timeout(fence, intr, ret); ··· 737 737 * dma_resv_test_signaled - Test if a reservation object's fences have been 738 738 * signaled. 739 739 * @obj: the reservation object 740 - * @test_all: if true, test all fences, otherwise only test the exclusive 741 - * fence 740 + * @usage: controls which fences to include, see enum dma_resv_usage. 742 741 * 743 742 * Callers are not required to hold specific locks, but maybe hold 744 743 * dma_resv_lock() already. ··· 746 747 * 747 748 * True if all fences signaled, else false. 
748 749 */ 749 - bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all) 750 + bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage) 750 751 { 751 752 struct dma_resv_iter cursor; 752 753 struct dma_fence *fence; 753 754 754 - dma_resv_iter_begin(&cursor, obj, test_all); 755 + dma_resv_iter_begin(&cursor, obj, usage); 755 756 dma_resv_for_each_fence_unlocked(&cursor, fence) { 756 757 dma_resv_iter_end(&cursor); 757 758 return false; ··· 774 775 struct dma_resv_iter cursor; 775 776 struct dma_fence *fence; 776 777 777 - dma_resv_for_each_fence(&cursor, obj, true, fence) { 778 + dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_READ, fence) { 778 779 seq_printf(seq, "\t%s fence:", 779 780 dma_resv_iter_is_exclusive(&cursor) ? 780 781 "Exclusive" : "Shared");
+25 -23
drivers/dma-buf/st-dma-resv.c
··· 58 58 return r; 59 59 } 60 60 61 - static int test_signaling(void *arg, bool shared) 61 + static int test_signaling(void *arg, enum dma_resv_usage usage) 62 62 { 63 63 struct dma_resv resv; 64 64 struct dma_fence *f; ··· 81 81 goto err_unlock; 82 82 } 83 83 84 - if (shared) 84 + if (usage >= DMA_RESV_USAGE_READ) 85 85 dma_resv_add_shared_fence(&resv, f); 86 86 else 87 87 dma_resv_add_excl_fence(&resv, f); 88 88 89 - if (dma_resv_test_signaled(&resv, shared)) { 89 + if (dma_resv_test_signaled(&resv, usage)) { 90 90 pr_err("Resv unexpectedly signaled\n"); 91 91 r = -EINVAL; 92 92 goto err_unlock; 93 93 } 94 94 dma_fence_signal(f); 95 - if (!dma_resv_test_signaled(&resv, shared)) { 95 + if (!dma_resv_test_signaled(&resv, usage)) { 96 96 pr_err("Resv not reporting signaled\n"); 97 97 r = -EINVAL; 98 98 goto err_unlock; ··· 107 107 108 108 static int test_excl_signaling(void *arg) 109 109 { 110 - return test_signaling(arg, false); 110 + return test_signaling(arg, DMA_RESV_USAGE_WRITE); 111 111 } 112 112 113 113 static int test_shared_signaling(void *arg) 114 114 { 115 - return test_signaling(arg, true); 115 + return test_signaling(arg, DMA_RESV_USAGE_READ); 116 116 } 117 117 118 - static int test_for_each(void *arg, bool shared) 118 + static int test_for_each(void *arg, enum dma_resv_usage usage) 119 119 { 120 120 struct dma_resv_iter cursor; 121 121 struct dma_fence *f, *fence; ··· 139 139 goto err_unlock; 140 140 } 141 141 142 - if (shared) 142 + if (usage >= DMA_RESV_USAGE_READ) 143 143 dma_resv_add_shared_fence(&resv, f); 144 144 else 145 145 dma_resv_add_excl_fence(&resv, f); 146 146 147 147 r = -ENOENT; 148 - dma_resv_for_each_fence(&cursor, &resv, shared, fence) { 148 + dma_resv_for_each_fence(&cursor, &resv, usage, fence) { 149 149 if (!r) { 150 150 pr_err("More than one fence found\n"); 151 151 r = -EINVAL; ··· 156 156 r = -EINVAL; 157 157 goto err_unlock; 158 158 } 159 - if (dma_resv_iter_is_exclusive(&cursor) != !shared) { 159 + if 
(dma_resv_iter_is_exclusive(&cursor) != 160 + (usage >= DMA_RESV_USAGE_READ)) { 160 161 pr_err("Unexpected fence usage\n"); 161 162 r = -EINVAL; 162 163 goto err_unlock; ··· 179 178 180 179 static int test_excl_for_each(void *arg) 181 180 { 182 - return test_for_each(arg, false); 181 + return test_for_each(arg, DMA_RESV_USAGE_WRITE); 183 182 } 184 183 185 184 static int test_shared_for_each(void *arg) 186 185 { 187 - return test_for_each(arg, true); 186 + return test_for_each(arg, DMA_RESV_USAGE_READ); 188 187 } 189 188 190 - static int test_for_each_unlocked(void *arg, bool shared) 189 + static int test_for_each_unlocked(void *arg, enum dma_resv_usage usage) 191 190 { 192 191 struct dma_resv_iter cursor; 193 192 struct dma_fence *f, *fence; ··· 212 211 goto err_free; 213 212 } 214 213 215 - if (shared) 214 + if (usage >= DMA_RESV_USAGE_READ) 216 215 dma_resv_add_shared_fence(&resv, f); 217 216 else 218 217 dma_resv_add_excl_fence(&resv, f); 219 218 dma_resv_unlock(&resv); 220 219 221 220 r = -ENOENT; 222 - dma_resv_iter_begin(&cursor, &resv, shared); 221 + dma_resv_iter_begin(&cursor, &resv, usage); 223 222 dma_resv_for_each_fence_unlocked(&cursor, fence) { 224 223 if (!r) { 225 224 pr_err("More than one fence found\n"); ··· 235 234 r = -EINVAL; 236 235 goto err_iter_end; 237 236 } 238 - if (dma_resv_iter_is_exclusive(&cursor) != !shared) { 237 + if (dma_resv_iter_is_exclusive(&cursor) != 238 + (usage >= DMA_RESV_USAGE_READ)) { 239 239 pr_err("Unexpected fence usage\n"); 240 240 r = -EINVAL; 241 241 goto err_iter_end; ··· 264 262 265 263 static int test_excl_for_each_unlocked(void *arg) 266 264 { 267 - return test_for_each_unlocked(arg, false); 265 + return test_for_each_unlocked(arg, DMA_RESV_USAGE_WRITE); 268 266 } 269 267 270 268 static int test_shared_for_each_unlocked(void *arg) 271 269 { 272 - return test_for_each_unlocked(arg, true); 270 + return test_for_each_unlocked(arg, DMA_RESV_USAGE_READ); 273 271 } 274 272 275 - static int test_get_fences(void *arg, 
bool shared) 273 + static int test_get_fences(void *arg, enum dma_resv_usage usage) 276 274 { 277 275 struct dma_fence *f, **fences = NULL; 278 276 struct dma_resv resv; ··· 296 294 goto err_resv; 297 295 } 298 296 299 - if (shared) 297 + if (usage >= DMA_RESV_USAGE_READ) 300 298 dma_resv_add_shared_fence(&resv, f); 301 299 else 302 300 dma_resv_add_excl_fence(&resv, f); 303 301 dma_resv_unlock(&resv); 304 302 305 - r = dma_resv_get_fences(&resv, shared, &i, &fences); 303 + r = dma_resv_get_fences(&resv, usage, &i, &fences); 306 304 if (r) { 307 305 pr_err("get_fences failed\n"); 308 306 goto err_free; ··· 326 324 327 325 static int test_excl_get_fences(void *arg) 328 326 { 329 - return test_get_fences(arg, false); 327 + return test_get_fences(arg, DMA_RESV_USAGE_WRITE); 330 328 } 331 329 332 330 static int test_shared_get_fences(void *arg) 333 331 { 334 - return test_get_fences(arg, true); 332 + return test_get_fences(arg, DMA_RESV_USAGE_READ); 335 333 } 336 334 337 335 int dma_resv(void)
+3 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
··· 1288 1288 * 1289 1289 * TODO: Remove together with dma_resv rework. 1290 1290 */ 1291 - dma_resv_for_each_fence(&cursor, resv, false, fence) { 1291 + dma_resv_for_each_fence(&cursor, resv, 1292 + DMA_RESV_USAGE_WRITE, 1293 + fence) { 1292 1294 break; 1293 1295 } 1294 1296 dma_fence_chain_init(chain, fence, dma_fence_get(p->fence), 1);
+1 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
··· 200 200 goto unpin; 201 201 } 202 202 203 - /* TODO: Unify this with other drivers */ 204 - r = dma_resv_get_fences(new_abo->tbo.base.resv, true, 203 + r = dma_resv_get_fences(new_abo->tbo.base.resv, DMA_RESV_USAGE_WRITE, 205 204 &work->shared_count, 206 205 &work->shared); 207 206 if (unlikely(r != 0)) {
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
··· 526 526 return -ENOENT; 527 527 } 528 528 robj = gem_to_amdgpu_bo(gobj); 529 - ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, timeout); 529 + ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ, 530 + true, timeout); 530 531 531 532 /* ret == 0 means not signaled, 532 533 * ret > 0 means signaled
+3 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
··· 111 111 struct dma_fence *fence; 112 112 int r; 113 113 114 - r = dma_resv_get_singleton(resv, true, &fence); 114 + r = dma_resv_get_singleton(resv, DMA_RESV_USAGE_READ, &fence); 115 115 if (r) 116 116 goto fallback; 117 117 ··· 139 139 /* Not enough memory for the delayed delete, as last resort 140 140 * block for all the fences to complete. 141 141 */ 142 - dma_resv_wait_timeout(resv, true, false, MAX_SCHEDULE_TIMEOUT); 142 + dma_resv_wait_timeout(resv, DMA_RESV_USAGE_READ, 143 + false, MAX_SCHEDULE_TIMEOUT); 143 144 amdgpu_pasid_free(pasid); 144 145 } 145 146
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
··· 75 75 76 76 mmu_interval_set_seq(mni, cur_seq); 77 77 78 - r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false, 79 - MAX_SCHEDULE_TIMEOUT); 78 + r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_READ, 79 + false, MAX_SCHEDULE_TIMEOUT); 80 80 mutex_unlock(&adev->notifier_lock); 81 81 if (r <= 0) 82 82 DRM_ERROR("(%ld) failed to wait for user bo\n", r);
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
··· 768 768 return 0; 769 769 } 770 770 771 - r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false, 772 - MAX_SCHEDULE_TIMEOUT); 771 + r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_WRITE, 772 + false, MAX_SCHEDULE_TIMEOUT); 773 773 if (r < 0) 774 774 return r; 775 775
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
··· 259 259 if (resv == NULL) 260 260 return -EINVAL; 261 261 262 - dma_resv_for_each_fence(&cursor, resv, true, f) { 262 + /* TODO: Use DMA_RESV_USAGE_READ here */ 263 + dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, f) { 263 264 dma_fence_chain_for_each(f, f) { 264 265 struct dma_fence *tmp = dma_fence_chain_contained(f); 265 266
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 1344 1344 * If true, then return false as any KFD process needs all its BOs to 1345 1345 * be resident to run successfully 1346 1346 */ 1347 - dma_resv_for_each_fence(&resv_cursor, bo->base.resv, true, f) { 1347 + dma_resv_for_each_fence(&resv_cursor, bo->base.resv, 1348 + DMA_RESV_USAGE_READ, f) { 1348 1349 if (amdkfd_fence_check_mm(f, current->mm)) 1349 1350 return false; 1350 1351 }
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
··· 1163 1163 ib->length_dw = 16; 1164 1164 1165 1165 if (direct) { 1166 - r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false, 1166 + r = dma_resv_wait_timeout(bo->tbo.base.resv, 1167 + DMA_RESV_USAGE_WRITE, false, 1167 1168 msecs_to_jiffies(10)); 1168 1169 if (r == 0) 1169 1170 r = -ETIMEDOUT;
+4 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 2059 2059 struct dma_resv_iter cursor; 2060 2060 struct dma_fence *fence; 2061 2061 2062 - dma_resv_for_each_fence(&cursor, resv, true, fence) { 2062 + dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, fence) { 2063 2063 /* Add a callback for each fence in the reservation object */ 2064 2064 amdgpu_vm_prt_get(adev); 2065 2065 amdgpu_vm_add_prt_cb(adev, fence); ··· 2665 2665 return true; 2666 2666 2667 2667 /* Don't evict VM page tables while they are busy */ 2668 - if (!dma_resv_test_signaled(bo->tbo.base.resv, true)) 2668 + if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_READ)) 2669 2669 return false; 2670 2670 2671 2671 /* Try to block ongoing updates */ ··· 2845 2845 */ 2846 2846 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) 2847 2847 { 2848 - timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, true, 2848 + timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, 2849 + DMA_RESV_USAGE_READ, 2849 2850 true, timeout); 2850 2851 if (timeout <= 0) 2851 2852 return timeout;
+2 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 9236 9236 * deadlock during GPU reset when this fence will not signal 9237 9237 * but we hold reservation lock for the BO. 9238 9238 */ 9239 - r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false, 9239 + r = dma_resv_wait_timeout(abo->tbo.base.resv, 9240 + DMA_RESV_USAGE_WRITE, false, 9240 9241 msecs_to_jiffies(5000)); 9241 9242 if (unlikely(r <= 0)) 9242 9243 DRM_ERROR("Waiting for fences timed out!");
+2 -1
drivers/gpu/drm/drm_gem.c
··· 771 771 return -EINVAL; 772 772 } 773 773 774 - ret = dma_resv_wait_timeout(obj->resv, wait_all, true, timeout); 774 + ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all), 775 + true, timeout); 775 776 if (ret == 0) 776 777 ret = -ETIME; 777 778 else if (ret > 0)
+1 -1
drivers/gpu/drm/drm_gem_atomic_helper.c
··· 151 151 return 0; 152 152 153 153 obj = drm_gem_fb_get_obj(state->fb, 0); 154 - ret = dma_resv_get_singleton(obj->resv, false, &fence); 154 + ret = dma_resv_get_singleton(obj->resv, DMA_RESV_USAGE_WRITE, &fence); 155 155 if (ret) 156 156 return ret; 157 157
+4 -2
drivers/gpu/drm/etnaviv/etnaviv_gem.c
··· 380 380 } 381 381 382 382 if (op & ETNA_PREP_NOSYNC) { 383 - if (!dma_resv_test_signaled(obj->resv, write)) 383 + if (!dma_resv_test_signaled(obj->resv, 384 + dma_resv_usage_rw(write))) 384 385 return -EBUSY; 385 386 } else { 386 387 unsigned long remain = etnaviv_timeout_to_jiffies(timeout); 387 388 388 - ret = dma_resv_wait_timeout(obj->resv, write, true, remain); 389 + ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write), 390 + true, remain); 389 391 if (ret <= 0) 390 392 return ret == 0 ? -ETIMEDOUT : ret; 391 393 }
+2 -1
drivers/gpu/drm/i915/display/intel_atomic_plane.c
··· 997 997 if (ret < 0) 998 998 goto unpin_fb; 999 999 1000 - dma_resv_iter_begin(&cursor, obj->base.resv, false); 1000 + dma_resv_iter_begin(&cursor, obj->base.resv, 1001 + DMA_RESV_USAGE_WRITE); 1001 1002 dma_resv_for_each_fence_unlocked(&cursor, fence) { 1002 1003 add_rps_boost_after_vblank(new_plane_state->hw.crtc, 1003 1004 fence);
+2 -2
drivers/gpu/drm/i915/gem/i915_gem_busy.c
··· 138 138 * Alternatively, we can trade that extra information on read/write 139 139 * activity with 140 140 * args->busy = 141 - * !dma_resv_test_signaled(obj->resv, true); 141 + * !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ); 142 142 * to report the overall busyness. This is what the wait-ioctl does. 143 143 * 144 144 */ 145 145 args->busy = 0; 146 - dma_resv_iter_begin(&cursor, obj->base.resv, true); 146 + dma_resv_iter_begin(&cursor, obj->base.resv, DMA_RESV_USAGE_READ); 147 147 dma_resv_for_each_fence_unlocked(&cursor, fence) { 148 148 if (dma_resv_iter_is_restarted(&cursor)) 149 149 args->busy = 0;
+1 -1
drivers/gpu/drm/i915/gem/i915_gem_lmem.c
··· 66 66 struct intel_memory_region *mr = READ_ONCE(obj->mm.region); 67 67 68 68 #ifdef CONFIG_LOCKDEP 69 - GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, true) && 69 + GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, DMA_RESV_USAGE_READ) && 70 70 i915_gem_object_evictable(obj)); 71 71 #endif 72 72 return mr && (mr->type == INTEL_MEMORY_LOCAL ||
+1 -1
drivers/gpu/drm/i915/gem/i915_gem_userptr.c
··· 86 86 return true; 87 87 88 88 /* we will unbind on next submission, still have userptr pins */ 89 - r = dma_resv_wait_timeout(obj->base.resv, true, false, 89 + r = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_READ, false, 90 90 MAX_SCHEDULE_TIMEOUT); 91 91 if (r <= 0) 92 92 drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r);
+4 -2
drivers/gpu/drm/i915/gem/i915_gem_wait.c
··· 40 40 struct dma_fence *fence; 41 41 long ret = timeout ?: 1; 42 42 43 - dma_resv_iter_begin(&cursor, resv, flags & I915_WAIT_ALL); 43 + dma_resv_iter_begin(&cursor, resv, 44 + dma_resv_usage_rw(flags & I915_WAIT_ALL)); 44 45 dma_resv_for_each_fence_unlocked(&cursor, fence) { 45 46 ret = i915_gem_object_wait_fence(fence, flags, timeout); 46 47 if (ret <= 0) ··· 118 117 struct dma_resv_iter cursor; 119 118 struct dma_fence *fence; 120 119 121 - dma_resv_iter_begin(&cursor, obj->base.resv, flags & I915_WAIT_ALL); 120 + dma_resv_iter_begin(&cursor, obj->base.resv, 121 + dma_resv_usage_rw(flags & I915_WAIT_ALL)); 122 122 dma_resv_for_each_fence_unlocked(&cursor, fence) 123 123 i915_gem_fence_wait_priority(fence, attr); 124 124 dma_resv_iter_end(&cursor);
+2 -1
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
··· 219 219 goto out_detach; 220 220 } 221 221 222 - timeout = dma_resv_wait_timeout(dmabuf->resv, false, true, 5 * HZ); 222 + timeout = dma_resv_wait_timeout(dmabuf->resv, DMA_RESV_USAGE_WRITE, 223 + true, 5 * HZ); 223 224 if (!timeout) { 224 225 pr_err("dmabuf wait for exclusive fence timed out.\n"); 225 226 timeout = -ETIME;
+2 -1
drivers/gpu/drm/i915/i915_request.c
··· 1598 1598 struct dma_fence *fence; 1599 1599 int ret = 0; 1600 1600 1601 - dma_resv_for_each_fence(&cursor, obj->base.resv, write, fence) { 1601 + dma_resv_for_each_fence(&cursor, obj->base.resv, 1602 + dma_resv_usage_rw(write), fence) { 1602 1603 ret = i915_request_await_dma_fence(to, fence); 1603 1604 if (ret) 1604 1605 break;
+1 -1
drivers/gpu/drm/i915/i915_sw_fence.c
··· 585 585 debug_fence_assert(fence); 586 586 might_sleep_if(gfpflags_allow_blocking(gfp)); 587 587 588 - dma_resv_iter_begin(&cursor, resv, write); 588 + dma_resv_iter_begin(&cursor, resv, dma_resv_usage_rw(write)); 589 589 dma_resv_for_each_fence_unlocked(&cursor, f) { 590 590 pending = i915_sw_fence_await_dma_fence(fence, f, timeout, 591 591 gfp);
+2 -1
drivers/gpu/drm/msm/msm_gem.c
··· 848 848 op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout); 849 849 long ret; 850 850 851 - ret = dma_resv_wait_timeout(obj->resv, write, true, remain); 851 + ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write), 852 + true, remain); 852 853 if (ret == 0) 853 854 return remain == 0 ? -EBUSY : -ETIMEDOUT; 854 855 else if (ret < 0)
+2 -1
drivers/gpu/drm/nouveau/dispnv50/wndw.c
··· 558 558 asyw->image.handle[0] = ctxdma->object.handle; 559 559 } 560 560 561 - ret = dma_resv_get_singleton(nvbo->bo.base.resv, false, 561 + ret = dma_resv_get_singleton(nvbo->bo.base.resv, 562 + DMA_RESV_USAGE_WRITE, 562 563 &asyw->state.fence); 563 564 if (ret) 564 565 return ret;
+4 -4
drivers/gpu/drm/nouveau/nouveau_bo.c
··· 962 962 struct dma_fence *fence; 963 963 int ret; 964 964 965 - /* TODO: This is actually a memory management dependency */ 966 - ret = dma_resv_get_singleton(bo->base.resv, false, &fence); 965 + ret = dma_resv_get_singleton(bo->base.resv, DMA_RESV_USAGE_WRITE, 966 + &fence); 967 967 if (ret) 968 - dma_resv_wait_timeout(bo->base.resv, false, false, 969 - MAX_SCHEDULE_TIMEOUT); 968 + dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_WRITE, 969 + false, MAX_SCHEDULE_TIMEOUT); 970 970 971 971 nv10_bo_put_tile_region(dev, *old_tile, fence); 972 972 *old_tile = new_tile;
+5 -3
drivers/gpu/drm/nouveau/nouveau_fence.c
··· 350 350 if (ret) 351 351 return ret; 352 352 353 - /* Waiting for the exclusive fence first causes performance regressions 354 - * under some circumstances. So manually wait for the shared ones first. 353 + /* Waiting for the writes first causes performance regressions 354 + * under some circumstances. So manually wait for the reads first. 355 355 */ 356 356 for (i = 0; i < 2; ++i) { 357 357 struct dma_resv_iter cursor; 358 358 struct dma_fence *fence; 359 359 360 - dma_resv_for_each_fence(&cursor, resv, exclusive, fence) { 360 + dma_resv_for_each_fence(&cursor, resv, 361 + dma_resv_usage_rw(exclusive), 362 + fence) { 361 363 struct nouveau_fence *f; 362 364 363 365 if (i == 0 && dma_resv_iter_is_exclusive(&cursor))
+2 -1
drivers/gpu/drm/nouveau/nouveau_gem.c
··· 962 962 return -ENOENT; 963 963 nvbo = nouveau_gem_object(gem); 964 964 965 - lret = dma_resv_wait_timeout(nvbo->bo.base.resv, write, true, 965 + lret = dma_resv_wait_timeout(nvbo->bo.base.resv, 966 + dma_resv_usage_rw(write), true, 966 967 no_wait ? 0 : 30 * HZ); 967 968 if (!lret) 968 969 ret = -EBUSY;
+2 -1
drivers/gpu/drm/panfrost/panfrost_drv.c
··· 316 316 if (!gem_obj) 317 317 return -ENOENT; 318 318 319 - ret = dma_resv_wait_timeout(gem_obj->resv, true, true, timeout); 319 + ret = dma_resv_wait_timeout(gem_obj->resv, DMA_RESV_USAGE_READ, 320 + true, timeout); 320 321 if (!ret) 321 322 ret = timeout ? -ETIMEDOUT : -EBUSY; 322 323
+2 -1
drivers/gpu/drm/qxl/qxl_debugfs.c
··· 61 61 struct dma_fence *fence; 62 62 int rel = 0; 63 63 64 - dma_resv_iter_begin(&cursor, bo->tbo.base.resv, true); 64 + dma_resv_iter_begin(&cursor, bo->tbo.base.resv, 65 + DMA_RESV_USAGE_READ); 65 66 dma_resv_for_each_fence_unlocked(&cursor, fence) { 66 67 if (dma_resv_iter_is_restarted(&cursor)) 67 68 rel = 0;
+2 -1
drivers/gpu/drm/radeon/radeon_display.c
··· 533 533 DRM_ERROR("failed to pin new rbo buffer before flip\n"); 534 534 goto cleanup; 535 535 } 536 - r = dma_resv_get_singleton(new_rbo->tbo.base.resv, false, &work->fence); 536 + r = dma_resv_get_singleton(new_rbo->tbo.base.resv, DMA_RESV_USAGE_WRITE, 537 + &work->fence); 537 538 if (r) { 538 539 radeon_bo_unreserve(new_rbo); 539 540 DRM_ERROR("failed to get new rbo buffer fences\n");
+6 -3
drivers/gpu/drm/radeon/radeon_gem.c
··· 162 162 } 163 163 if (domain == RADEON_GEM_DOMAIN_CPU) { 164 164 /* Asking for cpu access wait for object idle */ 165 - r = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ); 165 + r = dma_resv_wait_timeout(robj->tbo.base.resv, 166 + DMA_RESV_USAGE_READ, 167 + true, 30 * HZ); 166 168 if (!r) 167 169 r = -EBUSY; 168 170 ··· 526 524 } 527 525 robj = gem_to_radeon_bo(gobj); 528 526 529 - r = dma_resv_test_signaled(robj->tbo.base.resv, true); 527 + r = dma_resv_test_signaled(robj->tbo.base.resv, DMA_RESV_USAGE_READ); 530 528 if (r == 0) 531 529 r = -EBUSY; 532 530 else ··· 555 553 } 556 554 robj = gem_to_radeon_bo(gobj); 557 555 558 - ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, 30 * HZ); 556 + ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ, 557 + true, 30 * HZ); 559 558 if (ret == 0) 560 559 r = -EBUSY; 561 560 else if (ret < 0)
+2 -2
drivers/gpu/drm/radeon/radeon_mn.c
··· 66 66 return true; 67 67 } 68 68 69 - r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false, 70 - MAX_SCHEDULE_TIMEOUT); 69 + r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_READ, 70 + false, MAX_SCHEDULE_TIMEOUT); 71 71 if (r <= 0) 72 72 DRM_ERROR("(%ld) failed to wait for user bo\n", r); 73 73
+1 -1
drivers/gpu/drm/radeon/radeon_sync.c
··· 96 96 struct dma_fence *f; 97 97 int r = 0; 98 98 99 - dma_resv_for_each_fence(&cursor, resv, shared, f) { 99 + dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(shared), f) { 100 100 fence = to_radeon_fence(f); 101 101 if (fence && fence->rdev == rdev) 102 102 radeon_sync_fence(sync, fence);
+2 -2
drivers/gpu/drm/radeon/radeon_uvd.c
··· 478 478 return -EINVAL; 479 479 } 480 480 481 - r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false, 482 - MAX_SCHEDULE_TIMEOUT); 481 + r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_WRITE, 482 + false, MAX_SCHEDULE_TIMEOUT); 483 483 if (r <= 0) { 484 484 DRM_ERROR("Failed waiting for UVD message (%ld)!\n", r); 485 485 return r ? r : -ETIME;
+2 -1
drivers/gpu/drm/scheduler/sched_main.c
··· 705 705 706 706 dma_resv_assert_held(obj->resv); 707 707 708 - dma_resv_for_each_fence(&cursor, obj->resv, write, fence) { 708 + dma_resv_for_each_fence(&cursor, obj->resv, dma_resv_usage_rw(write), 709 + fence) { 709 710 /* Make sure to grab an additional ref on the added fence */ 710 711 dma_fence_get(fence); 711 712 ret = drm_sched_job_add_dependency(job, fence);
+10 -8
drivers/gpu/drm/ttm/ttm_bo.c
··· 223 223 struct dma_resv_iter cursor; 224 224 struct dma_fence *fence; 225 225 226 - dma_resv_iter_begin(&cursor, resv, true); 226 + dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_READ); 227 227 dma_resv_for_each_fence_unlocked(&cursor, fence) { 228 228 if (!fence->ops->signaled) 229 229 dma_fence_enable_sw_signaling(fence); ··· 252 252 struct dma_resv *resv = &bo->base._resv; 253 253 int ret; 254 254 255 - if (dma_resv_test_signaled(resv, true)) 255 + if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_READ)) 256 256 ret = 0; 257 257 else 258 258 ret = -EBUSY; ··· 264 264 dma_resv_unlock(bo->base.resv); 265 265 spin_unlock(&bo->bdev->lru_lock); 266 266 267 - lret = dma_resv_wait_timeout(resv, true, interruptible, 267 + lret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_READ, 268 + interruptible, 268 269 30 * HZ); 269 270 270 271 if (lret < 0) ··· 368 367 /* Last resort, if we fail to allocate memory for the 369 368 * fences block for the BO to become idle 370 369 */ 371 - dma_resv_wait_timeout(bo->base.resv, true, false, 370 + dma_resv_wait_timeout(bo->base.resv, 371 + DMA_RESV_USAGE_READ, false, 372 372 30 * HZ); 373 373 } 374 374 ··· 380 378 ttm_mem_io_free(bdev, bo->resource); 381 379 } 382 380 383 - if (!dma_resv_test_signaled(bo->base.resv, true) || 381 + if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_READ) || 384 382 !dma_resv_trylock(bo->base.resv)) { 385 383 /* The BO is not idle, resurrect it for delayed destroy */ 386 384 ttm_bo_flush_all_fences(bo); ··· 1046 1044 long timeout = 15 * HZ; 1047 1045 1048 1046 if (no_wait) { 1049 - if (dma_resv_test_signaled(bo->base.resv, true)) 1047 + if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_READ)) 1050 1048 return 0; 1051 1049 else 1052 1050 return -EBUSY; 1053 1051 } 1054 1052 1055 - timeout = dma_resv_wait_timeout(bo->base.resv, true, interruptible, 1056 - timeout); 1053 + timeout = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_READ, 1054 + interruptible, timeout); 1057 1055 if 
(timeout < 0) 1058 1056 return timeout; 1059 1057
+3 -1
drivers/gpu/drm/vgem/vgem_fence.c
··· 130 130 struct vgem_file *vfile = file->driver_priv; 131 131 struct dma_resv *resv; 132 132 struct drm_gem_object *obj; 133 + enum dma_resv_usage usage; 133 134 struct dma_fence *fence; 134 135 int ret; 135 136 ··· 152 151 153 152 /* Check for a conflicting fence */ 154 153 resv = obj->resv; 155 - if (!dma_resv_test_signaled(resv, arg->flags & VGEM_FENCE_WRITE)) { 154 + usage = dma_resv_usage_rw(arg->flags & VGEM_FENCE_WRITE); 155 + if (!dma_resv_test_signaled(resv, usage)) { 156 156 ret = -EBUSY; 157 157 goto err_fence; 158 158 }
+3 -2
drivers/gpu/drm/virtio/virtgpu_ioctl.c
··· 518 518 return -ENOENT; 519 519 520 520 if (args->flags & VIRTGPU_WAIT_NOWAIT) { 521 - ret = dma_resv_test_signaled(obj->resv, true); 521 + ret = dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ); 522 522 } else { 523 - ret = dma_resv_wait_timeout(obj->resv, true, true, timeout); 523 + ret = dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ, 524 + true, timeout); 524 525 } 525 526 if (ret == 0) 526 527 ret = -EBUSY;
+2 -2
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
··· 528 528 if (flags & drm_vmw_synccpu_allow_cs) { 529 529 long lret; 530 530 531 - lret = dma_resv_wait_timeout(bo->base.resv, true, true, 532 - nonblock ? 0 : 531 + lret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_READ, 532 + true, nonblock ? 0 : 533 533 MAX_SCHEDULE_TIMEOUT); 534 534 if (!lret) 535 535 return -EBUSY;
+2 -1
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
··· 1164 1164 if (bo->moving) 1165 1165 dma_fence_put(bo->moving); 1166 1166 1167 - return dma_resv_get_singleton(bo->base.resv, false, 1167 + return dma_resv_get_singleton(bo->base.resv, 1168 + DMA_RESV_USAGE_WRITE, 1168 1169 &bo->moving); 1169 1170 } 1170 1171
+2 -1
drivers/infiniband/core/umem_dmabuf.c
··· 67 67 * may be not up-to-date. Wait for the exporter to finish 68 68 * the migration. 69 69 */ 70 - return dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv, false, 70 + return dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv, 71 + DMA_RESV_USAGE_WRITE, 71 72 false, MAX_SCHEDULE_TIMEOUT); 72 73 } 73 74 EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);
+6 -2
include/linux/dma-buf.h
··· 408 408 * pipelining across drivers. These do not set any fences for their 409 409 * access. An example here is v4l. 410 410 * 411 + * - Driver should use dma_resv_usage_rw() when retrieving fences as 412 + * dependency for implicit synchronization. 413 + * 411 414 * DYNAMIC IMPORTER RULES: 412 415 * 413 416 * Dynamic importers, see dma_buf_attachment_is_dynamic(), have ··· 426 423 * 427 424 * IMPORTANT: 428 425 * 429 - * All drivers must obey the struct dma_resv rules, specifically the 430 - * rules for updating and obeying fences. 426 + * All drivers and memory management related functions must obey the 427 + * struct dma_resv rules, specifically the rules for updating and 428 + * obeying fences. See enum dma_resv_usage for further descriptions. 431 429 */ 432 430 struct dma_resv *resv; 433 431
+60 -13
include/linux/dma-resv.h
··· 50 50 struct dma_resv_list; 51 51 52 52 /** 53 + * enum dma_resv_usage - how the fences from a dma_resv obj are used 54 + * 55 + * This enum describes the different use cases for a dma_resv object and 56 + * controls which fences are returned when queried. 57 + * 58 + * An important fact is that there is the order WRITE<READ and when the 59 + * dma_resv object is asked for fences for one use case the fences for the 60 + * lower use case are returned as well. 61 + */ 62 + enum dma_resv_usage { 63 + /** 64 + * @DMA_RESV_USAGE_WRITE: Implicit write synchronization. 65 + * 66 + * This should only be used for userspace command submissions which add 67 + * an implicit write dependency. 68 + */ 69 + DMA_RESV_USAGE_WRITE, 70 + 71 + /** 72 + * @DMA_RESV_USAGE_READ: Implicit read synchronization. 73 + * 74 + * This should only be used for userspace command submissions which add 75 + * an implicit read dependency. 76 + */ 77 + DMA_RESV_USAGE_READ, 78 + }; 79 + 80 + /** 81 + * dma_resv_usage_rw - helper for implicit sync 82 + * @write: true if we create a new implicit sync write 83 + * 84 + * This returns the implicit synchronization usage for write or read accesses, 85 + * see enum dma_resv_usage and &dma_buf.resv. 86 + */ 87 + static inline enum dma_resv_usage dma_resv_usage_rw(bool write) 88 + { 89 + /* This looks confusing at first sight, but is indeed correct. 90 + * 91 + * The rational is that new write operations needs to wait for the 92 + * existing read and write operations to finish. 93 + * But a new read operation only needs to wait for the existing write 94 + * operations to finish. 95 + */ 96 + return write ? 
DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE; 97 + } 98 + 99 + /** 53 100 * struct dma_resv - a reservation object manages fences for a buffer 54 101 * 55 102 * There are multiple uses for this, with sometimes slightly different rules in ··· 189 142 /** @obj: The dma_resv object we iterate over */ 190 143 struct dma_resv *obj; 191 144 192 - /** @all_fences: If all fences should be returned */ 193 - bool all_fences; 145 + /** @usage: Return fences with this usage or lower. */ 146 + enum dma_resv_usage usage; 194 147 195 148 /** @fence: the currently handled fence */ 196 149 struct dma_fence *fence; ··· 220 173 * dma_resv_iter_begin - initialize a dma_resv_iter object 221 174 * @cursor: The dma_resv_iter object to initialize 222 175 * @obj: The dma_resv object which we want to iterate over 223 - * @all_fences: If all fences should be returned or just the exclusive one 176 + * @usage: controls which fences to include, see enum dma_resv_usage. 224 177 */ 225 178 static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor, 226 179 struct dma_resv *obj, 227 - bool all_fences) 180 + enum dma_resv_usage usage) 228 181 { 229 182 cursor->obj = obj; 230 - cursor->all_fences = all_fences; 183 + cursor->usage = usage; 231 184 cursor->fence = NULL; 232 185 } 233 186 ··· 288 241 * dma_resv_for_each_fence - fence iterator 289 242 * @cursor: a struct dma_resv_iter pointer 290 243 * @obj: a dma_resv object pointer 291 - * @all_fences: true if all fences should be returned 244 + * @usage: controls which fences to return 292 245 * @fence: the current fence 293 246 * 294 247 * Iterate over the fences in a struct dma_resv object while holding the ··· 297 250 * valid as long as the lock is held and so no extra reference to the fence is 298 251 * taken. 
299 252 */ 300 - #define dma_resv_for_each_fence(cursor, obj, all_fences, fence) \ 301 - for (dma_resv_iter_begin(cursor, obj, all_fences), \ 253 + #define dma_resv_for_each_fence(cursor, obj, usage, fence) \ 254 + for (dma_resv_iter_begin(cursor, obj, usage), \ 302 255 fence = dma_resv_iter_first(cursor); fence; \ 303 256 fence = dma_resv_iter_next(cursor)) 304 257 ··· 465 418 void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context, 466 419 struct dma_fence *fence); 467 420 void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence); 468 - int dma_resv_get_fences(struct dma_resv *obj, bool write, 421 + int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage, 469 422 unsigned int *num_fences, struct dma_fence ***fences); 470 - int dma_resv_get_singleton(struct dma_resv *obj, bool write, 423 + int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage, 471 424 struct dma_fence **fence); 472 425 int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src); 473 - long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr, 474 - unsigned long timeout); 475 - bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all); 426 + long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage, 427 + bool intr, unsigned long timeout); 428 + bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage); 476 429 void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq); 477 430 478 431 #endif /* _LINUX_RESERVATION_H */