Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

dma-buf: specify usage while adding fences to dma_resv obj v7

Instead of distinguishing between shared and exclusive fences, specify
the fence usage while adding fences.

Rework all drivers to use this interface instead and deprecate the old one.

v2: some kerneldoc comments suggested by Daniel
v3: fix a missing case in radeon
v4: rebase on nouveau changes, fix lockdep and temporary disable warning
v5: more documentation updates
v6: separate internal dma_resv changes from this patch, which avoids having
    to temporarily disable the warning; rebase on upstream changes
v7: fix a missed case in the lima driver, minimize changes to i915_gem_busy_ioctl

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-3-christian.koenig@amd.com
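
For illustration, a minimal sketch of what the conversion looks like in a driver's
fence-attach path. The helper name and the is_write flag are hypothetical; the
dma_resv_reserve_fences()/dma_resv_add_fence() calls and the DMA_RESV_USAGE_*
values are the interface introduced by this patch:

#include <linux/dma-resv.h>
#include <linux/dma-fence.h>

/* Hypothetical helper: attach a job fence to a buffer's reservation object. */
static int example_attach_fence(struct dma_resv *resv, struct dma_fence *fence,
				bool is_write)
{
	int ret;

	dma_resv_assert_held(resv);

	/* Fence slots still have to be reserved up front. */
	ret = dma_resv_reserve_fences(resv, 1);
	if (ret)
		return ret;

	/*
	 * Previously a driver picked between dma_resv_add_excl_fence() and
	 * dma_resv_add_shared_fence(); with this patch there is a single
	 * entry point and the usage is passed explicitly.
	 */
	dma_resv_add_fence(resv, fence,
			   is_write ? DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
	return 0;
}

This mirrors the per-driver changes below, e.g. in lima, etnaviv and vgem, where a
write flag selects between DMA_RESV_USAGE_WRITE and DMA_RESV_USAGE_READ.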

+149 -166
+37 -11
drivers/dma-buf/dma-resv.c
··· 234 234 235 235 #ifdef CONFIG_DEBUG_MUTEXES 236 236 /** 237 - * dma_resv_reset_shared_max - reset shared fences for debugging 237 + * dma_resv_reset_max_fences - reset shared fences for debugging 238 238 * @obj: the dma_resv object to reset 239 239 * 240 240 * Reset the number of pre-reserved shared slots to test that drivers do 241 241 * correct slot allocation using dma_resv_reserve_fences(). See also 242 242 * &dma_resv_list.shared_max. 243 243 */ 244 - void dma_resv_reset_shared_max(struct dma_resv *obj) 244 + void dma_resv_reset_max_fences(struct dma_resv *obj) 245 245 { 246 246 struct dma_resv_list *fences = dma_resv_shared_list(obj); 247 247 ··· 251 251 if (fences) 252 252 fences->shared_max = fences->shared_count; 253 253 } 254 - EXPORT_SYMBOL(dma_resv_reset_shared_max); 254 + EXPORT_SYMBOL(dma_resv_reset_max_fences); 255 255 #endif 256 256 257 257 /** ··· 264 264 * 265 265 * See also &dma_resv.fence for a discussion of the semantics. 266 266 */ 267 - void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence) 267 + static void dma_resv_add_shared_fence(struct dma_resv *obj, 268 + struct dma_fence *fence) 268 269 { 269 270 struct dma_resv_list *fobj; 270 271 struct dma_fence *old; ··· 306 305 write_seqcount_end(&obj->seq); 307 306 dma_fence_put(old); 308 307 } 309 - EXPORT_SYMBOL(dma_resv_add_shared_fence); 310 308 311 309 /** 312 310 * dma_resv_replace_fences - replace fences in the dma_resv obj 313 311 * @obj: the reservation object 314 312 * @context: the context of the fences to replace 315 313 * @replacement: the new fence to use instead 314 + * @usage: how the new fence is used, see enum dma_resv_usage 316 315 * 317 316 * Replace fences with a specified context with a new fence. Only valid if the 318 317 * operation represented by the original fence has no longer access to the ··· 322 321 * update fence which makes the resource inaccessible. 323 322 */ 324 323 void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context, 325 - struct dma_fence *replacement) 324 + struct dma_fence *replacement, 325 + enum dma_resv_usage usage) 326 326 { 327 327 struct dma_resv_list *list; 328 328 struct dma_fence *old; 329 329 unsigned int i; 330 + 331 + /* Only readers supported for now */ 332 + WARN_ON(usage != DMA_RESV_USAGE_READ); 330 333 331 334 dma_resv_assert_held(obj); 332 335 ··· 365 360 * Add a fence to the exclusive slot. @obj must be locked with dma_resv_lock(). 366 361 * See also &dma_resv.fence_excl for a discussion of the semantics. 367 362 */ 368 - void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence) 363 + static void dma_resv_add_excl_fence(struct dma_resv *obj, 364 + struct dma_fence *fence) 369 365 { 370 366 struct dma_fence *old_fence = dma_resv_excl_fence(obj); 371 367 ··· 381 375 382 376 dma_fence_put(old_fence); 383 377 } 384 - EXPORT_SYMBOL(dma_resv_add_excl_fence); 378 + 379 + /** 380 + * dma_resv_add_fence - Add a fence to the dma_resv obj 381 + * @obj: the reservation object 382 + * @fence: the fence to add 383 + * @usage: how the fence is used, see enum dma_resv_usage 384 + * 385 + * Add a fence to a slot, @obj must be locked with dma_resv_lock(), and 386 + * dma_resv_reserve_fences() has been called. 387 + * 388 + * See also &dma_resv.fence for a discussion of the semantics. 
389 + */ 390 + void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence, 391 + enum dma_resv_usage usage) 392 + { 393 + if (usage == DMA_RESV_USAGE_WRITE) 394 + dma_resv_add_excl_fence(obj, fence); 395 + else 396 + dma_resv_add_shared_fence(obj, fence); 397 + } 398 + EXPORT_SYMBOL(dma_resv_add_fence); 385 399 386 400 /* Restart the iterator by initializing all the necessary fields, but not the 387 401 * relation to the dma_resv object. */ ··· 600 574 } 601 575 602 576 dma_fence_get(f); 603 - if (dma_resv_iter_is_exclusive(&cursor)) 577 + if (dma_resv_iter_usage(&cursor) == DMA_RESV_USAGE_WRITE) 604 578 excl = f; 605 579 else 606 580 RCU_INIT_POINTER(list->shared[list->shared_count++], f); ··· 797 771 */ 798 772 void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq) 799 773 { 774 + static const char *usage[] = { "write", "read" }; 800 775 struct dma_resv_iter cursor; 801 776 struct dma_fence *fence; 802 777 803 778 dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_READ, fence) { 804 779 seq_printf(seq, "\t%s fence:", 805 - dma_resv_iter_is_exclusive(&cursor) ? 806 - "Exclusive" : "Shared"); 780 + usage[dma_resv_iter_usage(&cursor)]); 807 781 dma_fence_describe(fence, seq); 808 782 } 809 783 }
+27 -74
drivers/dma-buf/st-dma-resv.c
··· 58 58 return r; 59 59 } 60 60 61 - static int test_signaling(void *arg, enum dma_resv_usage usage) 61 + static int test_signaling(void *arg) 62 62 { 63 + enum dma_resv_usage usage = (unsigned long)arg; 63 64 struct dma_resv resv; 64 65 struct dma_fence *f; 65 66 int r; ··· 82 81 goto err_unlock; 83 82 } 84 83 85 - if (usage >= DMA_RESV_USAGE_READ) 86 - dma_resv_add_shared_fence(&resv, f); 87 - else 88 - dma_resv_add_excl_fence(&resv, f); 89 - 84 + dma_resv_add_fence(&resv, f, usage); 90 85 if (dma_resv_test_signaled(&resv, usage)) { 91 86 pr_err("Resv unexpectedly signaled\n"); 92 87 r = -EINVAL; ··· 102 105 return r; 103 106 } 104 107 105 - static int test_excl_signaling(void *arg) 108 + static int test_for_each(void *arg) 106 109 { 107 - return test_signaling(arg, DMA_RESV_USAGE_WRITE); 108 - } 109 - 110 - static int test_shared_signaling(void *arg) 111 - { 112 - return test_signaling(arg, DMA_RESV_USAGE_READ); 113 - } 114 - 115 - static int test_for_each(void *arg, enum dma_resv_usage usage) 116 - { 110 + enum dma_resv_usage usage = (unsigned long)arg; 117 111 struct dma_resv_iter cursor; 118 112 struct dma_fence *f, *fence; 119 113 struct dma_resv resv; ··· 127 139 goto err_unlock; 128 140 } 129 141 130 - if (usage >= DMA_RESV_USAGE_READ) 131 - dma_resv_add_shared_fence(&resv, f); 132 - else 133 - dma_resv_add_excl_fence(&resv, f); 142 + dma_resv_add_fence(&resv, f, usage); 134 143 135 144 r = -ENOENT; 136 145 dma_resv_for_each_fence(&cursor, &resv, usage, fence) { ··· 141 156 r = -EINVAL; 142 157 goto err_unlock; 143 158 } 144 - if (dma_resv_iter_is_exclusive(&cursor) != 145 - (usage >= DMA_RESV_USAGE_READ)) { 159 + if (dma_resv_iter_usage(&cursor) != usage) { 146 160 pr_err("Unexpected fence usage\n"); 147 161 r = -EINVAL; 148 162 goto err_unlock; ··· 161 177 return r; 162 178 } 163 179 164 - static int test_excl_for_each(void *arg) 180 + static int test_for_each_unlocked(void *arg) 165 181 { 166 - return test_for_each(arg, DMA_RESV_USAGE_WRITE); 167 - } 168 - 169 - static int test_shared_for_each(void *arg) 170 - { 171 - return test_for_each(arg, DMA_RESV_USAGE_READ); 172 - } 173 - 174 - static int test_for_each_unlocked(void *arg, enum dma_resv_usage usage) 175 - { 182 + enum dma_resv_usage usage = (unsigned long)arg; 176 183 struct dma_resv_iter cursor; 177 184 struct dma_fence *f, *fence; 178 185 struct dma_resv resv; ··· 187 212 goto err_free; 188 213 } 189 214 190 - if (usage >= DMA_RESV_USAGE_READ) 191 - dma_resv_add_shared_fence(&resv, f); 192 - else 193 - dma_resv_add_excl_fence(&resv, f); 215 + dma_resv_add_fence(&resv, f, usage); 194 216 dma_resv_unlock(&resv); 195 217 196 218 r = -ENOENT; ··· 207 235 r = -EINVAL; 208 236 goto err_iter_end; 209 237 } 210 - if (dma_resv_iter_is_exclusive(&cursor) != 211 - (usage >= DMA_RESV_USAGE_READ)) { 238 + if (dma_resv_iter_usage(&cursor) != usage) { 212 239 pr_err("Unexpected fence usage\n"); 213 240 r = -EINVAL; 214 241 goto err_iter_end; ··· 233 262 return r; 234 263 } 235 264 236 - static int test_excl_for_each_unlocked(void *arg) 265 + static int test_get_fences(void *arg) 237 266 { 238 - return test_for_each_unlocked(arg, DMA_RESV_USAGE_WRITE); 239 - } 240 - 241 - static int test_shared_for_each_unlocked(void *arg) 242 - { 243 - return test_for_each_unlocked(arg, DMA_RESV_USAGE_READ); 244 - } 245 - 246 - static int test_get_fences(void *arg, enum dma_resv_usage usage) 247 - { 267 + enum dma_resv_usage usage = (unsigned long)arg; 248 268 struct dma_fence *f, **fences = NULL; 249 269 struct dma_resv resv; 250 270 int r, i; ··· 
258 296 goto err_resv; 259 297 } 260 298 261 - if (usage >= DMA_RESV_USAGE_READ) 262 - dma_resv_add_shared_fence(&resv, f); 263 - else 264 - dma_resv_add_excl_fence(&resv, f); 299 + dma_resv_add_fence(&resv, f, usage); 265 300 dma_resv_unlock(&resv); 266 301 267 302 r = dma_resv_get_fences(&resv, usage, &i, &fences); ··· 283 324 return r; 284 325 } 285 326 286 - static int test_excl_get_fences(void *arg) 287 - { 288 - return test_get_fences(arg, DMA_RESV_USAGE_WRITE); 289 - } 290 - 291 - static int test_shared_get_fences(void *arg) 292 - { 293 - return test_get_fences(arg, DMA_RESV_USAGE_READ); 294 - } 295 - 296 327 int dma_resv(void) 297 328 { 298 329 static const struct subtest tests[] = { 299 330 SUBTEST(sanitycheck), 300 - SUBTEST(test_excl_signaling), 301 - SUBTEST(test_shared_signaling), 302 - SUBTEST(test_excl_for_each), 303 - SUBTEST(test_shared_for_each), 304 - SUBTEST(test_excl_for_each_unlocked), 305 - SUBTEST(test_shared_for_each_unlocked), 306 - SUBTEST(test_excl_get_fences), 307 - SUBTEST(test_shared_get_fences), 331 + SUBTEST(test_signaling), 332 + SUBTEST(test_for_each), 333 + SUBTEST(test_for_each_unlocked), 334 + SUBTEST(test_get_fences), 308 335 }; 336 + enum dma_resv_usage usage; 337 + int r; 309 338 310 339 spin_lock_init(&fence_lock); 311 - return subtests(tests, NULL); 340 + for (usage = DMA_RESV_USAGE_WRITE; usage <= DMA_RESV_USAGE_READ; 341 + ++usage) { 342 + r = subtests(tests, (void *)(unsigned long)usage); 343 + if (r) 344 + return r; 345 + } 346 + return 0; 312 347 }
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
··· 263 263 */ 264 264 replacement = dma_fence_get_stub(); 265 265 dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context, 266 - replacement); 266 + replacement, DMA_RESV_USAGE_READ); 267 267 dma_fence_put(replacement); 268 268 return 0; 269 269 }
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
··· 55 55 bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); 56 56 p->uf_entry.priority = 0; 57 57 p->uf_entry.tv.bo = &bo->tbo; 58 - /* One for TTM and one for the CS job */ 59 - p->uf_entry.tv.num_shared = 2; 58 + /* One for TTM and two for the CS job */ 59 + p->uf_entry.tv.num_shared = 3; 60 60 61 61 drm_gem_object_put(gobj); 62 62
+2 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
··· 1397 1397 return; 1398 1398 } 1399 1399 1400 - if (shared) 1401 - dma_resv_add_shared_fence(resv, fence); 1402 - else 1403 - dma_resv_add_excl_fence(resv, fence); 1400 + dma_resv_add_fence(resv, fence, shared ? DMA_RESV_USAGE_READ : 1401 + DMA_RESV_USAGE_WRITE); 1404 1402 } 1405 1403 1406 1404 /**
+3 -7
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
··· 202 202 203 203 for (i = 0; i < submit->nr_bos; i++) { 204 204 struct drm_gem_object *obj = &submit->bos[i].obj->base; 205 + bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE; 205 206 206 - if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE) 207 - dma_resv_add_excl_fence(obj->resv, 208 - submit->out_fence); 209 - else 210 - dma_resv_add_shared_fence(obj->resv, 211 - submit->out_fence); 212 - 207 + dma_resv_add_fence(obj->resv, submit->out_fence, write ? 208 + DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ); 213 209 submit_unlock_object(submit, i); 214 210 } 215 211 }
+3 -3
drivers/gpu/drm/i915/gem/i915_gem_busy.c
··· 148 148 if (dma_resv_iter_is_restarted(&cursor)) 149 149 args->busy = 0; 150 150 151 - if (dma_resv_iter_is_exclusive(&cursor)) 152 - /* Translate the exclusive fence to the READ *and* WRITE engine */ 151 + if (dma_resv_iter_usage(&cursor) <= DMA_RESV_USAGE_WRITE) 152 + /* Translate the write fences to the READ *and* WRITE engine */ 153 153 args->busy |= busy_check_writer(fence); 154 154 else 155 - /* Translate shared fences to READ set of engines */ 155 + /* Translate read fences to READ set of engines */ 156 156 args->busy |= busy_check_reader(fence); 157 157 } 158 158 dma_resv_iter_end(&cursor);
+2 -1
drivers/gpu/drm/i915/gem/i915_gem_clflush.c
··· 116 116 obj->base.resv, NULL, true, 117 117 i915_fence_timeout(i915), 118 118 I915_FENCE_GFP); 119 - dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma); 119 + dma_resv_add_fence(obj->base.resv, &clflush->base.dma, 120 + DMA_RESV_USAGE_WRITE); 120 121 dma_fence_work_commit(&clflush->base); 121 122 /* 122 123 * We must have successfully populated the pages(since we are
+2 -3
drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
··· 637 637 if (IS_ERR_OR_NULL(copy_fence)) 638 638 return PTR_ERR_OR_ZERO(copy_fence); 639 639 640 - dma_resv_add_excl_fence(dst_bo->base.resv, copy_fence); 641 - dma_resv_add_shared_fence(src_bo->base.resv, copy_fence); 642 - 640 + dma_resv_add_fence(dst_bo->base.resv, copy_fence, DMA_RESV_USAGE_WRITE); 641 + dma_resv_add_fence(src_bo->base.resv, copy_fence, DMA_RESV_USAGE_READ); 643 642 dma_fence_put(copy_fence); 644 643 645 644 return 0;
+2 -2
drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
··· 218 218 if (rq) { 219 219 err = dma_resv_reserve_fences(obj->base.resv, 1); 220 220 if (!err) 221 - dma_resv_add_excl_fence(obj->base.resv, 222 - &rq->fence); 221 + dma_resv_add_fence(obj->base.resv, &rq->fence, 222 + DMA_RESV_USAGE_WRITE); 223 223 i915_gem_object_set_moving_fence(obj, &rq->fence); 224 224 i915_request_put(rq); 225 225 }
+2 -1
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
··· 1220 1220 expand32(POISON_INUSE), &rq); 1221 1221 i915_gem_object_unpin_pages(obj); 1222 1222 if (rq) { 1223 - dma_resv_add_excl_fence(obj->base.resv, &rq->fence); 1223 + dma_resv_add_fence(obj->base.resv, &rq->fence, 1224 + DMA_RESV_USAGE_WRITE); 1224 1225 i915_gem_object_set_moving_fence(obj, &rq->fence); 1225 1226 i915_request_put(rq); 1226 1227 }
+5 -3
drivers/gpu/drm/i915/i915_vma.c
··· 1826 1826 } 1827 1827 1828 1828 if (fence) { 1829 - dma_resv_add_excl_fence(vma->obj->base.resv, fence); 1829 + dma_resv_add_fence(vma->obj->base.resv, fence, 1830 + DMA_RESV_USAGE_WRITE); 1830 1831 obj->write_domain = I915_GEM_DOMAIN_RENDER; 1831 1832 obj->read_domains = 0; 1832 1833 } ··· 1839 1838 } 1840 1839 1841 1840 if (fence) { 1842 - dma_resv_add_shared_fence(vma->obj->base.resv, fence); 1841 + dma_resv_add_fence(vma->obj->base.resv, fence, 1842 + DMA_RESV_USAGE_READ); 1843 1843 obj->write_domain = 0; 1844 1844 } 1845 1845 } ··· 2080 2078 goto out_rpm; 2081 2079 } 2082 2080 2083 - dma_resv_add_shared_fence(obj->base.resv, fence); 2081 + dma_resv_add_fence(obj->base.resv, fence, DMA_RESV_USAGE_READ); 2084 2082 dma_fence_put(fence); 2085 2083 2086 2084 out_rpm:
+2 -1
drivers/gpu/drm/i915/selftests/intel_memory_region.c
··· 1056 1056 obj->mm.pages->sgl, I915_CACHE_NONE, 1057 1057 true, 0xdeadbeaf, &rq); 1058 1058 if (rq) { 1059 - dma_resv_add_excl_fence(obj->base.resv, &rq->fence); 1059 + dma_resv_add_fence(obj->base.resv, &rq->fence, 1060 + DMA_RESV_USAGE_WRITE); 1060 1061 i915_request_put(rq); 1061 1062 } 1062 1063
+3 -4
drivers/gpu/drm/lima/lima_gem.c
··· 364 364 fence = lima_sched_context_queue_task(submit->task); 365 365 366 366 for (i = 0; i < submit->nr_bos; i++) { 367 - if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE) 368 - dma_resv_add_excl_fence(lima_bo_resv(bos[i]), fence); 369 - else 370 - dma_resv_add_shared_fence(lima_bo_resv(bos[i]), fence); 367 + dma_resv_add_fence(lima_bo_resv(bos[i]), fence, 368 + submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE ? 369 + DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ); 371 370 } 372 371 373 372 drm_gem_unlock_reservations((struct drm_gem_object **)bos,
+4 -2
drivers/gpu/drm/msm/msm_gem_submit.c
··· 395 395 struct drm_gem_object *obj = &submit->bos[i].obj->base; 396 396 397 397 if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE) 398 - dma_resv_add_excl_fence(obj->resv, submit->user_fence); 398 + dma_resv_add_fence(obj->resv, submit->user_fence, 399 + DMA_RESV_USAGE_WRITE); 399 400 else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ) 400 - dma_resv_add_shared_fence(obj->resv, submit->user_fence); 401 + dma_resv_add_fence(obj->resv, submit->user_fence, 402 + DMA_RESV_USAGE_READ); 401 403 } 402 404 } 403 405
+5 -4
drivers/gpu/drm/nouveau/nouveau_bo.c
··· 1308 1308 { 1309 1309 struct dma_resv *resv = nvbo->bo.base.resv; 1310 1310 1311 - if (exclusive) 1312 - dma_resv_add_excl_fence(resv, &fence->base); 1313 - else if (fence) 1314 - dma_resv_add_shared_fence(resv, &fence->base); 1311 + if (!fence) 1312 + return; 1313 + 1314 + dma_resv_add_fence(resv, &fence->base, exclusive ? 1315 + DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ); 1315 1316 } 1316 1317 1317 1318 static void
+3 -1
drivers/gpu/drm/nouveau/nouveau_fence.c
··· 360 360 dma_resv_for_each_fence(&cursor, resv, 361 361 dma_resv_usage_rw(exclusive), 362 362 fence) { 363 + enum dma_resv_usage usage; 363 364 struct nouveau_fence *f; 364 365 365 - if (i == 0 && dma_resv_iter_is_exclusive(&cursor)) 366 + usage = dma_resv_iter_usage(&cursor); 367 + if (i == 0 && usage == DMA_RESV_USAGE_WRITE) 366 368 continue; 367 369 368 370 f = nouveau_local_fence(fence, chan->drm);
+1 -1
drivers/gpu/drm/panfrost/panfrost_job.c
··· 268 268 int i; 269 269 270 270 for (i = 0; i < bo_count; i++) 271 - dma_resv_add_excl_fence(bos[i]->resv, fence); 271 + dma_resv_add_fence(bos[i]->resv, fence, DMA_RESV_USAGE_WRITE); 272 272 } 273 273 274 274 int panfrost_job_push(struct panfrost_job *job)
+2 -1
drivers/gpu/drm/qxl/qxl_release.c
··· 429 429 list_for_each_entry(entry, &release->bos, head) { 430 430 bo = entry->bo; 431 431 432 - dma_resv_add_shared_fence(bo->base.resv, &release->base); 432 + dma_resv_add_fence(bo->base.resv, &release->base, 433 + DMA_RESV_USAGE_READ); 433 434 ttm_bo_move_to_lru_tail_unlocked(bo); 434 435 dma_resv_unlock(bo->base.resv); 435 436 }
+2 -4
drivers/gpu/drm/radeon/radeon_object.c
··· 791 791 return; 792 792 } 793 793 794 - if (shared) 795 - dma_resv_add_shared_fence(resv, &fence->base); 796 - else 797 - dma_resv_add_excl_fence(resv, &fence->base); 794 + dma_resv_add_fence(resv, &fence->base, shared ? 795 + DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE); 798 796 }
+1 -1
drivers/gpu/drm/ttm/ttm_bo.c
··· 739 739 return ret; 740 740 } 741 741 742 - dma_resv_add_shared_fence(bo->base.resv, fence); 742 + dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_WRITE); 743 743 744 744 ret = dma_resv_reserve_fences(bo->base.resv, 1); 745 745 if (unlikely(ret)) {
+3 -2
drivers/gpu/drm/ttm/ttm_bo_util.c
··· 507 507 if (ret) 508 508 return ret; 509 509 510 - dma_resv_add_excl_fence(&ghost_obj->base._resv, fence); 510 + dma_resv_add_fence(&ghost_obj->base._resv, fence, 511 + DMA_RESV_USAGE_WRITE); 511 512 512 513 /** 513 514 * If we're not moving to fixed memory, the TTM object ··· 562 561 struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type); 563 562 int ret = 0; 564 563 565 - dma_resv_add_excl_fence(bo->base.resv, fence); 564 + dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_WRITE); 566 565 if (!evict) 567 566 ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt); 568 567 else if (!from->use_tt && pipeline)
+2 -4
drivers/gpu/drm/ttm/ttm_execbuf_util.c
··· 154 154 list_for_each_entry(entry, list, head) { 155 155 struct ttm_buffer_object *bo = entry->bo; 156 156 157 - if (entry->num_shared) 158 - dma_resv_add_shared_fence(bo->base.resv, fence); 159 - else 160 - dma_resv_add_excl_fence(bo->base.resv, fence); 157 + dma_resv_add_fence(bo->base.resv, fence, entry->num_shared ? 158 + DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE); 161 159 ttm_bo_move_to_lru_tail_unlocked(bo); 162 160 dma_resv_unlock(bo->base.resv); 163 161 }
+2 -2
drivers/gpu/drm/v3d/v3d_gem.c
··· 550 550 551 551 for (i = 0; i < job->bo_count; i++) { 552 552 /* XXX: Use shared fences for read-only objects. */ 553 - dma_resv_add_excl_fence(job->bo[i]->resv, 554 - job->done_fence); 553 + dma_resv_add_fence(job->bo[i]->resv, job->done_fence, 554 + DMA_RESV_USAGE_WRITE); 555 555 } 556 556 557 557 drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
+1 -1
drivers/gpu/drm/vc4/vc4_gem.c
··· 546 546 bo = to_vc4_bo(&exec->bo[i]->base); 547 547 bo->seqno = seqno; 548 548 549 - dma_resv_add_shared_fence(bo->base.base.resv, exec->fence); 549 + dma_resv_add_fence(bo->base.base.resv, exec->fence, DMA_RESV_USAGE_READ); 550 550 } 551 551 552 552 list_for_each_entry(bo, &exec->unref_list, unref_head) {
+3 -6
drivers/gpu/drm/vgem/vgem_fence.c
··· 161 161 /* Expose the fence via the dma-buf */ 162 162 dma_resv_lock(resv, NULL); 163 163 ret = dma_resv_reserve_fences(resv, 1); 164 - if (!ret) { 165 - if (arg->flags & VGEM_FENCE_WRITE) 166 - dma_resv_add_excl_fence(resv, fence); 167 - else 168 - dma_resv_add_shared_fence(resv, fence); 169 - } 164 + if (!ret) 165 + dma_resv_add_fence(resv, fence, arg->flags & VGEM_FENCE_WRITE ? 166 + DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ); 170 167 dma_resv_unlock(resv); 171 168 172 169 /* Record the fence in our idr for later signaling */
+2 -1
drivers/gpu/drm/virtio/virtgpu_gem.c
··· 250 250 int i; 251 251 252 252 for (i = 0; i < objs->nents; i++) 253 - dma_resv_add_excl_fence(objs->objs[i]->resv, fence); 253 + dma_resv_add_fence(objs->objs[i]->resv, fence, 254 + DMA_RESV_USAGE_WRITE); 254 255 } 255 256 256 257 void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs)
+2 -1
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
··· 758 758 759 759 ret = dma_resv_reserve_fences(bo->base.resv, 1); 760 760 if (!ret) 761 - dma_resv_add_excl_fence(bo->base.resv, &fence->base); 761 + dma_resv_add_fence(bo->base.resv, &fence->base, 762 + DMA_RESV_USAGE_WRITE); 762 763 else 763 764 /* Last resort fallback when we are OOM */ 764 765 dma_fence_wait(&fence->base, false);
+8 -8
include/linux/dma-buf.h
··· 393 393 * e.g. exposed in `Implicit Fence Poll Support`_ must follow the 394 394 * below rules. 395 395 * 396 - * - Drivers must add a shared fence through dma_resv_add_shared_fence() 397 - * for anything the userspace API considers a read access. This highly 398 - * depends upon the API and window system. 396 + * - Drivers must add a read fence through dma_resv_add_fence() with the 397 + * DMA_RESV_USAGE_READ flag for anything the userspace API considers a 398 + * read access. This highly depends upon the API and window system. 399 399 * 400 - * - Similarly drivers must set the exclusive fence through 401 - * dma_resv_add_excl_fence() for anything the userspace API considers 402 - * write access. 400 + * - Similarly drivers must add a write fence through 401 + * dma_resv_add_fence() with the DMA_RESV_USAGE_WRITE flag for 402 + * anything the userspace API considers write access. 403 403 * 404 - * - Drivers may just always set the exclusive fence, since that only 404 + * - Drivers may just always add a write fence, since that only 405 405 * causes unecessarily synchronization, but no correctness issues. 406 406 * 407 407 * - Some drivers only expose a synchronous userspace API with no ··· 416 416 * Dynamic importers, see dma_buf_attachment_is_dynamic(), have 417 417 * additional constraints on how they set up fences: 418 418 * 419 - * - Dynamic importers must obey the exclusive fence and wait for it to 419 + * - Dynamic importers must obey the write fences and wait for them to 420 420 * signal before allowing access to the buffer's underlying storage 421 421 * through the device. 422 422 *
+15 -10
include/linux/dma-resv.h
··· 195 195 /** @fence: the currently handled fence */ 196 196 struct dma_fence *fence; 197 197 198 + /** @fence_usage: the usage of the current fence */ 199 + enum dma_resv_usage fence_usage; 200 + 198 201 /** @seq: sequence number to check for modifications */ 199 202 unsigned int seq; 200 203 ··· 247 244 } 248 245 249 246 /** 250 - * dma_resv_iter_is_exclusive - test if the current fence is the exclusive one 247 + * dma_resv_iter_usage - Return the usage of the current fence 251 248 * @cursor: the cursor of the current position 252 249 * 253 - * Returns true if the currently returned fence is the exclusive one. 250 + * Returns the usage of the currently processed fence. 254 251 */ 255 - static inline bool dma_resv_iter_is_exclusive(struct dma_resv_iter *cursor) 252 + static inline enum dma_resv_usage 253 + dma_resv_iter_usage(struct dma_resv_iter *cursor) 256 254 { 257 - return cursor->index == 0; 255 + return cursor->fence_usage; 258 256 } 259 257 260 258 /** ··· 310 306 #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base) 311 307 312 308 #ifdef CONFIG_DEBUG_MUTEXES 313 - void dma_resv_reset_shared_max(struct dma_resv *obj); 309 + void dma_resv_reset_max_fences(struct dma_resv *obj); 314 310 #else 315 - static inline void dma_resv_reset_shared_max(struct dma_resv *obj) {} 311 + static inline void dma_resv_reset_max_fences(struct dma_resv *obj) {} 316 312 #endif 317 313 318 314 /** ··· 458 454 */ 459 455 static inline void dma_resv_unlock(struct dma_resv *obj) 460 456 { 461 - dma_resv_reset_shared_max(obj); 457 + dma_resv_reset_max_fences(obj); 462 458 ww_mutex_unlock(&obj->lock); 463 459 } 464 460 465 461 void dma_resv_init(struct dma_resv *obj); 466 462 void dma_resv_fini(struct dma_resv *obj); 467 463 int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences); 468 - void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence); 464 + void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence, 465 + enum dma_resv_usage usage); 469 466 void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context, 470 - struct dma_fence *fence); 471 - void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence); 467 + struct dma_fence *fence, 468 + enum dma_resv_usage usage); 472 469 int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage, 473 470 unsigned int *num_fences, struct dma_fence ***fences); 474 471 int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
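
Finally, a small, hypothetical sketch of the consumer side with the reworked
iterator; dma_resv_for_each_fence(), dma_resv_iter_usage() and the usage values
come from this series, while the debug helper itself is assumed:

#include <linux/dma-resv.h>

/*
 * Hypothetical debug helper: count write vs. read fences on an object.
 * The locked iterator is used, so the caller must hold the resv lock.
 */
static void example_count_fences(struct dma_resv *resv)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	unsigned int writes = 0, reads = 0;

	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, fence) {
		if (dma_resv_iter_usage(&cursor) == DMA_RESV_USAGE_WRITE)
			writes++;
		else
			reads++;
	}

	pr_info("resv: %u write fence(s), %u read fence(s)\n", writes, reads);
}

Iterating with DMA_RESV_USAGE_READ returns both write and read fences, matching
how dma_resv_describe() above distinguishes them via dma_resv_iter_usage().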