Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/i915: call i915_request_await_object from _i915_vma_move_to_active

Since almost all calls to i915_vma_move_to_active are preceded by a call to
i915_request_await_object, let's call the latter from
_i915_vma_move_to_active by default, and add a flag that allows callers to bypass it.
Adjust all callers accordingly.
The patch should not introduce functional changes.

Signed-off-by: Andrzej Hajda <andrzej.hajda@intel.com>
Acked-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221019215906.295296-2-andrzej.hajda@intel.com

Authored by Andrzej Hajda; committed by Matthew Auld
2a76fc89 5664561c

+53 -120
+9 -8
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
··· 53 53 #define DBG_FORCE_RELOC 0 /* choose one of the above! */ 54 54 }; 55 55 56 - /* __EXEC_OBJECT_NO_RESERVE is BIT(31), defined in i915_vma.h */ 57 - #define __EXEC_OBJECT_HAS_PIN BIT(30) 58 - #define __EXEC_OBJECT_HAS_FENCE BIT(29) 59 - #define __EXEC_OBJECT_USERPTR_INIT BIT(28) 60 - #define __EXEC_OBJECT_NEEDS_MAP BIT(27) 61 - #define __EXEC_OBJECT_NEEDS_BIAS BIT(26) 62 - #define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 26) /* all of the above + */ 56 + /* __EXEC_OBJECT_ flags > BIT(29) defined in i915_vma.h */ 57 + #define __EXEC_OBJECT_HAS_PIN BIT(29) 58 + #define __EXEC_OBJECT_HAS_FENCE BIT(28) 59 + #define __EXEC_OBJECT_USERPTR_INIT BIT(27) 60 + #define __EXEC_OBJECT_NEEDS_MAP BIT(26) 61 + #define __EXEC_OBJECT_NEEDS_BIAS BIT(25) 62 + #define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 25) /* all of the above + */ 63 63 #define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE) 64 64 65 65 #define __EXEC_HAS_RELOC BIT(31) ··· 2101 2101 eb->composite_fence ? 2102 2102 eb->composite_fence : 2103 2103 &eb->requests[j]->fence, 2104 - flags | __EXEC_OBJECT_NO_RESERVE); 2104 + flags | __EXEC_OBJECT_NO_RESERVE | 2105 + __EXEC_OBJECT_NO_REQUEST_AWAIT); 2105 2106 } 2106 2107 } 2107 2108
+1 -3
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
··· 464 464 int err; 465 465 466 466 i915_vma_lock(vma); 467 - err = i915_request_await_object(rq, vma->obj, false); 468 - if (err == 0) 469 - err = i915_vma_move_to_active(vma, rq, flags); 467 + err = i915_vma_move_to_active(vma, rq, flags); 470 468 i915_vma_unlock(vma); 471 469 472 470 return err;
+1 -3
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
··· 239 239 } 240 240 intel_ring_advance(rq, cs); 241 241 242 - err = i915_request_await_object(rq, vma->obj, true); 243 - if (err == 0) 244 - err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); 242 + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); 245 243 246 244 out_rq: 247 245 i915_request_add(rq);
+4 -12
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
··· 984 984 goto err_batch; 985 985 } 986 986 987 - err = i915_request_await_object(rq, batch->obj, false); 988 - if (err == 0) 989 - err = i915_vma_move_to_active(batch, rq, 0); 987 + err = i915_vma_move_to_active(batch, rq, 0); 990 988 if (err) 991 989 goto skip_request; 992 990 993 - err = i915_request_await_object(rq, vma->obj, true); 994 - if (err == 0) 995 - err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); 991 + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); 996 992 if (err) 997 993 goto skip_request; 998 994 ··· 1549 1553 } 1550 1554 1551 1555 i915_vma_lock(vma); 1552 - err = i915_request_await_object(rq, vma->obj, false); 1553 - if (err == 0) 1554 - err = i915_vma_move_to_active(vma, rq, 0); 1556 + err = i915_vma_move_to_active(vma, rq, 0); 1555 1557 i915_vma_unlock(vma); 1556 1558 if (err) 1557 1559 goto skip_request; ··· 1683 1689 } 1684 1690 1685 1691 i915_vma_lock(vma); 1686 - err = i915_request_await_object(rq, vma->obj, true); 1687 - if (err == 0) 1688 - err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); 1692 + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); 1689 1693 i915_vma_unlock(vma); 1690 1694 if (err) 1691 1695 goto skip_request;
+3 -7
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
··· 565 565 goto err_unpin; 566 566 } 567 567 568 - err = i915_request_await_object(rq, vma->obj, true); 569 - if (err == 0) 570 - err = i915_vma_move_to_active(vma, rq, 571 - EXEC_OBJECT_WRITE); 568 + err = i915_vma_move_to_active(vma, rq, 569 + EXEC_OBJECT_WRITE); 572 570 573 571 i915_request_add(rq); 574 572 err_unpin: ··· 1606 1608 goto out_unpin; 1607 1609 } 1608 1610 1609 - err = i915_request_await_object(rq, vma->obj, false); 1610 - if (err == 0) 1611 - err = i915_vma_move_to_active(vma, rq, 0); 1611 + err = i915_vma_move_to_active(vma, rq, 0); 1612 1612 1613 1613 err = engine->emit_bb_start(rq, vma->node.start, 0, 0); 1614 1614 i915_request_get(rq);
+2 -6
drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
··· 131 131 } 132 132 133 133 i915_vma_lock(batch); 134 - err = i915_request_await_object(rq, batch->obj, false); 135 - if (err == 0) 136 - err = i915_vma_move_to_active(batch, rq, 0); 134 + err = i915_vma_move_to_active(batch, rq, 0); 137 135 i915_vma_unlock(batch); 138 136 if (err) 139 137 goto skip_request; 140 138 141 139 i915_vma_lock(vma); 142 - err = i915_request_await_object(rq, vma->obj, true); 143 - if (err == 0) 144 - err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); 140 + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); 145 141 i915_vma_unlock(vma); 146 142 if (err) 147 143 goto skip_request;
+1 -3
drivers/gpu/drm/i915/gt/intel_renderstate.c
··· 215 215 if (!so->vma) 216 216 return 0; 217 217 218 - err = i915_request_await_object(rq, so->vma->obj, false); 219 - if (err == 0) 220 - err = i915_vma_move_to_active(so->vma, rq, 0); 218 + err = i915_vma_move_to_active(so->vma, rq, 0); 221 219 if (err) 222 220 return err; 223 221
+1 -3
drivers/gpu/drm/i915/gt/intel_workarounds.c
··· 3189 3189 goto err_vma; 3190 3190 } 3191 3191 3192 - err = i915_request_await_object(rq, vma->obj, true); 3193 - if (err == 0) 3194 - err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); 3192 + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); 3195 3193 if (err == 0) 3196 3194 err = wa_list_srm(rq, wal, vma); 3197 3195
+3 -11
drivers/gpu/drm/i915/gt/selftest_execlists.c
··· 2764 2764 i915_request_get(rq); 2765 2765 2766 2766 i915_vma_lock(vma); 2767 - err = i915_request_await_object(rq, vma->obj, false); 2768 - if (!err) 2769 - err = i915_vma_move_to_active(vma, rq, 0); 2767 + err = i915_vma_move_to_active(vma, rq, 0); 2770 2768 if (!err) 2771 2769 err = rq->engine->emit_bb_start(rq, 2772 2770 vma->node.start, ··· 3178 3180 } 3179 3181 3180 3182 i915_vma_lock(vma); 3181 - err = i915_request_await_object(rq, vma->obj, false); 3182 - if (!err) 3183 - err = i915_vma_move_to_active(vma, rq, 0); 3183 + err = i915_vma_move_to_active(vma, rq, 0); 3184 3184 i915_vma_unlock(vma); 3185 3185 3186 3186 i915_vma_lock(batch); 3187 - if (!err) 3188 - err = i915_request_await_object(rq, batch->obj, false); 3189 3187 if (!err) 3190 3188 err = i915_vma_move_to_active(batch, rq, 0); 3191 3189 if (!err) ··· 3515 3521 3516 3522 if (vma) { 3517 3523 i915_vma_lock(vma); 3518 - err = i915_request_await_object(rq, vma->obj, false); 3519 - if (!err) 3520 - err = i915_vma_move_to_active(vma, rq, 0); 3524 + err = i915_vma_move_to_active(vma, rq, 0); 3521 3525 if (!err) 3522 3526 err = rq->engine->emit_bb_start(rq, 3523 3527 vma->node.start,
+4 -13
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
··· 106 106 int err; 107 107 108 108 i915_vma_lock(vma); 109 - err = i915_request_await_object(rq, vma->obj, 110 - flags & EXEC_OBJECT_WRITE); 111 - if (err == 0) 112 - err = i915_vma_move_to_active(vma, rq, flags); 109 + err = i915_vma_move_to_active(vma, rq, flags); 113 110 i915_vma_unlock(vma); 114 111 115 112 return err; ··· 1517 1520 } 1518 1521 1519 1522 i915_vma_lock(arg.vma); 1520 - err = i915_request_await_object(rq, arg.vma->obj, 1521 - flags & EXEC_OBJECT_WRITE); 1522 - if (err == 0) { 1523 - err = i915_vma_move_to_active(arg.vma, rq, flags); 1524 - if (err) 1525 - pr_err("[%s] Move to active failed: %d!\n", engine->name, err); 1526 - } else { 1527 - pr_err("[%s] Request await failed: %d!\n", engine->name, err); 1528 - } 1523 + err = i915_vma_move_to_active(arg.vma, rq, flags); 1524 + if (err) 1525 + pr_err("[%s] Move to active failed: %d!\n", engine->name, err); 1529 1526 1530 1527 i915_vma_unlock(arg.vma); 1531 1528
+3 -9
drivers/gpu/drm/i915/gt/selftest_lrc.c
··· 452 452 *cs++ = i915_ggtt_offset(scratch) + RING_TAIL_IDX * sizeof(u32); 453 453 *cs++ = 0; 454 454 455 - err = i915_request_await_object(rq, scratch->obj, true); 456 - if (!err) 457 - err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE); 455 + err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE); 458 456 459 457 i915_request_get(rq); 460 458 i915_request_add(rq); ··· 600 602 } 601 603 602 604 i915_vma_lock(scratch); 603 - err = i915_request_await_object(rq, scratch->obj, true); 604 - if (!err) 605 - err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE); 605 + err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE); 606 606 i915_vma_unlock(scratch); 607 607 608 608 i915_request_get(rq); ··· 1056 1060 int err; 1057 1061 1058 1062 i915_vma_lock(vma); 1059 - err = i915_request_await_object(rq, vma->obj, flags); 1060 - if (!err) 1061 - err = i915_vma_move_to_active(vma, rq, flags); 1063 + err = i915_vma_move_to_active(vma, rq, flags); 1062 1064 i915_vma_unlock(vma); 1063 1065 1064 1066 return err;
+1 -3
drivers/gpu/drm/i915/gt/selftest_mocs.c
··· 228 228 return PTR_ERR(rq); 229 229 230 230 i915_vma_lock(vma); 231 - err = i915_request_await_object(rq, vma->obj, true); 232 - if (!err) 233 - err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); 231 + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); 234 232 i915_vma_unlock(vma); 235 233 236 234 /* Read the mocs tables back using SRM */
+2 -6
drivers/gpu/drm/i915/gt/selftest_rps.c
··· 652 652 goto err_vma; 653 653 } 654 654 655 - err = i915_request_await_object(rq, vma->obj, false); 656 - if (!err) 657 - err = i915_vma_move_to_active(vma, rq, 0); 655 + err = i915_vma_move_to_active(vma, rq, 0); 658 656 if (!err) 659 657 err = rq->engine->emit_bb_start(rq, 660 658 vma->node.start, ··· 791 793 goto err_vma; 792 794 } 793 795 794 - err = i915_request_await_object(rq, vma->obj, false); 795 - if (!err) 796 - err = i915_vma_move_to_active(vma, rq, 0); 796 + err = i915_vma_move_to_active(vma, rq, 0); 797 797 if (!err) 798 798 err = rq->engine->emit_bb_start(rq, 799 799 vma->node.start,
+6 -16
drivers/gpu/drm/i915/gt/selftest_workarounds.c
··· 139 139 } 140 140 141 141 i915_vma_lock(vma); 142 - err = i915_request_await_object(rq, vma->obj, true); 143 - if (err == 0) 144 - err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); 142 + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); 145 143 i915_vma_unlock(vma); 146 144 if (err) 147 145 goto err_req; ··· 630 632 goto err_request; 631 633 } 632 634 633 - err = i915_request_await_object(rq, batch->obj, false); 634 - if (err == 0) 635 - err = i915_vma_move_to_active(batch, rq, 0); 635 + err = i915_vma_move_to_active(batch, rq, 0); 636 636 if (err) 637 637 goto err_request; 638 638 639 - err = i915_request_await_object(rq, scratch->obj, true); 640 - if (err == 0) 641 - err = i915_vma_move_to_active(scratch, rq, 642 - EXEC_OBJECT_WRITE); 639 + err = i915_vma_move_to_active(scratch, rq, 640 + EXEC_OBJECT_WRITE); 643 641 if (err) 644 642 goto err_request; 645 643 ··· 854 860 return PTR_ERR(rq); 855 861 856 862 i915_vma_lock(results); 857 - err = i915_request_await_object(rq, results->obj, true); 858 - if (err == 0) 859 - err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE); 863 + err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE); 860 864 i915_vma_unlock(results); 861 865 if (err) 862 866 goto err_req; ··· 936 944 } 937 945 938 946 i915_vma_lock(batch); 939 - err = i915_request_await_object(rq, batch->obj, false); 940 - if (err == 0) 941 - err = i915_vma_move_to_active(batch, rq, 0); 947 + err = i915_vma_move_to_active(batch, rq, 0); 942 948 i915_vma_unlock(batch); 943 949 if (err) 944 950 goto err_request;
+2 -3
drivers/gpu/drm/i915/gvt/scheduler.c
··· 570 570 if (gmadr_bytes == 8) 571 571 bb->bb_start_cmd_va[2] = 0; 572 572 573 - ret = i915_vma_move_to_active(bb->vma, 574 - workload->req, 575 - 0); 573 + ret = i915_vma_move_to_active(bb->vma, workload->req, 574 + __EXEC_OBJECT_NO_REQUEST_AWAIT); 576 575 if (ret) 577 576 goto err; 578 577
+1 -3
drivers/gpu/drm/i915/i915_perf.c
··· 2253 2253 goto err_add_request; 2254 2254 } 2255 2255 2256 - err = i915_request_await_object(rq, vma->obj, 0); 2257 - if (!err) 2258 - err = i915_vma_move_to_active(vma, rq, 0); 2256 + err = i915_vma_move_to_active(vma, rq, 0); 2259 2257 if (err) 2260 2258 goto err_add_request; 2261 2259
+5
drivers/gpu/drm/i915/i915_vma.c
··· 1844 1844 1845 1845 GEM_BUG_ON(!vma->pages); 1846 1846 1847 + if (!(flags & __EXEC_OBJECT_NO_REQUEST_AWAIT)) { 1848 + err = i915_request_await_object(rq, vma->obj, flags & EXEC_OBJECT_WRITE); 1849 + if (unlikely(err)) 1850 + return err; 1851 + } 1847 1852 err = __i915_vma_move_to_active(vma, rq); 1848 1853 if (unlikely(err)) 1849 1854 return err;
+1
drivers/gpu/drm/i915/i915_vma.h
··· 55 55 56 56 /* do not reserve memory to prevent deadlocks */ 57 57 #define __EXEC_OBJECT_NO_RESERVE BIT(31) 58 + #define __EXEC_OBJECT_NO_REQUEST_AWAIT BIT(30) 58 59 59 60 int __must_check _i915_vma_move_to_active(struct i915_vma *vma, 60 61 struct i915_request *rq,
+2 -7
drivers/gpu/drm/i915/selftests/i915_request.c
··· 1223 1223 goto out_request; 1224 1224 } 1225 1225 1226 - err = i915_request_await_object(request[idx], batch->obj, 0); 1227 - if (err == 0) 1228 - err = i915_vma_move_to_active(batch, request[idx], 0); 1226 + err = i915_vma_move_to_active(batch, request[idx], 0); 1229 1227 GEM_BUG_ON(err); 1230 1228 1231 1229 err = engine->emit_bb_start(request[idx], ··· 1350 1352 } 1351 1353 } 1352 1354 1353 - err = i915_request_await_object(request[idx], 1354 - batch->obj, false); 1355 - if (err == 0) 1356 - err = i915_vma_move_to_active(batch, request[idx], 0); 1355 + err = i915_vma_move_to_active(batch, request[idx], 0); 1357 1356 GEM_BUG_ON(err); 1358 1357 1359 1358 err = engine->emit_bb_start(request[idx],
+1 -4
drivers/gpu/drm/i915/selftests/igt_spinner.c
··· 126 126 int err; 127 127 128 128 i915_vma_lock(vma); 129 - err = i915_request_await_object(rq, vma->obj, 130 - flags & EXEC_OBJECT_WRITE); 131 - if (err == 0) 132 - err = i915_vma_move_to_active(vma, rq, flags); 129 + err = i915_vma_move_to_active(vma, rq, flags); 133 130 i915_vma_unlock(vma); 134 131 135 132 return err;