Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/i915: Split i915_gem_timeline into individual timelines

We need to move to a more flexible timeline that doesn't assume one
fence context per engine, and so allow for a single timeline to be used
across a combination of engines. This means that preallocating a fence
context per engine is now a hindrance, and so we want to introduce the
singular timeline. From the code perspective, this has the notable
advantage of clearing up a lot of murky semantics and some clumsy
pointer chasing.

By splitting the timeline up into a single entity rather than an array
of per-engine timelines, we can realise the goal of the previous patch
of tracking the timeline alongside the ring.

v2: Tweak wait_for_idle to stop the compiler thinking that ret may be
uninitialised.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180502163839.3248-2-chris@chris-wilson.co.uk

+397 -582
+1 -1
drivers/gpu/drm/i915/Makefile
··· 71 71 i915_gem_shrinker.o \ 72 72 i915_gem_stolen.o \ 73 73 i915_gem_tiling.o \ 74 - i915_gem_timeline.o \ 75 74 i915_gem_userptr.o \ 76 75 i915_gemfs.o \ 77 76 i915_query.o \ 78 77 i915_request.o \ 78 + i915_timeline.o \ 79 79 i915_trace_points.o \ 80 80 i915_vma.o \ 81 81 intel_breadcrumbs.o \
+1 -3
drivers/gpu/drm/i915/i915_drv.h
··· 72 72 #include "i915_gem_fence_reg.h" 73 73 #include "i915_gem_object.h" 74 74 #include "i915_gem_gtt.h" 75 - #include "i915_gem_timeline.h" 76 75 #include "i915_gpu_error.h" 77 76 #include "i915_request.h" 78 77 #include "i915_scheduler.h" 78 + #include "i915_timeline.h" 79 79 #include "i915_vma.h" 80 80 81 81 #include "intel_gvt.h" ··· 2059 2059 void (*resume)(struct drm_i915_private *); 2060 2060 void (*cleanup_engine)(struct intel_engine_cs *engine); 2061 2061 2062 - struct i915_gem_timeline execution_timeline; 2063 - struct i915_gem_timeline legacy_timeline; 2064 2062 struct list_head timelines; 2065 2063 2066 2064 struct list_head active_rings;
+56 -73
drivers/gpu/drm/i915/i915_gem.c
··· 162 162 synchronize_irq(i915->drm.irq); 163 163 164 164 intel_engines_park(i915); 165 - i915_gem_timelines_park(i915); 165 + i915_timelines_park(i915); 166 166 167 167 i915_pmu_gt_parked(i915); 168 168 ··· 2977 2977 * extra delay for a recent interrupt is pointless. Hence, we do 2978 2978 * not need an engine->irq_seqno_barrier() before the seqno reads. 2979 2979 */ 2980 - spin_lock_irqsave(&engine->timeline->lock, flags); 2981 - list_for_each_entry(request, &engine->timeline->requests, link) { 2980 + spin_lock_irqsave(&engine->timeline.lock, flags); 2981 + list_for_each_entry(request, &engine->timeline.requests, link) { 2982 2982 if (__i915_request_completed(request, request->global_seqno)) 2983 2983 continue; 2984 2984 ··· 2989 2989 active = request; 2990 2990 break; 2991 2991 } 2992 - spin_unlock_irqrestore(&engine->timeline->lock, flags); 2992 + spin_unlock_irqrestore(&engine->timeline.lock, flags); 2993 2993 2994 2994 return active; 2995 2995 } ··· 3110 3110 { 3111 3111 struct intel_engine_cs *engine = request->engine; 3112 3112 struct i915_gem_context *hung_ctx = request->ctx; 3113 - struct intel_timeline *timeline = request->timeline; 3113 + struct i915_timeline *timeline = request->timeline; 3114 3114 unsigned long flags; 3115 3115 3116 - GEM_BUG_ON(timeline == engine->timeline); 3116 + GEM_BUG_ON(timeline == &engine->timeline); 3117 3117 3118 - spin_lock_irqsave(&engine->timeline->lock, flags); 3118 + spin_lock_irqsave(&engine->timeline.lock, flags); 3119 3119 spin_lock(&timeline->lock); 3120 3120 3121 - list_for_each_entry_continue(request, &engine->timeline->requests, link) 3121 + list_for_each_entry_continue(request, &engine->timeline.requests, link) 3122 3122 if (request->ctx == hung_ctx) 3123 3123 skip_request(request); 3124 3124 ··· 3126 3126 skip_request(request); 3127 3127 3128 3128 spin_unlock(&timeline->lock); 3129 - spin_unlock_irqrestore(&engine->timeline->lock, flags); 3129 + spin_unlock_irqrestore(&engine->timeline.lock, flags); 3130 3130 
} 3131 3131 3132 3132 /* Returns the request if it was guilty of the hang */ ··· 3183 3183 dma_fence_set_error(&request->fence, -EAGAIN); 3184 3184 3185 3185 /* Rewind the engine to replay the incomplete rq */ 3186 - spin_lock_irq(&engine->timeline->lock); 3186 + spin_lock_irq(&engine->timeline.lock); 3187 3187 request = list_prev_entry(request, link); 3188 - if (&request->link == &engine->timeline->requests) 3188 + if (&request->link == &engine->timeline.requests) 3189 3189 request = NULL; 3190 - spin_unlock_irq(&engine->timeline->lock); 3190 + spin_unlock_irq(&engine->timeline.lock); 3191 3191 } 3192 3192 } 3193 3193 ··· 3300 3300 request->fence.context, request->fence.seqno); 3301 3301 dma_fence_set_error(&request->fence, -EIO); 3302 3302 3303 - spin_lock_irqsave(&request->engine->timeline->lock, flags); 3303 + spin_lock_irqsave(&request->engine->timeline.lock, flags); 3304 3304 __i915_request_submit(request); 3305 3305 intel_engine_init_global_seqno(request->engine, request->global_seqno); 3306 - spin_unlock_irqrestore(&request->engine->timeline->lock, flags); 3306 + spin_unlock_irqrestore(&request->engine->timeline.lock, flags); 3307 3307 } 3308 3308 3309 3309 void i915_gem_set_wedged(struct drm_i915_private *i915) ··· 3372 3372 * (lockless) lookup doesn't try and wait upon the request as we 3373 3373 * reset it. 
3374 3374 */ 3375 - spin_lock_irqsave(&engine->timeline->lock, flags); 3375 + spin_lock_irqsave(&engine->timeline.lock, flags); 3376 3376 intel_engine_init_global_seqno(engine, 3377 3377 intel_engine_last_submit(engine)); 3378 - spin_unlock_irqrestore(&engine->timeline->lock, flags); 3378 + spin_unlock_irqrestore(&engine->timeline.lock, flags); 3379 3379 3380 3380 i915_gem_reset_finish_engine(engine); 3381 3381 } ··· 3387 3387 3388 3388 bool i915_gem_unset_wedged(struct drm_i915_private *i915) 3389 3389 { 3390 - struct i915_gem_timeline *tl; 3391 - int i; 3390 + struct i915_timeline *tl; 3392 3391 3393 3392 lockdep_assert_held(&i915->drm.struct_mutex); 3394 3393 if (!test_bit(I915_WEDGED, &i915->gpu_error.flags)) ··· 3406 3407 * No more can be submitted until we reset the wedged bit. 3407 3408 */ 3408 3409 list_for_each_entry(tl, &i915->gt.timelines, link) { 3409 - for (i = 0; i < ARRAY_SIZE(tl->engine); i++) { 3410 - struct i915_request *rq; 3410 + struct i915_request *rq; 3411 3411 3412 - rq = i915_gem_active_peek(&tl->engine[i].last_request, 3413 - &i915->drm.struct_mutex); 3414 - if (!rq) 3415 - continue; 3412 + rq = i915_gem_active_peek(&tl->last_request, 3413 + &i915->drm.struct_mutex); 3414 + if (!rq) 3415 + continue; 3416 3416 3417 - /* 3418 - * We can't use our normal waiter as we want to 3419 - * avoid recursively trying to handle the current 3420 - * reset. The basic dma_fence_default_wait() installs 3421 - * a callback for dma_fence_signal(), which is 3422 - * triggered by our nop handler (indirectly, the 3423 - * callback enables the signaler thread which is 3424 - * woken by the nop_submit_request() advancing the seqno 3425 - * and when the seqno passes the fence, the signaler 3426 - * then signals the fence waking us up). 
3427 - */ 3428 - if (dma_fence_default_wait(&rq->fence, true, 3429 - MAX_SCHEDULE_TIMEOUT) < 0) 3430 - return false; 3431 - } 3417 + /* 3418 + * We can't use our normal waiter as we want to 3419 + * avoid recursively trying to handle the current 3420 + * reset. The basic dma_fence_default_wait() installs 3421 + * a callback for dma_fence_signal(), which is 3422 + * triggered by our nop handler (indirectly, the 3423 + * callback enables the signaler thread which is 3424 + * woken by the nop_submit_request() advancing the seqno 3425 + * and when the seqno passes the fence, the signaler 3426 + * then signals the fence waking us up). 3427 + */ 3428 + if (dma_fence_default_wait(&rq->fence, true, 3429 + MAX_SCHEDULE_TIMEOUT) < 0) 3430 + return false; 3432 3431 } 3433 3432 i915_retire_requests(i915); 3434 3433 GEM_BUG_ON(i915->gt.active_requests); ··· 3731 3734 return ret; 3732 3735 } 3733 3736 3734 - static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags) 3737 + static int wait_for_timeline(struct i915_timeline *tl, unsigned int flags) 3735 3738 { 3736 - int ret, i; 3737 - 3738 - for (i = 0; i < ARRAY_SIZE(tl->engine); i++) { 3739 - ret = i915_gem_active_wait(&tl->engine[i].last_request, flags); 3740 - if (ret) 3741 - return ret; 3742 - } 3743 - 3744 - return 0; 3739 + return i915_gem_active_wait(&tl->last_request, flags); 3745 3740 } 3746 3741 3747 3742 static int wait_for_engines(struct drm_i915_private *i915) ··· 3751 3762 3752 3763 int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags) 3753 3764 { 3754 - int ret; 3755 - 3756 3765 /* If the device is asleep, we have no requests outstanding */ 3757 3766 if (!READ_ONCE(i915->gt.awake)) 3758 3767 return 0; 3759 3768 3760 3769 if (flags & I915_WAIT_LOCKED) { 3761 - struct i915_gem_timeline *tl; 3770 + struct i915_timeline *tl; 3771 + int err; 3762 3772 3763 3773 lockdep_assert_held(&i915->drm.struct_mutex); 3764 3774 3765 3775 list_for_each_entry(tl, &i915->gt.timelines, link) 
{ 3766 - ret = wait_for_timeline(tl, flags); 3767 - if (ret) 3768 - return ret; 3776 + err = wait_for_timeline(tl, flags); 3777 + if (err) 3778 + return err; 3769 3779 } 3770 3780 i915_retire_requests(i915); 3771 3781 3772 - ret = wait_for_engines(i915); 3782 + return wait_for_engines(i915); 3773 3783 } else { 3774 - ret = wait_for_timeline(&i915->gt.execution_timeline, flags); 3775 - } 3784 + struct intel_engine_cs *engine; 3785 + enum intel_engine_id id; 3786 + int err; 3776 3787 3777 - return ret; 3788 + for_each_engine(engine, i915, id) { 3789 + err = wait_for_timeline(&engine->timeline, flags); 3790 + if (err) 3791 + return err; 3792 + } 3793 + 3794 + return 0; 3795 + } 3778 3796 } 3779 3797 3780 3798 static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj) ··· 4950 4954 enum intel_engine_id id; 4951 4955 4952 4956 for_each_engine(engine, i915, id) { 4953 - GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline->last_request)); 4957 + GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request)); 4954 4958 GEM_BUG_ON(engine->last_retired_context != kernel_context); 4955 4959 } 4956 4960 } ··· 5599 5603 INIT_LIST_HEAD(&dev_priv->gt.timelines); 5600 5604 INIT_LIST_HEAD(&dev_priv->gt.active_rings); 5601 5605 5602 - mutex_lock(&dev_priv->drm.struct_mutex); 5603 - err = i915_gem_timeline_init__global(dev_priv); 5604 - mutex_unlock(&dev_priv->drm.struct_mutex); 5605 - if (err) 5606 - goto err_priorities; 5607 - 5608 5606 i915_gem_init__mm(dev_priv); 5609 5607 5610 5608 INIT_DELAYED_WORK(&dev_priv->gt.retire_work, ··· 5618 5628 5619 5629 return 0; 5620 5630 5621 - err_priorities: 5622 - kmem_cache_destroy(dev_priv->priorities); 5623 5631 err_dependencies: 5624 5632 kmem_cache_destroy(dev_priv->dependencies); 5625 5633 err_requests: ··· 5638 5650 GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list)); 5639 5651 GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count)); 5640 5652 WARN_ON(dev_priv->mm.object_count); 5641 - 5642 - 
mutex_lock(&dev_priv->drm.struct_mutex); 5643 - i915_gem_timeline_fini(&dev_priv->gt.legacy_timeline); 5644 - i915_gem_timeline_fini(&dev_priv->gt.execution_timeline); 5645 5653 WARN_ON(!list_empty(&dev_priv->gt.timelines)); 5646 - mutex_unlock(&dev_priv->drm.struct_mutex); 5647 5654 5648 5655 kmem_cache_destroy(dev_priv->priorities); 5649 5656 kmem_cache_destroy(dev_priv->dependencies);
+21 -27
drivers/gpu/drm/i915/i915_gem_context.c
··· 122 122 lockdep_assert_held(&ctx->i915->drm.struct_mutex); 123 123 GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); 124 124 125 - i915_gem_timeline_free(ctx->timeline); 126 125 i915_ppgtt_put(ctx->ppgtt); 127 126 128 127 for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) { ··· 376 377 ctx->desc_template = default_desc_template(dev_priv, ppgtt); 377 378 } 378 379 379 - if (HAS_EXECLISTS(dev_priv)) { 380 - struct i915_gem_timeline *timeline; 381 - 382 - timeline = i915_gem_timeline_create(dev_priv, ctx->name); 383 - if (IS_ERR(timeline)) { 384 - __destroy_hw_context(ctx, file_priv); 385 - return ERR_CAST(timeline); 386 - } 387 - 388 - ctx->timeline = timeline; 389 - } 390 - 391 380 trace_i915_context_create(ctx); 392 381 393 382 return ctx; ··· 577 590 idr_destroy(&file_priv->context_idr); 578 591 } 579 592 593 + static struct i915_request * 594 + last_request_on_engine(struct i915_timeline *timeline, 595 + struct intel_engine_cs *engine) 596 + { 597 + struct i915_request *rq; 598 + 599 + if (timeline == &engine->timeline) 600 + return NULL; 601 + 602 + rq = i915_gem_active_raw(&timeline->last_request, 603 + &engine->i915->drm.struct_mutex); 604 + if (rq && rq->engine == engine) 605 + return rq; 606 + 607 + return NULL; 608 + } 609 + 580 610 static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine) 581 611 { 582 - struct i915_gem_timeline *timeline; 612 + struct i915_timeline *timeline; 583 613 584 614 list_for_each_entry(timeline, &engine->i915->gt.timelines, link) { 585 - struct intel_timeline *tl; 586 - 587 - if (timeline == &engine->i915->gt.execution_timeline) 588 - continue; 589 - 590 - tl = &timeline->engine[engine->id]; 591 - if (i915_gem_active_peek(&tl->last_request, 592 - &engine->i915->drm.struct_mutex)) 615 + if (last_request_on_engine(timeline, engine)) 593 616 return false; 594 617 } 595 618 ··· 609 612 int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv) 610 613 { 611 614 struct intel_engine_cs *engine; 612 - struct 
i915_gem_timeline *timeline; 615 + struct i915_timeline *timeline; 613 616 enum intel_engine_id id; 614 617 615 618 lockdep_assert_held(&dev_priv->drm.struct_mutex); ··· 629 632 /* Queue this switch after all other activity */ 630 633 list_for_each_entry(timeline, &dev_priv->gt.timelines, link) { 631 634 struct i915_request *prev; 632 - struct intel_timeline *tl; 633 635 634 - tl = &timeline->engine[engine->id]; 635 - prev = i915_gem_active_raw(&tl->last_request, 636 - &dev_priv->drm.struct_mutex); 636 + prev = last_request_on_engine(timeline, engine); 637 637 if (prev) 638 638 i915_sw_fence_await_sw_fence_gfp(&rq->submit, 639 639 &prev->submit,
-2
drivers/gpu/drm/i915/i915_gem_context.h
··· 58 58 /** file_priv: owning file descriptor */ 59 59 struct drm_i915_file_private *file_priv; 60 60 61 - struct i915_gem_timeline *timeline; 62 - 63 61 /** 64 62 * @ppgtt: unique address space (GTT) 65 63 *
+1 -2
drivers/gpu/drm/i915/i915_gem_gtt.h
··· 38 38 #include <linux/mm.h> 39 39 #include <linux/pagevec.h> 40 40 41 - #include "i915_gem_timeline.h" 42 - 43 41 #include "i915_request.h" 44 42 #include "i915_selftest.h" 43 + #include "i915_timeline.h" 45 44 46 45 #define I915_GTT_PAGE_SIZE_4K BIT(12) 47 46 #define I915_GTT_PAGE_SIZE_64K BIT(16)
-198
drivers/gpu/drm/i915/i915_gem_timeline.c
··· 1 - /* 2 - * Copyright © 2016 Intel Corporation 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice (including the next 12 - * paragraph) shall be included in all copies or substantial portions of the 13 - * Software. 14 - * 15 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 - * IN THE SOFTWARE. 
22 - * 23 - */ 24 - 25 - #include "i915_drv.h" 26 - #include "i915_syncmap.h" 27 - 28 - static void __intel_timeline_init(struct intel_timeline *tl, 29 - struct i915_gem_timeline *parent, 30 - u64 context, 31 - struct lock_class_key *lockclass, 32 - const char *lockname) 33 - { 34 - tl->fence_context = context; 35 - tl->common = parent; 36 - spin_lock_init(&tl->lock); 37 - lockdep_set_class_and_name(&tl->lock, lockclass, lockname); 38 - init_request_active(&tl->last_request, NULL); 39 - INIT_LIST_HEAD(&tl->requests); 40 - i915_syncmap_init(&tl->sync); 41 - } 42 - 43 - static void __intel_timeline_fini(struct intel_timeline *tl) 44 - { 45 - GEM_BUG_ON(!list_empty(&tl->requests)); 46 - 47 - i915_syncmap_free(&tl->sync); 48 - } 49 - 50 - static int __i915_gem_timeline_init(struct drm_i915_private *i915, 51 - struct i915_gem_timeline *timeline, 52 - const char *name, 53 - struct lock_class_key *lockclass, 54 - const char *lockname) 55 - { 56 - unsigned int i; 57 - u64 fences; 58 - 59 - lockdep_assert_held(&i915->drm.struct_mutex); 60 - 61 - /* 62 - * Ideally we want a set of engines on a single leaf as we expect 63 - * to mostly be tracking synchronisation between engines. It is not 64 - * a huge issue if this is not the case, but we may want to mitigate 65 - * any page crossing penalties if they become an issue. 
66 - */ 67 - BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES); 68 - 69 - timeline->i915 = i915; 70 - timeline->name = kstrdup(name ?: "[kernel]", GFP_KERNEL); 71 - if (!timeline->name) 72 - return -ENOMEM; 73 - 74 - list_add(&timeline->link, &i915->gt.timelines); 75 - 76 - /* Called during early_init before we know how many engines there are */ 77 - fences = dma_fence_context_alloc(ARRAY_SIZE(timeline->engine)); 78 - for (i = 0; i < ARRAY_SIZE(timeline->engine); i++) 79 - __intel_timeline_init(&timeline->engine[i], 80 - timeline, fences++, 81 - lockclass, lockname); 82 - 83 - return 0; 84 - } 85 - 86 - int i915_gem_timeline_init(struct drm_i915_private *i915, 87 - struct i915_gem_timeline *timeline, 88 - const char *name) 89 - { 90 - static struct lock_class_key class; 91 - 92 - return __i915_gem_timeline_init(i915, timeline, name, 93 - &class, "&timeline->lock"); 94 - } 95 - 96 - int i915_gem_timeline_init__global(struct drm_i915_private *i915) 97 - { 98 - static struct lock_class_key class1, class2; 99 - int err; 100 - 101 - err = __i915_gem_timeline_init(i915, 102 - &i915->gt.execution_timeline, 103 - "[execution]", &class1, 104 - "i915_execution_timeline"); 105 - if (err) 106 - return err; 107 - 108 - err = __i915_gem_timeline_init(i915, 109 - &i915->gt.legacy_timeline, 110 - "[global]", &class2, 111 - "i915_global_timeline"); 112 - if (err) 113 - goto err_exec_timeline; 114 - 115 - return 0; 116 - 117 - err_exec_timeline: 118 - i915_gem_timeline_fini(&i915->gt.execution_timeline); 119 - return err; 120 - } 121 - 122 - /** 123 - * i915_gem_timelines_park - called when the driver idles 124 - * @i915: the drm_i915_private device 125 - * 126 - * When the driver is completely idle, we know that all of our sync points 127 - * have been signaled and our tracking is then entirely redundant. 
Any request 128 - * to wait upon an older sync point will be completed instantly as we know 129 - * the fence is signaled and therefore we will not even look them up in the 130 - * sync point map. 131 - */ 132 - void i915_gem_timelines_park(struct drm_i915_private *i915) 133 - { 134 - struct i915_gem_timeline *timeline; 135 - int i; 136 - 137 - lockdep_assert_held(&i915->drm.struct_mutex); 138 - 139 - list_for_each_entry(timeline, &i915->gt.timelines, link) { 140 - for (i = 0; i < ARRAY_SIZE(timeline->engine); i++) { 141 - struct intel_timeline *tl = &timeline->engine[i]; 142 - 143 - /* 144 - * All known fences are completed so we can scrap 145 - * the current sync point tracking and start afresh, 146 - * any attempt to wait upon a previous sync point 147 - * will be skipped as the fence was signaled. 148 - */ 149 - i915_syncmap_free(&tl->sync); 150 - } 151 - } 152 - } 153 - 154 - void i915_gem_timeline_fini(struct i915_gem_timeline *timeline) 155 - { 156 - int i; 157 - 158 - lockdep_assert_held(&timeline->i915->drm.struct_mutex); 159 - 160 - for (i = 0; i < ARRAY_SIZE(timeline->engine); i++) 161 - __intel_timeline_fini(&timeline->engine[i]); 162 - 163 - list_del(&timeline->link); 164 - kfree(timeline->name); 165 - } 166 - 167 - struct i915_gem_timeline * 168 - i915_gem_timeline_create(struct drm_i915_private *i915, const char *name) 169 - { 170 - struct i915_gem_timeline *timeline; 171 - int err; 172 - 173 - timeline = kzalloc(sizeof(*timeline), GFP_KERNEL); 174 - if (!timeline) 175 - return ERR_PTR(-ENOMEM); 176 - 177 - err = i915_gem_timeline_init(i915, timeline, name); 178 - if (err) { 179 - kfree(timeline); 180 - return ERR_PTR(err); 181 - } 182 - 183 - return timeline; 184 - } 185 - 186 - void i915_gem_timeline_free(struct i915_gem_timeline *timeline) 187 - { 188 - if (!timeline) 189 - return; 190 - 191 - i915_gem_timeline_fini(timeline); 192 - kfree(timeline); 193 - } 194 - 195 - #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 196 - #include 
"selftests/mock_timeline.c" 197 - #include "selftests/i915_gem_timeline.c" 198 - #endif
+36 -31
drivers/gpu/drm/i915/i915_gem_timeline.h drivers/gpu/drm/i915/i915_timeline.h
··· 22 22 * 23 23 */ 24 24 25 - #ifndef I915_GEM_TIMELINE_H 26 - #define I915_GEM_TIMELINE_H 25 + #ifndef I915_TIMELINE_H 26 + #define I915_TIMELINE_H 27 27 28 28 #include <linux/list.h> 29 + #include <linux/kref.h> 29 30 30 31 #include "i915_request.h" 31 32 #include "i915_syncmap.h" 32 33 #include "i915_utils.h" 33 34 34 - struct i915_gem_timeline; 35 - 36 - struct intel_timeline { 35 + struct i915_timeline { 37 36 u64 fence_context; 38 37 u32 seqno; 39 38 ··· 70 71 */ 71 72 u32 global_sync[I915_NUM_ENGINES]; 72 73 73 - struct i915_gem_timeline *common; 74 - }; 75 - 76 - struct i915_gem_timeline { 77 74 struct list_head link; 78 - 79 - struct drm_i915_private *i915; 80 75 const char *name; 81 76 82 - struct intel_timeline engine[I915_NUM_ENGINES]; 77 + struct kref kref; 83 78 }; 84 79 85 - int i915_gem_timeline_init(struct drm_i915_private *i915, 86 - struct i915_gem_timeline *tl, 87 - const char *name); 88 - int i915_gem_timeline_init__global(struct drm_i915_private *i915); 89 - void i915_gem_timelines_park(struct drm_i915_private *i915); 90 - void i915_gem_timeline_fini(struct i915_gem_timeline *tl); 80 + void i915_timeline_init(struct drm_i915_private *i915, 81 + struct i915_timeline *tl, 82 + const char *name); 83 + void i915_timeline_fini(struct i915_timeline *tl); 91 84 92 - struct i915_gem_timeline * 93 - i915_gem_timeline_create(struct drm_i915_private *i915, const char *name); 94 - void i915_gem_timeline_free(struct i915_gem_timeline *timeline); 85 + struct i915_timeline * 86 + i915_timeline_create(struct drm_i915_private *i915, const char *name); 95 87 96 - static inline int __intel_timeline_sync_set(struct intel_timeline *tl, 97 - u64 context, u32 seqno) 88 + static inline struct i915_timeline * 89 + i915_timeline_get(struct i915_timeline *timeline) 90 + { 91 + kref_get(&timeline->kref); 92 + return timeline; 93 + } 94 + 95 + void __i915_timeline_free(struct kref *kref); 96 + static inline void i915_timeline_put(struct i915_timeline *timeline) 97 + { 
98 + kref_put(&timeline->kref, __i915_timeline_free); 99 + } 100 + 101 + static inline int __i915_timeline_sync_set(struct i915_timeline *tl, 102 + u64 context, u32 seqno) 98 103 { 99 104 return i915_syncmap_set(&tl->sync, context, seqno); 100 105 } 101 106 102 - static inline int intel_timeline_sync_set(struct intel_timeline *tl, 103 - const struct dma_fence *fence) 107 + static inline int i915_timeline_sync_set(struct i915_timeline *tl, 108 + const struct dma_fence *fence) 104 109 { 105 - return __intel_timeline_sync_set(tl, fence->context, fence->seqno); 110 + return __i915_timeline_sync_set(tl, fence->context, fence->seqno); 106 111 } 107 112 108 - static inline bool __intel_timeline_sync_is_later(struct intel_timeline *tl, 109 - u64 context, u32 seqno) 113 + static inline bool __i915_timeline_sync_is_later(struct i915_timeline *tl, 114 + u64 context, u32 seqno) 110 115 { 111 116 return i915_syncmap_is_later(&tl->sync, context, seqno); 112 117 } 113 118 114 - static inline bool intel_timeline_sync_is_later(struct intel_timeline *tl, 115 - const struct dma_fence *fence) 119 + static inline bool i915_timeline_sync_is_later(struct i915_timeline *tl, 120 + const struct dma_fence *fence) 116 121 { 117 - return __intel_timeline_sync_is_later(tl, fence->context, fence->seqno); 122 + return __i915_timeline_sync_is_later(tl, fence->context, fence->seqno); 118 123 } 124 + 125 + void i915_timelines_park(struct drm_i915_private *i915); 119 126 120 127 #endif
+2 -2
drivers/gpu/drm/i915/i915_gpu_error.c
··· 1310 1310 1311 1311 count = 0; 1312 1312 request = first; 1313 - list_for_each_entry_from(request, &engine->timeline->requests, link) 1313 + list_for_each_entry_from(request, &engine->timeline.requests, link) 1314 1314 count++; 1315 1315 if (!count) 1316 1316 return; ··· 1323 1323 1324 1324 count = 0; 1325 1325 request = first; 1326 - list_for_each_entry_from(request, &engine->timeline->requests, link) { 1326 + list_for_each_entry_from(request, &engine->timeline.requests, link) { 1327 1327 if (count >= ee->num_requests) { 1328 1328 /* 1329 1329 * If the ring request list was changed in
+3 -7
drivers/gpu/drm/i915/i915_perf.c
··· 1695 1695 const struct i915_oa_config *oa_config) 1696 1696 { 1697 1697 struct intel_engine_cs *engine = dev_priv->engine[RCS]; 1698 - struct i915_gem_timeline *timeline; 1698 + struct i915_timeline *timeline; 1699 1699 struct i915_request *rq; 1700 1700 int ret; 1701 1701 ··· 1716 1716 /* Queue this switch after all other activity */ 1717 1717 list_for_each_entry(timeline, &dev_priv->gt.timelines, link) { 1718 1718 struct i915_request *prev; 1719 - struct intel_timeline *tl; 1720 1719 1721 - tl = &timeline->engine[engine->id]; 1722 - prev = i915_gem_active_raw(&tl->last_request, 1720 + prev = i915_gem_active_raw(&timeline->last_request, 1723 1721 &dev_priv->drm.struct_mutex); 1724 1722 if (prev) 1725 - i915_sw_fence_await_sw_fence_gfp(&rq->submit, 1726 - &prev->submit, 1727 - GFP_KERNEL); 1723 + i915_request_await_dma_fence(rq, &prev->fence); 1728 1724 } 1729 1725 1730 1726 i915_request_add(rq);
+33 -35
drivers/gpu/drm/i915/i915_request.c
··· 49 49 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) 50 50 return "signaled"; 51 51 52 - return to_request(fence)->timeline->common->name; 52 + return to_request(fence)->timeline->name; 53 53 } 54 54 55 55 static bool i915_fence_signaled(struct dma_fence *fence) ··· 199 199 static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno) 200 200 { 201 201 struct intel_engine_cs *engine; 202 + struct i915_timeline *timeline; 202 203 enum intel_engine_id id; 203 204 int ret; 204 205 ··· 214 213 215 214 /* If the seqno wraps around, we need to clear the breadcrumb rbtree */ 216 215 for_each_engine(engine, i915, id) { 217 - struct i915_gem_timeline *timeline; 218 - struct intel_timeline *tl = engine->timeline; 219 - 220 216 GEM_TRACE("%s seqno %d (current %d) -> %d\n", 221 217 engine->name, 222 - tl->seqno, 218 + engine->timeline.seqno, 223 219 intel_engine_get_seqno(engine), 224 220 seqno); 225 221 226 - if (!i915_seqno_passed(seqno, tl->seqno)) { 222 + if (!i915_seqno_passed(seqno, engine->timeline.seqno)) { 227 223 /* Flush any waiters before we reuse the seqno */ 228 224 intel_engine_disarm_breadcrumbs(engine); 229 225 GEM_BUG_ON(!list_empty(&engine->breadcrumbs.signals)); ··· 228 230 229 231 /* Check we are idle before we fiddle with hw state! 
*/ 230 232 GEM_BUG_ON(!intel_engine_is_idle(engine)); 231 - GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request)); 233 + GEM_BUG_ON(i915_gem_active_isset(&engine->timeline.last_request)); 232 234 233 235 /* Finally reset hw state */ 234 236 intel_engine_init_global_seqno(engine, seqno); 235 - tl->seqno = seqno; 236 - 237 - list_for_each_entry(timeline, &i915->gt.timelines, link) 238 - memset(timeline->engine[id].global_sync, 0, 239 - sizeof(timeline->engine[id].global_sync)); 237 + engine->timeline.seqno = seqno; 240 238 } 241 239 240 + list_for_each_entry(timeline, &i915->gt.timelines, link) 241 + memset(timeline->global_sync, 0, sizeof(timeline->global_sync)); 242 + 242 243 i915->gt.request_serial = seqno; 244 + 243 245 return 0; 244 246 } 245 247 ··· 355 357 356 358 local_irq_disable(); 357 359 358 - spin_lock(&engine->timeline->lock); 359 - GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline->requests)); 360 + spin_lock(&engine->timeline.lock); 361 + GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline.requests)); 360 362 list_del_init(&rq->link); 361 - spin_unlock(&engine->timeline->lock); 363 + spin_unlock(&engine->timeline.lock); 362 364 363 365 spin_lock(&rq->lock); 364 366 if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags)) ··· 395 397 return; 396 398 397 399 do { 398 - tmp = list_first_entry(&engine->timeline->requests, 400 + tmp = list_first_entry(&engine->timeline.requests, 399 401 typeof(*tmp), link); 400 402 401 403 GEM_BUG_ON(tmp->engine != engine); ··· 490 492 } while (tmp != rq); 491 493 } 492 494 493 - static u32 timeline_get_seqno(struct intel_timeline *tl) 495 + static u32 timeline_get_seqno(struct i915_timeline *tl) 494 496 { 495 497 return ++tl->seqno; 496 498 } 497 499 498 500 static void move_to_timeline(struct i915_request *request, 499 - struct intel_timeline *timeline) 501 + struct i915_timeline *timeline) 500 502 { 501 - GEM_BUG_ON(request->timeline == request->engine->timeline); 502 - 
lockdep_assert_held(&request->engine->timeline->lock); 503 + GEM_BUG_ON(request->timeline == &request->engine->timeline); 504 + lockdep_assert_held(&request->engine->timeline.lock); 503 505 504 506 spin_lock(&request->timeline->lock); 505 507 list_move_tail(&request->link, &timeline->requests); ··· 514 516 GEM_TRACE("%s fence %llx:%d -> global=%d, current %d\n", 515 517 engine->name, 516 518 request->fence.context, request->fence.seqno, 517 - engine->timeline->seqno + 1, 519 + engine->timeline.seqno + 1, 518 520 intel_engine_get_seqno(engine)); 519 521 520 522 GEM_BUG_ON(!irqs_disabled()); 521 - lockdep_assert_held(&engine->timeline->lock); 523 + lockdep_assert_held(&engine->timeline.lock); 522 524 523 525 GEM_BUG_ON(request->global_seqno); 524 526 525 - seqno = timeline_get_seqno(engine->timeline); 527 + seqno = timeline_get_seqno(&engine->timeline); 526 528 GEM_BUG_ON(!seqno); 527 529 GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno)); 528 530 ··· 537 539 request->ring->vaddr + request->postfix); 538 540 539 541 /* Transfer from per-context onto the global per-engine timeline */ 540 - move_to_timeline(request, engine->timeline); 542 + move_to_timeline(request, &engine->timeline); 541 543 542 544 trace_i915_request_execute(request); 543 545 ··· 550 552 unsigned long flags; 551 553 552 554 /* Will be called from irq-context when using foreign fences. 
*/ 553 - spin_lock_irqsave(&engine->timeline->lock, flags); 555 + spin_lock_irqsave(&engine->timeline.lock, flags); 554 556 555 557 __i915_request_submit(request); 556 558 557 - spin_unlock_irqrestore(&engine->timeline->lock, flags); 559 + spin_unlock_irqrestore(&engine->timeline.lock, flags); 558 560 } 559 561 560 562 void __i915_request_unsubmit(struct i915_request *request) ··· 568 570 intel_engine_get_seqno(engine)); 569 571 570 572 GEM_BUG_ON(!irqs_disabled()); 571 - lockdep_assert_held(&engine->timeline->lock); 573 + lockdep_assert_held(&engine->timeline.lock); 572 574 573 575 /* 574 576 * Only unwind in reverse order, required so that the per-context list 575 577 * is kept in seqno/ring order. 576 578 */ 577 579 GEM_BUG_ON(!request->global_seqno); 578 - GEM_BUG_ON(request->global_seqno != engine->timeline->seqno); 580 + GEM_BUG_ON(request->global_seqno != engine->timeline.seqno); 579 581 GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), 580 582 request->global_seqno)); 581 - engine->timeline->seqno--; 583 + engine->timeline.seqno--; 582 584 583 585 /* We may be recursing from the signal callback of another i915 fence */ 584 586 spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING); ··· 605 607 unsigned long flags; 606 608 607 609 /* Will be called from irq-context when using foreign fences. 
*/ 608 - spin_lock_irqsave(&engine->timeline->lock, flags); 610 + spin_lock_irqsave(&engine->timeline.lock, flags); 609 611 610 612 __i915_request_unsubmit(request); 611 613 612 - spin_unlock_irqrestore(&engine->timeline->lock, flags); 614 + spin_unlock_irqrestore(&engine->timeline.lock, flags); 613 615 } 614 616 615 617 static int __i915_sw_fence_call ··· 762 764 rq->ctx = ctx; 763 765 rq->ring = ring; 764 766 rq->timeline = ring->timeline; 765 - GEM_BUG_ON(rq->timeline == engine->timeline); 767 + GEM_BUG_ON(rq->timeline == &engine->timeline); 766 768 767 769 spin_lock_init(&rq->lock); 768 770 dma_fence_init(&rq->fence, ··· 927 929 928 930 /* Squash repeated waits to the same timelines */ 929 931 if (fence->context != rq->i915->mm.unordered_timeline && 930 - intel_timeline_sync_is_later(rq->timeline, fence)) 932 + i915_timeline_sync_is_later(rq->timeline, fence)) 931 933 continue; 932 934 933 935 if (dma_fence_is_i915(fence)) ··· 941 943 942 944 /* Record the latest fence used against each timeline */ 943 945 if (fence->context != rq->i915->mm.unordered_timeline) 944 - intel_timeline_sync_set(rq->timeline, fence); 946 + i915_timeline_sync_set(rq->timeline, fence); 945 947 } while (--nchild); 946 948 947 949 return 0; ··· 1018 1020 { 1019 1021 struct intel_engine_cs *engine = request->engine; 1020 1022 struct intel_ring *ring = request->ring; 1021 - struct intel_timeline *timeline = request->timeline; 1023 + struct i915_timeline *timeline = request->timeline; 1022 1024 struct i915_request *prev; 1023 1025 u32 *cs; 1024 1026 int err;
+2 -1
drivers/gpu/drm/i915/i915_request.h
··· 37 37 struct drm_file; 38 38 struct drm_i915_gem_object; 39 39 struct i915_request; 40 + struct i915_timeline; 40 41 41 42 struct intel_wait { 42 43 struct rb_node node; ··· 96 95 struct i915_gem_context *ctx; 97 96 struct intel_engine_cs *engine; 98 97 struct intel_ring *ring; 99 - struct intel_timeline *timeline; 98 + struct i915_timeline *timeline; 100 99 struct intel_signal_node signaling; 101 100 102 101 /*
+105
drivers/gpu/drm/i915/i915_timeline.c
··· 1 + /* 2 + * SPDX-License-Identifier: MIT 3 + * 4 + * Copyright © 2016-2018 Intel Corporation 5 + */ 6 + 7 + #include "i915_drv.h" 8 + 9 + #include "i915_timeline.h" 10 + #include "i915_syncmap.h" 11 + 12 + void i915_timeline_init(struct drm_i915_private *i915, 13 + struct i915_timeline *timeline, 14 + const char *name) 15 + { 16 + lockdep_assert_held(&i915->drm.struct_mutex); 17 + 18 + /* 19 + * Ideally we want a set of engines on a single leaf as we expect 20 + * to mostly be tracking synchronisation between engines. It is not 21 + * a huge issue if this is not the case, but we may want to mitigate 22 + * any page crossing penalties if they become an issue. 23 + */ 24 + BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES); 25 + 26 + timeline->name = name; 27 + 28 + list_add(&timeline->link, &i915->gt.timelines); 29 + 30 + /* Called during early_init before we know how many engines there are */ 31 + 32 + timeline->fence_context = dma_fence_context_alloc(1); 33 + 34 + spin_lock_init(&timeline->lock); 35 + 36 + init_request_active(&timeline->last_request, NULL); 37 + INIT_LIST_HEAD(&timeline->requests); 38 + 39 + i915_syncmap_init(&timeline->sync); 40 + } 41 + 42 + /** 43 + * i915_timelines_park - called when the driver idles 44 + * @i915: the drm_i915_private device 45 + * 46 + * When the driver is completely idle, we know that all of our sync points 47 + * have been signaled and our tracking is then entirely redundant. Any request 48 + * to wait upon an older sync point will be completed instantly as we know 49 + * the fence is signaled and therefore we will not even look them up in the 50 + * sync point map. 
51 + */ 52 + void i915_timelines_park(struct drm_i915_private *i915) 53 + { 54 + struct i915_timeline *timeline; 55 + 56 + lockdep_assert_held(&i915->drm.struct_mutex); 57 + 58 + list_for_each_entry(timeline, &i915->gt.timelines, link) { 59 + /* 60 + * All known fences are completed so we can scrap 61 + * the current sync point tracking and start afresh, 62 + * any attempt to wait upon a previous sync point 63 + * will be skipped as the fence was signaled. 64 + */ 65 + i915_syncmap_free(&timeline->sync); 66 + } 67 + } 68 + 69 + void i915_timeline_fini(struct i915_timeline *timeline) 70 + { 71 + GEM_BUG_ON(!list_empty(&timeline->requests)); 72 + 73 + i915_syncmap_free(&timeline->sync); 74 + 75 + list_del(&timeline->link); 76 + } 77 + 78 + struct i915_timeline * 79 + i915_timeline_create(struct drm_i915_private *i915, const char *name) 80 + { 81 + struct i915_timeline *timeline; 82 + 83 + timeline = kzalloc(sizeof(*timeline), GFP_KERNEL); 84 + if (!timeline) 85 + return ERR_PTR(-ENOMEM); 86 + 87 + i915_timeline_init(i915, timeline, name); 88 + kref_init(&timeline->kref); 89 + 90 + return timeline; 91 + } 92 + 93 + void __i915_timeline_free(struct kref *kref) 94 + { 95 + struct i915_timeline *timeline = 96 + container_of(kref, typeof(*timeline), kref); 97 + 98 + i915_timeline_fini(timeline); 99 + kfree(timeline); 100 + } 101 + 102 + #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 103 + #include "selftests/mock_timeline.c" 104 + #include "selftests/i915_timeline.c" 105 + #endif
+12 -15
drivers/gpu/drm/i915/intel_engine_cs.c
··· 451 451 GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno); 452 452 } 453 453 454 - static void intel_engine_init_timeline(struct intel_engine_cs *engine) 455 - { 456 - engine->timeline = 457 - &engine->i915->gt.execution_timeline.engine[engine->id]; 458 - } 459 - 460 454 static void intel_engine_init_batch_pool(struct intel_engine_cs *engine) 461 455 { 462 456 i915_gem_batch_pool_init(&engine->batch_pool, engine); ··· 502 508 */ 503 509 void intel_engine_setup_common(struct intel_engine_cs *engine) 504 510 { 511 + i915_timeline_init(engine->i915, &engine->timeline, engine->name); 512 + 505 513 intel_engine_init_execlist(engine); 506 - intel_engine_init_timeline(engine); 507 514 intel_engine_init_hangcheck(engine); 508 515 intel_engine_init_batch_pool(engine); 509 516 intel_engine_init_cmd_parser(engine); ··· 746 751 if (engine->i915->preempt_context) 747 752 intel_context_unpin(engine->i915->preempt_context, engine); 748 753 intel_context_unpin(engine->i915->kernel_context, engine); 754 + 755 + i915_timeline_fini(&engine->timeline); 749 756 } 750 757 751 758 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine) ··· 1000 1003 * the last request that remains in the timeline. When idle, it is 1001 1004 * the last executed context as tracked by retirement. 
1002 1005 */ 1003 - rq = __i915_gem_active_peek(&engine->timeline->last_request); 1006 + rq = __i915_gem_active_peek(&engine->timeline.last_request); 1004 1007 if (rq) 1005 1008 return rq->ctx == kernel_context; 1006 1009 else ··· 1332 1335 1333 1336 drm_printf(m, "\tRequests:\n"); 1334 1337 1335 - rq = list_first_entry(&engine->timeline->requests, 1338 + rq = list_first_entry(&engine->timeline.requests, 1336 1339 struct i915_request, link); 1337 - if (&rq->link != &engine->timeline->requests) 1340 + if (&rq->link != &engine->timeline.requests) 1338 1341 print_request(m, rq, "\t\tfirst "); 1339 1342 1340 - rq = list_last_entry(&engine->timeline->requests, 1343 + rq = list_last_entry(&engine->timeline.requests, 1341 1344 struct i915_request, link); 1342 - if (&rq->link != &engine->timeline->requests) 1345 + if (&rq->link != &engine->timeline.requests) 1343 1346 print_request(m, rq, "\t\tlast "); 1344 1347 1345 1348 rq = i915_gem_find_active_request(engine); ··· 1371 1374 drm_printf(m, "\tDevice is asleep; skipping register dump\n"); 1372 1375 } 1373 1376 1374 - spin_lock_irq(&engine->timeline->lock); 1377 + spin_lock_irq(&engine->timeline.lock); 1375 1378 1376 1379 last = NULL; 1377 1380 count = 0; 1378 - list_for_each_entry(rq, &engine->timeline->requests, link) { 1381 + list_for_each_entry(rq, &engine->timeline.requests, link) { 1379 1382 if (count++ < MAX_REQUESTS_TO_SHOW - 1) 1380 1383 print_request(m, rq, "\t\tE "); 1381 1384 else ··· 1413 1416 print_request(m, last, "\t\tQ "); 1414 1417 } 1415 1418 1416 - spin_unlock_irq(&engine->timeline->lock); 1419 + spin_unlock_irq(&engine->timeline.lock); 1417 1420 1418 1421 spin_lock_irq(&b->rb_lock); 1419 1422 for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
+2 -2
drivers/gpu/drm/i915/intel_guc_submission.c
··· 679 679 bool submit = false; 680 680 struct rb_node *rb; 681 681 682 - spin_lock_irq(&engine->timeline->lock); 682 + spin_lock_irq(&engine->timeline.lock); 683 683 rb = execlists->first; 684 684 GEM_BUG_ON(rb_first(&execlists->queue) != rb); 685 685 ··· 750 750 GEM_BUG_ON(execlists->first && !port_isset(execlists->port)); 751 751 752 752 unlock: 753 - spin_unlock_irq(&engine->timeline->lock); 753 + spin_unlock_irq(&engine->timeline.lock); 754 754 } 755 755 756 756 static void guc_submission_tasklet(unsigned long data)
+28 -20
drivers/gpu/drm/i915/intel_lrc.c
··· 331 331 struct i915_priolist *uninitialized_var(p); 332 332 int last_prio = I915_PRIORITY_INVALID; 333 333 334 - lockdep_assert_held(&engine->timeline->lock); 334 + lockdep_assert_held(&engine->timeline.lock); 335 335 336 336 list_for_each_entry_safe_reverse(rq, rn, 337 - &engine->timeline->requests, 337 + &engine->timeline.requests, 338 338 link) { 339 339 if (i915_request_completed(rq)) 340 340 return; ··· 358 358 struct intel_engine_cs *engine = 359 359 container_of(execlists, typeof(*engine), execlists); 360 360 361 - spin_lock_irq(&engine->timeline->lock); 361 + spin_lock_irq(&engine->timeline.lock); 362 362 __unwind_incomplete_requests(engine); 363 - spin_unlock_irq(&engine->timeline->lock); 363 + spin_unlock_irq(&engine->timeline.lock); 364 364 } 365 365 366 366 static inline void ··· 584 584 * and context switches) submission. 585 585 */ 586 586 587 - spin_lock_irq(&engine->timeline->lock); 587 + spin_lock_irq(&engine->timeline.lock); 588 588 rb = execlists->first; 589 589 GEM_BUG_ON(rb_first(&execlists->queue) != rb); 590 590 ··· 744 744 GEM_BUG_ON(execlists->first && !port_isset(execlists->port)); 745 745 746 746 unlock: 747 - spin_unlock_irq(&engine->timeline->lock); 747 + spin_unlock_irq(&engine->timeline.lock); 748 748 749 749 if (submit) { 750 750 execlists_user_begin(execlists, execlists->port); ··· 894 894 execlists_cancel_port_requests(execlists); 895 895 reset_irq(engine); 896 896 897 - spin_lock(&engine->timeline->lock); 897 + spin_lock(&engine->timeline.lock); 898 898 899 899 /* Mark all executing requests as skipped. 
*/ 900 - list_for_each_entry(rq, &engine->timeline->requests, link) { 900 + list_for_each_entry(rq, &engine->timeline.requests, link) { 901 901 GEM_BUG_ON(!rq->global_seqno); 902 902 if (!i915_request_completed(rq)) 903 903 dma_fence_set_error(&rq->fence, -EIO); ··· 929 929 execlists->first = NULL; 930 930 GEM_BUG_ON(port_isset(execlists->port)); 931 931 932 - spin_unlock(&engine->timeline->lock); 932 + spin_unlock(&engine->timeline.lock); 933 933 934 934 local_irq_restore(flags); 935 935 } ··· 1167 1167 unsigned long flags; 1168 1168 1169 1169 /* Will be called from irq-context when using foreign fences. */ 1170 - spin_lock_irqsave(&engine->timeline->lock, flags); 1170 + spin_lock_irqsave(&engine->timeline.lock, flags); 1171 1171 1172 1172 queue_request(engine, &request->sched, rq_prio(request)); 1173 1173 submit_queue(engine, rq_prio(request)); ··· 1175 1175 GEM_BUG_ON(!engine->execlists.first); 1176 1176 GEM_BUG_ON(list_empty(&request->sched.link)); 1177 1177 1178 - spin_unlock_irqrestore(&engine->timeline->lock, flags); 1178 + spin_unlock_irqrestore(&engine->timeline.lock, flags); 1179 1179 } 1180 1180 1181 1181 static struct i915_request *sched_to_request(struct i915_sched_node *node) ··· 1191 1191 GEM_BUG_ON(!locked); 1192 1192 1193 1193 if (engine != locked) { 1194 - spin_unlock(&locked->timeline->lock); 1195 - spin_lock(&engine->timeline->lock); 1194 + spin_unlock(&locked->timeline.lock); 1195 + spin_lock(&engine->timeline.lock); 1196 1196 } 1197 1197 1198 1198 return engine; ··· 1275 1275 } 1276 1276 1277 1277 engine = request->engine; 1278 - spin_lock_irq(&engine->timeline->lock); 1278 + spin_lock_irq(&engine->timeline.lock); 1279 1279 1280 1280 /* Fifo and depth-first replacement ensure our deps execute before us */ 1281 1281 list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) { ··· 1299 1299 __submit_queue(engine, prio); 1300 1300 } 1301 1301 1302 - spin_unlock_irq(&engine->timeline->lock); 1302 + spin_unlock_irq(&engine->timeline.lock); 1303 1303 
} 1304 1304 1305 1305 static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma) ··· 1828 1828 reset_irq(engine); 1829 1829 1830 1830 /* Push back any incomplete requests for replay after the reset. */ 1831 - spin_lock(&engine->timeline->lock); 1831 + spin_lock(&engine->timeline.lock); 1832 1832 __unwind_incomplete_requests(engine); 1833 - spin_unlock(&engine->timeline->lock); 1833 + spin_unlock(&engine->timeline.lock); 1834 1834 1835 1835 local_irq_restore(flags); 1836 1836 ··· 2599 2599 struct i915_vma *vma; 2600 2600 uint32_t context_size; 2601 2601 struct intel_ring *ring; 2602 + struct i915_timeline *timeline; 2602 2603 int ret; 2603 2604 2604 2605 if (ce->state) ··· 2615 2614 2616 2615 ctx_obj = i915_gem_object_create(ctx->i915, context_size); 2617 2616 if (IS_ERR(ctx_obj)) { 2618 - DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n"); 2619 - return PTR_ERR(ctx_obj); 2617 + ret = PTR_ERR(ctx_obj); 2618 + goto error_deref_obj; 2620 2619 } 2621 2620 2622 2621 vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL); ··· 2625 2624 goto error_deref_obj; 2626 2625 } 2627 2626 2628 - ring = intel_engine_create_ring(engine, ctx->timeline, ctx->ring_size); 2627 + timeline = i915_timeline_create(ctx->i915, ctx->name); 2628 + if (IS_ERR(timeline)) { 2629 + ret = PTR_ERR(timeline); 2630 + goto error_deref_obj; 2631 + } 2632 + 2633 + ring = intel_engine_create_ring(engine, timeline, ctx->ring_size); 2634 + i915_timeline_put(timeline); 2629 2635 if (IS_ERR(ring)) { 2630 2636 ret = PTR_ERR(ring); 2631 2637 goto error_deref_obj;
+16 -9
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 697 697 struct i915_request *request; 698 698 unsigned long flags; 699 699 700 - spin_lock_irqsave(&engine->timeline->lock, flags); 700 + spin_lock_irqsave(&engine->timeline.lock, flags); 701 701 702 702 /* Mark all submitted requests as skipped. */ 703 - list_for_each_entry(request, &engine->timeline->requests, link) { 703 + list_for_each_entry(request, &engine->timeline.requests, link) { 704 704 GEM_BUG_ON(!request->global_seqno); 705 705 if (!i915_request_completed(request)) 706 706 dma_fence_set_error(&request->fence, -EIO); 707 707 } 708 708 /* Remaining _unready_ requests will be nop'ed when submitted */ 709 709 710 - spin_unlock_irqrestore(&engine->timeline->lock, flags); 710 + spin_unlock_irqrestore(&engine->timeline.lock, flags); 711 711 } 712 712 713 713 static void i9xx_submit_request(struct i915_request *request) ··· 1118 1118 1119 1119 struct intel_ring * 1120 1120 intel_engine_create_ring(struct intel_engine_cs *engine, 1121 - struct i915_gem_timeline *timeline, 1121 + struct i915_timeline *timeline, 1122 1122 int size) 1123 1123 { 1124 1124 struct intel_ring *ring; ··· 1126 1126 1127 1127 GEM_BUG_ON(!is_power_of_2(size)); 1128 1128 GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES); 1129 - GEM_BUG_ON(&timeline->engine[engine->id] == engine->timeline); 1129 + GEM_BUG_ON(timeline == &engine->timeline); 1130 1130 lockdep_assert_held(&engine->i915->drm.struct_mutex); 1131 1131 1132 1132 ring = kzalloc(sizeof(*ring), GFP_KERNEL); ··· 1134 1134 return ERR_PTR(-ENOMEM); 1135 1135 1136 1136 INIT_LIST_HEAD(&ring->request_list); 1137 - ring->timeline = &timeline->engine[engine->id]; 1137 + ring->timeline = i915_timeline_get(timeline); 1138 1138 1139 1139 ring->size = size; 1140 1140 /* Workaround an erratum on the i830 which causes a hang if ··· 1165 1165 i915_vma_close(ring->vma); 1166 1166 __i915_gem_object_release_unless_active(obj); 1167 1167 1168 + i915_timeline_put(ring->timeline); 1168 1169 kfree(ring); 1169 1170 } 1170 1171 ··· 1324 1323 static int 
intel_init_ring_buffer(struct intel_engine_cs *engine) 1325 1324 { 1326 1325 struct intel_ring *ring; 1326 + struct i915_timeline *timeline; 1327 1327 int err; 1328 1328 1329 1329 intel_engine_setup_common(engine); ··· 1333 1331 if (err) 1334 1332 goto err; 1335 1333 1336 - ring = intel_engine_create_ring(engine, 1337 - &engine->i915->gt.legacy_timeline, 1338 - 32 * PAGE_SIZE); 1334 + timeline = i915_timeline_create(engine->i915, engine->name); 1335 + if (IS_ERR(timeline)) { 1336 + err = PTR_ERR(timeline); 1337 + goto err; 1338 + } 1339 + 1340 + ring = intel_engine_create_ring(engine, timeline, 32 * PAGE_SIZE); 1341 + i915_timeline_put(timeline); 1339 1342 if (IS_ERR(ring)) { 1340 1343 err = PTR_ERR(ring); 1341 1344 goto err;
+6 -5
drivers/gpu/drm/i915/intel_ringbuffer.h
··· 6 6 #include <linux/seqlock.h> 7 7 8 8 #include "i915_gem_batch_pool.h" 9 - #include "i915_gem_timeline.h" 10 9 11 10 #include "i915_reg.h" 12 11 #include "i915_pmu.h" 13 12 #include "i915_request.h" 14 13 #include "i915_selftest.h" 14 + #include "i915_timeline.h" 15 15 #include "intel_gpu_commands.h" 16 16 17 17 struct drm_printer; ··· 129 129 struct i915_vma *vma; 130 130 void *vaddr; 131 131 132 - struct intel_timeline *timeline; 132 + struct i915_timeline *timeline; 133 133 struct list_head request_list; 134 134 struct list_head active_link; 135 135 ··· 338 338 u32 mmio_base; 339 339 340 340 struct intel_ring *buffer; 341 - struct intel_timeline *timeline; 341 + 342 + struct i915_timeline timeline; 342 343 343 344 struct drm_i915_gem_object *default_state; 344 345 ··· 771 770 772 771 struct intel_ring * 773 772 intel_engine_create_ring(struct intel_engine_cs *engine, 774 - struct i915_gem_timeline *timeline, 773 + struct i915_timeline *timeline, 775 774 int size); 776 775 int intel_ring_pin(struct intel_ring *ring, 777 776 struct drm_i915_private *i915, ··· 890 889 * wtih serialising this hint with anything, so document it as 891 890 * a hint and nothing more. 892 891 */ 893 - return READ_ONCE(engine->timeline->seqno); 892 + return READ_ONCE(engine->timeline.seqno); 894 893 } 895 894 896 895 void intel_engine_get_instdone(struct intel_engine_cs *engine,
-12
drivers/gpu/drm/i915/selftests/i915_gem_context.c
··· 355 355 356 356 if (first_shared_gtt) { 357 357 ctx = __create_hw_context(i915, file->driver_priv); 358 - if (!IS_ERR(ctx) && HAS_EXECLISTS(i915)) { 359 - struct i915_gem_timeline *timeline; 360 - 361 - timeline = i915_gem_timeline_create(i915, ctx->name); 362 - if (IS_ERR(timeline)) { 363 - __destroy_hw_context(ctx, file->driver_priv); 364 - ctx = ERR_CAST(timeline); 365 - } else { 366 - ctx->timeline = timeline; 367 - } 368 - } 369 - 370 358 first_shared_gtt = false; 371 359 } else { 372 360 ctx = i915_gem_create_context(i915, file->driver_priv);
+31 -63
drivers/gpu/drm/i915/selftests/i915_gem_timeline.c drivers/gpu/drm/i915/selftests/i915_timeline.c
··· 1 1 /* 2 - * Copyright © 2017 Intel Corporation 2 + * SPDX-License-Identifier: MIT 3 3 * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice (including the next 12 - * paragraph) shall be included in all copies or substantial portions of the 13 - * Software. 14 - * 15 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 - * IN THE SOFTWARE. 
22 - * 4 + * Copyright © 2017-2018 Intel Corporation 23 5 */ 24 6 25 7 #include "../i915_selftest.h" ··· 17 35 bool set; 18 36 }; 19 37 20 - static int __igt_sync(struct intel_timeline *tl, 38 + static int __igt_sync(struct i915_timeline *tl, 21 39 u64 ctx, 22 40 const struct __igt_sync *p, 23 41 const char *name) 24 42 { 25 43 int ret; 26 44 27 - if (__intel_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) { 45 + if (__i915_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) { 28 46 pr_err("%s: %s(ctx=%llu, seqno=%u) expected passed %s but failed\n", 29 47 name, p->name, ctx, p->seqno, yesno(p->expected)); 30 48 return -EINVAL; 31 49 } 32 50 33 51 if (p->set) { 34 - ret = __intel_timeline_sync_set(tl, ctx, p->seqno); 52 + ret = __i915_timeline_sync_set(tl, ctx, p->seqno); 35 53 if (ret) 36 54 return ret; 37 55 } ··· 59 77 { "unwrap", UINT_MAX, true, false }, 60 78 {}, 61 79 }, *p; 62 - struct intel_timeline *tl; 80 + struct i915_timeline tl; 63 81 int order, offset; 64 82 int ret = -ENODEV; 65 83 66 - tl = mock_timeline(0); 67 - if (!tl) 68 - return -ENOMEM; 69 - 84 + mock_timeline_init(&tl, 0); 70 85 for (p = pass; p->name; p++) { 71 86 for (order = 1; order < 64; order++) { 72 87 for (offset = -1; offset <= (order > 1); offset++) { 73 88 u64 ctx = BIT_ULL(order) + offset; 74 89 75 - ret = __igt_sync(tl, ctx, p, "1"); 90 + ret = __igt_sync(&tl, ctx, p, "1"); 76 91 if (ret) 77 92 goto out; 78 93 } 79 94 } 80 95 } 81 - mock_timeline_destroy(tl); 96 + mock_timeline_fini(&tl); 82 97 83 - tl = mock_timeline(0); 84 - if (!tl) 85 - return -ENOMEM; 86 - 98 + mock_timeline_init(&tl, 0); 87 99 for (order = 1; order < 64; order++) { 88 100 for (offset = -1; offset <= (order > 1); offset++) { 89 101 u64 ctx = BIT_ULL(order) + offset; 90 102 91 103 for (p = pass; p->name; p++) { 92 - ret = __igt_sync(tl, ctx, p, "2"); 104 + ret = __igt_sync(&tl, ctx, p, "2"); 93 105 if (ret) 94 106 goto out; 95 107 } ··· 91 115 } 92 116 93 117 out: 94 - 
mock_timeline_destroy(tl); 118 + mock_timeline_fini(&tl); 95 119 return ret; 96 120 } 97 121 ··· 103 127 static int bench_sync(void *arg) 104 128 { 105 129 struct rnd_state prng; 106 - struct intel_timeline *tl; 130 + struct i915_timeline tl; 107 131 unsigned long end_time, count; 108 132 u64 prng32_1M; 109 133 ktime_t kt; 110 134 int order, last_order; 111 135 112 - tl = mock_timeline(0); 113 - if (!tl) 114 - return -ENOMEM; 136 + mock_timeline_init(&tl, 0); 115 137 116 138 /* Lookups from cache are very fast and so the random number generation 117 139 * and the loop itself becomes a significant factor in the per-iteration ··· 141 167 do { 142 168 u64 id = i915_prandom_u64_state(&prng); 143 169 144 - __intel_timeline_sync_set(tl, id, 0); 170 + __i915_timeline_sync_set(&tl, id, 0); 145 171 count++; 146 172 } while (!time_after(jiffies, end_time)); 147 173 kt = ktime_sub(ktime_get(), kt); ··· 156 182 while (end_time--) { 157 183 u64 id = i915_prandom_u64_state(&prng); 158 184 159 - if (!__intel_timeline_sync_is_later(tl, id, 0)) { 160 - mock_timeline_destroy(tl); 185 + if (!__i915_timeline_sync_is_later(&tl, id, 0)) { 186 + mock_timeline_fini(&tl); 161 187 pr_err("Lookup of %llu failed\n", id); 162 188 return -EINVAL; 163 189 } ··· 167 193 pr_info("%s: %lu random lookups, %lluns/lookup\n", 168 194 __func__, count, (long long)div64_ul(ktime_to_ns(kt), count)); 169 195 170 - mock_timeline_destroy(tl); 196 + mock_timeline_fini(&tl); 171 197 cond_resched(); 172 198 173 - tl = mock_timeline(0); 174 - if (!tl) 175 - return -ENOMEM; 199 + mock_timeline_init(&tl, 0); 176 200 177 201 /* Benchmark setting the first N (in order) contexts */ 178 202 count = 0; 179 203 kt = ktime_get(); 180 204 end_time = jiffies + HZ/10; 181 205 do { 182 - __intel_timeline_sync_set(tl, count++, 0); 206 + __i915_timeline_sync_set(&tl, count++, 0); 183 207 } while (!time_after(jiffies, end_time)); 184 208 kt = ktime_sub(ktime_get(), kt); 185 209 pr_info("%s: %lu in-order insertions, 
%lluns/insert\n", ··· 187 215 end_time = count; 188 216 kt = ktime_get(); 189 217 while (end_time--) { 190 - if (!__intel_timeline_sync_is_later(tl, end_time, 0)) { 218 + if (!__i915_timeline_sync_is_later(&tl, end_time, 0)) { 191 219 pr_err("Lookup of %lu failed\n", end_time); 192 - mock_timeline_destroy(tl); 220 + mock_timeline_fini(&tl); 193 221 return -EINVAL; 194 222 } 195 223 } ··· 197 225 pr_info("%s: %lu in-order lookups, %lluns/lookup\n", 198 226 __func__, count, (long long)div64_ul(ktime_to_ns(kt), count)); 199 227 200 - mock_timeline_destroy(tl); 228 + mock_timeline_fini(&tl); 201 229 cond_resched(); 202 230 203 - tl = mock_timeline(0); 204 - if (!tl) 205 - return -ENOMEM; 231 + mock_timeline_init(&tl, 0); 206 232 207 233 /* Benchmark searching for a random context id and maybe changing it */ 208 234 prandom_seed_state(&prng, i915_selftest.random_seed); ··· 211 241 u32 id = random_engine(&prng); 212 242 u32 seqno = prandom_u32_state(&prng); 213 243 214 - if (!__intel_timeline_sync_is_later(tl, id, seqno)) 215 - __intel_timeline_sync_set(tl, id, seqno); 244 + if (!__i915_timeline_sync_is_later(&tl, id, seqno)) 245 + __i915_timeline_sync_set(&tl, id, seqno); 216 246 217 247 count++; 218 248 } while (!time_after(jiffies, end_time)); ··· 220 250 kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20); 221 251 pr_info("%s: %lu repeated insert/lookups, %lluns/op\n", 222 252 __func__, count, (long long)div64_ul(ktime_to_ns(kt), count)); 223 - mock_timeline_destroy(tl); 253 + mock_timeline_fini(&tl); 224 254 cond_resched(); 225 255 226 256 /* Benchmark searching for a known context id and changing the seqno */ ··· 228 258 ({ int tmp = last_order; last_order = order; order += tmp; })) { 229 259 unsigned int mask = BIT(order) - 1; 230 260 231 - tl = mock_timeline(0); 232 - if (!tl) 233 - return -ENOMEM; 261 + mock_timeline_init(&tl, 0); 234 262 235 263 count = 0; 236 264 kt = ktime_get(); ··· 240 272 */ 241 273 u64 id = (u64)(count & mask) << order; 242 274 243 - 
__intel_timeline_sync_is_later(tl, id, 0); 244 - __intel_timeline_sync_set(tl, id, 0); 275 + __i915_timeline_sync_is_later(&tl, id, 0); 276 + __i915_timeline_sync_set(&tl, id, 0); 245 277 246 278 count++; 247 279 } while (!time_after(jiffies, end_time)); ··· 249 281 pr_info("%s: %lu cyclic/%d insert/lookups, %lluns/op\n", 250 282 __func__, count, order, 251 283 (long long)div64_ul(ktime_to_ns(kt), count)); 252 - mock_timeline_destroy(tl); 284 + mock_timeline_fini(&tl); 253 285 cond_resched(); 254 286 } 255 287
+21 -11
drivers/gpu/drm/i915/selftests/mock_engine.c
··· 25 25 #include "mock_engine.h" 26 26 #include "mock_request.h" 27 27 28 + struct mock_ring { 29 + struct intel_ring base; 30 + struct i915_timeline timeline; 31 + }; 32 + 28 33 static struct mock_request *first_request(struct mock_engine *engine) 29 34 { 30 35 return list_first_entry_or_null(&engine->hw_queue, ··· 137 132 static struct intel_ring *mock_ring(struct intel_engine_cs *engine) 138 133 { 139 134 const unsigned long sz = PAGE_SIZE / 2; 140 - struct intel_ring *ring; 135 + struct mock_ring *ring; 141 136 142 137 BUILD_BUG_ON(MIN_SPACE_FOR_ADD_REQUEST > sz); 143 138 ··· 145 140 if (!ring) 146 141 return NULL; 147 142 148 - ring->timeline = &engine->i915->gt.legacy_timeline.engine[engine->id]; 143 + i915_timeline_init(engine->i915, &ring->timeline, engine->name); 149 144 150 - ring->size = sz; 151 - ring->effective_size = sz; 152 - ring->vaddr = (void *)(ring + 1); 145 + ring->base.size = sz; 146 + ring->base.effective_size = sz; 147 + ring->base.vaddr = (void *)(ring + 1); 148 + ring->base.timeline = &ring->timeline; 153 149 154 - INIT_LIST_HEAD(&ring->request_list); 155 - intel_ring_update_space(ring); 150 + INIT_LIST_HEAD(&ring->base.request_list); 151 + intel_ring_update_space(&ring->base); 156 152 157 - return ring; 153 + return &ring->base; 158 154 } 159 155 160 - static void mock_ring_free(struct intel_ring *ring) 156 + static void mock_ring_free(struct intel_ring *base) 161 157 { 158 + struct mock_ring *ring = container_of(base, typeof(*ring), base); 159 + 160 + i915_timeline_fini(&ring->timeline); 162 161 kfree(ring); 163 162 } 164 163 ··· 191 182 engine->base.emit_breadcrumb = mock_emit_breadcrumb; 192 183 engine->base.submit_request = mock_submit_request; 193 184 194 - intel_engine_init_timeline(&engine->base); 195 - 185 + i915_timeline_init(i915, &engine->base.timeline, engine->base.name); 196 186 intel_engine_init_breadcrumbs(&engine->base); 197 187 engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */ 198 188 ··· 208 200 
209 201 err_breadcrumbs: 210 202 intel_engine_fini_breadcrumbs(&engine->base); 203 + i915_timeline_fini(&engine->base.timeline); 211 204 kfree(engine); 212 205 return NULL; 213 206 } ··· 247 238 mock_ring_free(engine->buffer); 248 239 249 240 intel_engine_fini_breadcrumbs(engine); 241 + i915_timeline_fini(&engine->timeline); 250 242 251 243 kfree(engine); 252 244 }
+1 -9
drivers/gpu/drm/i915/selftests/mock_gem_device.c
··· 73 73 74 74 mutex_lock(&i915->drm.struct_mutex); 75 75 mock_fini_ggtt(i915); 76 - i915_gem_timeline_fini(&i915->gt.legacy_timeline); 77 - i915_gem_timeline_fini(&i915->gt.execution_timeline); 78 - WARN_ON(!list_empty(&i915->gt.timelines)); 79 76 mutex_unlock(&i915->drm.struct_mutex); 77 + WARN_ON(!list_empty(&i915->gt.timelines)); 80 78 81 79 destroy_workqueue(i915->wq); 82 80 ··· 228 230 INIT_LIST_HEAD(&i915->gt.active_rings); 229 231 230 232 mutex_lock(&i915->drm.struct_mutex); 231 - err = i915_gem_timeline_init__global(i915); 232 - if (err) { 233 - mutex_unlock(&i915->drm.struct_mutex); 234 - goto err_priorities; 235 - } 236 - 237 233 mock_init_ggtt(i915); 238 234 mutex_unlock(&i915->drm.struct_mutex); 239 235
+14 -31
drivers/gpu/drm/i915/selftests/mock_timeline.c
··· 1 1 /* 2 - * Copyright © 2017 Intel Corporation 2 + * SPDX-License-Identifier: MIT 3 3 * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice (including the next 12 - * paragraph) shall be included in all copies or substantial portions of the 13 - * Software. 14 - * 15 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 - * IN THE SOFTWARE. 
22 - * 4 + * Copyright © 2017-2018 Intel Corporation 23 5 */ 6 + 7 + #include "../i915_timeline.h" 24 8 25 9 #include "mock_timeline.h" 26 10 27 - struct intel_timeline *mock_timeline(u64 context) 11 + void mock_timeline_init(struct i915_timeline *timeline, u64 context) 28 12 { 29 - static struct lock_class_key class; 30 - struct intel_timeline *tl; 13 + timeline->fence_context = context; 31 14 32 - tl = kzalloc(sizeof(*tl), GFP_KERNEL); 33 - if (!tl) 34 - return NULL; 15 + spin_lock_init(&timeline->lock); 35 16 36 - __intel_timeline_init(tl, NULL, context, &class, "mock"); 17 + init_request_active(&timeline->last_request, NULL); 18 + INIT_LIST_HEAD(&timeline->requests); 37 19 38 - return tl; 20 + i915_syncmap_init(&timeline->sync); 21 + 22 + INIT_LIST_HEAD(&timeline->link); 39 23 } 40 24 41 - void mock_timeline_destroy(struct intel_timeline *tl) 25 + void mock_timeline_fini(struct i915_timeline *timeline) 42 26 { 43 - __intel_timeline_fini(tl); 44 - kfree(tl); 27 + i915_timeline_fini(timeline); 45 28 }
+5 -23
drivers/gpu/drm/i915/selftests/mock_timeline.h
··· 1 1 /* 2 - * Copyright © 2017 Intel Corporation 2 + * SPDX-License-Identifier: MIT 3 3 * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice (including the next 12 - * paragraph) shall be included in all copies or substantial portions of the 13 - * Software. 14 - * 15 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 - * IN THE SOFTWARE. 22 - * 4 + * Copyright © 2017-2018 Intel Corporation 23 5 */ 24 6 25 7 #ifndef __MOCK_TIMELINE__ 26 8 #define __MOCK_TIMELINE__ 27 9 28 - #include "../i915_gem_timeline.h" 10 + struct i915_timeline; 29 11 30 - struct intel_timeline *mock_timeline(u64 context); 31 - void mock_timeline_destroy(struct intel_timeline *tl); 12 + void mock_timeline_init(struct i915_timeline *timeline, u64 context); 13 + void mock_timeline_fini(struct i915_timeline *timeline); 32 14 33 15 #endif /* !__MOCK_TIMELINE__ */