Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/i915: Do not share hwsp across contexts any more, v8.

Instead of sharing pages with breadcrumbs, give each timeline a
single page. This allows unrelated timelines not to share locks
any more during command submission.

As an additional benefit, seqno wraparound no longer requires
i915_vma_pin, which means we no longer need to worry about a
potential -EDEADLK at a point where we are ready to submit.

Changes since v1:
- Fix erroneous i915_vma_acquire that should be an i915_vma_release (ickle).
- Extra check for completion in intel_read_hwsp().
Changes since v2:
- Fix inconsistent indent in hwsp_alloc() (kbuild)
- memset entire cacheline to 0.
Changes since v3:
- Do same in intel_timeline_reset_seqno(), and clflush for good measure.
Changes since v4:
- Use refcounting on timeline, instead of relying on i915_active.
- Fix waiting on kernel requests.
Changes since v5:
- Bump number of slots to maximum (256), for best wraparounds.
- Add hwsp_offset to i915_request to fix potential wraparound hang.
- Ensure timeline wrap test works with the changes.
- Assign hwsp in intel_timeline_read_hwsp() within the rcu lock to
fix a hang.
Changes since v6:
- Rename i915_request_active_offset to i915_request_active_seqno(),
and elaborate the function. (tvrtko)
Changes since v7:
- Move hunk to where it belongs. (jekstrand)
- Replace CACHELINE_BYTES with TIMELINE_SEQNO_BYTES. (jekstrand)

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@intel.com> #v1
Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-2-maarten.lankhorst@linux.intel.com

authored by

Maarten Lankhorst and committed by
Daniel Vetter
12ca695d 547be6a4

+178 -417
+1 -1
drivers/gpu/drm/i915/gt/gen2_engine_cs.c
··· 143 143 int flush, int post) 144 144 { 145 145 GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma); 146 - GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); 146 + GEM_BUG_ON(offset_in_page(rq->hwsp_seqno) != I915_GEM_HWS_SEQNO_ADDR); 147 147 148 148 *cs++ = MI_FLUSH; 149 149
+4 -4
drivers/gpu/drm/i915/gt/gen6_engine_cs.c
··· 161 161 PIPE_CONTROL_DC_FLUSH_ENABLE | 162 162 PIPE_CONTROL_QW_WRITE | 163 163 PIPE_CONTROL_CS_STALL); 164 - *cs++ = i915_request_active_timeline(rq)->hwsp_offset | 164 + *cs++ = i915_request_active_seqno(rq) | 165 165 PIPE_CONTROL_GLOBAL_GTT; 166 166 *cs++ = rq->fence.seqno; 167 167 ··· 359 359 PIPE_CONTROL_QW_WRITE | 360 360 PIPE_CONTROL_GLOBAL_GTT_IVB | 361 361 PIPE_CONTROL_CS_STALL); 362 - *cs++ = i915_request_active_timeline(rq)->hwsp_offset; 362 + *cs++ = i915_request_active_seqno(rq); 363 363 *cs++ = rq->fence.seqno; 364 364 365 365 *cs++ = MI_USER_INTERRUPT; ··· 374 374 u32 *gen6_emit_breadcrumb_xcs(struct i915_request *rq, u32 *cs) 375 375 { 376 376 GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma); 377 - GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); 377 + GEM_BUG_ON(offset_in_page(rq->hwsp_seqno) != I915_GEM_HWS_SEQNO_ADDR); 378 378 379 379 *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX; 380 380 *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT; ··· 394 394 int i; 395 395 396 396 GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma); 397 - GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); 397 + GEM_BUG_ON(offset_in_page(rq->hwsp_seqno) != I915_GEM_HWS_SEQNO_ADDR); 398 398 399 399 *cs++ = MI_FLUSH_DW | MI_INVALIDATE_TLB | 400 400 MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
+6 -7
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
··· 338 338 339 339 static u32 hwsp_offset(const struct i915_request *rq) 340 340 { 341 - const struct intel_timeline_cacheline *cl; 341 + const struct intel_timeline *tl; 342 342 343 - /* Before the request is executed, the timeline/cachline is fixed */ 343 + /* Before the request is executed, the timeline is fixed */ 344 + tl = rcu_dereference_protected(rq->timeline, 345 + !i915_request_signaled(rq)); 344 346 345 - cl = rcu_dereference_protected(rq->hwsp_cacheline, 1); 346 - if (cl) 347 - return cl->ggtt_offset; 348 - 349 - return rcu_dereference_protected(rq->timeline, 1)->hwsp_offset; 347 + /* See the comment in i915_request_active_seqno(). */ 348 + return page_mask_bits(tl->hwsp_offset) + offset_in_page(rq->hwsp_seqno); 350 349 } 351 350 352 351 int gen8_emit_init_breadcrumb(struct i915_request *rq)
+1
drivers/gpu/drm/i915/gt/intel_engine_cs.c
··· 763 763 frame->rq.engine = engine; 764 764 frame->rq.context = ce; 765 765 rcu_assign_pointer(frame->rq.timeline, ce->timeline); 766 + frame->rq.hwsp_seqno = ce->timeline->hwsp_seqno; 766 767 767 768 frame->ring.vaddr = frame->cs; 768 769 frame->ring.size = sizeof(frame->cs);
-4
drivers/gpu/drm/i915/gt/intel_gt_types.h
··· 39 39 struct intel_gt_timelines { 40 40 spinlock_t lock; /* protects active_list */ 41 41 struct list_head active_list; 42 - 43 - /* Pack multiple timelines' seqnos into the same page */ 44 - spinlock_t hwsp_lock; 45 - struct list_head hwsp_free_list; 46 42 } timelines; 47 43 48 44 struct intel_gt_requests {
+84 -338
drivers/gpu/drm/i915/gt/intel_timeline.c
··· 12 12 #include "intel_ring.h" 13 13 #include "intel_timeline.h" 14 14 15 - #define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit))) 16 - #define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit)) 15 + #define TIMELINE_SEQNO_BYTES 8 17 16 18 - #define CACHELINE_BITS 6 19 - #define CACHELINE_FREE CACHELINE_BITS 20 - 21 - struct intel_timeline_hwsp { 22 - struct intel_gt *gt; 23 - struct intel_gt_timelines *gt_timelines; 24 - struct list_head free_link; 25 - struct i915_vma *vma; 26 - u64 free_bitmap; 27 - }; 28 - 29 - static struct i915_vma *__hwsp_alloc(struct intel_gt *gt) 17 + static struct i915_vma *hwsp_alloc(struct intel_gt *gt) 30 18 { 31 19 struct drm_i915_private *i915 = gt->i915; 32 20 struct drm_i915_gem_object *obj; ··· 33 45 return vma; 34 46 } 35 47 36 - static struct i915_vma * 37 - hwsp_alloc(struct intel_timeline *timeline, unsigned int *cacheline) 38 - { 39 - struct intel_gt_timelines *gt = &timeline->gt->timelines; 40 - struct intel_timeline_hwsp *hwsp; 41 - 42 - BUILD_BUG_ON(BITS_PER_TYPE(u64) * CACHELINE_BYTES > PAGE_SIZE); 43 - 44 - spin_lock_irq(&gt->hwsp_lock); 45 - 46 - /* hwsp_free_list only contains HWSP that have available cachelines */ 47 - hwsp = list_first_entry_or_null(&gt->hwsp_free_list, 48 - typeof(*hwsp), free_link); 49 - if (!hwsp) { 50 - struct i915_vma *vma; 51 - 52 - spin_unlock_irq(&gt->hwsp_lock); 53 - 54 - hwsp = kmalloc(sizeof(*hwsp), GFP_KERNEL); 55 - if (!hwsp) 56 - return ERR_PTR(-ENOMEM); 57 - 58 - vma = __hwsp_alloc(timeline->gt); 59 - if (IS_ERR(vma)) { 60 - kfree(hwsp); 61 - return vma; 62 - } 63 - 64 - GT_TRACE(timeline->gt, "new HWSP allocated\n"); 65 - 66 - vma->private = hwsp; 67 - hwsp->gt = timeline->gt; 68 - hwsp->vma = vma; 69 - hwsp->free_bitmap = ~0ull; 70 - hwsp->gt_timelines = gt; 71 - 72 - spin_lock_irq(&gt->hwsp_lock); 73 - list_add(&hwsp->free_link, &gt->hwsp_free_list); 74 - } 75 - 76 - GEM_BUG_ON(!hwsp->free_bitmap); 77 - *cacheline = __ffs64(hwsp->free_bitmap); 78 - 
hwsp->free_bitmap &= ~BIT_ULL(*cacheline); 79 - if (!hwsp->free_bitmap) 80 - list_del(&hwsp->free_link); 81 - 82 - spin_unlock_irq(&gt->hwsp_lock); 83 - 84 - GEM_BUG_ON(hwsp->vma->private != hwsp); 85 - return hwsp->vma; 86 - } 87 - 88 - static void __idle_hwsp_free(struct intel_timeline_hwsp *hwsp, int cacheline) 89 - { 90 - struct intel_gt_timelines *gt = hwsp->gt_timelines; 91 - unsigned long flags; 92 - 93 - spin_lock_irqsave(&gt->hwsp_lock, flags); 94 - 95 - /* As a cacheline becomes available, publish the HWSP on the freelist */ 96 - if (!hwsp->free_bitmap) 97 - list_add_tail(&hwsp->free_link, &gt->hwsp_free_list); 98 - 99 - GEM_BUG_ON(cacheline >= BITS_PER_TYPE(hwsp->free_bitmap)); 100 - hwsp->free_bitmap |= BIT_ULL(cacheline); 101 - 102 - /* And if no one is left using it, give the page back to the system */ 103 - if (hwsp->free_bitmap == ~0ull) { 104 - i915_vma_put(hwsp->vma); 105 - list_del(&hwsp->free_link); 106 - kfree(hwsp); 107 - } 108 - 109 - spin_unlock_irqrestore(&gt->hwsp_lock, flags); 110 - } 111 - 112 - static void __rcu_cacheline_free(struct rcu_head *rcu) 113 - { 114 - struct intel_timeline_cacheline *cl = 115 - container_of(rcu, typeof(*cl), rcu); 116 - 117 - /* Must wait until after all *rq->hwsp are complete before removing */ 118 - i915_gem_object_unpin_map(cl->hwsp->vma->obj); 119 - __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS)); 120 - 121 - i915_active_fini(&cl->active); 122 - kfree(cl); 123 - } 124 - 125 - static void __idle_cacheline_free(struct intel_timeline_cacheline *cl) 126 - { 127 - GEM_BUG_ON(!i915_active_is_idle(&cl->active)); 128 - call_rcu(&cl->rcu, __rcu_cacheline_free); 129 - } 130 - 131 48 __i915_active_call 132 - static void __cacheline_retire(struct i915_active *active) 49 + static void __timeline_retire(struct i915_active *active) 133 50 { 134 - struct intel_timeline_cacheline *cl = 135 - container_of(active, typeof(*cl), active); 51 + struct intel_timeline *tl = 52 + container_of(active, 
typeof(*tl), active); 136 53 137 - i915_vma_unpin(cl->hwsp->vma); 138 - if (ptr_test_bit(cl->vaddr, CACHELINE_FREE)) 139 - __idle_cacheline_free(cl); 54 + i915_vma_unpin(tl->hwsp_ggtt); 55 + intel_timeline_put(tl); 140 56 } 141 57 142 - static int __cacheline_active(struct i915_active *active) 58 + static int __timeline_active(struct i915_active *active) 143 59 { 144 - struct intel_timeline_cacheline *cl = 145 - container_of(active, typeof(*cl), active); 60 + struct intel_timeline *tl = 61 + container_of(active, typeof(*tl), active); 146 62 147 - __i915_vma_pin(cl->hwsp->vma); 63 + __i915_vma_pin(tl->hwsp_ggtt); 64 + intel_timeline_get(tl); 148 65 return 0; 149 - } 150 - 151 - static struct intel_timeline_cacheline * 152 - cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline) 153 - { 154 - struct intel_timeline_cacheline *cl; 155 - void *vaddr; 156 - 157 - GEM_BUG_ON(cacheline >= BIT(CACHELINE_BITS)); 158 - 159 - cl = kmalloc(sizeof(*cl), GFP_KERNEL); 160 - if (!cl) 161 - return ERR_PTR(-ENOMEM); 162 - 163 - vaddr = i915_gem_object_pin_map(hwsp->vma->obj, I915_MAP_WB); 164 - if (IS_ERR(vaddr)) { 165 - kfree(cl); 166 - return ERR_CAST(vaddr); 167 - } 168 - 169 - cl->hwsp = hwsp; 170 - cl->vaddr = page_pack_bits(vaddr, cacheline); 171 - 172 - i915_active_init(&cl->active, __cacheline_active, __cacheline_retire); 173 - 174 - return cl; 175 - } 176 - 177 - static void cacheline_acquire(struct intel_timeline_cacheline *cl, 178 - u32 ggtt_offset) 179 - { 180 - if (!cl) 181 - return; 182 - 183 - cl->ggtt_offset = ggtt_offset; 184 - i915_active_acquire(&cl->active); 185 - } 186 - 187 - static void cacheline_release(struct intel_timeline_cacheline *cl) 188 - { 189 - if (cl) 190 - i915_active_release(&cl->active); 191 - } 192 - 193 - static void cacheline_free(struct intel_timeline_cacheline *cl) 194 - { 195 - if (!i915_active_acquire_if_busy(&cl->active)) { 196 - __idle_cacheline_free(cl); 197 - return; 198 - } 199 - 200 - 
GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE)); 201 - cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE); 202 - 203 - i915_active_release(&cl->active); 204 66 } 205 67 206 68 static int intel_timeline_init(struct intel_timeline *timeline, ··· 59 221 unsigned int offset) 60 222 { 61 223 void *vaddr; 224 + u32 *seqno; 62 225 63 226 kref_init(&timeline->kref); 64 227 atomic_set(&timeline->pin_count, 0); 65 228 66 229 timeline->gt = gt; 67 230 68 - timeline->has_initial_breadcrumb = !hwsp; 69 - timeline->hwsp_cacheline = NULL; 70 - 71 - if (!hwsp) { 72 - struct intel_timeline_cacheline *cl; 73 - unsigned int cacheline; 74 - 75 - hwsp = hwsp_alloc(timeline, &cacheline); 231 + if (hwsp) { 232 + timeline->hwsp_offset = offset; 233 + timeline->hwsp_ggtt = i915_vma_get(hwsp); 234 + } else { 235 + timeline->has_initial_breadcrumb = true; 236 + hwsp = hwsp_alloc(gt); 76 237 if (IS_ERR(hwsp)) 77 238 return PTR_ERR(hwsp); 78 - 79 - cl = cacheline_alloc(hwsp->private, cacheline); 80 - if (IS_ERR(cl)) { 81 - __idle_hwsp_free(hwsp->private, cacheline); 82 - return PTR_ERR(cl); 83 - } 84 - 85 - timeline->hwsp_cacheline = cl; 86 - timeline->hwsp_offset = cacheline * CACHELINE_BYTES; 87 - 88 - vaddr = page_mask_bits(cl->vaddr); 89 - } else { 90 - timeline->hwsp_offset = offset; 91 - vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB); 92 - if (IS_ERR(vaddr)) 93 - return PTR_ERR(vaddr); 239 + timeline->hwsp_ggtt = hwsp; 94 240 } 95 241 96 - timeline->hwsp_seqno = 97 - memset(vaddr + timeline->hwsp_offset, 0, CACHELINE_BYTES); 242 + vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB); 243 + if (IS_ERR(vaddr)) 244 + return PTR_ERR(vaddr); 98 245 99 - timeline->hwsp_ggtt = i915_vma_get(hwsp); 246 + timeline->hwsp_map = vaddr; 247 + seqno = vaddr + timeline->hwsp_offset; 248 + WRITE_ONCE(*seqno, 0); 249 + timeline->hwsp_seqno = seqno; 250 + 100 251 GEM_BUG_ON(timeline->hwsp_offset >= hwsp->size); 101 252 102 253 timeline->fence_context = dma_fence_context_alloc(1); ··· 96 269 
INIT_LIST_HEAD(&timeline->requests); 97 270 98 271 i915_syncmap_init(&timeline->sync); 272 + i915_active_init(&timeline->active, __timeline_active, __timeline_retire); 99 273 100 274 return 0; 101 275 } ··· 107 279 108 280 spin_lock_init(&timelines->lock); 109 281 INIT_LIST_HEAD(&timelines->active_list); 110 - 111 - spin_lock_init(&timelines->hwsp_lock); 112 - INIT_LIST_HEAD(&timelines->hwsp_free_list); 113 282 } 114 283 115 - static void intel_timeline_fini(struct intel_timeline *timeline) 284 + static void intel_timeline_fini(struct rcu_head *rcu) 116 285 { 117 - GEM_BUG_ON(atomic_read(&timeline->pin_count)); 118 - GEM_BUG_ON(!list_empty(&timeline->requests)); 119 - GEM_BUG_ON(timeline->retire); 286 + struct intel_timeline *timeline = 287 + container_of(rcu, struct intel_timeline, rcu); 120 288 121 - if (timeline->hwsp_cacheline) 122 - cacheline_free(timeline->hwsp_cacheline); 123 - else 124 - i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj); 289 + i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj); 125 290 126 291 i915_vma_put(timeline->hwsp_ggtt); 292 + i915_active_fini(&timeline->active); 293 + kfree(timeline); 127 294 } 128 295 129 296 struct intel_timeline * ··· 184 361 GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n", 185 362 tl->fence_context, tl->hwsp_offset); 186 363 187 - cacheline_acquire(tl->hwsp_cacheline, tl->hwsp_offset); 364 + i915_active_acquire(&tl->active); 188 365 if (atomic_fetch_inc(&tl->pin_count)) { 189 - cacheline_release(tl->hwsp_cacheline); 366 + i915_active_release(&tl->active); 190 367 __i915_vma_unpin(tl->hwsp_ggtt); 191 368 } 192 369 ··· 195 372 196 373 void intel_timeline_reset_seqno(const struct intel_timeline *tl) 197 374 { 375 + u32 *hwsp_seqno = (u32 *)tl->hwsp_seqno; 198 376 /* Must be pinned to be writable, and no requests in flight. 
*/ 199 377 GEM_BUG_ON(!atomic_read(&tl->pin_count)); 200 - WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno); 378 + 379 + memset(hwsp_seqno + 1, 0, TIMELINE_SEQNO_BYTES - sizeof(*hwsp_seqno)); 380 + WRITE_ONCE(*hwsp_seqno, tl->seqno); 381 + clflush(hwsp_seqno); 201 382 } 202 383 203 384 void intel_timeline_enter(struct intel_timeline *tl) ··· 277 450 return tl->seqno += 1 + tl->has_initial_breadcrumb; 278 451 } 279 452 280 - static void timeline_rollback(struct intel_timeline *tl) 281 - { 282 - tl->seqno -= 1 + tl->has_initial_breadcrumb; 283 - } 284 - 285 453 static noinline int 286 454 __intel_timeline_get_seqno(struct intel_timeline *tl, 287 - struct i915_request *rq, 288 455 u32 *seqno) 289 456 { 290 - struct intel_timeline_cacheline *cl; 291 - unsigned int cacheline; 292 - struct i915_vma *vma; 293 - void *vaddr; 294 - int err; 457 + u32 next_ofs = offset_in_page(tl->hwsp_offset + TIMELINE_SEQNO_BYTES); 295 458 296 - might_lock(&tl->gt->ggtt->vm.mutex); 297 - GT_TRACE(tl->gt, "timeline:%llx wrapped\n", tl->fence_context); 459 + /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */ 460 + if (TIMELINE_SEQNO_BYTES <= BIT(5) && (next_ofs & BIT(5))) 461 + next_ofs = offset_in_page(next_ofs + BIT(5)); 298 462 299 - /* 300 - * If there is an outstanding GPU reference to this cacheline, 301 - * such as it being sampled by a HW semaphore on another timeline, 302 - * we cannot wraparound our seqno value (the HW semaphore does 303 - * a strict greater-than-or-equals compare, not i915_seqno_passed). 304 - * So if the cacheline is still busy, we must detach ourselves 305 - * from it and leave it inflight alongside its users. 306 - * 307 - * However, if nobody is watching and we can guarantee that nobody 308 - * will, we could simply reuse the same cacheline. 
309 - * 310 - * if (i915_active_request_is_signaled(&tl->last_request) && 311 - * i915_active_is_signaled(&tl->hwsp_cacheline->active)) 312 - * return 0; 313 - * 314 - * That seems unlikely for a busy timeline that needed to wrap in 315 - * the first place, so just replace the cacheline. 316 - */ 317 - 318 - vma = hwsp_alloc(tl, &cacheline); 319 - if (IS_ERR(vma)) { 320 - err = PTR_ERR(vma); 321 - goto err_rollback; 322 - } 323 - 324 - err = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH); 325 - if (err) { 326 - __idle_hwsp_free(vma->private, cacheline); 327 - goto err_rollback; 328 - } 329 - 330 - cl = cacheline_alloc(vma->private, cacheline); 331 - if (IS_ERR(cl)) { 332 - err = PTR_ERR(cl); 333 - __idle_hwsp_free(vma->private, cacheline); 334 - goto err_unpin; 335 - } 336 - GEM_BUG_ON(cl->hwsp->vma != vma); 337 - 338 - /* 339 - * Attach the old cacheline to the current request, so that we only 340 - * free it after the current request is retired, which ensures that 341 - * all writes into the cacheline from previous requests are complete. 
342 - */ 343 - err = i915_active_ref(&tl->hwsp_cacheline->active, 344 - tl->fence_context, 345 - &rq->fence); 346 - if (err) 347 - goto err_cacheline; 348 - 349 - cacheline_release(tl->hwsp_cacheline); /* ownership now xfered to rq */ 350 - cacheline_free(tl->hwsp_cacheline); 351 - 352 - i915_vma_unpin(tl->hwsp_ggtt); /* binding kept alive by old cacheline */ 353 - i915_vma_put(tl->hwsp_ggtt); 354 - 355 - tl->hwsp_ggtt = i915_vma_get(vma); 356 - 357 - vaddr = page_mask_bits(cl->vaddr); 358 - tl->hwsp_offset = cacheline * CACHELINE_BYTES; 359 - tl->hwsp_seqno = 360 - memset(vaddr + tl->hwsp_offset, 0, CACHELINE_BYTES); 361 - 362 - tl->hwsp_offset += i915_ggtt_offset(vma); 363 - GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n", 364 - tl->fence_context, tl->hwsp_offset); 365 - 366 - cacheline_acquire(cl, tl->hwsp_offset); 367 - tl->hwsp_cacheline = cl; 463 + tl->hwsp_offset = i915_ggtt_offset(tl->hwsp_ggtt) + next_ofs; 464 + tl->hwsp_seqno = tl->hwsp_map + next_ofs; 465 + intel_timeline_reset_seqno(tl); 368 466 369 467 *seqno = timeline_advance(tl); 370 468 GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno)); 371 469 return 0; 372 - 373 - err_cacheline: 374 - cacheline_free(cl); 375 - err_unpin: 376 - i915_vma_unpin(vma); 377 - err_rollback: 378 - timeline_rollback(tl); 379 - return err; 380 470 } 381 471 382 472 int intel_timeline_get_seqno(struct intel_timeline *tl, ··· 303 559 *seqno = timeline_advance(tl); 304 560 305 561 /* Replace the HWSP on wraparound for HW semaphores */ 306 - if (unlikely(!*seqno && tl->hwsp_cacheline)) 307 - return __intel_timeline_get_seqno(tl, rq, seqno); 562 + if (unlikely(!*seqno && tl->has_initial_breadcrumb)) 563 + return __intel_timeline_get_seqno(tl, seqno); 308 564 309 565 return 0; 310 - } 311 - 312 - static int cacheline_ref(struct intel_timeline_cacheline *cl, 313 - struct i915_request *rq) 314 - { 315 - return i915_active_add_request(&cl->active, rq); 316 566 } 317 567 318 568 int intel_timeline_read_hwsp(struct 
i915_request *from, 319 569 struct i915_request *to, 320 570 u32 *hwsp) 321 571 { 322 - struct intel_timeline_cacheline *cl; 572 + struct intel_timeline *tl; 323 573 int err; 324 574 325 - GEM_BUG_ON(!rcu_access_pointer(from->hwsp_cacheline)); 326 - 327 575 rcu_read_lock(); 328 - cl = rcu_dereference(from->hwsp_cacheline); 329 - if (i915_request_signaled(from)) /* confirm cacheline is valid */ 330 - goto unlock; 331 - if (unlikely(!i915_active_acquire_if_busy(&cl->active))) 332 - goto unlock; /* seqno wrapped and completed! */ 333 - if (unlikely(__i915_request_is_complete(from))) 334 - goto release; 576 + tl = rcu_dereference(from->timeline); 577 + if (i915_request_signaled(from) || 578 + !i915_active_acquire_if_busy(&tl->active)) 579 + tl = NULL; 580 + 581 + if (tl) { 582 + /* hwsp_offset may wraparound, so use from->hwsp_seqno */ 583 + *hwsp = i915_ggtt_offset(tl->hwsp_ggtt) + 584 + offset_in_page(from->hwsp_seqno); 585 + } 586 + 587 + /* ensure we wait on the right request, if not, we completed */ 588 + if (tl && __i915_request_is_complete(from)) { 589 + i915_active_release(&tl->active); 590 + tl = NULL; 591 + } 335 592 rcu_read_unlock(); 336 593 337 - err = cacheline_ref(cl, to); 338 - if (err) 594 + if (!tl) 595 + return 1; 596 + 597 + /* Can't do semaphore waits on kernel context */ 598 + if (!tl->has_initial_breadcrumb) { 599 + err = -EINVAL; 339 600 goto out; 601 + } 340 602 341 - *hwsp = cl->ggtt_offset; 603 + err = i915_active_add_request(&tl->active, to); 604 + 342 605 out: 343 - i915_active_release(&cl->active); 606 + i915_active_release(&tl->active); 344 607 return err; 345 - 346 - release: 347 - i915_active_release(&cl->active); 348 - unlock: 349 - rcu_read_unlock(); 350 - return 1; 351 608 } 352 609 353 610 void intel_timeline_unpin(struct intel_timeline *tl) ··· 357 612 if (!atomic_dec_and_test(&tl->pin_count)) 358 613 return; 359 614 360 - cacheline_release(tl->hwsp_cacheline); 361 - 615 + i915_active_release(&tl->active); 362 616 
__i915_vma_unpin(tl->hwsp_ggtt); 363 617 } 364 618 ··· 366 622 struct intel_timeline *timeline = 367 623 container_of(kref, typeof(*timeline), kref); 368 624 369 - intel_timeline_fini(timeline); 370 - kfree_rcu(timeline, rcu); 625 + GEM_BUG_ON(atomic_read(&timeline->pin_count)); 626 + GEM_BUG_ON(!list_empty(&timeline->requests)); 627 + GEM_BUG_ON(timeline->retire); 628 + 629 + call_rcu(&timeline->rcu, intel_timeline_fini); 371 630 } 372 631 373 632 void intel_gt_fini_timelines(struct intel_gt *gt) ··· 378 631 struct intel_gt_timelines *timelines = &gt->timelines; 379 632 380 633 GEM_BUG_ON(!list_empty(&timelines->active_list)); 381 - GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list)); 382 634 } 383 635 384 636 void intel_gt_show_timelines(struct intel_gt *gt,
+3 -14
drivers/gpu/drm/i915/gt/intel_timeline_types.h
··· 18 18 struct i915_vma; 19 19 struct i915_syncmap; 20 20 struct intel_gt; 21 - struct intel_timeline_hwsp; 22 21 23 22 struct intel_timeline { 24 23 u64 fence_context; ··· 44 45 atomic_t pin_count; 45 46 atomic_t active_count; 46 47 48 + void *hwsp_map; 47 49 const u32 *hwsp_seqno; 48 50 struct i915_vma *hwsp_ggtt; 49 51 u32 hwsp_offset; 50 - 51 - struct intel_timeline_cacheline *hwsp_cacheline; 52 52 53 53 bool has_initial_breadcrumb; 54 54 ··· 64 66 * protection themselves (cf the i915_active_fence API). 65 67 */ 66 68 struct i915_active_fence last_request; 69 + 70 + struct i915_active active; 67 71 68 72 /** A chain of completed timelines ready for early retirement. */ 69 73 struct intel_timeline *retire; ··· 87 87 struct list_head engine_link; 88 88 89 89 struct kref kref; 90 - struct rcu_head rcu; 91 - }; 92 - 93 - struct intel_timeline_cacheline { 94 - struct i915_active active; 95 - 96 - struct intel_timeline_hwsp *hwsp; 97 - void *vaddr; 98 - 99 - u32 ggtt_offset; 100 - 101 90 struct rcu_head rcu; 102 91 }; 103 92
+4 -1
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
··· 42 42 43 43 static int write_timestamp(struct i915_request *rq, int slot) 44 44 { 45 + struct intel_timeline *tl = 46 + rcu_dereference_protected(rq->timeline, 47 + !i915_request_signaled(rq)); 45 48 u32 cmd; 46 49 u32 *cs; 47 50 ··· 57 54 cmd++; 58 55 *cs++ = cmd; 59 56 *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base)); 60 - *cs++ = i915_request_timeline(rq)->hwsp_offset + slot * sizeof(u32); 57 + *cs++ = tl->hwsp_offset + slot * sizeof(u32); 61 58 *cs++ = 0; 62 59 63 60 intel_ring_advance(rq, cs);
+54 -34
drivers/gpu/drm/i915/gt/selftest_timeline.c
··· 35 35 { 36 36 unsigned long address = (unsigned long)page_address(hwsp_page(tl)); 37 37 38 - return (address + tl->hwsp_offset) / CACHELINE_BYTES; 38 + return (address + offset_in_page(tl->hwsp_offset)) / TIMELINE_SEQNO_BYTES; 39 39 } 40 40 41 - #define CACHELINES_PER_PAGE (PAGE_SIZE / CACHELINE_BYTES) 41 + /* Only half of seqno's are usable, see __intel_timeline_get_seqno() */ 42 + #define CACHELINES_PER_PAGE (PAGE_SIZE / TIMELINE_SEQNO_BYTES / 2) 42 43 43 44 struct mock_hwsp_freelist { 44 45 struct intel_gt *gt; ··· 667 666 if (IS_ERR(tl)) 668 667 return PTR_ERR(tl); 669 668 670 - if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline) 669 + if (!tl->has_initial_breadcrumb) 671 670 goto out_free; 672 671 673 672 err = intel_timeline_pin(tl, NULL); ··· 834 833 return 0; 835 834 } 836 835 836 + static void switch_tl_lock(struct i915_request *from, struct i915_request *to) 837 + { 838 + /* some light mutex juggling required; think co-routines */ 839 + 840 + if (from) { 841 + lockdep_unpin_lock(&from->context->timeline->mutex, from->cookie); 842 + mutex_unlock(&from->context->timeline->mutex); 843 + } 844 + 845 + if (to) { 846 + mutex_lock(&to->context->timeline->mutex); 847 + to->cookie = lockdep_pin_lock(&to->context->timeline->mutex); 848 + } 849 + } 850 + 837 851 static int create_watcher(struct hwsp_watcher *w, 838 852 struct intel_engine_cs *engine, 839 853 int ringsz) 840 854 { 841 855 struct intel_context *ce; 842 - struct intel_timeline *tl; 843 856 844 857 ce = intel_context_create(engine); 845 858 if (IS_ERR(ce)) ··· 866 851 return PTR_ERR(w->rq); 867 852 868 853 w->addr = i915_ggtt_offset(w->vma); 869 - tl = w->rq->context->timeline; 870 854 871 - /* some light mutex juggling required; think co-routines */ 872 - lockdep_unpin_lock(&tl->mutex, w->rq->cookie); 873 - mutex_unlock(&tl->mutex); 855 + switch_tl_lock(w->rq, NULL); 874 856 875 857 return 0; 876 858 } ··· 876 864 bool (*op)(u32 hwsp, u32 seqno)) 877 865 { 878 866 struct i915_request *rq = 
fetch_and_zero(&w->rq); 879 - struct intel_timeline *tl = rq->context->timeline; 880 867 u32 offset, end; 881 868 int err; 882 869 883 870 GEM_BUG_ON(w->addr - i915_ggtt_offset(w->vma) > w->vma->size); 884 871 885 872 i915_request_get(rq); 886 - mutex_lock(&tl->mutex); 887 - rq->cookie = lockdep_pin_lock(&tl->mutex); 873 + switch_tl_lock(NULL, rq); 888 874 i915_request_add(rq); 889 875 890 876 if (i915_request_wait(rq, 0, HZ) < 0) { ··· 911 901 static void cleanup_watcher(struct hwsp_watcher *w) 912 902 { 913 903 if (w->rq) { 914 - struct intel_timeline *tl = w->rq->context->timeline; 915 - 916 - mutex_lock(&tl->mutex); 917 - w->rq->cookie = lockdep_pin_lock(&tl->mutex); 904 + switch_tl_lock(NULL, w->rq); 918 905 919 906 i915_request_add(w->rq); 920 907 } ··· 949 942 } 950 943 951 944 i915_request_put(rq); 952 - rq = intel_context_create_request(ce); 945 + rq = i915_request_create(ce); 953 946 if (IS_ERR(rq)) 954 947 return rq; 955 948 ··· 984 977 if (IS_ERR(tl)) 985 978 return PTR_ERR(tl); 986 979 987 - if (!tl->hwsp_cacheline) 980 + if (!tl->has_initial_breadcrumb) 988 981 goto out_free; 989 982 990 983 for (i = 0; i < ARRAY_SIZE(watcher); i++) { ··· 1006 999 do { 1007 1000 struct i915_sw_fence *submit; 1008 1001 struct i915_request *rq; 1009 - u32 hwsp; 1002 + u32 hwsp, dummy; 1010 1003 1011 1004 submit = heap_fence_create(GFP_KERNEL); 1012 1005 if (!submit) { ··· 1024 1017 goto out; 1025 1018 } 1026 1019 1027 - /* Skip to the end, saving 30 minutes of nops */ 1028 - tl->seqno = -10u + 2 * (count & 3); 1029 - WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno); 1030 1020 ce->timeline = intel_timeline_get(tl); 1031 1021 1032 - rq = intel_context_create_request(ce); 1022 + /* Ensure timeline is mapped, done during first pin */ 1023 + err = intel_context_pin(ce); 1024 + if (err) { 1025 + intel_context_put(ce); 1026 + goto out; 1027 + } 1028 + 1029 + /* 1030 + * Start at a new wrap, and set seqno right before another wrap, 1031 + * saving 30 minutes of nops 1032 + */ 1033 
+ tl->seqno = -12u + 2 * (count & 3); 1034 + __intel_timeline_get_seqno(tl, &dummy); 1035 + 1036 + rq = i915_request_create(ce); 1033 1037 if (IS_ERR(rq)) { 1034 1038 err = PTR_ERR(rq); 1039 + intel_context_unpin(ce); 1035 1040 intel_context_put(ce); 1036 1041 goto out; 1037 1042 } ··· 1053 1034 GFP_KERNEL); 1054 1035 if (err < 0) { 1055 1036 i915_request_add(rq); 1037 + intel_context_unpin(ce); 1056 1038 intel_context_put(ce); 1057 1039 goto out; 1058 1040 } 1059 1041 1060 - mutex_lock(&watcher[0].rq->context->timeline->mutex); 1042 + switch_tl_lock(rq, watcher[0].rq); 1061 1043 err = intel_timeline_read_hwsp(rq, watcher[0].rq, &hwsp); 1062 1044 if (err == 0) 1063 1045 err = emit_read_hwsp(watcher[0].rq, /* before */ 1064 1046 rq->fence.seqno, hwsp, 1065 1047 &watcher[0].addr); 1066 - mutex_unlock(&watcher[0].rq->context->timeline->mutex); 1048 + switch_tl_lock(watcher[0].rq, rq); 1067 1049 if (err) { 1068 1050 i915_request_add(rq); 1051 + intel_context_unpin(ce); 1069 1052 intel_context_put(ce); 1070 1053 goto out; 1071 1054 } 1072 1055 1073 - mutex_lock(&watcher[1].rq->context->timeline->mutex); 1056 + switch_tl_lock(rq, watcher[1].rq); 1074 1057 err = intel_timeline_read_hwsp(rq, watcher[1].rq, &hwsp); 1075 1058 if (err == 0) 1076 1059 err = emit_read_hwsp(watcher[1].rq, /* after */ 1077 1060 rq->fence.seqno, hwsp, 1078 1061 &watcher[1].addr); 1079 - mutex_unlock(&watcher[1].rq->context->timeline->mutex); 1062 + switch_tl_lock(watcher[1].rq, rq); 1080 1063 if (err) { 1081 1064 i915_request_add(rq); 1065 + intel_context_unpin(ce); 1082 1066 intel_context_put(ce); 1083 1067 goto out; 1084 1068 } ··· 1090 1068 i915_request_add(rq); 1091 1069 1092 1070 rq = wrap_timeline(rq); 1071 + intel_context_unpin(ce); 1093 1072 intel_context_put(ce); 1094 1073 if (IS_ERR(rq)) { 1095 1074 err = PTR_ERR(rq); ··· 1130 1107 3 * watcher[1].rq->ring->size) 1131 1108 break; 1132 1109 1133 - } while (!__igt_timeout(end_time, NULL)); 1134 - WRITE_ONCE(*(u32 *)tl->hwsp_seqno, 
0xdeadbeef); 1110 + } while (!__igt_timeout(end_time, NULL) && 1111 + count < (PAGE_SIZE / TIMELINE_SEQNO_BYTES - 1) / 2); 1135 1112 1136 1113 pr_info("%s: simulated %lu wraps\n", engine->name, count); 1137 1114 err = check_watcher(&watcher[1], "after", cmp_gte); ··· 1176 1153 } 1177 1154 1178 1155 GEM_BUG_ON(i915_active_fence_isset(&tl->last_request)); 1179 - tl->seqno = 0; 1180 - timeline_rollback(tl); 1181 - timeline_rollback(tl); 1156 + tl->seqno = -2u; 1182 1157 WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno); 1183 1158 1184 1159 for (i = 0; i < ARRAY_SIZE(rq); i++) { ··· 1256 1235 goto out; 1257 1236 1258 1237 tl = ce->timeline; 1259 - if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline) 1238 + if (!tl->has_initial_breadcrumb) 1260 1239 goto out; 1261 1240 1262 - timeline_rollback(tl); 1263 - timeline_rollback(tl); 1241 + tl->seqno = -4u; 1264 1242 WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno); 1265 1243 1266 1244 for (i = 0; i < ARRAY_SIZE(rq); i++) {
-4
drivers/gpu/drm/i915/i915_request.c
··· 863 863 rq->fence.seqno = seqno; 864 864 865 865 RCU_INIT_POINTER(rq->timeline, tl); 866 - RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline); 867 866 rq->hwsp_seqno = tl->hwsp_seqno; 868 867 GEM_BUG_ON(__i915_request_is_complete(rq)); 869 868 ··· 1105 1106 goto await_fence; 1106 1107 1107 1108 if (i915_request_has_initial_breadcrumb(to)) 1108 - goto await_fence; 1109 - 1110 - if (!rcu_access_pointer(from->hwsp_cacheline)) 1111 1109 goto await_fence; 1112 1110 1113 1111 /*
+21 -10
drivers/gpu/drm/i915/i915_request.h
··· 237 237 */ 238 238 const u32 *hwsp_seqno; 239 239 240 - /* 241 - * If we need to access the timeline's seqno for this request in 242 - * another request, we need to keep a read reference to this associated 243 - * cacheline, so that we do not free and recycle it before the foreign 244 - * observers have completed. Hence, we keep a pointer to the cacheline 245 - * inside the timeline's HWSP vma, but it is only valid while this 246 - * request has not completed and guarded by the timeline mutex. 247 - */ 248 - struct intel_timeline_cacheline __rcu *hwsp_cacheline; 249 - 250 240 /** Position in the ring of the start of the request */ 251 241 u32 head; 252 242 ··· 604 614 */ 605 615 return rcu_dereference_protected(rq->timeline, 606 616 lockdep_is_held(&rq->engine->active.lock)); 617 + } 618 + 619 + static inline u32 620 + i915_request_active_seqno(const struct i915_request *rq) 621 + { 622 + u32 hwsp_phys_base = 623 + page_mask_bits(i915_request_active_timeline(rq)->hwsp_offset); 624 + u32 hwsp_relative_offset = offset_in_page(rq->hwsp_seqno); 625 + 626 + /* 627 + * Because of wraparound, we cannot simply take tl->hwsp_offset, 628 + * but instead use the fact that the relative for vaddr is the 629 + * offset as for hwsp_offset. Take the top bits from tl->hwsp_offset 630 + * and combine them with the relative offset in rq->hwsp_seqno. 631 + * 632 + * As rw->hwsp_seqno is rewritten when signaled, this only works 633 + * when the request isn't signaled yet, but at that point you 634 + * no longer need the offset. 635 + */ 636 + 637 + return hwsp_phys_base + hwsp_relative_offset; 607 638 } 608 639 609 640 #endif /* I915_REQUEST_H */