Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/i915: Lift timeline into intel_context

Move the timeline from being inside the intel_ring to intel_context
itself. This saves much pointer dancing and makes the relations of the
context to its timeline much clearer.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190809182518.20486-4-chris@chris-wilson.co.uk

+141 -136
+27 -8
drivers/gpu/drm/i915/gem/i915_gem_context.c
··· 489 489 i915_vm_put(vm); 490 490 } 491 491 492 + static void __set_timeline(struct intel_timeline **dst, 493 + struct intel_timeline *src) 494 + { 495 + struct intel_timeline *old = *dst; 496 + 497 + *dst = src ? intel_timeline_get(src) : NULL; 498 + 499 + if (old) 500 + intel_timeline_put(old); 501 + } 502 + 503 + static void __apply_timeline(struct intel_context *ce, void *timeline) 504 + { 505 + __set_timeline(&ce->timeline, timeline); 506 + } 507 + 508 + static void __assign_timeline(struct i915_gem_context *ctx, 509 + struct intel_timeline *timeline) 510 + { 511 + __set_timeline(&ctx->timeline, timeline); 512 + context_apply_all(ctx, __apply_timeline, timeline); 513 + } 514 + 492 515 static struct i915_gem_context * 493 516 i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags) 494 517 { ··· 554 531 return ERR_CAST(timeline); 555 532 } 556 533 557 - ctx->timeline = timeline; 534 + __assign_timeline(ctx, timeline); 535 + intel_timeline_put(timeline); 558 536 } 559 537 560 538 trace_i915_context_create(ctx); ··· 1955 1931 static int clone_timeline(struct i915_gem_context *dst, 1956 1932 struct i915_gem_context *src) 1957 1933 { 1958 - if (src->timeline) { 1959 - GEM_BUG_ON(src->timeline == dst->timeline); 1960 - 1961 - if (dst->timeline) 1962 - intel_timeline_put(dst->timeline); 1963 - dst->timeline = intel_timeline_get(src->timeline); 1964 - } 1934 + if (src->timeline) 1935 + __assign_timeline(dst, src->timeline); 1965 1936 1966 1937 return 0; 1967 1938 }
+1 -1
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
··· 2182 2182 static void eb_unpin_context(struct i915_execbuffer *eb) 2183 2183 { 2184 2184 struct intel_context *ce = eb->context; 2185 - struct intel_timeline *tl = ce->ring->timeline; 2185 + struct intel_timeline *tl = ce->timeline; 2186 2186 2187 2187 mutex_lock(&tl->mutex); 2188 2188 intel_context_exit(ce);
+16 -5
drivers/gpu/drm/i915/gt/intel_context.c
··· 68 68 goto err; 69 69 70 70 GEM_TRACE("%s context:%llx pin ring:{head:%04x, tail:%04x}\n", 71 - ce->engine->name, ce->ring->timeline->fence_context, 71 + ce->engine->name, ce->timeline->fence_context, 72 72 ce->ring->head, ce->ring->tail); 73 73 74 74 i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */ ··· 98 98 99 99 if (likely(atomic_dec_and_test(&ce->pin_count))) { 100 100 GEM_TRACE("%s context:%llx retire\n", 101 - ce->engine->name, ce->ring->timeline->fence_context); 101 + ce->engine->name, ce->timeline->fence_context); 102 102 103 103 ce->ops->unpin(ce); 104 104 ··· 143 143 struct intel_context *ce = container_of(active, typeof(*ce), active); 144 144 145 145 GEM_TRACE("%s context:%llx retire\n", 146 - ce->engine->name, ce->ring->timeline->fence_context); 146 + ce->engine->name, ce->timeline->fence_context); 147 147 148 148 if (ce->state) 149 149 __context_unpin_state(ce->state); 150 150 151 + intel_timeline_unpin(ce->timeline); 151 152 intel_ring_unpin(ce->ring); 152 153 intel_context_put(ce); 153 154 } ··· 164 163 if (err) 165 164 goto err_put; 166 165 166 + err = intel_timeline_pin(ce->timeline); 167 + if (err) 168 + goto err_ring; 169 + 167 170 if (!ce->state) 168 171 return 0; 169 172 170 173 err = __context_pin_state(ce->state); 171 174 if (err) 172 - goto err_ring; 175 + goto err_timeline; 173 176 174 177 return 0; 175 178 179 + err_timeline: 180 + intel_timeline_unpin(ce->timeline); 176 181 err_ring: 177 182 intel_ring_unpin(ce->ring); 178 183 err_put: ··· 225 218 226 219 ce->gem_context = ctx; 227 220 ce->vm = i915_vm_get(ctx->vm ?: &engine->gt->ggtt->vm); 221 + if (ctx->timeline) 222 + ce->timeline = intel_timeline_get(ctx->timeline); 228 223 229 224 ce->engine = engine; 230 225 ce->ops = engine->cops; ··· 244 235 245 236 void intel_context_fini(struct intel_context *ce) 246 237 { 238 + if (ce->timeline) 239 + intel_timeline_put(ce->timeline); 247 240 i915_vm_put(ce->vm); 248 241 249 242 mutex_destroy(&ce->pin_mutex); ··· 290 279 int intel_context_prepare_remote_request(struct intel_context *ce, 291 280 struct i915_request *rq) 292 281 { 293 - struct intel_timeline *tl = ce->ring->timeline; 282 + struct intel_timeline *tl = ce->timeline; 294 283 int err; 295 284 296 285 /* Only suitable for use in remotely modifying this context */
+4 -4
drivers/gpu/drm/i915/gt/intel_context.h
··· 120 120 121 121 static inline int __must_check 122 122 intel_context_timeline_lock(struct intel_context *ce) 123 - __acquires(&ce->ring->timeline->mutex) 123 + __acquires(&ce->timeline->mutex) 124 124 { 125 - return mutex_lock_interruptible(&ce->ring->timeline->mutex); 125 + return mutex_lock_interruptible(&ce->timeline->mutex); 126 126 } 127 127 128 128 static inline void intel_context_timeline_unlock(struct intel_context *ce) 129 - __releases(&ce->ring->timeline->mutex) 129 + __releases(&ce->timeline->mutex) 130 130 { 131 - mutex_unlock(&ce->ring->timeline->mutex); 131 + mutex_unlock(&ce->timeline->mutex); 132 132 } 133 133 134 134 int intel_context_prepare_remote_request(struct intel_context *ce,
+1
drivers/gpu/drm/i915/gt/intel_context_types.h
··· 53 53 54 54 struct i915_vma *state; 55 55 struct intel_ring *ring; 56 + struct intel_timeline *timeline; 56 57 57 58 unsigned long flags; 58 59 #define CONTEXT_ALLOC_BIT 0
+1 -3
drivers/gpu/drm/i915/gt/intel_engine.h
··· 196 196 #define CNL_HWS_CSB_WRITE_INDEX 0x2f 197 197 198 198 struct intel_ring * 199 - intel_engine_create_ring(struct intel_engine_cs *engine, 200 - struct intel_timeline *timeline, 201 - int size); 199 + intel_engine_create_ring(struct intel_engine_cs *engine, int size); 202 200 int intel_ring_pin(struct intel_ring *ring); 203 201 void intel_ring_reset(struct intel_ring *ring, u32 tail); 204 202 unsigned int intel_ring_update_space(struct intel_ring *ring);
-1
drivers/gpu/drm/i915/gt/intel_engine_cs.c
··· 680 680 goto out_frame; 681 681 682 682 INIT_LIST_HEAD(&frame->ring.request_list); 683 - frame->ring.timeline = &frame->timeline; 684 683 frame->ring.vaddr = frame->cs; 685 684 frame->ring.size = sizeof(frame->cs); 686 685 frame->ring.effective_size = frame->ring.size;
+5 -3
drivers/gpu/drm/i915/gt/intel_engine_types.h
··· 69 69 struct i915_vma *vma; 70 70 void *vaddr; 71 71 72 - struct intel_timeline *timeline; 73 72 struct list_head request_list; 74 73 struct list_head active_link; 75 74 ··· 285 286 286 287 struct intel_sseu sseu; 287 288 288 - struct intel_ring *buffer; 289 - 290 289 struct { 291 290 spinlock_t lock; 292 291 struct list_head requests; ··· 302 305 struct intel_wakeref wakeref; 303 306 struct drm_i915_gem_object *default_state; 304 307 void *pinned_default_state; 308 + 309 + struct { 310 + struct intel_ring *ring; 311 + struct intel_timeline *timeline; 312 + } legacy; 305 313 306 314 /* Rather than have every client wait upon all user interrupts, 307 315 * with the herd waking after every interrupt and each doing the
+12 -21
drivers/gpu/drm/i915/gt/intel_lrc.c
··· 2821 2821 2822 2822 int intel_execlists_submission_setup(struct intel_engine_cs *engine) 2823 2823 { 2824 - /* Intentionally left blank. */ 2825 - engine->buffer = NULL; 2826 - 2827 2824 tasklet_init(&engine->execlists.tasklet, 2828 2825 execlists_submission_tasklet, (unsigned long)engine); 2829 2826 timer_setup(&engine->execlists.timer, execlists_submission_timer, 0); ··· 3068 3071 return ret; 3069 3072 } 3070 3073 3071 - static struct intel_timeline * 3072 - get_timeline(struct i915_gem_context *ctx, struct intel_gt *gt) 3073 - { 3074 - if (ctx->timeline) 3075 - return intel_timeline_get(ctx->timeline); 3076 - else 3077 - return intel_timeline_create(gt, NULL); 3078 - } 3079 - 3080 3074 static int __execlists_context_alloc(struct intel_context *ce, 3081 3075 struct intel_engine_cs *engine) 3082 3076 { 3083 3077 struct drm_i915_gem_object *ctx_obj; 3078 + struct intel_ring *ring; 3084 3079 struct i915_vma *vma; 3085 3080 u32 context_size; 3086 - struct intel_ring *ring; 3087 - struct intel_timeline *timeline; 3088 3081 int ret; 3089 3082 3090 3083 GEM_BUG_ON(ce->state); ··· 3096 3109 goto error_deref_obj; 3097 3110 } 3098 3111 3099 - timeline = get_timeline(ce->gem_context, engine->gt); 3100 - if (IS_ERR(timeline)) { 3101 - ret = PTR_ERR(timeline); 3102 - goto error_deref_obj; 3112 + if (!ce->timeline) { 3113 + struct intel_timeline *tl; 3114 + 3115 + tl = intel_timeline_create(engine->gt, NULL); 3116 + if (IS_ERR(tl)) { 3117 + ret = PTR_ERR(tl); 3118 + goto error_deref_obj; 3119 + } 3120 + 3121 + ce->timeline = tl; 3103 3122 } 3104 3123 3105 - ring = intel_engine_create_ring(engine, timeline, 3106 - (unsigned long)ce->ring); 3107 - intel_timeline_put(timeline); 3124 + ring = intel_engine_create_ring(engine, (unsigned long)ce->ring); 3108 3125 if (IS_ERR(ring)) { 3109 3126 ret = PTR_ERR(ring); 3110 3127 goto error_deref_obj;
+30 -33
drivers/gpu/drm/i915/gt/intel_ringbuffer.c
··· 636 636 static int xcs_resume(struct intel_engine_cs *engine) 637 637 { 638 638 struct drm_i915_private *dev_priv = engine->i915; 639 - struct intel_ring *ring = engine->buffer; 639 + struct intel_ring *ring = engine->legacy.ring; 640 640 int ret = 0; 641 641 642 642 GEM_TRACE("%s: ring:{HEAD:%04x, TAIL:%04x}\n", ··· 832 832 */ 833 833 __i915_request_reset(rq, stalled); 834 834 835 - GEM_BUG_ON(rq->ring != engine->buffer); 835 + GEM_BUG_ON(rq->ring != engine->legacy.ring); 836 836 head = rq->head; 837 837 } else { 838 - head = engine->buffer->tail; 838 + head = engine->legacy.ring->tail; 839 839 } 840 - engine->buffer->head = intel_ring_wrap(engine->buffer, head); 840 + engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head); 841 841 842 842 spin_unlock_irqrestore(&engine->active.lock, flags); 843 843 } ··· 1192 1192 if (atomic_fetch_inc(&ring->pin_count)) 1193 1193 return 0; 1194 1194 1195 - ret = intel_timeline_pin(ring->timeline); 1196 - if (ret) 1197 - goto err_unpin; 1198 - 1199 1195 flags = PIN_GLOBAL; 1200 1196 1201 1197 /* Ring wraparound at offset 0 sometimes hangs. No idea why. */ ··· 1204 1208 1205 1209 ret = i915_vma_pin(vma, 0, 0, flags); 1206 1210 if (unlikely(ret)) 1207 - goto err_timeline; 1211 + goto err_unpin; 1208 1212 1209 1213 if (i915_vma_is_map_and_fenceable(vma)) 1210 1214 addr = (void __force *)i915_vma_pin_iomap(vma); ··· 1221 1225 GEM_BUG_ON(ring->vaddr); 1222 1226 ring->vaddr = addr; 1223 1227 1224 - GEM_TRACE("ring:%llx pin\n", ring->timeline->fence_context); 1225 1228 return 0; 1226 1229 1227 1230 err_ring: 1228 1231 i915_vma_unpin(vma); 1229 - err_timeline: 1230 - intel_timeline_unpin(ring->timeline); 1231 1232 err_unpin: 1232 1233 atomic_dec(&ring->pin_count); 1233 1234 return ret; ··· 1247 1254 if (!atomic_dec_and_test(&ring->pin_count)) 1248 1255 return; 1249 1256 1250 - GEM_TRACE("ring:%llx unpin\n", ring->timeline->fence_context); 1251 - 1252 1257 /* Discard any unused bytes beyond that submitted to hw. */ 1253 1258 intel_ring_reset(ring, ring->tail); 1254 1259 ··· 1261 1270 1262 1271 i915_vma_unpin(vma); 1263 1272 i915_vma_make_purgeable(vma); 1264 - 1265 - intel_timeline_unpin(ring->timeline); 1266 1273 } 1267 1274 1268 1275 static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size) ··· 1295 1306 } 1296 1307 1297 1308 struct intel_ring * 1298 - intel_engine_create_ring(struct intel_engine_cs *engine, 1299 - struct intel_timeline *timeline, 1300 - int size) 1309 + intel_engine_create_ring(struct intel_engine_cs *engine, int size) 1301 1310 { 1302 1311 struct drm_i915_private *i915 = engine->i915; 1303 1312 struct intel_ring *ring; ··· 1310 1323 1311 1324 kref_init(&ring->ref); 1312 1325 INIT_LIST_HEAD(&ring->request_list); 1313 - ring->timeline = intel_timeline_get(timeline); 1314 1326 1315 1327 ring->size = size; 1316 1328 /* Workaround an erratum on the i830 which causes a hang if ··· 1339 1353 i915_vma_close(ring->vma); 1340 1354 i915_vma_put(ring->vma); 1341 1355 1342 - intel_timeline_put(ring->timeline); 1343 1356 kfree(ring); 1344 1357 } ··· 1470 1485 struct intel_engine_cs *engine = ce->engine; 1471 1486 1472 1487 /* One ringbuffer to rule them all */ 1473 - GEM_BUG_ON(!engine->buffer); 1474 - ce->ring = engine->buffer; 1488 + GEM_BUG_ON(!engine->legacy.ring); 1489 + ce->ring = engine->legacy.ring; 1490 + ce->timeline = intel_timeline_get(engine->legacy.timeline); 1475 1491 1476 1492 GEM_BUG_ON(ce->state); 1477 1493 if (engine->context_size) { ··· 2151 2165 2152 2166 intel_engine_cleanup_common(engine); 2153 2167 2154 - intel_ring_unpin(engine->buffer); 2155 - intel_ring_put(engine->buffer); 2168 + intel_ring_unpin(engine->legacy.ring); 2169 + intel_ring_put(engine->legacy.ring); 2170 + 2171 + intel_timeline_unpin(engine->legacy.timeline); 2172 + intel_timeline_put(engine->legacy.timeline); 2156 2173 2157 2174 kfree(engine); 2158 2175 } ··· 2339 2350 } 2340 2351 GEM_BUG_ON(timeline->has_initial_breadcrumb); 2341 2352 2342 - ring = intel_engine_create_ring(engine, timeline, SZ_16K); 2343 - intel_timeline_put(timeline); 2353 + err = intel_timeline_pin(timeline); 2354 + if (err) 2355 + goto err_timeline; 2356 + 2357 + ring = intel_engine_create_ring(engine, SZ_16K); 2344 2358 if (IS_ERR(ring)) { 2345 2359 err = PTR_ERR(ring); 2346 - goto err; 2360 + goto err_timeline_unpin; 2347 2361 } 2348 2362 2349 2363 err = intel_ring_pin(ring); 2350 2364 if (err) 2351 2365 goto err_ring; 2352 2366 2353 - GEM_BUG_ON(engine->buffer); 2354 - engine->buffer = ring; 2367 + GEM_BUG_ON(engine->legacy.ring); 2368 + engine->legacy.ring = ring; 2369 + engine->legacy.timeline = timeline; 2355 2370 2356 2371 err = intel_engine_init_common(engine); 2357 2372 if (err) 2358 - goto err_unpin; 2373 + goto err_ring_unpin; 2359 2374 2360 - GEM_BUG_ON(ring->timeline->hwsp_ggtt != engine->status_page.vma); 2375 + GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma); 2361 2376 2362 2377 return 0; 2363 2378 2364 - err_unpin: 2379 + err_ring_unpin: 2365 2380 intel_ring_unpin(ring); 2366 2381 err_ring: 2367 2382 intel_ring_put(ring); 2383 + err_timeline_unpin: 2384 + intel_timeline_unpin(timeline); 2385 + err_timeline: 2386 + intel_timeline_put(timeline); 2368 2387 err: 2369 2388 intel_engine_cleanup_common(engine); 2370 2389 return err;
+23 -39
drivers/gpu/drm/i915/gt/mock_engine.c
··· 32 32 #include "mock_engine.h" 33 33 #include "selftests/mock_request.h" 34 34 35 - struct mock_ring { 36 - struct intel_ring base; 37 - struct intel_timeline timeline; 38 - }; 39 - 40 35 static void mock_timeline_pin(struct intel_timeline *tl) 41 36 { 42 37 tl->pin_count++; ··· 46 51 static struct intel_ring *mock_ring(struct intel_engine_cs *engine) 47 52 { 48 53 const unsigned long sz = PAGE_SIZE / 2; 49 - struct mock_ring *ring; 54 + struct intel_ring *ring; 50 55 51 56 ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL); 52 57 if (!ring) 53 58 return NULL; 54 59 55 - if (intel_timeline_init(&ring->timeline, engine->gt, NULL)) { 56 - kfree(ring); 57 - return NULL; 58 - } 60 + kref_init(&ring->ref); 61 + ring->size = sz; 62 + ring->effective_size = sz; 63 + ring->vaddr = (void *)(ring + 1); 64 + atomic_set(&ring->pin_count, 1); 59 65 60 - kref_init(&ring->base.ref); 61 - ring->base.size = sz; 62 - ring->base.effective_size = sz; 63 - ring->base.vaddr = (void *)(ring + 1); 64 - ring->base.timeline = &ring->timeline; 65 - atomic_set(&ring->base.pin_count, 1); 66 + INIT_LIST_HEAD(&ring->request_list); 67 + intel_ring_update_space(ring); 66 68 67 - INIT_LIST_HEAD(&ring->base.request_list); 68 - intel_ring_update_space(&ring->base); 69 - 70 - return &ring->base; 71 - } 72 - 73 - static void mock_ring_free(struct intel_ring *base) 74 - { 75 - struct mock_ring *ring = container_of(base, typeof(*ring), base); 76 - 77 - intel_timeline_fini(&ring->timeline); 78 - kfree(ring); 69 + return ring; 79 70 } 80 71 81 72 static struct i915_request *first_request(struct mock_engine *engine) ··· 112 131 113 132 static void mock_context_unpin(struct intel_context *ce) 114 133 { 115 - mock_timeline_unpin(ce->ring->timeline); 116 134 } 117 135 118 136 static void mock_context_destroy(struct kref *ref) ··· 120 140 121 141 GEM_BUG_ON(intel_context_is_pinned(ce)); 122 142 123 - if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) 124 - mock_ring_free(ce->ring); 143 + if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) { 144 + kfree(ce->ring); 145 + mock_timeline_unpin(ce->timeline); 146 + } 125 147 126 148 intel_context_fini(ce); 127 149 intel_context_free(ce); ··· 135 153 if (!ce->ring) 136 154 return -ENOMEM; 137 155 156 + GEM_BUG_ON(ce->timeline); 157 + ce->timeline = intel_timeline_create(ce->engine->gt, NULL); 158 + if (IS_ERR(ce->timeline)) { 159 + kfree(ce->engine); 160 + return PTR_ERR(ce->timeline); 161 + } 162 + 163 + mock_timeline_pin(ce->timeline); 164 + 138 165 return 0; 139 166 } 140 167 141 168 static int mock_context_pin(struct intel_context *ce) 142 169 { 143 - int ret; 144 - 145 - ret = intel_context_active_acquire(ce); 146 - if (ret) 147 - return ret; 148 - 149 - mock_timeline_pin(ce->ring->timeline); 150 - return 0; 170 + return intel_context_active_acquire(ce); 151 171 } 152 172 153 173 static const struct intel_context_ops mock_context_ops = {
+1 -1
drivers/gpu/drm/i915/gt/selftest_context.c
··· 32 32 33 33 static int context_sync(struct intel_context *ce) 34 34 { 35 - struct intel_timeline *tl = ce->ring->timeline; 35 + struct intel_timeline *tl = ce->timeline; 36 36 int err = 0; 37 37 38 38 do {
+3 -3
drivers/gpu/drm/i915/i915_active.c
··· 246 246 struct llist_node *head = NULL, *tail = NULL; 247 247 struct llist_node *pos, *next; 248 248 249 - GEM_BUG_ON(node->timeline != engine->kernel_context->ring->timeline->fence_context); 249 + GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context); 250 250 251 251 /* 252 252 * Rebuild the llist excluding our node. We may perform this ··· 568 568 * i915_active_acquire_barrier() 569 569 */ 570 570 for_each_engine_masked(engine, i915, mask, tmp) { 571 - u64 idx = engine->kernel_context->ring->timeline->fence_context; 571 + u64 idx = engine->kernel_context->timeline->fence_context; 572 572 struct active_node *node; 573 573 574 574 node = reuse_idle_barrier(ref, idx); ··· 665 665 struct llist_node *node, *next; 666 666 667 667 GEM_BUG_ON(intel_engine_is_virtual(engine)); 668 - GEM_BUG_ON(rq->timeline != engine->kernel_context->ring->timeline); 668 + GEM_BUG_ON(rq->timeline != engine->kernel_context->timeline); 669 669 670 670 /* 671 671 * Attach the list of proto-fences to the in-flight request such
+5 -5
drivers/gpu/drm/i915/i915_request.c
··· 306 306 307 307 local_irq_enable(); 308 308 309 - intel_context_exit(rq->hw_context); 310 - intel_context_unpin(rq->hw_context); 311 - 312 309 i915_request_remove_from_client(rq); 313 310 list_del(&rq->link); 311 + 312 + intel_context_exit(rq->hw_context); 313 + intel_context_unpin(rq->hw_context); 314 314 315 315 free_capture_list(rq); 316 316 i915_sched_node_fini(&rq->sched); ··· 608 608 struct i915_request * 609 609 __i915_request_create(struct intel_context *ce, gfp_t gfp) 610 610 { 611 - struct intel_timeline *tl = ce->ring->timeline; 611 + struct intel_timeline *tl = ce->timeline; 612 612 struct i915_request *rq; 613 613 u32 seqno; 614 614 int ret; ··· 760 760 goto err_unlock; 761 761 762 762 /* Check that we do not interrupt ourselves with a new request */ 763 - rq->cookie = lockdep_pin_lock(&ce->ring->timeline->mutex); 763 + rq->cookie = lockdep_pin_lock(&ce->timeline->mutex); 764 764 765 765 return rq; 766 766
+12 -9
drivers/gpu/drm/i915/selftests/i915_gem_evict.c
··· 48 48 { 49 49 unsigned long unbound, bound, count; 50 50 struct drm_i915_gem_object *obj; 51 - u64 size; 52 51 53 52 count = 0; 54 - for (size = 0; 55 - size + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total; 56 - size += I915_GTT_PAGE_SIZE) { 53 + do { 57 54 struct i915_vma *vma; 58 55 59 56 obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE); 60 57 if (IS_ERR(obj)) 61 58 return PTR_ERR(obj); 62 59 63 - quirk_add(obj, objects); 64 - 65 60 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0); 66 - if (IS_ERR(vma)) 67 - return PTR_ERR(vma); 61 + if (IS_ERR(vma)) { 62 + i915_gem_object_put(obj); 63 + if (vma == ERR_PTR(-ENOSPC)) 64 + break; 68 65 66 + return PTR_ERR(vma); 67 + } 68 + 69 + quirk_add(obj, objects); 69 70 count++; 70 - } 71 + } while (1); 72 + pr_debug("Filled GGTT with %lu pages [%llu total]\n", 73 + count, i915->ggtt.vm.total / PAGE_SIZE); 71 74 72 75 bound = 0; 73 76 unbound = 0;