Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-intel-fixes-2019-10-10' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

- Fix CML display by adding a missing ID.
- Drop redundant list_del_init
- Only enqueue already completed requests to avoid races
- Fixup preempt-to-busy vs reset of a virtual request
- Protect peeking at execlists->active
- execlists->active is serialised by the tasklet

drm-intel-next-fixes-2019-09-19:
- Extend old HSW workaround to fix some GPU hangs on Haswell GT2
- Fix return error code on GEM mmap.
- Whitelist a chicken bit register for push constants legacy mode on Mesa
- Fix resume issue related to GGTT restore
- Remove incorrect BUG_ON on execlist's schedule-out
- Fix unrecoverable GPU hangs with Vulkan compute workloads on SKL

drm-intel-next-fixes-2019-09-26:
- Fix concurrency issues in cases where requests were getting retired at the same time as being resubmitted to HW
- Fix gen9 display resolutions by setting the right max plane width
- Fix GPU hang on preemption
- Mark contents as dirty on a write fault. This was breaking cursor sprite with dumb buffers.

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191010143039.GA15313@intel.com

+189 -83
+14 -1
drivers/gpu/drm/i915/display/intel_display.c
··· 3280 3280 switch (fb->modifier) { 3281 3281 case DRM_FORMAT_MOD_LINEAR: 3282 3282 case I915_FORMAT_MOD_X_TILED: 3283 - return 4096; 3283 + /* 3284 + * Validated limit is 4k, but has 5k should 3285 + * work apart from the following features: 3286 + * - Ytile (already limited to 4k) 3287 + * - FP16 (already limited to 4k) 3288 + * - render compression (already limited to 4k) 3289 + * - KVMR sprite and cursor (don't care) 3290 + * - horizontal panning (TODO verify this) 3291 + * - pipe and plane scaling (TODO verify this) 3292 + */ 3293 + if (cpp == 8) 3294 + return 4096; 3295 + else 3296 + return 5120; 3284 3297 case I915_FORMAT_MOD_Y_TILED_CCS: 3285 3298 case I915_FORMAT_MOD_Yf_TILED_CCS: 3286 3299 /* FIXME AUX plane? */
+7 -5
drivers/gpu/drm/i915/gem/i915_gem_mman.c
··· 245 245 246 246 wakeref = intel_runtime_pm_get(rpm); 247 247 248 - srcu = intel_gt_reset_trylock(ggtt->vm.gt); 249 - if (srcu < 0) { 250 - ret = srcu; 248 + ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu); 249 + if (ret) 251 250 goto err_rpm; 252 - } 253 251 254 252 ret = i915_mutex_lock_interruptible(dev); 255 253 if (ret) ··· 316 318 intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 317 319 msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)); 318 320 319 - i915_vma_set_ggtt_write(vma); 321 + if (write) { 322 + GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); 323 + i915_vma_set_ggtt_write(vma); 324 + obj->mm.dirty = true; 325 + } 320 326 321 327 err_fence: 322 328 i915_vma_unpin_fence(vma);
-3
drivers/gpu/drm/i915/gem/i915_gem_pm.c
··· 241 241 mutex_lock(&i915->drm.struct_mutex); 242 242 intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL); 243 243 244 - i915_gem_restore_gtt_mappings(i915); 245 - i915_gem_restore_fences(i915); 246 - 247 244 if (i915_gem_init_hw(i915)) 248 245 goto err_wedged; 249 246
+14
drivers/gpu/drm/i915/gt/intel_engine.h
··· 136 136 return READ_ONCE(*execlists->active); 137 137 } 138 138 139 + static inline void 140 + execlists_active_lock_bh(struct intel_engine_execlists *execlists) 141 + { 142 + local_bh_disable(); /* prevent local softirq and lock recursion */ 143 + tasklet_lock(&execlists->tasklet); 144 + } 145 + 146 + static inline void 147 + execlists_active_unlock_bh(struct intel_engine_execlists *execlists) 148 + { 149 + tasklet_unlock(&execlists->tasklet); 150 + local_bh_enable(); /* restore softirq, and kick ksoftirqd! */ 151 + } 152 + 139 153 struct i915_request * 140 154 execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists); 141 155
+7 -9
drivers/gpu/drm/i915/gt/intel_engine_cs.c
··· 1197 1197 struct drm_printer *m) 1198 1198 { 1199 1199 struct drm_i915_private *dev_priv = engine->i915; 1200 - const struct intel_engine_execlists * const execlists = 1201 - &engine->execlists; 1202 - unsigned long flags; 1200 + struct intel_engine_execlists * const execlists = &engine->execlists; 1203 1201 u64 addr; 1204 1202 1205 1203 if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7)) ··· 1279 1281 idx, hws[idx * 2], hws[idx * 2 + 1]); 1280 1282 } 1281 1283 1282 - spin_lock_irqsave(&engine->active.lock, flags); 1284 + execlists_active_lock_bh(execlists); 1283 1285 for (port = execlists->active; (rq = *port); port++) { 1284 1286 char hdr[80]; 1285 1287 int len; ··· 1307 1309 hwsp_seqno(rq)); 1308 1310 print_request(m, rq, hdr); 1309 1311 } 1310 - spin_unlock_irqrestore(&engine->active.lock, flags); 1312 + execlists_active_unlock_bh(execlists); 1311 1313 } else if (INTEL_GEN(dev_priv) > 6) { 1312 1314 drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n", 1313 1315 ENGINE_READ(engine, RING_PP_DIR_BASE)); ··· 1438 1440 if (!intel_engine_supports_stats(engine)) 1439 1441 return -ENODEV; 1440 1442 1441 - spin_lock_irqsave(&engine->active.lock, flags); 1442 - write_seqlock(&engine->stats.lock); 1443 + execlists_active_lock_bh(execlists); 1444 + write_seqlock_irqsave(&engine->stats.lock, flags); 1443 1445 1444 1446 if (unlikely(engine->stats.enabled == ~0)) { 1445 1447 err = -EBUSY; ··· 1467 1469 } 1468 1470 1469 1471 unlock: 1470 - write_sequnlock(&engine->stats.lock); 1471 - spin_unlock_irqrestore(&engine->active.lock, flags); 1472 + write_sequnlock_irqrestore(&engine->stats.lock, flags); 1473 + execlists_active_unlock_bh(execlists); 1472 1474 1473 1475 return err; 1474 1476 }
+64 -39
drivers/gpu/drm/i915/gt/intel_lrc.c
··· 631 631 struct intel_engine_cs *cur, *old; 632 632 633 633 trace_i915_request_out(rq); 634 - GEM_BUG_ON(intel_context_inflight(ce) != rq->engine); 635 634 636 635 old = READ_ONCE(ce->inflight); 637 636 do ··· 796 797 GEM_BUG_ON(prev == next); 797 798 GEM_BUG_ON(!assert_priority_queue(prev, next)); 798 799 800 + /* 801 + * We do not submit known completed requests. Therefore if the next 802 + * request is already completed, we can pretend to merge it in 803 + * with the previous context (and we will skip updating the ELSP 804 + * and tracking). Thus hopefully keeping the ELSP full with active 805 + * contexts, despite the best efforts of preempt-to-busy to confuse 806 + * us. 807 + */ 808 + if (i915_request_completed(next)) 809 + return true; 810 + 799 811 if (!can_merge_ctx(prev->hw_context, next->hw_context)) 800 812 return false; 801 813 ··· 903 893 static struct i915_request * 904 894 last_active(const struct intel_engine_execlists *execlists) 905 895 { 906 - struct i915_request * const *last = execlists->active; 896 + struct i915_request * const *last = READ_ONCE(execlists->active); 907 897 908 898 while (*last && i915_request_completed(*last)) 909 899 last++; ··· 1182 1172 continue; 1183 1173 } 1184 1174 1185 - if (i915_request_completed(rq)) { 1186 - ve->request = NULL; 1187 - ve->base.execlists.queue_priority_hint = INT_MIN; 1188 - rb_erase_cached(rb, &execlists->virtual); 1189 - RB_CLEAR_NODE(rb); 1190 - 1191 - rq->engine = engine; 1192 - __i915_request_submit(rq); 1193 - 1194 - spin_unlock(&ve->base.active.lock); 1195 - 1196 - rb = rb_first_cached(&execlists->virtual); 1197 - continue; 1198 - } 1199 - 1200 1175 if (last && !can_merge_rq(last, rq)) { 1201 1176 spin_unlock(&ve->base.active.lock); 1202 1177 return; /* leave this for another */ ··· 1232 1237 GEM_BUG_ON(ve->siblings[0] != engine); 1233 1238 } 1234 1239 1235 - __i915_request_submit(rq); 1236 - if (!i915_request_completed(rq)) { 1240 + if (__i915_request_submit(rq)) { 1237 1241 submit = true; 
1238 1242 last = rq; 1243 + } 1244 + 1245 + /* 1246 + * Hmm, we have a bunch of virtual engine requests, 1247 + * but the first one was already completed (thanks 1248 + * preempt-to-busy!). Keep looking at the veng queue 1249 + * until we have no more relevant requests (i.e. 1250 + * the normal submit queue has higher priority). 1251 + */ 1252 + if (!submit) { 1253 + spin_unlock(&ve->base.active.lock); 1254 + rb = rb_first_cached(&execlists->virtual); 1255 + continue; 1239 1256 } 1240 1257 } 1241 1258 ··· 1261 1254 int i; 1262 1255 1263 1256 priolist_for_each_request_consume(rq, rn, p, i) { 1264 - if (i915_request_completed(rq)) 1265 - goto skip; 1257 + bool merge = true; 1266 1258 1267 1259 /* 1268 1260 * Can we combine this request with the current port? ··· 1302 1296 ctx_single_port_submission(rq->hw_context)) 1303 1297 goto done; 1304 1298 1305 - *port = execlists_schedule_in(last, port - execlists->pending); 1306 - port++; 1299 + merge = false; 1307 1300 } 1308 1301 1309 - last = rq; 1310 - submit = true; 1311 - skip: 1312 - __i915_request_submit(rq); 1302 + if (__i915_request_submit(rq)) { 1303 + if (!merge) { 1304 + *port = execlists_schedule_in(last, port - execlists->pending); 1305 + port++; 1306 + last = NULL; 1307 + } 1308 + 1309 + GEM_BUG_ON(last && 1310 + !can_merge_ctx(last->hw_context, 1311 + rq->hw_context)); 1312 + 1313 + submit = true; 1314 + last = rq; 1315 + } 1313 1316 } 1314 1317 1315 1318 rb_erase_cached(&p->node, &execlists->queue); ··· 1608 1593 static void __execlists_submission_tasklet(struct intel_engine_cs *const engine) 1609 1594 { 1610 1595 lockdep_assert_held(&engine->active.lock); 1611 - if (!engine->execlists.pending[0]) 1596 + if (!engine->execlists.pending[0]) { 1597 + rcu_read_lock(); /* protect peeking at execlists->active */ 1612 1598 execlists_dequeue(engine); 1599 + rcu_read_unlock(); 1600 + } 1613 1601 } 1614 1602 1615 1603 /* ··· 2417 2399 2418 2400 static struct i915_request *active_request(struct i915_request *rq) 2419 
2401 { 2420 - const struct list_head * const list = &rq->timeline->requests; 2421 2402 const struct intel_context * const ce = rq->hw_context; 2422 2403 struct i915_request *active = NULL; 2404 + struct list_head *list; 2423 2405 2406 + if (!i915_request_is_active(rq)) /* unwound, but incomplete! */ 2407 + return rq; 2408 + 2409 + list = &rq->timeline->requests; 2424 2410 list_for_each_entry_from_reverse(rq, list, link) { 2425 2411 if (i915_request_completed(rq)) 2426 2412 break; ··· 2587 2565 int i; 2588 2566 2589 2567 priolist_for_each_request_consume(rq, rn, p, i) { 2590 - list_del_init(&rq->sched.link); 2591 2568 __i915_request_submit(rq); 2592 2569 dma_fence_set_error(&rq->fence, -EIO); 2593 2570 i915_request_mark_complete(rq); ··· 3652 3631 virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal) 3653 3632 { 3654 3633 struct virtual_engine *ve = to_virtual_engine(rq->engine); 3634 + intel_engine_mask_t allowed, exec; 3655 3635 struct ve_bond *bond; 3656 3636 3657 - bond = virtual_find_bond(ve, to_request(signal)->engine); 3658 - if (bond) { 3659 - intel_engine_mask_t old, new, cmp; 3637 + allowed = ~to_request(signal)->engine->mask; 3660 3638 3661 - cmp = READ_ONCE(rq->execution_mask); 3662 - do { 3663 - old = cmp; 3664 - new = cmp & bond->sibling_mask; 3665 - } while ((cmp = cmpxchg(&rq->execution_mask, old, new)) != old); 3666 - } 3639 + bond = virtual_find_bond(ve, to_request(signal)->engine); 3640 + if (bond) 3641 + allowed &= bond->sibling_mask; 3642 + 3643 + /* Restrict the bonded request to run on only the available engines */ 3644 + exec = READ_ONCE(rq->execution_mask); 3645 + while (!try_cmpxchg(&rq->execution_mask, &exec, exec & allowed)) 3646 + ; 3647 + 3648 + /* Prevent the master from being re-run on the bonded engines */ 3649 + to_request(signal)->execution_mask &= ~allowed; 3667 3650 } 3668 3651 3669 3652 struct intel_context *
+4 -8
drivers/gpu/drm/i915/gt/intel_reset.c
··· 42 42 struct intel_engine_cs *engine = rq->engine; 43 43 struct i915_gem_context *hung_ctx = rq->gem_context; 44 44 45 - lockdep_assert_held(&engine->active.lock); 46 - 47 45 if (!i915_request_is_active(rq)) 48 46 return; 49 47 48 + lockdep_assert_held(&engine->active.lock); 50 49 list_for_each_entry_continue(rq, &engine->active.requests, sched.link) 51 50 if (rq->gem_context == hung_ctx) 52 51 i915_request_skip(rq, -EIO); ··· 122 123 rq->fence.seqno, 123 124 yesno(guilty)); 124 125 125 - lockdep_assert_held(&rq->engine->active.lock); 126 126 GEM_BUG_ON(i915_request_completed(rq)); 127 127 128 128 if (guilty) { ··· 1212 1214 intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref); 1213 1215 } 1214 1216 1215 - int intel_gt_reset_trylock(struct intel_gt *gt) 1217 + int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu) 1216 1218 { 1217 - int srcu; 1218 - 1219 1219 might_lock(&gt->reset.backoff_srcu); 1220 1220 might_sleep(); 1221 1221 ··· 1228 1232 1229 1233 rcu_read_lock(); 1230 1234 } 1231 - srcu = srcu_read_lock(&gt->reset.backoff_srcu); 1235 + *srcu = srcu_read_lock(&gt->reset.backoff_srcu); 1232 1236 rcu_read_unlock(); 1233 1237 1234 - return srcu; 1238 + return 0; 1235 1239 } 1236 1240 1237 1241 void intel_gt_reset_unlock(struct intel_gt *gt, int tag)
+1 -1
drivers/gpu/drm/i915/gt/intel_reset.h
··· 38 38 39 39 void __i915_request_reset(struct i915_request *rq, bool guilty); 40 40 41 - int __must_check intel_gt_reset_trylock(struct intel_gt *gt); 41 + int __must_check intel_gt_reset_trylock(struct intel_gt *gt, int *srcu); 42 42 void intel_gt_reset_unlock(struct intel_gt *gt, int tag); 43 43 44 44 void intel_gt_set_wedged(struct intel_gt *gt);
+1 -1
drivers/gpu/drm/i915/gt/intel_ringbuffer.c
··· 1573 1573 struct intel_engine_cs *engine = rq->engine; 1574 1574 enum intel_engine_id id; 1575 1575 const int num_engines = 1576 - IS_HSW_GT1(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0; 1576 + IS_HASWELL(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0; 1577 1577 bool force_restore = false; 1578 1578 int len; 1579 1579 u32 *cs;
+3
drivers/gpu/drm/i915/gt/intel_workarounds.c
··· 1063 1063 1064 1064 /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */ 1065 1065 whitelist_reg(w, GEN8_HDC_CHICKEN1); 1066 + 1067 + /* WaSendPushConstantsFromMMIO:skl,bxt */ 1068 + whitelist_reg(w, COMMON_SLICE_CHICKEN2); 1066 1069 } 1067 1070 1068 1071 static void skl_whitelist_build(struct intel_engine_cs *engine)
+5
drivers/gpu/drm/i915/i915_drv.c
··· 1924 1924 if (ret) 1925 1925 DRM_ERROR("failed to re-enable GGTT\n"); 1926 1926 1927 + mutex_lock(&dev_priv->drm.struct_mutex); 1928 + i915_gem_restore_gtt_mappings(dev_priv); 1929 + i915_gem_restore_fences(dev_priv); 1930 + mutex_unlock(&dev_priv->drm.struct_mutex); 1931 + 1927 1932 intel_csr_ucode_resume(dev_priv); 1928 1933 1929 1934 i915_restore_state(dev_priv);
+6
drivers/gpu/drm/i915/i915_gem.h
··· 77 77 78 78 #define I915_GEM_IDLE_TIMEOUT (HZ / 5) 79 79 80 + static inline void tasklet_lock(struct tasklet_struct *t) 81 + { 82 + while (!tasklet_trylock(t)) 83 + cpu_relax(); 84 + } 85 + 80 86 static inline void __tasklet_disable_sync_once(struct tasklet_struct *t) 81 87 { 82 88 if (!atomic_fetch_inc(&t->count))
+54 -15
drivers/gpu/drm/i915/i915_request.c
··· 194 194 } 195 195 } 196 196 197 + static void remove_from_engine(struct i915_request *rq) 198 + { 199 + struct intel_engine_cs *engine, *locked; 200 + 201 + /* 202 + * Virtual engines complicate acquiring the engine timeline lock, 203 + * as their rq->engine pointer is not stable until under that 204 + * engine lock. The simple ploy we use is to take the lock then 205 + * check that the rq still belongs to the newly locked engine. 206 + */ 207 + locked = READ_ONCE(rq->engine); 208 + spin_lock(&locked->active.lock); 209 + while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) { 210 + spin_unlock(&locked->active.lock); 211 + spin_lock(&engine->active.lock); 212 + locked = engine; 213 + } 214 + list_del(&rq->sched.link); 215 + spin_unlock(&locked->active.lock); 216 + } 217 + 197 218 static bool i915_request_retire(struct i915_request *rq) 198 219 { 199 220 struct i915_active_request *active, *next; ··· 280 259 * request that we have removed from the HW and put back on a run 281 260 * queue. 282 261 */ 283 - spin_lock(&rq->engine->active.lock); 284 - list_del(&rq->sched.link); 285 - spin_unlock(&rq->engine->active.lock); 262 + remove_from_engine(rq); 286 263 287 264 spin_lock(&rq->lock); 288 265 i915_request_mark_complete(rq); ··· 377 358 return 0; 378 359 } 379 360 380 - void __i915_request_submit(struct i915_request *request) 361 + bool __i915_request_submit(struct i915_request *request) 381 362 { 382 363 struct intel_engine_cs *engine = request->engine; 364 + bool result = false; 383 365 384 366 GEM_TRACE("%s fence %llx:%lld, current %d\n", 385 367 engine->name, ··· 389 369 390 370 GEM_BUG_ON(!irqs_disabled()); 391 371 lockdep_assert_held(&engine->active.lock); 372 + 373 + /* 374 + * With the advent of preempt-to-busy, we frequently encounter 375 + * requests that we have unsubmitted from HW, but left running 376 + * until the next ack and so have completed in the meantime. 
On 377 + * resubmission of that completed request, we can skip 378 + * updating the payload, and execlists can even skip submitting 379 + * the request. 380 + * 381 + * We must remove the request from the caller's priority queue, 382 + * and the caller must only call us when the request is in their 383 + * priority queue, under the active.lock. This ensures that the 384 + * request has *not* yet been retired and we can safely move 385 + * the request into the engine->active.list where it will be 386 + * dropped upon retiring. (Otherwise if resubmit a *retired* 387 + * request, this would be a horrible use-after-free.) 388 + */ 389 + if (i915_request_completed(request)) 390 + goto xfer; 392 391 393 392 if (i915_gem_context_is_banned(request->gem_context)) 394 393 i915_request_skip(request, -EIO); ··· 432 393 i915_sw_fence_signaled(&request->semaphore)) 433 394 engine->saturated |= request->sched.semaphores; 434 395 435 - /* We may be recursing from the signal callback of another i915 fence */ 396 + engine->emit_fini_breadcrumb(request, 397 + request->ring->vaddr + request->postfix); 398 + 399 + trace_i915_request_execute(request); 400 + engine->serial++; 401 + result = true; 402 + 403 + xfer: /* We may be recursing from the signal callback of another i915 fence */ 436 404 spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING); 437 405 438 - list_move_tail(&request->sched.link, &engine->active.requests); 439 - 440 - GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)); 441 - set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags); 406 + if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)) 407 + list_move_tail(&request->sched.link, &engine->active.requests); 442 408 443 409 if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) && 444 410 !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) && ··· 454 410 455 411 spin_unlock(&request->lock); 456 412 457 - engine->emit_fini_breadcrumb(request, 458 - 
request->ring->vaddr + request->postfix); 459 - 460 - engine->serial++; 461 - 462 - trace_i915_request_execute(request); 413 + return result; 463 414 } 464 415 465 416 void i915_request_submit(struct i915_request *request)
+1 -1
drivers/gpu/drm/i915/i915_request.h
··· 292 292 293 293 void i915_request_add(struct i915_request *rq); 294 294 295 - void __i915_request_submit(struct i915_request *request); 295 + bool __i915_request_submit(struct i915_request *request); 296 296 void i915_request_submit(struct i915_request *request); 297 297 298 298 void i915_request_skip(struct i915_request *request, int error);
+1
drivers/gpu/drm/i915/intel_pch.c
··· 69 69 WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv)); 70 70 return PCH_CNP; 71 71 case INTEL_PCH_CMP_DEVICE_ID_TYPE: 72 + case INTEL_PCH_CMP2_DEVICE_ID_TYPE: 72 73 DRM_DEBUG_KMS("Found Comet Lake PCH (CMP)\n"); 73 74 WARN_ON(!IS_COFFEELAKE(dev_priv)); 74 75 /* CometPoint is CNP Compatible */
+1
drivers/gpu/drm/i915/intel_pch.h
··· 41 41 #define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300 42 42 #define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80 43 43 #define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280 44 + #define INTEL_PCH_CMP2_DEVICE_ID_TYPE 0x0680 44 45 #define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480 45 46 #define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00 46 47 #define INTEL_PCH_MCC2_DEVICE_ID_TYPE 0x3880
+6
drivers/gpu/drm/i915/selftests/i915_gem.c
··· 118 118 with_intel_runtime_pm(&i915->runtime_pm, wakeref) { 119 119 intel_gt_sanitize(&i915->gt, false); 120 120 i915_gem_sanitize(i915); 121 + 122 + mutex_lock(&i915->drm.struct_mutex); 123 + i915_gem_restore_gtt_mappings(i915); 124 + i915_gem_restore_fences(i915); 125 + mutex_unlock(&i915->drm.struct_mutex); 126 + 121 127 i915_gem_resume(i915); 122 128 } 123 129 }