Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-intel-fixes-2020-03-12' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

drm/i915 fixes for v5.6-rc6:
- hard lockup fix
- GVT fixes
- 32-bit alignment issue fix
- timeline wait fixes
- cacheline_retire and free

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87lfo6ksvw.fsf@intel.com

+66 -29
+2 -1
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -423,7 +423,8 @@
 	if (unlikely(entry->flags & eb->invalid_flags))
 		return -EINVAL;
 
-	if (unlikely(entry->alignment && !is_power_of_2(entry->alignment)))
+	if (unlikely(entry->alignment &&
+		     !is_power_of_2_u64(entry->alignment)))
 		return -EINVAL;
 
 	/*
+18 -11
drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1679,11 +1679,9 @@
 	if (!intel_engine_has_timeslices(engine))
 		return false;
 
-	if (list_is_last(&rq->sched.link, &engine->active.requests))
-		return false;
-
-	hint = max(rq_prio(list_next_entry(rq, sched.link)),
-		   engine->execlists.queue_priority_hint);
+	hint = engine->execlists.queue_priority_hint;
+	if (!list_is_last(&rq->sched.link, &engine->active.requests))
+		hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));
 
 	return hint >= effective_prio(rq);
 }
@@ -1721,6 +1723,18 @@
 		return;
 
 	set_timer_ms(&engine->execlists.timer, active_timeslice(engine));
+}
+
+static void start_timeslice(struct intel_engine_cs *engine)
+{
+	struct intel_engine_execlists *execlists = &engine->execlists;
+
+	execlists->switch_priority_hint = execlists->queue_priority_hint;
+
+	if (timer_pending(&execlists->timer))
+		return;
+
+	set_timer_ms(&execlists->timer, timeslice(engine));
 }
 
 static void record_preemption(struct intel_engine_execlists *execlists)
@@ -1898,11 +1888,7 @@
 				 * Even if ELSP[1] is occupied and not worthy
 				 * of timeslices, our queue might be.
 				 */
-				if (!execlists->timer.expires &&
-				    need_timeslice(engine, last))
-					set_timer_ms(&execlists->timer,
-						     timeslice(engine));
-
+				start_timeslice(engine);
 				return;
 			}
 		}
@@ -1933,7 +1927,8 @@
 
 			if (last && !can_merge_rq(last, rq)) {
 				spin_unlock(&ve->base.active.lock);
-				return; /* leave this for another */
+				start_timeslice(engine);
+				return; /* leave this for another sibling */
 			}
 
 			ENGINE_TRACE(engine,
+6 -2
drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -192,11 +192,15 @@
 
 static void cacheline_free(struct intel_timeline_cacheline *cl)
 {
+	if (!i915_active_acquire_if_busy(&cl->active)) {
+		__idle_cacheline_free(cl);
+		return;
+	}
+
 	GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE));
 	cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE);
 
-	if (i915_active_is_idle(&cl->active))
-		__idle_cacheline_free(cl);
+	i915_active_release(&cl->active);
 }
 
 int intel_timeline_init(struct intel_timeline *timeline,
+2 -1
drivers/gpu/drm/i915/gvt/display.c
@@ -457,7 +457,8 @@
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
 	/* TODO: add more platforms support */
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+	    IS_COFFEELAKE(dev_priv)) {
 		if (connected) {
 			vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
 				SFUSE_STRAP_DDID_DETECTED;
+2 -3
drivers/gpu/drm/i915/gvt/opregion.c
@@ -147,15 +147,14 @@
 	/* there's features depending on version! */
 	v->header.version = 155;
 	v->header.header_size = sizeof(v->header);
-	v->header.vbt_size = sizeof(struct vbt) - sizeof(v->header);
+	v->header.vbt_size = sizeof(struct vbt);
 	v->header.bdb_offset = offsetof(struct vbt, bdb_header);
 
 	strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK");
 	v->bdb_header.version = 186; /* child_dev_size = 33 */
 	v->bdb_header.header_size = sizeof(v->bdb_header);
 
-	v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header)
-		- sizeof(struct bdb_header);
+	v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header);
 
 	/* general features */
 	v->general_features_header.id = BDB_GENERAL_FEATURES;
+9 -3
drivers/gpu/drm/i915/gvt/vgpu.c
@@ -272,10 +272,17 @@
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 
-	mutex_lock(&vgpu->vgpu_lock);
-
 	WARN(vgpu->active, "vGPU is still active!\n");
 
+	/*
+	 * remove idr first so later clean can judge if need to stop
+	 * service if no active vgpu.
+	 */
+	mutex_lock(&gvt->lock);
+	idr_remove(&gvt->vgpu_idr, vgpu->id);
+	mutex_unlock(&gvt->lock);
+
+	mutex_lock(&vgpu->vgpu_lock);
 	intel_gvt_debugfs_remove_vgpu(vgpu);
 	intel_vgpu_clean_sched_policy(vgpu);
 	intel_vgpu_clean_submission(vgpu);
@@ -297,7 +290,6 @@
 	mutex_unlock(&vgpu->vgpu_lock);
 
 	mutex_lock(&gvt->lock);
-	idr_remove(&gvt->vgpu_idr, vgpu->id);
 	if (idr_is_empty(&gvt->vgpu_idr))
 		intel_gvt_clean_irq(gvt);
 	intel_gvt_update_vgpu_types(gvt);
+20 -8
drivers/gpu/drm/i915/i915_request.c
@@ -527,19 +527,31 @@
 	return NOTIFY_DONE;
 }
 
+static void irq_semaphore_cb(struct irq_work *wrk)
+{
+	struct i915_request *rq =
+		container_of(wrk, typeof(*rq), semaphore_work);
+
+	i915_schedule_bump_priority(rq, I915_PRIORITY_NOSEMAPHORE);
+	i915_request_put(rq);
+}
+
 static int __i915_sw_fence_call
 semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 {
-	struct i915_request *request =
-		container_of(fence, typeof(*request), semaphore);
+	struct i915_request *rq = container_of(fence, typeof(*rq), semaphore);
 
 	switch (state) {
 	case FENCE_COMPLETE:
-		i915_schedule_bump_priority(request, I915_PRIORITY_NOSEMAPHORE);
+		if (!(READ_ONCE(rq->sched.attr.priority) & I915_PRIORITY_NOSEMAPHORE)) {
+			i915_request_get(rq);
+			init_irq_work(&rq->semaphore_work, irq_semaphore_cb);
+			irq_work_queue(&rq->semaphore_work);
+		}
 		break;
 
 	case FENCE_FREE:
-		i915_request_put(request);
+		i915_request_put(rq);
 		break;
 	}
@@ -788,8 +776,8 @@
 	struct dma_fence *fence;
 	int err;
 
-	GEM_BUG_ON(i915_request_timeline(rq) ==
-		   rcu_access_pointer(signal->timeline));
+	if (i915_request_timeline(rq) == rcu_access_pointer(signal->timeline))
+		return 0;
 
 	if (i915_request_started(signal))
 		return 0;
@@ -833,7 +821,7 @@
 		return 0;
 
 	err = 0;
-	if (intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
+	if (!intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
 		err = i915_sw_fence_await_dma_fence(&rq->submit,
 						    fence, 0,
 						    I915_FENCE_GFP);
@@ -1330,9 +1318,9 @@
 	 * decide whether to preempt the entire chain so that it is ready to
 	 * run at the earliest possible convenience.
 	 */
-	i915_sw_fence_commit(&rq->semaphore);
 	if (attr && rq->engine->schedule)
 		rq->engine->schedule(rq, attr);
+	i915_sw_fence_commit(&rq->semaphore);
 	i915_sw_fence_commit(&rq->submit);
 }
+2
drivers/gpu/drm/i915/i915_request.h
@@ -26,6 +26,7 @@
 #define I915_REQUEST_H
 
 #include <linux/dma-fence.h>
+#include <linux/irq_work.h>
 #include <linux/lockdep.h>
 
 #include "gem/i915_gem_context_types.h"
@@ -209,6 +208,7 @@
 	};
 	struct list_head execute_cb;
 	struct i915_sw_fence semaphore;
+	struct irq_work semaphore_work;
 
 	/*
 	 * A list of everyone we wait upon, and everyone who waits upon us.
+5
drivers/gpu/drm/i915/i915_utils.h
@@ -234,6 +234,11 @@
 	__idx;                                                          \
 })
 
+static inline bool is_power_of_2_u64(u64 n)
+{
+	return (n != 0 && ((n & (n - 1)) == 0));
+}
+
 static inline void __list_del_many(struct list_head *head,
 				   struct list_head *first)
 {