Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-intel-fixes-2019-01-11' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

i915 fixes for v5.0-rc2:
- Disable PSR for Apple panels
- Broxton ERR_PTR error state fix
- Kabylake VECS workaround fix
- Unwind failure on pinning the gen7 ppgtt
- GVT workload request allocation fix

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87pnt35z8h.fsf@intel.com

+98 -39
+2
drivers/gpu/drm/drm_dp_helper.c
··· 1273 1273 { OUI(0x00, 0x22, 0xb9), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_CONSTANT_N) }, 1274 1274 /* LG LP140WF6-SPM1 eDP panel */ 1275 1275 { OUI(0x00, 0x22, 0xb9), DEVICE_ID('s', 'i', 'v', 'a', 'r', 'T'), false, BIT(DP_DPCD_QUIRK_CONSTANT_N) }, 1276 + /* Apple panels need some additional handling to support PSR */ 1277 + { OUI(0x00, 0x10, 0xfa), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_NO_PSR) } 1276 1278 }; 1277 1279 1278 1280 #undef OUI
+42 -22
drivers/gpu/drm/i915/gvt/scheduler.c
··· 356 356 return 0; 357 357 } 358 358 359 + static int 360 + intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload) 361 + { 362 + struct intel_vgpu *vgpu = workload->vgpu; 363 + struct intel_vgpu_submission *s = &vgpu->submission; 364 + struct i915_gem_context *shadow_ctx = s->shadow_ctx; 365 + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 366 + struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id]; 367 + struct i915_request *rq; 368 + int ret = 0; 369 + 370 + lockdep_assert_held(&dev_priv->drm.struct_mutex); 371 + 372 + if (workload->req) 373 + goto out; 374 + 375 + rq = i915_request_alloc(engine, shadow_ctx); 376 + if (IS_ERR(rq)) { 377 + gvt_vgpu_err("fail to allocate gem request\n"); 378 + ret = PTR_ERR(rq); 379 + goto out; 380 + } 381 + workload->req = i915_request_get(rq); 382 + out: 383 + return ret; 384 + } 385 + 359 386 /** 360 387 * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and 361 388 * shadow it as well, include ringbuffer,wa_ctx and ctx. 
··· 399 372 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 400 373 struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id]; 401 374 struct intel_context *ce; 402 - struct i915_request *rq; 403 375 int ret; 404 376 405 377 lockdep_assert_held(&dev_priv->drm.struct_mutex); 406 378 407 - if (workload->req) 379 + if (workload->shadow) 408 380 return 0; 409 381 410 382 ret = set_context_ppgtt_from_shadow(workload, shadow_ctx); ··· 443 417 goto err_shadow; 444 418 } 445 419 446 - rq = i915_request_alloc(engine, shadow_ctx); 447 - if (IS_ERR(rq)) { 448 - gvt_vgpu_err("fail to allocate gem request\n"); 449 - ret = PTR_ERR(rq); 450 - goto err_shadow; 451 - } 452 - workload->req = i915_request_get(rq); 453 - 454 - ret = populate_shadow_context(workload); 455 - if (ret) 456 - goto err_req; 457 - 420 + workload->shadow = true; 458 421 return 0; 459 - err_req: 460 - rq = fetch_and_zero(&workload->req); 461 - i915_request_put(rq); 462 422 err_shadow: 463 423 release_shadow_wa_ctx(&workload->wa_ctx); 464 424 err_unpin: ··· 683 671 mutex_lock(&vgpu->vgpu_lock); 684 672 mutex_lock(&dev_priv->drm.struct_mutex); 685 673 674 + ret = intel_gvt_workload_req_alloc(workload); 675 + if (ret) 676 + goto err_req; 677 + 686 678 ret = intel_gvt_scan_and_shadow_workload(workload); 687 679 if (ret) 688 680 goto out; 689 681 682 + ret = populate_shadow_context(workload); 683 + if (ret) { 684 + release_shadow_wa_ctx(&workload->wa_ctx); 685 + goto out; 686 + } 687 + 690 688 ret = prepare_workload(workload); 691 - 692 689 out: 693 - if (ret) 694 - workload->status = ret; 695 - 696 690 if (!IS_ERR_OR_NULL(workload->req)) { 697 691 gvt_dbg_sched("ring id %d submit workload to i915 %p\n", 698 692 ring_id, workload->req); 699 693 i915_request_add(workload->req); 700 694 workload->dispatched = true; 701 695 } 702 - 696 + err_req: 697 + if (ret) 698 + workload->status = ret; 703 699 mutex_unlock(&dev_priv->drm.struct_mutex); 704 700 mutex_unlock(&vgpu->vgpu_lock); 705 701 return ret;
+1
drivers/gpu/drm/i915/gvt/scheduler.h
··· 83 83 struct i915_request *req; 84 84 /* if this workload has been dispatched to i915? */ 85 85 bool dispatched; 86 + bool shadow; /* if workload has done shadow of guest request */ 86 87 int status; 87 88 88 89 struct intel_vgpu_mm *shadow_mm;
+9 -3
drivers/gpu/drm/i915/i915_debugfs.c
··· 984 984 intel_runtime_pm_get(i915); 985 985 gpu = i915_capture_gpu_state(i915); 986 986 intel_runtime_pm_put(i915); 987 - if (!gpu) 988 - return -ENOMEM; 987 + if (IS_ERR(gpu)) 988 + return PTR_ERR(gpu); 989 989 990 990 file->private_data = gpu; 991 991 return 0; ··· 1018 1018 1019 1019 static int i915_error_state_open(struct inode *inode, struct file *file) 1020 1020 { 1021 - file->private_data = i915_first_error_state(inode->i_private); 1021 + struct i915_gpu_state *error; 1022 + 1023 + error = i915_first_error_state(inode->i_private); 1024 + if (IS_ERR(error)) 1025 + return PTR_ERR(error); 1026 + 1027 + file->private_data = error; 1022 1028 return 0; 1023 1029 } 1024 1030
+12 -3
drivers/gpu/drm/i915/i915_gem_gtt.c
··· 2075 2075 int gen6_ppgtt_pin(struct i915_hw_ppgtt *base) 2076 2076 { 2077 2077 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); 2078 + int err; 2078 2079 2079 2080 /* 2080 2081 * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt ··· 2091 2090 * allocator works in address space sizes, so it's multiplied by page 2092 2091 * size. We allocate at the top of the GTT to avoid fragmentation. 2093 2092 */ 2094 - return i915_vma_pin(ppgtt->vma, 2095 - 0, GEN6_PD_ALIGN, 2096 - PIN_GLOBAL | PIN_HIGH); 2093 + err = i915_vma_pin(ppgtt->vma, 2094 + 0, GEN6_PD_ALIGN, 2095 + PIN_GLOBAL | PIN_HIGH); 2096 + if (err) 2097 + goto unpin; 2098 + 2099 + return 0; 2100 + 2101 + unpin: 2102 + ppgtt->pin_count = 0; 2103 + return err; 2097 2104 } 2098 2105 2099 2106 void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
+14 -9
drivers/gpu/drm/i915/i915_gpu_error.c
··· 1907 1907 { 1908 1908 struct i915_gpu_state *error; 1909 1909 1910 + /* Check if GPU capture has been disabled */ 1911 + error = READ_ONCE(i915->gpu_error.first_error); 1912 + if (IS_ERR(error)) 1913 + return error; 1914 + 1910 1915 error = kzalloc(sizeof(*error), GFP_ATOMIC); 1911 - if (!error) 1912 - return NULL; 1916 + if (!error) { 1917 + i915_disable_error_state(i915, -ENOMEM); 1918 + return ERR_PTR(-ENOMEM); 1919 + } 1913 1920 1914 1921 kref_init(&error->ref); 1915 1922 error->i915 = i915; ··· 1952 1945 return; 1953 1946 1954 1947 error = i915_capture_gpu_state(i915); 1955 - if (!error) { 1956 - DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); 1957 - i915_disable_error_state(i915, -ENOMEM); 1948 + if (IS_ERR(error)) 1958 1949 return; 1959 - } 1960 1950 1961 1951 i915_error_capture_msg(i915, error, engine_mask, error_msg); 1962 1952 DRM_INFO("%s\n", error->error_msg); ··· 1991 1987 1992 1988 spin_lock_irq(&i915->gpu_error.lock); 1993 1989 error = i915->gpu_error.first_error; 1994 - if (error) 1990 + if (!IS_ERR_OR_NULL(error)) 1995 1991 i915_gpu_state_get(error); 1996 1992 spin_unlock_irq(&i915->gpu_error.lock); 1997 1993 ··· 2004 2000 2005 2001 spin_lock_irq(&i915->gpu_error.lock); 2006 2002 error = i915->gpu_error.first_error; 2007 - i915->gpu_error.first_error = NULL; 2003 + if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */ 2004 + i915->gpu_error.first_error = NULL; 2008 2005 spin_unlock_irq(&i915->gpu_error.lock); 2009 2006 2010 - if (!IS_ERR(error)) 2007 + if (!IS_ERR_OR_NULL(error)) 2011 2008 i915_gpu_state_put(error); 2012 2009 } 2013 2010
+3 -1
drivers/gpu/drm/i915/i915_sysfs.c
··· 521 521 ssize_t ret; 522 522 523 523 gpu = i915_first_error_state(i915); 524 - if (gpu) { 524 + if (IS_ERR(gpu)) { 525 + ret = PTR_ERR(gpu); 526 + } else if (gpu) { 525 527 ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count); 526 528 i915_gpu_state_put(gpu); 527 529 } else {
+2 -1
drivers/gpu/drm/i915/intel_lrc.c
··· 2244 2244 if (ret) 2245 2245 return ret; 2246 2246 2247 + intel_engine_init_workarounds(engine); 2248 + 2247 2249 if (HAS_LOGICAL_RING_ELSQ(i915)) { 2248 2250 execlists->submit_reg = i915->regs + 2249 2251 i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine)); ··· 2312 2310 } 2313 2311 2314 2312 intel_engine_init_whitelist(engine); 2315 - intel_engine_init_workarounds(engine); 2316 2313 2317 2314 return 0; 2318 2315 }
+6
drivers/gpu/drm/i915/intel_psr.c
··· 274 274 DRM_DEBUG_KMS("eDP panel supports PSR version %x\n", 275 275 intel_dp->psr_dpcd[0]); 276 276 277 + if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) { 278 + DRM_DEBUG_KMS("PSR support not currently available for this panel\n"); 279 + return; 280 + } 281 + 277 282 if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) { 278 283 DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n"); 279 284 return; 280 285 } 286 + 281 287 dev_priv->psr.sink_support = true; 282 288 dev_priv->psr.sink_sync_latency = 283 289 intel_dp_get_sink_sync_latency(intel_dp);
+7
include/drm/drm_dp_helper.h
··· 1365 1365 * to 16 bits. So will give a constant value (0x8000) for compatibility. 1366 1366 */ 1367 1367 DP_DPCD_QUIRK_CONSTANT_N, 1368 + /** 1369 + * @DP_DPCD_QUIRK_NO_PSR: 1370 + * 1371 + * The device does not support PSR even if it reports that it does; the 1372 + * driver still needs to implement proper handling for such devices. 1373 + */ 1374 + DP_DPCD_QUIRK_NO_PSR, 1368 1375 }; 1369 1377 1370 1377 /**