Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-intel-next-fixes-2016-07-25' of git://anongit.freedesktop.org/drm-intel into drm-next

Bunch of fixes for the 4.8 merge pull, nothing out of the ordinary. All
suitably marked up with cc: stable where needed.

* tag 'drm-intel-next-fixes-2016-07-25' of git://anongit.freedesktop.org/drm-intel:
drm/i915/gen9: Add WaInPlaceDecompressionHang
drm/i915/guc: Revert "drm/i915/guc: enable GuC loading & submission by default"
drm/i915/bxt: Fix inadvertent CPU snooping due to incorrect MOCS config
drm/i915/gen9: Clean up MOCS table definitions
drm/i915: Set legacy properties when using legacy gamma set IOCTL. (v2)
drm/i915: Enable polling when we don't have hpd
drm/i915/vlv: Disable HPD in valleyview_crt_detect_hotplug()
drm/i915/vlv: Reset the ADPA in vlv_display_power_well_init()
drm/i915/vlv: Make intel_crt_reset() per-encoder
drm/i915: Unbreak interrupts on pre-gen6
drm/i915/breadcrumbs: Queue hangcheck before sleeping

+289 -63
+3
drivers/gpu/drm/i915/i915_drv.c
··· 2413 2413 2414 2414 assert_forcewakes_inactive(dev_priv); 2415 2415 2416 + if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv)) 2417 + intel_hpd_poll_init(dev_priv); 2418 + 2416 2419 DRM_DEBUG_KMS("Device suspended\n"); 2417 2420 return 0; 2418 2421 }
+7
drivers/gpu/drm/i915/i915_drv.h
··· 284 284 u32 short_port_mask; 285 285 struct work_struct dig_port_work; 286 286 287 + struct work_struct poll_init_work; 288 + bool poll_enabled; 289 + 287 290 /* 288 291 * if we get a HPD irq from DP and a HPD irq from non-DP 289 292 * the non-DP HPD could block the workqueue on a mode config ··· 2746 2743 #define SKL_REVID_D0 0x3 2747 2744 #define SKL_REVID_E0 0x4 2748 2745 #define SKL_REVID_F0 0x5 2746 + #define SKL_REVID_G0 0x6 2747 + #define SKL_REVID_H0 0x7 2749 2748 2750 2749 #define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until)) 2751 2750 ··· 2962 2957 void intel_hpd_init_work(struct drm_i915_private *dev_priv); 2963 2958 void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); 2964 2959 bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port); 2960 + bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin); 2961 + void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin); 2965 2962 2966 2963 /* i915_irq.c */ 2967 2964 static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
-9
drivers/gpu/drm/i915/i915_gem.c
··· 1501 1501 break; 1502 1502 } 1503 1503 1504 - /* Ensure that even if the GPU hangs, we get woken up. 1505 - * 1506 - * However, note that if no one is waiting, we never notice 1507 - * a gpu hang. Eventually, we will have to wait for a resource 1508 - * held by the GPU and so trigger a hangcheck. In the most 1509 - * pathological case, this will be upon memory starvation! 1510 - */ 1511 - i915_queue_hangcheck(req->i915); 1512 - 1513 1504 timeout_remain = io_schedule_timeout(timeout_remain); 1514 1505 if (timeout_remain == 0) { 1515 1506 ret = -ETIME;
+4 -4
drivers/gpu/drm/i915/i915_params.c
··· 54 54 .verbose_state_checks = 1, 55 55 .nuclear_pageflip = 0, 56 56 .edp_vswing = 0, 57 - .enable_guc_loading = -1, 58 - .enable_guc_submission = -1, 57 + .enable_guc_loading = 0, 58 + .enable_guc_submission = 0, 59 59 .guc_log_level = -1, 60 60 .enable_dp_mst = true, 61 61 .inject_load_failure = 0, ··· 203 203 module_param_named_unsafe(enable_guc_loading, i915.enable_guc_loading, int, 0400); 204 204 MODULE_PARM_DESC(enable_guc_loading, 205 205 "Enable GuC firmware loading " 206 - "(-1=auto [default], 0=never, 1=if available, 2=required)"); 206 + "(-1=auto, 0=never [default], 1=if available, 2=required)"); 207 207 208 208 module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, int, 0400); 209 209 MODULE_PARM_DESC(enable_guc_submission, 210 210 "Enable GuC submission " 211 - "(-1=auto [default], 0=never, 1=if available, 2=required)"); 211 + "(-1=auto, 0=never [default], 1=if available, 2=required)"); 212 212 213 213 module_param_named(guc_log_level, i915.guc_log_level, int, 0400); 214 214 MODULE_PARM_DESC(guc_log_level,
+3
drivers/gpu/drm/i915/i915_reg.h
··· 1686 1686 1687 1687 #define GEN7_TLB_RD_ADDR _MMIO(0x4700) 1688 1688 1689 + #define GEN9_GAMT_ECO_REG_RW_IA _MMIO(0x4ab0) 1690 + #define GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS (1<<18) 1691 + 1689 1692 #define GAMT_CHKN_BIT_REG _MMIO(0x4ab8) 1690 1693 #define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28) 1691 1694
+9
drivers/gpu/drm/i915/intel_breadcrumbs.c
··· 93 93 if (!b->irq_enabled || 94 94 test_bit(engine->id, &i915->gpu_error.missed_irq_rings)) 95 95 mod_timer(&b->fake_irq, jiffies + 1); 96 + 97 + /* Ensure that even if the GPU hangs, we get woken up. 98 + * 99 + * However, note that if no one is waiting, we never notice 100 + * a gpu hang. Eventually, we will have to wait for a resource 101 + * held by the GPU and so trigger a hangcheck. In the most 102 + * pathological case, this will be upon memory starvation! 103 + */ 104 + i915_queue_hangcheck(i915); 96 105 } 97 106 98 107 static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b)
+23 -5
drivers/gpu/drm/i915/intel_crt.c
··· 329 329 struct drm_device *dev = connector->dev; 330 330 struct intel_crt *crt = intel_attached_crt(connector); 331 331 struct drm_i915_private *dev_priv = to_i915(dev); 332 + bool reenable_hpd; 332 333 u32 adpa; 333 334 bool ret; 334 335 u32 save_adpa; 336 + 337 + /* 338 + * Doing a force trigger causes a hpd interrupt to get sent, which can 339 + * get us stuck in a loop if we're polling: 340 + * - We enable power wells and reset the ADPA 341 + * - output_poll_exec does force probe on VGA, triggering a hpd 342 + * - HPD handler waits for poll to unlock dev->mode_config.mutex 343 + * - output_poll_exec shuts off the ADPA, unlocks 344 + * dev->mode_config.mutex 345 + * - HPD handler runs, resets ADPA and brings us back to the start 346 + * 347 + * Just disable HPD interrupts here to prevent this 348 + */ 349 + reenable_hpd = intel_hpd_disable(dev_priv, crt->base.hpd_pin); 335 350 336 351 save_adpa = adpa = I915_READ(crt->adpa_reg); 337 352 DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa); ··· 371 356 ret = false; 372 357 373 358 DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret); 359 + 360 + if (reenable_hpd) 361 + intel_hpd_enable(dev_priv, crt->base.hpd_pin); 374 362 375 363 return ret; 376 364 } ··· 735 717 return 0; 736 718 } 737 719 738 - static void intel_crt_reset(struct drm_connector *connector) 720 + void intel_crt_reset(struct drm_encoder *encoder) 739 721 { 740 - struct drm_device *dev = connector->dev; 722 + struct drm_device *dev = encoder->dev; 741 723 struct drm_i915_private *dev_priv = to_i915(dev); 742 - struct intel_crt *crt = intel_attached_crt(connector); 724 + struct intel_crt *crt = intel_encoder_to_crt(to_intel_encoder(encoder)); 743 725 744 726 if (INTEL_INFO(dev)->gen >= 5) { 745 727 u32 adpa; ··· 761 743 */ 762 744 763 745 static const struct drm_connector_funcs intel_crt_connector_funcs = { 764 - .reset = intel_crt_reset, 765 746 .dpms = drm_atomic_helper_connector_dpms, 766 747 .detect = intel_crt_detect, 767 748 .fill_modes = drm_helper_probe_single_connector_modes, ··· 779 762 }; 780 763 781 764 static const struct drm_encoder_funcs intel_crt_enc_funcs = { 765 + .reset = intel_crt_reset, 782 766 .destroy = intel_encoder_destroy, 783 767 }; 784 768 ··· 922 904 dev_priv->fdi_rx_config = I915_READ(FDI_RX_CTL(PIPE_A)) & fdi_config; 923 905 } 924 906 925 - intel_crt_reset(connector); 907 + intel_crt_reset(&crt->base.base); 926 908 }
+43 -1
drivers/gpu/drm/i915/intel_display.c
··· 13924 13924 13925 13925 #undef for_each_intel_crtc_masked 13926 13926 13927 + /* 13928 + * FIXME: Remove this once i915 is fully DRIVER_ATOMIC by calling 13929 + * drm_atomic_helper_legacy_gamma_set() directly. 13930 + */ 13931 + static int intel_atomic_legacy_gamma_set(struct drm_crtc *crtc, 13932 + u16 *red, u16 *green, u16 *blue, 13933 + uint32_t size) 13934 + { 13935 + struct drm_device *dev = crtc->dev; 13936 + struct drm_mode_config *config = &dev->mode_config; 13937 + struct drm_crtc_state *state; 13938 + int ret; 13939 + 13940 + ret = drm_atomic_helper_legacy_gamma_set(crtc, red, green, blue, size); 13941 + if (ret) 13942 + return ret; 13943 + 13944 + /* 13945 + * Make sure we update the legacy properties so this works when 13946 + * atomic is not enabled. 13947 + */ 13948 + 13949 + state = crtc->state; 13950 + 13951 + drm_object_property_set_value(&crtc->base, 13952 + config->degamma_lut_property, 13953 + (state->degamma_lut) ? 13954 + state->degamma_lut->base.id : 0); 13955 + 13956 + drm_object_property_set_value(&crtc->base, 13957 + config->ctm_property, 13958 + (state->ctm) ? 13959 + state->ctm->base.id : 0); 13960 + 13961 + drm_object_property_set_value(&crtc->base, 13962 + config->gamma_lut_property, 13963 + (state->gamma_lut) ? 13964 + state->gamma_lut->base.id : 0); 13965 + 13966 + return 0; 13967 + } 13968 + 13927 13969 static const struct drm_crtc_funcs intel_crtc_funcs = { 13928 - .gamma_set = drm_atomic_helper_legacy_gamma_set, 13970 + .gamma_set = intel_atomic_legacy_gamma_set, 13929 13971 .set_config = drm_atomic_helper_set_config, 13930 13972 .set_property = drm_atomic_helper_crtc_set_property, 13931 13973 .destroy = intel_crtc_destroy,
+3 -1
drivers/gpu/drm/i915/intel_drv.h
··· 1102 1102 1103 1103 /* intel_crt.c */ 1104 1104 void intel_crt_init(struct drm_device *dev); 1105 - 1105 + void intel_crt_reset(struct drm_encoder *encoder); 1106 1106 1107 1107 /* intel_ddi.c */ 1108 1108 void intel_ddi_clk_select(struct intel_encoder *encoder, ··· 1425 1425 1426 1426 /* intel_dvo.c */ 1427 1427 void intel_dvo_init(struct drm_device *dev); 1428 + /* intel_hotplug.c */ 1429 + void intel_hpd_poll_init(struct drm_i915_private *dev_priv); 1428 1430 1429 1431 1430 1432 /* legacy fbdev emulation in intel_fbdev.c */
+108 -15
drivers/gpu/drm/i915/intel_hotplug.c
··· 452 452 * 453 453 * This is a separate step from interrupt enabling to simplify the locking rules 454 454 * in the driver load and resume code. 455 + * 456 + * Also see: intel_hpd_poll_init(), which enables connector polling 455 457 */ 456 458 void intel_hpd_init(struct drm_i915_private *dev_priv) 457 459 { 458 - struct drm_device *dev = &dev_priv->drm; 459 - struct drm_mode_config *mode_config = &dev->mode_config; 460 - struct drm_connector *connector; 461 460 int i; 462 461 463 462 for_each_hpd_pin(i) { 464 463 dev_priv->hotplug.stats[i].count = 0; 465 464 dev_priv->hotplug.stats[i].state = HPD_ENABLED; 466 465 } 467 - list_for_each_entry(connector, &mode_config->connector_list, head) { 468 - struct intel_connector *intel_connector = to_intel_connector(connector); 469 - connector->polled = intel_connector->polled; 470 466 471 - /* MST has a dynamic intel_connector->encoder and it's reprobing 472 - * is all handled by the MST helpers. */ 473 - if (intel_connector->mst_port) 474 - continue; 475 - 476 - if (!connector->polled && I915_HAS_HOTPLUG(dev) && 477 - intel_connector->encoder->hpd_pin > HPD_NONE) 478 - connector->polled = DRM_CONNECTOR_POLL_HPD; 479 - } 467 + WRITE_ONCE(dev_priv->hotplug.poll_enabled, false); 468 + schedule_work(&dev_priv->hotplug.poll_init_work); 480 469 481 470 /* 482 471 * Interrupt setup is already guaranteed to be single-threaded, this is ··· 477 488 spin_unlock_irq(&dev_priv->irq_lock); 478 489 } 479 490 491 + void i915_hpd_poll_init_work(struct work_struct *work) { 492 + struct drm_i915_private *dev_priv = 493 + container_of(work, struct drm_i915_private, 494 + hotplug.poll_init_work); 495 + struct drm_device *dev = &dev_priv->drm; 496 + struct drm_mode_config *mode_config = &dev->mode_config; 497 + struct drm_connector *connector; 498 + bool enabled; 499 + 500 + mutex_lock(&dev->mode_config.mutex); 501 + 502 + enabled = READ_ONCE(dev_priv->hotplug.poll_enabled); 503 + 504 + list_for_each_entry(connector, &mode_config->connector_list, head) { 505 + struct intel_connector *intel_connector = 506 + to_intel_connector(connector); 507 + connector->polled = intel_connector->polled; 508 + 509 + /* MST has a dynamic intel_connector->encoder and it's reprobing 510 + * is all handled by the MST helpers. */ 511 + if (intel_connector->mst_port) 512 + continue; 513 + 514 + if (!connector->polled && I915_HAS_HOTPLUG(dev) && 515 + intel_connector->encoder->hpd_pin > HPD_NONE) { 516 + connector->polled = enabled ? 517 + DRM_CONNECTOR_POLL_CONNECT | 518 + DRM_CONNECTOR_POLL_DISCONNECT : 519 + DRM_CONNECTOR_POLL_HPD; 520 + } 521 + } 522 + 523 + if (enabled) 524 + drm_kms_helper_poll_enable_locked(dev); 525 + 526 + mutex_unlock(&dev->mode_config.mutex); 527 + 528 + /* 529 + * We might have missed any hotplugs that happened while we were 530 + * in the middle of disabling polling 531 + */ 532 + if (!enabled) 533 + drm_helper_hpd_irq_event(dev); 534 + } 535 + 536 + /** 537 + * intel_hpd_poll_init - enables/disables polling for connectors with hpd 538 + * @dev_priv: i915 device instance 539 + * @enabled: Whether to enable or disable polling 540 + * 541 + * This function enables polling for all connectors, regardless of whether or 542 + * not they support hotplug detection. Under certain conditions HPD may not be 543 + * functional. On most Intel GPUs, this happens when we enter runtime suspend. 544 + * On Valleyview and Cherryview systems, this also happens when we shut off all 545 + * of the powerwells. 546 + * 547 + * Since this function can get called in contexts where we're already holding 548 + * dev->mode_config.mutex, we do the actual hotplug enabling in a separate 549 + * worker. 550 + * 551 + * Also see: intel_hpd_init(), which restores hpd handling. 552 + */ 553 + void intel_hpd_poll_init(struct drm_i915_private *dev_priv) 554 + { 555 + WRITE_ONCE(dev_priv->hotplug.poll_enabled, true); 556 + 557 + /* 558 + * We might already be holding dev->mode_config.mutex, so do this in a 559 + * separate worker 560 + * As well, there's no issue if we race here since we always reschedule 561 + * this worker anyway 562 + */ 563 + schedule_work(&dev_priv->hotplug.poll_init_work); 564 + } 565 + 480 566 void intel_hpd_init_work(struct drm_i915_private *dev_priv) 481 567 { 482 568 INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func); 483 569 INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func); 570 + INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work); 484 571 INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work, 485 572 intel_hpd_irq_storm_reenable_work); 486 573 } ··· 573 508 574 509 cancel_work_sync(&dev_priv->hotplug.dig_port_work); 575 510 cancel_work_sync(&dev_priv->hotplug.hotplug_work); 511 + cancel_work_sync(&dev_priv->hotplug.poll_init_work); 576 512 cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work); 513 + } 514 + 515 + bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin) 516 + { 517 + bool ret = false; 518 + 519 + if (pin == HPD_NONE) 520 + return false; 521 + 522 + spin_lock_irq(&dev_priv->irq_lock); 523 + if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) { 524 + dev_priv->hotplug.stats[pin].state = HPD_DISABLED; 525 + ret = true; 526 + } 527 + spin_unlock_irq(&dev_priv->irq_lock); 528 + 529 + return ret; 530 + } 531 + 532 + void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin) 533 + { 534 + if (pin == HPD_NONE) 535 + return; 536 + 537 + spin_lock_irq(&dev_priv->irq_lock); 538 + dev_priv->hotplug.stats[pin].state = HPD_ENABLED; 539 + spin_unlock_irq(&dev_priv->irq_lock); 540 + }
+61 -27
drivers/gpu/drm/i915/intel_mocs.c
··· 66 66 #define L3_WB 3 67 67 68 68 /* Target cache */ 69 - #define ELLC 0 70 - #define LLC 1 71 - #define LLC_ELLC 2 69 + #define LE_TC_PAGETABLE 0 70 + #define LE_TC_LLC 1 71 + #define LE_TC_LLC_ELLC 2 72 + #define LE_TC_LLC_ELLC_ALT 3 72 73 73 74 /* 74 75 * MOCS tables ··· 97 96 * end. 98 97 */ 99 98 static const struct drm_i915_mocs_entry skylake_mocs_table[] = { 100 - /* { 0x00000009, 0x0010 } */ 101 - { (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) | 102 - LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)), 103 - (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) }, 104 - /* { 0x00000038, 0x0030 } */ 105 - { (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) | 106 - LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)), 107 - (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }, 108 - /* { 0x0000003b, 0x0030 } */ 109 - { (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) | 110 - LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)), 111 - (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) } 99 + { /* 0x00000009 */ 100 + .control_value = LE_CACHEABILITY(LE_UC) | 101 + LE_TGT_CACHE(LE_TC_LLC_ELLC) | 102 + LE_LRUM(0) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | 103 + LE_PFM(0) | LE_SCF(0), 104 + 105 + /* 0x0010 */ 106 + .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC), 107 + }, 108 + { 109 + /* 0x00000038 */ 110 + .control_value = LE_CACHEABILITY(LE_PAGETABLE) | 111 + LE_TGT_CACHE(LE_TC_LLC_ELLC) | 112 + LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | 113 + LE_PFM(0) | LE_SCF(0), 114 + /* 0x0030 */ 115 + .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB), 116 + }, 117 + { 118 + /* 0x0000003b */ 119 + .control_value = LE_CACHEABILITY(LE_WB) | 120 + LE_TGT_CACHE(LE_TC_LLC_ELLC) | 121 + LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | 122 + LE_PFM(0) | LE_SCF(0), 123 + /* 0x0030 */ 124 + .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB), 125 + }, 112 126 }; 113 127 114 128 /* NOTE: the LE_TGT_CACHE is not used on Broxton */ 115 129 static const struct drm_i915_mocs_entry broxton_mocs_table[] = { 116 - /* { 0x00000009, 0x0010 } */ 117 - { (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) | 118 - LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)), 119 - (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) }, 120 - /* { 0x00000038, 0x0030 } */ 121 - { (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) | 122 - LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)), 123 - (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }, 124 - /* { 0x0000003b, 0x0030 } */ 125 - { (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) | 126 - LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)), 127 - (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) } 130 + { 131 + /* 0x00000009 */ 132 + .control_value = LE_CACHEABILITY(LE_UC) | 133 + LE_TGT_CACHE(LE_TC_LLC_ELLC) | 134 + LE_LRUM(0) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | 135 + LE_PFM(0) | LE_SCF(0), 136 + 137 + /* 0x0010 */ 138 + .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC), 139 + }, 140 + { 141 + /* 0x00000038 */ 142 + .control_value = LE_CACHEABILITY(LE_PAGETABLE) | 143 + LE_TGT_CACHE(LE_TC_LLC_ELLC) | 144 + LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | 145 + LE_PFM(0) | LE_SCF(0), 146 + 147 + /* 0x0030 */ 148 + .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB), 149 + }, 150 + { 151 + /* 0x00000039 */ 152 + .control_value = LE_CACHEABILITY(LE_UC) | 153 + LE_TGT_CACHE(LE_TC_LLC_ELLC) | 154 + LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | 155 + LE_PFM(0) | LE_SCF(0), 156 + 157 + /* 0x0030 */ 158 + .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB), 159 + }, 128 160 }; 129 161 130 162 /**
+16 -1
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 1109 1109 /* WaDisableGafsUnitClkGating:skl */ 1110 1110 WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); 1111 1111 1112 + /* WaInPlaceDecompressionHang:skl */ 1113 + if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER)) 1114 + WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA, 1115 + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); 1116 + 1112 1117 /* WaDisableLSQCROPERFforOCL:skl */ 1113 1118 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); 1114 1119 if (ret) ··· 1183 1178 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, 1184 1179 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); 1185 1180 1181 + /* WaInPlaceDecompressionHang:bxt */ 1182 + if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER)) 1183 + WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA, 1184 + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); 1185 + 1186 1186 return 0; 1187 1187 } 1188 1188 ··· 1234 1224 WA_SET_BIT_MASKED( 1235 1225 GEN7_HALF_SLICE_CHICKEN1, 1236 1226 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); 1227 + 1228 + /* WaInPlaceDecompressionHang:kbl */ 1229 + WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA, 1230 + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); 1237 1231 1238 1232 /* WaDisableLSQCROPERFforOCL:kbl */ 1239 1233 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); ··· 1319 1305 if (IS_GEN(dev_priv, 6, 7)) 1320 1306 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); 1321 1307 1322 - I915_WRITE_IMR(engine, ~engine->irq_keep_mask); 1308 + if (INTEL_INFO(dev_priv)->gen >= 6) 1309 + I915_WRITE_IMR(engine, ~engine->irq_keep_mask); 1323 1310 1324 1311 return init_workarounds_ring(engine); 1325 1312 }
+9
drivers/gpu/drm/i915/intel_runtime_pm.c
··· 1078 1078 1079 1079 static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) 1080 1080 { 1081 + struct intel_encoder *encoder; 1081 1082 enum pipe pipe; 1082 1083 1083 1084 /* ··· 1114 1113 1115 1114 intel_hpd_init(dev_priv); 1116 1115 1116 + /* Re-enable the ADPA, if we have one */ 1117 + for_each_intel_encoder(&dev_priv->drm, encoder) { 1118 + if (encoder->type == INTEL_OUTPUT_ANALOG) 1119 + intel_crt_reset(&encoder->base); 1120 + } 1121 + 1117 1122 i915_redisable_vga_power_on(&dev_priv->drm); 1118 1123 } 1119 1124 ··· 1133 1126 synchronize_irq(dev_priv->drm.irq); 1134 1127 1135 1128 intel_power_sequencer_reset(dev_priv); 1129 + 1130 + intel_hpd_poll_init(dev_priv); 1136 1131 } 1137 1132 1138 1133 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,