Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'drm-intel-fixes' of git://people.freedesktop.org/~danvet/drm-intel into drm-next

So here's my promised pile of fixes for 3.9. I've dropped the core prep
patches for vt-switchless suspend/resume as discussed on irc. Highlights:
- Fix dmar on g4x. Not really gfx related, but I'm fed up with getting
blamed for dmar crapouts.
- Disable wc ptes updates on ilk when dmar is enabled (Chris). So again,
dmar, but this time gfx related :(
- Reduced range support for hsw, using the pipe CSC (Ville).
- Fixup pll limits for gen3/4 (Patrick Jakobsson). The sdvo patch is
already confirmed to fix 2 bug reports, so added cc: stable on that one.
- Regression fix for 8bit fb console (Ville).
- Preserve lane reversal bits on DDI/FDI ports (Damien).
- Page flip vs. gpu hang fixes (Ville). Unfortunately not quite all of
them, need to decide what to do with the currently still in-flight ones.
- Panel fitter regression fix from Mika Kuoppala (was accidentally left on
on some pipes with the new modset code since 3.7). This also improves
the modeset sequence and might help a few other unrelated issues with
lvds.
- Write backlight regs even harder ... another installment in our eternal
fight against the BIOS and backlights.
- Fixup lid notifier vs. suspend/resume races (Zhang Rui). Prep work for
new ACPI stuff, but closing the race itself seems worthwhile on its own.
- A few other small fixes and tiny cleanups all over.

Lots of the patches are cc: stable since I've stalled on a few
not-so-important fixes for 3.8 due to the grumpy noise Linus made.

* 'drm-intel-fixes' of git://people.freedesktop.org/~danvet/drm-intel: (33 commits)
intel/iommu: force writebuffer-flush quirk on Gen 4 Chipsets
drm/i915: Disable WC PTE updates to w/a buggy IOMMU on ILK
drm/i915: Implement pipe CSC based limited range RGB output
drm/i915: inverted brightness quirk for Acer Aspire 4736Z
drm/i915: Print the hw context status in debugfs
drm/i915: Use HAS_L3_GPU_CACHE in i915_gem_l3_remap
drm/i915: Fix PIPE_CONTROL DW/QW write through global GTT on IVB+
drm/i915: Set i9xx sdvo clock limits according to specifications
drm/i915: Set i9xx lvds clock limits according to specifications
drm/i915: Preserve the DDI link reversal configuration
drm/i915: Preserve the FDI line reversal override bit on CPT
drm/i915: add missing \n to UTS_RELEASE in the error_state
drm: Use C8 instead of RGB332 when determining the format from depth/bpp
drm: Fill depth/bits_per_pixel for C8 format
drm/i915: don't clflush gem objects in stolen memory
drm/i915: Don't wait for page flips if there was GPU reset
drm/i915: Kill obj->pending_flip
drm/i915: Fix a typo in a intel_modeset_stage_output_state() comment
drm/i915: remove bogus mutex_unlock from error-path
drm/i915: Print the pipe control page GTT address
...

+482 -238
+39 -3
drivers/char/agp/intel-gtt.c
··· 572 572 intel_gtt_teardown_scratch_page(); 573 573 } 574 574 575 + /* Certain Gen5 chipsets require require idling the GPU before 576 + * unmapping anything from the GTT when VT-d is enabled. 577 + */ 578 + static inline int needs_ilk_vtd_wa(void) 579 + { 580 + #ifdef CONFIG_INTEL_IOMMU 581 + const unsigned short gpu_devid = intel_private.pcidev->device; 582 + 583 + /* Query intel_iommu to see if we need the workaround. Presumably that 584 + * was loaded first. 585 + */ 586 + if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || 587 + gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) && 588 + intel_iommu_gfx_mapped) 589 + return 1; 590 + #endif 591 + return 0; 592 + } 593 + 594 + static bool intel_gtt_can_wc(void) 595 + { 596 + if (INTEL_GTT_GEN <= 2) 597 + return false; 598 + 599 + if (INTEL_GTT_GEN >= 6) 600 + return false; 601 + 602 + /* Reports of major corruption with ILK vt'd enabled */ 603 + if (needs_ilk_vtd_wa()) 604 + return false; 605 + 606 + return true; 607 + } 608 + 575 609 static int intel_gtt_init(void) 576 610 { 577 611 u32 gma_addr; ··· 635 601 gtt_map_size = intel_private.gtt_total_entries * 4; 636 602 637 603 intel_private.gtt = NULL; 638 - if (INTEL_GTT_GEN < 6 && INTEL_GTT_GEN > 2) 604 + if (intel_gtt_can_wc()) 639 605 intel_private.gtt = ioremap_wc(intel_private.gtt_bus_addr, 640 606 gtt_map_size); 641 607 if (intel_private.gtt == NULL) ··· 1106 1072 writel(addr | pte_flags, intel_private.gtt + entry); 1107 1073 } 1108 1074 1109 - 1110 1075 static int i9xx_setup(void) 1111 1076 { 1112 1077 u32 reg_addr, gtt_addr; ··· 1404 1371 } 1405 1372 EXPORT_SYMBOL(intel_gmch_probe); 1406 1373 1407 - void intel_gtt_get(size_t *gtt_total, size_t *stolen_size) 1374 + void intel_gtt_get(size_t *gtt_total, size_t *stolen_size, 1375 + phys_addr_t *mappable_base, unsigned long *mappable_end) 1408 1376 { 1409 1377 *gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT; 1410 1378 *stolen_size = intel_private.stolen_size; 1379 + *mappable_base = 
intel_private.gma_bus_addr; 1380 + *mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT; 1411 1381 } 1412 1382 EXPORT_SYMBOL(intel_gtt_get); 1413 1383
+2 -1
drivers/gpu/drm/drm_crtc.c
··· 2267 2267 2268 2268 switch (bpp) { 2269 2269 case 8: 2270 - fmt = DRM_FORMAT_RGB332; 2270 + fmt = DRM_FORMAT_C8; 2271 2271 break; 2272 2272 case 16: 2273 2273 if (depth == 15) ··· 3870 3870 int *bpp) 3871 3871 { 3872 3872 switch (format) { 3873 + case DRM_FORMAT_C8: 3873 3874 case DRM_FORMAT_RGB332: 3874 3875 case DRM_FORMAT_BGR233: 3875 3876 *depth = 8;
+11 -2
drivers/gpu/drm/i915/i915_debugfs.c
··· 694 694 695 695 seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, 696 696 error->time.tv_usec); 697 - seq_printf(m, "Kernel: " UTS_RELEASE); 697 + seq_printf(m, "Kernel: " UTS_RELEASE "\n"); 698 698 seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); 699 699 seq_printf(m, "EIR: 0x%08x\n", error->eir); 700 700 seq_printf(m, "IER: 0x%08x\n", error->ier); ··· 1484 1484 struct drm_info_node *node = (struct drm_info_node *) m->private; 1485 1485 struct drm_device *dev = node->minor->dev; 1486 1486 drm_i915_private_t *dev_priv = dev->dev_private; 1487 - int ret; 1487 + struct intel_ring_buffer *ring; 1488 + int ret, i; 1488 1489 1489 1490 ret = mutex_lock_interruptible(&dev->mode_config.mutex); 1490 1491 if (ret) ··· 1501 1500 seq_printf(m, "render context "); 1502 1501 describe_obj(m, dev_priv->ips.renderctx); 1503 1502 seq_printf(m, "\n"); 1503 + } 1504 + 1505 + for_each_ring(ring, dev_priv, i) { 1506 + if (ring->default_context) { 1507 + seq_printf(m, "HW default context %s ring ", ring->name); 1508 + describe_obj(m, ring->default_context->obj); 1509 + seq_printf(m, "\n"); 1510 + } 1504 1511 } 1505 1512 1506 1513 mutex_unlock(&dev->mode_config.mutex);
+1
drivers/gpu/drm/i915/i915_dma.c
··· 1610 1610 mutex_init(&dev_priv->dpio_lock); 1611 1611 1612 1612 mutex_init(&dev_priv->rps.hw_lock); 1613 + mutex_init(&dev_priv->modeset_restore_lock); 1613 1614 1614 1615 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) 1615 1616 dev_priv->num_pipe = 3;
+8 -5
drivers/gpu/drm/i915/i915_drv.c
··· 470 470 { 471 471 struct drm_i915_private *dev_priv = dev->dev_private; 472 472 473 + /* ignore lid events during suspend */ 474 + mutex_lock(&dev_priv->modeset_restore_lock); 475 + dev_priv->modeset_restore = MODESET_SUSPENDED; 476 + mutex_unlock(&dev_priv->modeset_restore_lock); 477 + 473 478 intel_set_power_well(dev, true); 474 479 475 480 drm_kms_helper_poll_disable(dev); ··· 500 495 i915_save_state(dev); 501 496 502 497 intel_opregion_fini(dev); 503 - 504 - /* Modeset on resume, not lid events */ 505 - dev_priv->modeset_on_lid = 0; 506 498 507 499 console_lock(); 508 500 intel_fbdev_set_suspend(dev, 1); ··· 576 574 577 575 intel_opregion_init(dev); 578 576 579 - dev_priv->modeset_on_lid = 0; 580 - 581 577 /* 582 578 * The console lock can be pretty contented on resume due 583 579 * to all the printk activity. Try to keep it out of the hot ··· 588 588 schedule_work(&dev_priv->console_resume_work); 589 589 } 590 590 591 + mutex_lock(&dev_priv->modeset_restore_lock); 592 + dev_priv->modeset_restore = MODESET_DONE; 593 + mutex_unlock(&dev_priv->modeset_restore_lock); 591 594 return error; 592 595 } 593 596
+12 -12
drivers/gpu/drm/i915/i915_drv.h
··· 399 399 400 400 /* global gtt ops */ 401 401 int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total, 402 - size_t *stolen); 402 + size_t *stolen, phys_addr_t *mappable_base, 403 + unsigned long *mappable_end); 403 404 void (*gtt_remove)(struct drm_device *dev); 404 405 void (*gtt_clear_range)(struct drm_device *dev, 405 406 unsigned int first_entry, ··· 847 846 unsigned int stop_rings; 848 847 }; 849 848 849 + enum modeset_restore { 850 + MODESET_ON_LID_OPEN, 851 + MODESET_DONE, 852 + MODESET_SUSPENDED, 853 + }; 854 + 850 855 typedef struct drm_i915_private { 851 856 struct drm_device *dev; 852 857 struct kmem_cache *slab; ··· 926 919 927 920 /* overlay */ 928 921 struct intel_overlay *overlay; 929 - bool sprite_scaling_enabled; 922 + unsigned int sprite_scaling_enabled; 930 923 931 924 /* LVDS info */ 932 925 int backlight_level; /* restore backlight to this value */ ··· 974 967 975 968 unsigned long quirks; 976 969 977 - /* Register state */ 978 - bool modeset_on_lid; 970 + enum modeset_restore modeset_restore; 971 + struct mutex modeset_restore_lock; 979 972 980 973 struct i915_gtt gtt; 981 974 ··· 1040 1033 bool hw_contexts_disabled; 1041 1034 uint32_t hw_context_size; 1042 1035 1043 - bool fdi_rx_polarity_reversed; 1036 + u32 fdi_rx_config; 1044 1037 1045 1038 struct i915_suspend_saved_registers regfile; 1046 1039 ··· 1215 1208 1216 1209 /** for phy allocated objects */ 1217 1210 struct drm_i915_gem_phys_object *phys_obj; 1218 - 1219 - /** 1220 - * Number of crtcs where this object is currently the fb, but 1221 - * will be page flipped away on the next vblank. When it 1222 - * reaches 0, dev_priv->pending_flip_queue will be woken up. 1223 - */ 1224 - atomic_t pending_flip; 1225 1211 }; 1226 1212 #define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base) 1227 1213
+42 -20
drivers/gpu/drm/i915/i915_gem.c
··· 3021 3021 if (obj->pages == NULL) 3022 3022 return; 3023 3023 3024 + /* 3025 + * Stolen memory is always coherent with the GPU as it is explicitly 3026 + * marked as wc by the system, or the system is cache-coherent. 3027 + */ 3028 + if (obj->stolen) 3029 + return; 3030 + 3024 3031 /* If the GPU is snooping the contents of the CPU cache, 3025 3032 * we do not need to manually clear the CPU cache lines. However, 3026 3033 * the caches are only snooped when the render cache is ··· 3872 3865 u32 misccpctl; 3873 3866 int i; 3874 3867 3875 - if (!IS_IVYBRIDGE(dev)) 3868 + if (!HAS_L3_GPU_CACHE(dev)) 3876 3869 return; 3877 3870 3878 3871 if (!dev_priv->l3_parity.remap_info) ··· 3937 3930 return true; 3938 3931 } 3939 3932 3940 - int 3941 - i915_gem_init_hw(struct drm_device *dev) 3933 + static int i915_gem_init_rings(struct drm_device *dev) 3942 3934 { 3943 - drm_i915_private_t *dev_priv = dev->dev_private; 3935 + struct drm_i915_private *dev_priv = dev->dev_private; 3944 3936 int ret; 3945 - 3946 - if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) 3947 - return -EIO; 3948 - 3949 - if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1)) 3950 - I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000); 3951 - 3952 - i915_gem_l3_remap(dev); 3953 - 3954 - i915_gem_init_swizzling(dev); 3955 3937 3956 3938 ret = intel_init_render_ring_buffer(dev); 3957 3939 if (ret) ··· 3960 3964 3961 3965 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000)); 3962 3966 if (ret) 3967 + goto cleanup_blt_ring; 3968 + 3969 + return 0; 3970 + 3971 + cleanup_blt_ring: 3972 + intel_cleanup_ring_buffer(&dev_priv->ring[BCS]); 3973 + cleanup_bsd_ring: 3974 + intel_cleanup_ring_buffer(&dev_priv->ring[VCS]); 3975 + cleanup_render_ring: 3976 + intel_cleanup_ring_buffer(&dev_priv->ring[RCS]); 3977 + 3978 + return ret; 3979 + } 3980 + 3981 + int 3982 + i915_gem_init_hw(struct drm_device *dev) 3983 + { 3984 + drm_i915_private_t *dev_priv = dev->dev_private; 3985 + int ret; 3986 + 3987 + if (INTEL_INFO(dev)->gen < 6 && 
!intel_enable_gtt()) 3988 + return -EIO; 3989 + 3990 + if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1)) 3991 + I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000); 3992 + 3993 + i915_gem_l3_remap(dev); 3994 + 3995 + i915_gem_init_swizzling(dev); 3996 + 3997 + ret = i915_gem_init_rings(dev); 3998 + if (ret) 3963 3999 return ret; 3964 4000 3965 4001 /* ··· 4002 3974 i915_gem_init_ppgtt(dev); 4003 3975 4004 3976 return 0; 4005 - 4006 - cleanup_bsd_ring: 4007 - intel_cleanup_ring_buffer(&dev_priv->ring[VCS]); 4008 - cleanup_render_ring: 4009 - intel_cleanup_ring_buffer(&dev_priv->ring[RCS]); 4010 - return ret; 4011 3977 } 4012 3978 4013 3979 int i915_gem_init(struct drm_device *dev)
+2 -10
drivers/gpu/drm/i915/i915_gem_context.c
··· 126 126 127 127 static void do_destroy(struct i915_hw_context *ctx) 128 128 { 129 - struct drm_device *dev = ctx->obj->base.dev; 130 - struct drm_i915_private *dev_priv = dev->dev_private; 131 - 132 129 if (ctx->file_priv) 133 130 idr_remove(&ctx->file_priv->context_idr, ctx->id); 134 - else 135 - BUG_ON(ctx != dev_priv->ring[RCS].default_context); 136 131 137 132 drm_gem_object_unreference(&ctx->obj->base); 138 133 kfree(ctx); ··· 237 242 void i915_gem_context_init(struct drm_device *dev) 238 243 { 239 244 struct drm_i915_private *dev_priv = dev->dev_private; 240 - uint32_t ctx_size; 241 245 242 246 if (!HAS_HW_CONTEXTS(dev)) { 243 247 dev_priv->hw_contexts_disabled = true; ··· 248 254 dev_priv->ring[RCS].default_context) 249 255 return; 250 256 251 - ctx_size = get_context_size(dev); 252 - dev_priv->hw_context_size = get_context_size(dev); 253 - dev_priv->hw_context_size = round_up(dev_priv->hw_context_size, 4096); 257 + dev_priv->hw_context_size = round_up(get_context_size(dev), 4096); 254 258 255 - if (ctx_size <= 0 || ctx_size > (1<<20)) { 259 + if (dev_priv->hw_context_size > (1<<20)) { 256 260 dev_priv->hw_contexts_disabled = true; 257 261 return; 258 262 }
+14 -9
drivers/gpu/drm/i915/i915_gem_gtt.c
··· 725 725 726 726 static int gen6_gmch_probe(struct drm_device *dev, 727 727 size_t *gtt_total, 728 - size_t *stolen) 728 + size_t *stolen, 729 + phys_addr_t *mappable_base, 730 + unsigned long *mappable_end) 729 731 { 730 732 struct drm_i915_private *dev_priv = dev->dev_private; 731 733 phys_addr_t gtt_bus_addr; ··· 735 733 u16 snb_gmch_ctl; 736 734 int ret; 737 735 736 + *mappable_base = pci_resource_start(dev->pdev, 2); 737 + *mappable_end = pci_resource_len(dev->pdev, 2); 738 + 738 739 /* 64/512MB is the current min/max we actually know of, but this is just 739 740 * a coarse sanity check. 740 741 */ 741 - if ((dev_priv->gtt.mappable_end < (64<<20) || 742 - (dev_priv->gtt.mappable_end > (512<<20)))) { 742 + if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) { 743 743 DRM_ERROR("Unknown GMADR size (%lx)\n", 744 744 dev_priv->gtt.mappable_end); 745 745 return -ENXIO; ··· 786 782 787 783 static int i915_gmch_probe(struct drm_device *dev, 788 784 size_t *gtt_total, 789 - size_t *stolen) 785 + size_t *stolen, 786 + phys_addr_t *mappable_base, 787 + unsigned long *mappable_end) 790 788 { 791 789 struct drm_i915_private *dev_priv = dev->dev_private; 792 790 int ret; ··· 799 793 return -EIO; 800 794 } 801 795 802 - intel_gtt_get(gtt_total, stolen); 796 + intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end); 803 797 804 798 dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev); 805 799 dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range; ··· 820 814 unsigned long gtt_size; 821 815 int ret; 822 816 823 - gtt->mappable_base = pci_resource_start(dev->pdev, 2); 824 - gtt->mappable_end = pci_resource_len(dev->pdev, 2); 825 - 826 817 if (INTEL_INFO(dev)->gen <= 5) { 827 818 dev_priv->gtt.gtt_probe = i915_gmch_probe; 828 819 dev_priv->gtt.gtt_remove = i915_gmch_remove; ··· 829 826 } 830 827 831 828 ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total, 832 - &dev_priv->gtt.stolen_size); 829 + &dev_priv->gtt.stolen_size, 830 + 
&gtt->mappable_base, 831 + &gtt->mappable_end); 833 832 if (ret) 834 833 return ret; 835 834
+43 -61
drivers/gpu/drm/i915/i915_irq.c
··· 1924 1924 * This register is the same on all known PCH chips. 1925 1925 */ 1926 1926 1927 - static void ironlake_enable_pch_hotplug(struct drm_device *dev) 1927 + static void ibx_enable_hotplug(struct drm_device *dev) 1928 1928 { 1929 1929 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1930 1930 u32 hotplug; ··· 1937 1937 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 1938 1938 } 1939 1939 1940 + static void ibx_irq_postinstall(struct drm_device *dev) 1941 + { 1942 + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1943 + u32 mask; 1944 + 1945 + if (HAS_PCH_IBX(dev)) 1946 + mask = SDE_HOTPLUG_MASK | 1947 + SDE_GMBUS | 1948 + SDE_AUX_MASK; 1949 + else 1950 + mask = SDE_HOTPLUG_MASK_CPT | 1951 + SDE_GMBUS_CPT | 1952 + SDE_AUX_MASK_CPT; 1953 + 1954 + I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 1955 + I915_WRITE(SDEIMR, ~mask); 1956 + I915_WRITE(SDEIER, mask); 1957 + POSTING_READ(SDEIER); 1958 + 1959 + ibx_enable_hotplug(dev); 1960 + } 1961 + 1940 1962 static int ironlake_irq_postinstall(struct drm_device *dev) 1941 1963 { 1942 1964 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; ··· 1967 1945 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 1968 1946 DE_AUX_CHANNEL_A; 1969 1947 u32 render_irqs; 1970 - u32 hotplug_mask; 1971 - u32 pch_irq_mask; 1972 1948 1973 1949 dev_priv->irq_mask = ~display_mask; 1974 1950 ··· 1994 1974 I915_WRITE(GTIER, render_irqs); 1995 1975 POSTING_READ(GTIER); 1996 1976 1997 - if (HAS_PCH_CPT(dev)) { 1998 - hotplug_mask = (SDE_CRT_HOTPLUG_CPT | 1999 - SDE_PORTB_HOTPLUG_CPT | 2000 - SDE_PORTC_HOTPLUG_CPT | 2001 - SDE_PORTD_HOTPLUG_CPT | 2002 - SDE_GMBUS_CPT | 2003 - SDE_AUX_MASK_CPT); 2004 - } else { 2005 - hotplug_mask = (SDE_CRT_HOTPLUG | 2006 - SDE_PORTB_HOTPLUG | 2007 - SDE_PORTC_HOTPLUG | 2008 - SDE_PORTD_HOTPLUG | 2009 - SDE_GMBUS | 2010 - SDE_AUX_MASK); 2011 - } 2012 - 2013 - pch_irq_mask = ~hotplug_mask; 2014 - 2015 - I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2016 - 
I915_WRITE(SDEIMR, pch_irq_mask); 2017 - I915_WRITE(SDEIER, hotplug_mask); 2018 - POSTING_READ(SDEIER); 2019 - 2020 - ironlake_enable_pch_hotplug(dev); 1977 + ibx_irq_postinstall(dev); 2021 1978 2022 1979 if (IS_IRONLAKE_M(dev)) { 2023 1980 /* Clear & enable PCU event interrupts */ ··· 2017 2020 DE_PLANEA_FLIP_DONE_IVB | 2018 2021 DE_AUX_CHANNEL_A_IVB; 2019 2022 u32 render_irqs; 2020 - u32 hotplug_mask; 2021 - u32 pch_irq_mask; 2022 2023 2023 2024 dev_priv->irq_mask = ~display_mask; 2024 2025 ··· 2040 2045 I915_WRITE(GTIER, render_irqs); 2041 2046 POSTING_READ(GTIER); 2042 2047 2043 - hotplug_mask = (SDE_CRT_HOTPLUG_CPT | 2044 - SDE_PORTB_HOTPLUG_CPT | 2045 - SDE_PORTC_HOTPLUG_CPT | 2046 - SDE_PORTD_HOTPLUG_CPT | 2047 - SDE_GMBUS_CPT | 2048 - SDE_AUX_MASK_CPT); 2049 - pch_irq_mask = ~hotplug_mask; 2050 - 2051 - I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2052 - I915_WRITE(SDEIMR, pch_irq_mask); 2053 - I915_WRITE(SDEIER, hotplug_mask); 2054 - POSTING_READ(SDEIER); 2055 - 2056 - ironlake_enable_pch_hotplug(dev); 2048 + ibx_irq_postinstall(dev); 2057 2049 2058 2050 return 0; 2059 2051 } ··· 2119 2137 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 2120 2138 2121 2139 /* Note HDMI and DP share bits */ 2122 - if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) 2123 - hotplug_en |= HDMIB_HOTPLUG_INT_EN; 2124 - if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) 2125 - hotplug_en |= HDMIC_HOTPLUG_INT_EN; 2126 - if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) 2127 - hotplug_en |= HDMID_HOTPLUG_INT_EN; 2140 + if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS) 2141 + hotplug_en |= PORTB_HOTPLUG_INT_EN; 2142 + if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS) 2143 + hotplug_en |= PORTC_HOTPLUG_INT_EN; 2144 + if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS) 2145 + hotplug_en |= PORTD_HOTPLUG_INT_EN; 2128 2146 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915) 2129 2147 hotplug_en 
|= SDVOC_HOTPLUG_INT_EN; 2130 2148 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915) ··· 2390 2408 if (I915_HAS_HOTPLUG(dev)) { 2391 2409 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 2392 2410 2393 - if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) 2394 - hotplug_en |= HDMIB_HOTPLUG_INT_EN; 2395 - if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) 2396 - hotplug_en |= HDMIC_HOTPLUG_INT_EN; 2397 - if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) 2398 - hotplug_en |= HDMID_HOTPLUG_INT_EN; 2411 + if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS) 2412 + hotplug_en |= PORTB_HOTPLUG_INT_EN; 2413 + if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS) 2414 + hotplug_en |= PORTC_HOTPLUG_INT_EN; 2415 + if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS) 2416 + hotplug_en |= PORTD_HOTPLUG_INT_EN; 2399 2417 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915) 2400 2418 hotplug_en |= SDVOC_HOTPLUG_INT_EN; 2401 2419 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915) ··· 2624 2642 2625 2643 /* Note HDMI and DP share hotplug bits */ 2626 2644 hotplug_en = 0; 2627 - if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) 2628 - hotplug_en |= HDMIB_HOTPLUG_INT_EN; 2629 - if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) 2630 - hotplug_en |= HDMIC_HOTPLUG_INT_EN; 2631 - if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) 2632 - hotplug_en |= HDMID_HOTPLUG_INT_EN; 2645 + if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS) 2646 + hotplug_en |= PORTB_HOTPLUG_INT_EN; 2647 + if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS) 2648 + hotplug_en |= PORTC_HOTPLUG_INT_EN; 2649 + if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS) 2650 + hotplug_en |= PORTD_HOTPLUG_INT_EN; 2633 2651 if (IS_G4X(dev)) { 2634 2652 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X) 2635 
2653 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+69 -23
drivers/gpu/drm/i915/i915_reg.h
··· 308 308 #define DISPLAY_PLANE_A (0<<20) 309 309 #define DISPLAY_PLANE_B (1<<20) 310 310 #define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2)) 311 + #define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */ 311 312 #define PIPE_CONTROL_CS_STALL (1<<20) 312 313 #define PIPE_CONTROL_TLB_INVALIDATE (1<<18) 313 314 #define PIPE_CONTROL_QW_WRITE (1<<14) ··· 1236 1235 #define MAD_DIMM_A_SIZE_SHIFT 0 1237 1236 #define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT) 1238 1237 1238 + /** snb MCH registers for priority tuning */ 1239 + #define MCH_SSKPD (MCHBAR_MIRROR_BASE_SNB + 0x5d10) 1240 + #define MCH_SSKPD_WM0_MASK 0x3f 1241 + #define MCH_SSKPD_WM0_VAL 0xc 1239 1242 1240 1243 /* Clocking configuration register */ 1241 1244 #define CLKCFG 0x10c00 ··· 1630 1625 1631 1626 /* Hotplug control (945+ only) */ 1632 1627 #define PORT_HOTPLUG_EN (dev_priv->info->display_mmio_offset + 0x61110) 1633 - #define HDMIB_HOTPLUG_INT_EN (1 << 29) 1634 - #define DPB_HOTPLUG_INT_EN (1 << 29) 1635 - #define HDMIC_HOTPLUG_INT_EN (1 << 28) 1636 - #define DPC_HOTPLUG_INT_EN (1 << 28) 1637 - #define HDMID_HOTPLUG_INT_EN (1 << 27) 1638 - #define DPD_HOTPLUG_INT_EN (1 << 27) 1628 + #define PORTB_HOTPLUG_INT_EN (1 << 29) 1629 + #define PORTC_HOTPLUG_INT_EN (1 << 28) 1630 + #define PORTD_HOTPLUG_INT_EN (1 << 27) 1639 1631 #define SDVOB_HOTPLUG_INT_EN (1 << 26) 1640 1632 #define SDVOC_HOTPLUG_INT_EN (1 << 25) 1641 1633 #define TV_HOTPLUG_INT_EN (1 << 18) ··· 1655 1653 1656 1654 #define PORT_HOTPLUG_STAT (dev_priv->info->display_mmio_offset + 0x61114) 1657 1655 /* HDMI/DP bits are gen4+ */ 1658 - #define DPB_HOTPLUG_LIVE_STATUS (1 << 29) 1659 - #define DPC_HOTPLUG_LIVE_STATUS (1 << 28) 1660 - #define DPD_HOTPLUG_LIVE_STATUS (1 << 27) 1661 - #define DPD_HOTPLUG_INT_STATUS (3 << 21) 1662 - #define DPC_HOTPLUG_INT_STATUS (3 << 19) 1663 - #define DPB_HOTPLUG_INT_STATUS (3 << 17) 1664 - /* HDMI bits are shared with the DP bits */ 1665 - #define HDMIB_HOTPLUG_LIVE_STATUS (1 << 29) 
1666 - #define HDMIC_HOTPLUG_LIVE_STATUS (1 << 28) 1667 - #define HDMID_HOTPLUG_LIVE_STATUS (1 << 27) 1668 - #define HDMID_HOTPLUG_INT_STATUS (3 << 21) 1669 - #define HDMIC_HOTPLUG_INT_STATUS (3 << 19) 1670 - #define HDMIB_HOTPLUG_INT_STATUS (3 << 17) 1656 + #define PORTB_HOTPLUG_LIVE_STATUS (1 << 29) 1657 + #define PORTC_HOTPLUG_LIVE_STATUS (1 << 28) 1658 + #define PORTD_HOTPLUG_LIVE_STATUS (1 << 27) 1659 + #define PORTD_HOTPLUG_INT_STATUS (3 << 21) 1660 + #define PORTC_HOTPLUG_INT_STATUS (3 << 19) 1661 + #define PORTB_HOTPLUG_INT_STATUS (3 << 17) 1671 1662 /* CRT/TV common between gen3+ */ 1672 1663 #define CRT_HOTPLUG_INT_STATUS (1 << 11) 1673 1664 #define TV_HOTPLUG_INT_STATUS (1 << 10) ··· 2949 2954 #define CURSOR_ENABLE 0x80000000 2950 2955 #define CURSOR_GAMMA_ENABLE 0x40000000 2951 2956 #define CURSOR_STRIDE_MASK 0x30000000 2957 + #define CURSOR_PIPE_CSC_ENABLE (1<<24) 2952 2958 #define CURSOR_FORMAT_SHIFT 24 2953 2959 #define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT) 2954 2960 #define CURSOR_FORMAT_2C (0x00 << CURSOR_FORMAT_SHIFT) ··· 3011 3015 #define DISPPLANE_RGBA888 (0xf<<26) 3012 3016 #define DISPPLANE_STEREO_ENABLE (1<<25) 3013 3017 #define DISPPLANE_STEREO_DISABLE 0 3018 + #define DISPPLANE_PIPE_CSC_ENABLE (1<<24) 3014 3019 #define DISPPLANE_SEL_PIPE_SHIFT 24 3015 3020 #define DISPPLANE_SEL_PIPE_MASK (3<<DISPPLANE_SEL_PIPE_SHIFT) 3016 3021 #define DISPPLANE_SEL_PIPE_A 0 ··· 3100 3103 #define DVS_FORMAT_RGBX101010 (1<<25) 3101 3104 #define DVS_FORMAT_RGBX888 (2<<25) 3102 3105 #define DVS_FORMAT_RGBX161616 (3<<25) 3106 + #define DVS_PIPE_CSC_ENABLE (1<<24) 3103 3107 #define DVS_SOURCE_KEY (1<<22) 3104 3108 #define DVS_RGB_ORDER_XBGR (1<<20) 3105 3109 #define DVS_YUV_BYTE_ORDER_MASK (3<<16) ··· 3168 3170 #define SPRITE_FORMAT_RGBX161616 (3<<25) 3169 3171 #define SPRITE_FORMAT_YUV444 (4<<25) 3170 3172 #define SPRITE_FORMAT_XR_BGR101010 (5<<25) /* Extended range */ 3171 - #define SPRITE_CSC_ENABLE (1<<24) 3173 + #define SPRITE_PIPE_CSC_ENABLE 
(1<<24) 3172 3174 #define SPRITE_SOURCE_KEY (1<<22) 3173 3175 #define SPRITE_RGB_ORDER_RGBX (1<<20) /* only for 888 and 161616 */ 3174 3176 #define SPRITE_YUV_TO_RGB_CSC_DISABLE (1<<19) ··· 3915 3917 #define FDI_10BPC (1<<16) 3916 3918 #define FDI_6BPC (2<<16) 3917 3919 #define FDI_12BPC (3<<16) 3918 - #define FDI_LINK_REVERSE_OVERWRITE (1<<15) 3920 + #define FDI_RX_LINK_REVERSAL_OVERRIDE (1<<15) 3919 3921 #define FDI_DMI_LINK_REVERSE_MASK (1<<14) 3920 3922 #define FDI_RX_PLL_ENABLE (1<<13) 3921 3923 #define FDI_FS_ERR_CORRECT_ENABLE (1<<11) ··· 4270 4272 #define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9 4271 4273 #define GEN6_PCODE_WRITE_RC6VIDS 0x4 4272 4274 #define GEN6_PCODE_READ_RC6VIDS 0x5 4273 - #define GEN6_ENCODE_RC6_VID(mv) (((mv) / 5) - 245) < 0 ?: 0 4274 - #define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) > 0 ? ((vids) * 5) + 245 : 0) 4275 + #define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5) 4276 + #define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245) 4275 4277 #define GEN6_PCODE_DATA 0x138128 4276 4278 #define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 4277 4279 ··· 4514 4516 #define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */ 4515 4517 #define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */ 4516 4518 #define DDI_BUF_EMP_MASK (0xf<<24) 4519 + #define DDI_BUF_PORT_REVERSAL (1<<16) 4517 4520 #define DDI_BUF_IS_IDLE (1<<7) 4518 4521 #define DDI_A_4_LANES (1<<4) 4519 4522 #define DDI_PORT_WIDTH_X1 (0<<1) ··· 4647 4648 #define WM_DBG_DISALLOW_MULTIPLE_LP (1<<0) 4648 4649 #define WM_DBG_DISALLOW_MAXFIFO (1<<1) 4649 4650 #define WM_DBG_DISALLOW_SPRITE (1<<2) 4651 + 4652 + /* pipe CSC */ 4653 + #define _PIPE_A_CSC_COEFF_RY_GY 0x49010 4654 + #define _PIPE_A_CSC_COEFF_BY 0x49014 4655 + #define _PIPE_A_CSC_COEFF_RU_GU 0x49018 4656 + #define _PIPE_A_CSC_COEFF_BU 0x4901c 4657 + #define _PIPE_A_CSC_COEFF_RV_GV 0x49020 4658 + #define _PIPE_A_CSC_COEFF_BV 0x49024 4659 + #define _PIPE_A_CSC_MODE 0x49028 4660 + #define _PIPE_A_CSC_PREOFF_HI 0x49030 4661 + #define _PIPE_A_CSC_PREOFF_ME 
0x49034 4662 + #define _PIPE_A_CSC_PREOFF_LO 0x49038 4663 + #define _PIPE_A_CSC_POSTOFF_HI 0x49040 4664 + #define _PIPE_A_CSC_POSTOFF_ME 0x49044 4665 + #define _PIPE_A_CSC_POSTOFF_LO 0x49048 4666 + 4667 + #define _PIPE_B_CSC_COEFF_RY_GY 0x49110 4668 + #define _PIPE_B_CSC_COEFF_BY 0x49114 4669 + #define _PIPE_B_CSC_COEFF_RU_GU 0x49118 4670 + #define _PIPE_B_CSC_COEFF_BU 0x4911c 4671 + #define _PIPE_B_CSC_COEFF_RV_GV 0x49120 4672 + #define _PIPE_B_CSC_COEFF_BV 0x49124 4673 + #define _PIPE_B_CSC_MODE 0x49128 4674 + #define _PIPE_B_CSC_PREOFF_HI 0x49130 4675 + #define _PIPE_B_CSC_PREOFF_ME 0x49134 4676 + #define _PIPE_B_CSC_PREOFF_LO 0x49138 4677 + #define _PIPE_B_CSC_POSTOFF_HI 0x49140 4678 + #define _PIPE_B_CSC_POSTOFF_ME 0x49144 4679 + #define _PIPE_B_CSC_POSTOFF_LO 0x49148 4680 + 4681 + #define CSC_BLACK_SCREEN_OFFSET (1 << 2) 4682 + #define CSC_POSITION_BEFORE_GAMMA (1 << 1) 4683 + #define CSC_MODE_YUV_TO_RGB (1 << 0) 4684 + 4685 + #define PIPE_CSC_COEFF_RY_GY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RY_GY, _PIPE_B_CSC_COEFF_RY_GY) 4686 + #define PIPE_CSC_COEFF_BY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BY, _PIPE_B_CSC_COEFF_BY) 4687 + #define PIPE_CSC_COEFF_RU_GU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RU_GU, _PIPE_B_CSC_COEFF_RU_GU) 4688 + #define PIPE_CSC_COEFF_BU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BU, _PIPE_B_CSC_COEFF_BU) 4689 + #define PIPE_CSC_COEFF_RV_GV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RV_GV, _PIPE_B_CSC_COEFF_RV_GV) 4690 + #define PIPE_CSC_COEFF_BV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BV, _PIPE_B_CSC_COEFF_BV) 4691 + #define PIPE_CSC_MODE(pipe) _PIPE(pipe, _PIPE_A_CSC_MODE, _PIPE_B_CSC_MODE) 4692 + #define PIPE_CSC_PREOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_HI, _PIPE_B_CSC_PREOFF_HI) 4693 + #define PIPE_CSC_PREOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_ME, _PIPE_B_CSC_PREOFF_ME) 4694 + #define PIPE_CSC_PREOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_LO, _PIPE_B_CSC_PREOFF_LO) 4695 + #define PIPE_CSC_POSTOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_HI, 
_PIPE_B_CSC_POSTOFF_HI) 4696 + #define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME) 4697 + #define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO) 4650 4698 4651 4699 #endif /* _I915_REG_H_ */
+9 -5
drivers/gpu/drm/i915/intel_crt.c
··· 800 800 dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; 801 801 802 802 /* 803 - * TODO: find a proper way to discover whether we need to set the 804 - * polarity reversal bit or not, instead of relying on the BIOS. 803 + * TODO: find a proper way to discover whether we need to set the the 804 + * polarity and link reversal bits or not, instead of relying on the 805 + * BIOS. 805 806 */ 806 - if (HAS_PCH_LPT(dev)) 807 - dev_priv->fdi_rx_polarity_reversed = 808 - !!(I915_READ(_FDI_RXA_CTL) & FDI_RX_POLARITY_REVERSED_LPT); 807 + if (HAS_PCH_LPT(dev)) { 808 + u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT | 809 + FDI_RX_LINK_REVERSAL_OVERRIDE; 810 + 811 + dev_priv->fdi_rx_config = I915_READ(_FDI_RXA_CTL) & fdi_config; 812 + } 809 813 }
+18 -7
drivers/gpu/drm/i915/intel_ddi.c
··· 180 180 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); 181 181 182 182 /* Enable the PCH Receiver FDI PLL */ 183 - rx_ctl_val = FDI_RX_PLL_ENABLE | FDI_RX_ENHANCE_FRAME_ENABLE | 184 - ((intel_crtc->fdi_lanes - 1) << 19); 185 - if (dev_priv->fdi_rx_polarity_reversed) 186 - rx_ctl_val |= FDI_RX_POLARITY_REVERSED_LPT; 183 + rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE | 184 + FDI_RX_PLL_ENABLE | ((intel_crtc->fdi_lanes - 1) << 19); 187 185 I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); 188 186 POSTING_READ(_FDI_RXA_CTL); 189 187 udelay(220); ··· 203 205 DP_TP_CTL_LINK_TRAIN_PAT1 | 204 206 DP_TP_CTL_ENABLE); 205 207 206 - /* Configure and enable DDI_BUF_CTL for DDI E with next voltage */ 208 + /* Configure and enable DDI_BUF_CTL for DDI E with next voltage. 209 + * DDI E does not support port reversal, the functionality is 210 + * achieved on the PCH side in FDI_RX_CTL, so no need to set the 211 + * port reversal bit */ 207 212 I915_WRITE(DDI_BUF_CTL(PORT_E), 208 213 DDI_BUF_CTL_ENABLE | 209 214 ((intel_crtc->fdi_lanes - 1) << 1) | ··· 681 680 intel_crtc->eld_vld = false; 682 681 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { 683 682 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 683 + struct intel_digital_port *intel_dig_port = 684 + enc_to_dig_port(encoder); 684 685 685 - intel_dp->DP = DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW; 686 + intel_dp->DP = intel_dig_port->port_reversal | 687 + DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW; 686 688 switch (intel_dp->lane_count) { 687 689 case 1: 688 690 intel_dp->DP |= DDI_PORT_WIDTH_X1; ··· 1308 1304 uint32_t tmp; 1309 1305 1310 1306 if (type == INTEL_OUTPUT_HDMI) { 1307 + struct intel_digital_port *intel_dig_port = 1308 + enc_to_dig_port(encoder); 1309 + 1311 1310 /* In HDMI/DVI mode, the port width, and swing/emphasis values 1312 1311 * are ignored so nothing special needs to be done besides 1313 1312 * enabling the port. 
1314 1313 */ 1315 - I915_WRITE(DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE); 1314 + I915_WRITE(DDI_BUF_CTL(port), 1315 + intel_dig_port->port_reversal | DDI_BUF_CTL_ENABLE); 1316 1316 } else if (type == INTEL_OUTPUT_EDP) { 1317 1317 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1318 1318 ··· 1493 1485 1494 1486 void intel_ddi_init(struct drm_device *dev, enum port port) 1495 1487 { 1488 + struct drm_i915_private *dev_priv = dev->dev_private; 1496 1489 struct intel_digital_port *intel_dig_port; 1497 1490 struct intel_encoder *intel_encoder; 1498 1491 struct drm_encoder *encoder; ··· 1534 1525 intel_encoder->get_hw_state = intel_ddi_get_hw_state; 1535 1526 1536 1527 intel_dig_port->port = port; 1528 + intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) & 1529 + DDI_BUF_PORT_REVERSAL; 1537 1530 if (hdmi_connector) 1538 1531 intel_dig_port->hdmi.sdvox_reg = DDI_BUF_CTL(port); 1539 1532 else
+95 -13
drivers/gpu/drm/i915/intel_display.c
··· 154 154 .vco = { .min = 1400000, .max = 2800000 }, 155 155 .n = { .min = 1, .max = 6 }, 156 156 .m = { .min = 70, .max = 120 }, 157 - .m1 = { .min = 10, .max = 22 }, 158 - .m2 = { .min = 5, .max = 9 }, 157 + .m1 = { .min = 8, .max = 18 }, 158 + .m2 = { .min = 3, .max = 7 }, 159 159 .p = { .min = 5, .max = 80 }, 160 160 .p1 = { .min = 1, .max = 8 }, 161 161 .p2 = { .dot_limit = 200000, ··· 168 168 .vco = { .min = 1400000, .max = 2800000 }, 169 169 .n = { .min = 1, .max = 6 }, 170 170 .m = { .min = 70, .max = 120 }, 171 - .m1 = { .min = 10, .max = 22 }, 172 - .m2 = { .min = 5, .max = 9 }, 171 + .m1 = { .min = 8, .max = 18 }, 172 + .m2 = { .min = 3, .max = 7 }, 173 173 .p = { .min = 7, .max = 98 }, 174 174 .p1 = { .min = 1, .max = 8 }, 175 175 .p2 = { .dot_limit = 112000, ··· 2226 2226 bool was_interruptible = dev_priv->mm.interruptible; 2227 2227 int ret; 2228 2228 2229 - WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue)); 2230 - 2231 - wait_event(dev_priv->pending_flip_queue, 2232 - i915_reset_in_progress(&dev_priv->gpu_error) || 2233 - atomic_read(&obj->pending_flip) == 0); 2234 - 2235 2229 /* Big Hammer, we also need to ensure that any pending 2236 2230 * MI_WAIT_FOR_EVENT inside a user batch buffer on the 2237 2231 * current scanout is retired before unpinning the old ··· 2868 2874 { 2869 2875 struct drm_device *dev = crtc->dev; 2870 2876 struct drm_i915_private *dev_priv = dev->dev_private; 2877 + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2871 2878 unsigned long flags; 2872 2879 bool pending; 2873 2880 2874 - if (i915_reset_in_progress(&dev_priv->gpu_error)) 2881 + if (i915_reset_in_progress(&dev_priv->gpu_error) || 2882 + intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) 2875 2883 return false; 2876 2884 2877 2885 spin_lock_irqsave(&dev->event_lock, flags); ··· 3611 3615 intel_update_watermarks(dev); 3612 3616 3613 3617 intel_enable_pll(dev_priv, pipe); 3618 + 3619 + for_each_encoder_on_crtc(dev, crtc, encoder) 
3620 + if (encoder->pre_enable) 3621 + encoder->pre_enable(encoder); 3622 + 3614 3623 intel_enable_pipe(dev_priv, pipe, false); 3615 3624 intel_enable_plane(dev_priv, plane, pipe); 3616 3625 ··· 3638 3637 struct intel_encoder *encoder; 3639 3638 int pipe = intel_crtc->pipe; 3640 3639 int plane = intel_crtc->plane; 3640 + u32 pctl; 3641 3641 3642 3642 3643 3643 if (!intel_crtc->active) ··· 3658 3656 3659 3657 intel_disable_plane(dev_priv, plane, pipe); 3660 3658 intel_disable_pipe(dev_priv, pipe); 3659 + 3660 + /* Disable pannel fitter if it is on this pipe. */ 3661 + pctl = I915_READ(PFIT_CONTROL); 3662 + if ((pctl & PFIT_ENABLE) && 3663 + ((pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT) == pipe) 3664 + I915_WRITE(PFIT_CONTROL, 0); 3665 + 3661 3666 intel_disable_pll(dev_priv, pipe); 3662 3667 3663 3668 intel_crtc->active = false; ··· 5118 5109 POSTING_READ(PIPECONF(pipe)); 5119 5110 } 5120 5111 5112 + /* 5113 + * Set up the pipe CSC unit. 5114 + * 5115 + * Currently only full range RGB to limited range RGB conversion 5116 + * is supported, but eventually this should handle various 5117 + * RGB<->YCbCr scenarios as well. 5118 + */ 5119 + static void intel_set_pipe_csc(struct drm_crtc *crtc, 5120 + const struct drm_display_mode *adjusted_mode) 5121 + { 5122 + struct drm_device *dev = crtc->dev; 5123 + struct drm_i915_private *dev_priv = dev->dev_private; 5124 + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5125 + int pipe = intel_crtc->pipe; 5126 + uint16_t coeff = 0x7800; /* 1.0 */ 5127 + 5128 + /* 5129 + * TODO: Check what kind of values actually come out of the pipe 5130 + * with these coeff/postoff values and adjust to get the best 5131 + * accuracy. Perhaps we even need to take the bpc value into 5132 + * consideration. 5133 + */ 5134 + 5135 + if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE) 5136 + coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... 
*/ 5137 + 5138 + /* 5139 + * GY/GU and RY/RU should be the other way around according 5140 + * to BSpec, but reality doesn't agree. Just set them up in 5141 + * a way that results in the correct picture. 5142 + */ 5143 + I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16); 5144 + I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0); 5145 + 5146 + I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff); 5147 + I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0); 5148 + 5149 + I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0); 5150 + I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16); 5151 + 5152 + I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0); 5153 + I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0); 5154 + I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0); 5155 + 5156 + if (INTEL_INFO(dev)->gen > 6) { 5157 + uint16_t postoff = 0; 5158 + 5159 + if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE) 5160 + postoff = (16 * (1 << 13) / 255) & 0x1fff; 5161 + 5162 + I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff); 5163 + I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff); 5164 + I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff); 5165 + 5166 + I915_WRITE(PIPE_CSC_MODE(pipe), 0); 5167 + } else { 5168 + uint32_t mode = CSC_MODE_YUV_TO_RGB; 5169 + 5170 + if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE) 5171 + mode |= CSC_BLACK_SCREEN_OFFSET; 5172 + 5173 + I915_WRITE(PIPE_CSC_MODE(pipe), mode); 5174 + } 5175 + } 5176 + 5121 5177 static void haswell_set_pipeconf(struct drm_crtc *crtc, 5122 5178 struct drm_display_mode *adjusted_mode, 5123 5179 bool dither) ··· 5771 5697 5772 5698 haswell_set_pipeconf(crtc, adjusted_mode, dither); 5773 5699 5700 + intel_set_pipe_csc(crtc, adjusted_mode); 5701 + 5774 5702 /* Set up the display plane register */ 5775 - I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE); 5703 + I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE); 5776 5704 POSTING_READ(DSPCNTR(plane)); 5777 5705 5778 5706 ret = intel_pipe_set_base(crtc, x, y, fb); ··· 6179 6103 cntl &= ~(CURSOR_MODE | 
MCURSOR_GAMMA_ENABLE); 6180 6104 cntl |= CURSOR_MODE_DISABLE; 6181 6105 } 6106 + if (IS_HASWELL(dev)) 6107 + cntl |= CURSOR_PIPE_CSC_ENABLE; 6182 6108 I915_WRITE(CURCNTR_IVB(pipe), cntl); 6183 6109 6184 6110 intel_crtc->cursor_visible = visible; ··· 7313 7235 work->enable_stall_check = true; 7314 7236 7315 7237 atomic_inc(&intel_crtc->unpin_work_count); 7238 + intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); 7316 7239 7317 7240 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj); 7318 7241 if (ret) ··· 7955 7876 struct intel_encoder *encoder; 7956 7877 int count, ro; 7957 7878 7958 - /* The upper layers ensure that we either disabl a crtc or have a list 7879 + /* The upper layers ensure that we either disable a crtc or have a list 7959 7880 * of connectors. For paranoia, double-check this. */ 7960 7881 WARN_ON(!set->fb && (set->num_connectors != 0)); 7961 7882 WARN_ON(set->fb && (set->num_connectors == 0)); ··· 8734 8655 8735 8656 /* Acer/Packard Bell NCL20 */ 8736 8657 { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness }, 8658 + 8659 + /* Acer Aspire 4736Z */ 8660 + { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, 8737 8661 }; 8738 8662 8739 8663 static void intel_init_quirks(struct drm_device *dev)
+6 -6
drivers/gpu/drm/i915/intel_dp.c
··· 2302 2302 2303 2303 switch (intel_dig_port->port) { 2304 2304 case PORT_B: 2305 - bit = DPB_HOTPLUG_LIVE_STATUS; 2305 + bit = PORTB_HOTPLUG_LIVE_STATUS; 2306 2306 break; 2307 2307 case PORT_C: 2308 - bit = DPC_HOTPLUG_LIVE_STATUS; 2308 + bit = PORTC_HOTPLUG_LIVE_STATUS; 2309 2309 break; 2310 2310 case PORT_D: 2311 - bit = DPD_HOTPLUG_LIVE_STATUS; 2311 + bit = PORTD_HOTPLUG_LIVE_STATUS; 2312 2312 break; 2313 2313 default: 2314 2314 return connector_status_unknown; ··· 2837 2837 name = "DPDDC-A"; 2838 2838 break; 2839 2839 case PORT_B: 2840 - dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS; 2840 + dev_priv->hotplug_supported_mask |= PORTB_HOTPLUG_INT_STATUS; 2841 2841 name = "DPDDC-B"; 2842 2842 break; 2843 2843 case PORT_C: 2844 - dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS; 2844 + dev_priv->hotplug_supported_mask |= PORTC_HOTPLUG_INT_STATUS; 2845 2845 name = "DPDDC-C"; 2846 2846 break; 2847 2847 case PORT_D: 2848 - dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS; 2848 + dev_priv->hotplug_supported_mask |= PORTD_HOTPLUG_INT_STATUS; 2849 2849 name = "DPDDC-D"; 2850 2850 break; 2851 2851 default:
+4
drivers/gpu/drm/i915/intel_drv.h
··· 235 235 /* We can share PLLs across outputs if the timings match */ 236 236 struct intel_pch_pll *pch_pll; 237 237 uint32_t ddi_pll_sel; 238 + 239 + /* reset counter value when the last flip was submitted */ 240 + unsigned int reset_counter; 238 241 }; 239 242 240 243 struct intel_plane { ··· 393 390 struct intel_digital_port { 394 391 struct intel_encoder base; 395 392 enum port port; 393 + u32 port_reversal; 396 394 struct intel_dp dp; 397 395 struct intel_hdmi hdmi; 398 396 };
+5 -5
drivers/gpu/drm/i915/intel_hdmi.c
··· 802 802 803 803 switch (intel_dig_port->port) { 804 804 case PORT_B: 805 - bit = HDMIB_HOTPLUG_LIVE_STATUS; 805 + bit = PORTB_HOTPLUG_LIVE_STATUS; 806 806 break; 807 807 case PORT_C: 808 - bit = HDMIC_HOTPLUG_LIVE_STATUS; 808 + bit = PORTC_HOTPLUG_LIVE_STATUS; 809 809 break; 810 810 default: 811 811 bit = 0; ··· 1021 1021 switch (port) { 1022 1022 case PORT_B: 1023 1023 intel_hdmi->ddc_bus = GMBUS_PORT_DPB; 1024 - dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; 1024 + dev_priv->hotplug_supported_mask |= PORTB_HOTPLUG_INT_STATUS; 1025 1025 break; 1026 1026 case PORT_C: 1027 1027 intel_hdmi->ddc_bus = GMBUS_PORT_DPC; 1028 - dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; 1028 + dev_priv->hotplug_supported_mask |= PORTC_HOTPLUG_INT_STATUS; 1029 1029 break; 1030 1030 case PORT_D: 1031 1031 intel_hdmi->ddc_bus = GMBUS_PORT_DPD; 1032 - dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; 1032 + dev_priv->hotplug_supported_mask |= PORTD_HOTPLUG_INT_STATUS; 1033 1033 break; 1034 1034 case PORT_A: 1035 1035 /* Internal port only for eDP. */
+44 -36
drivers/gpu/drm/i915/intel_lvds.c
··· 51 51 52 52 u32 pfit_control; 53 53 u32 pfit_pgm_ratios; 54 - bool pfit_dirty; 55 54 bool is_dual_link; 56 55 u32 reg; 57 56 ··· 150 151 I915_WRITE(lvds_encoder->reg, temp); 151 152 } 152 153 154 + static void intel_pre_enable_lvds(struct intel_encoder *encoder) 155 + { 156 + struct drm_device *dev = encoder->base.dev; 157 + struct intel_lvds_encoder *enc = to_lvds_encoder(&encoder->base); 158 + struct drm_i915_private *dev_priv = dev->dev_private; 159 + 160 + if (HAS_PCH_SPLIT(dev) || !enc->pfit_control) 161 + return; 162 + 163 + /* 164 + * Enable automatic panel scaling so that non-native modes 165 + * fill the screen. The panel fitter should only be 166 + * adjusted whilst the pipe is disabled, according to 167 + * register description and PRM. 168 + */ 169 + DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", 170 + enc->pfit_control, 171 + enc->pfit_pgm_ratios); 172 + 173 + I915_WRITE(PFIT_PGM_RATIOS, enc->pfit_pgm_ratios); 174 + I915_WRITE(PFIT_CONTROL, enc->pfit_control); 175 + } 176 + 153 177 /** 154 178 * Sets the power state for the panel. 155 179 */ ··· 193 171 } 194 172 195 173 I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) | LVDS_PORT_EN); 196 - 197 - if (lvds_encoder->pfit_dirty) { 198 - /* 199 - * Enable automatic panel scaling so that non-native modes 200 - * fill the screen. The panel fitter should only be 201 - * adjusted whilst the pipe is disabled, according to 202 - * register description and PRM. 
203 - */ 204 - DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", 205 - lvds_encoder->pfit_control, 206 - lvds_encoder->pfit_pgm_ratios); 207 - 208 - I915_WRITE(PFIT_PGM_RATIOS, lvds_encoder->pfit_pgm_ratios); 209 - I915_WRITE(PFIT_CONTROL, lvds_encoder->pfit_control); 210 - lvds_encoder->pfit_dirty = false; 211 - } 212 174 213 175 I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); 214 176 POSTING_READ(lvds_encoder->reg); ··· 222 216 I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); 223 217 if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000)) 224 218 DRM_ERROR("timed out waiting for panel to power off\n"); 225 - 226 - if (lvds_encoder->pfit_control) { 227 - I915_WRITE(PFIT_CONTROL, 0); 228 - lvds_encoder->pfit_dirty = true; 229 - } 230 219 231 220 I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN); 232 221 POSTING_READ(lvds_encoder->reg); ··· 462 461 pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) { 463 462 lvds_encoder->pfit_control = pfit_control; 464 463 lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios; 465 - lvds_encoder->pfit_dirty = true; 466 464 } 467 465 dev_priv->lvds_border_bits = border; 468 466 ··· 547 547 }; 548 548 549 549 /* 550 - * Lid events. Note the use of 'modeset_on_lid': 551 - * - we set it on lid close, and reset it on open 550 + * Lid events. Note the use of 'modeset': 551 + * - we set it to MODESET_ON_LID_OPEN on lid close, 552 + * and set it to MODESET_DONE on open 552 553 * - we use it as a "only once" bit (ie we ignore 553 - * duplicate events where it was already properly 554 - * set/reset) 555 - * - the suspend/resume paths will also set it to 556 - * zero, since they restore the mode ("lid open"). 554 + * duplicate events where it was already properly set) 555 + * - the suspend/resume paths will set it to 556 + * MODESET_SUSPENDED and ignore the lid open event, 557 + * because they restore the mode ("lid open"). 
557 558 */ 558 559 static int intel_lid_notify(struct notifier_block *nb, unsigned long val, 559 560 void *unused) ··· 568 567 if (dev->switch_power_state != DRM_SWITCH_POWER_ON) 569 568 return NOTIFY_OK; 570 569 570 + mutex_lock(&dev_priv->modeset_restore_lock); 571 + if (dev_priv->modeset_restore == MODESET_SUSPENDED) 572 + goto exit; 571 573 /* 572 574 * check and update the status of LVDS connector after receiving 573 575 * the LID nofication event. ··· 579 575 580 576 /* Don't force modeset on machines where it causes a GPU lockup */ 581 577 if (dmi_check_system(intel_no_modeset_on_lid)) 582 - return NOTIFY_OK; 578 + goto exit; 583 579 if (!acpi_lid_open()) { 584 - dev_priv->modeset_on_lid = 1; 585 - return NOTIFY_OK; 580 + /* do modeset on next lid open event */ 581 + dev_priv->modeset_restore = MODESET_ON_LID_OPEN; 582 + goto exit; 586 583 } 587 584 588 - if (!dev_priv->modeset_on_lid) 589 - return NOTIFY_OK; 590 - 591 - dev_priv->modeset_on_lid = 0; 585 + if (dev_priv->modeset_restore == MODESET_DONE) 586 + goto exit; 592 587 593 588 drm_modeset_lock_all(dev); 594 589 intel_modeset_setup_hw_state(dev, true); 595 590 drm_modeset_unlock_all(dev); 596 591 592 + dev_priv->modeset_restore = MODESET_DONE; 593 + 594 + exit: 595 + mutex_unlock(&dev_priv->modeset_restore_lock); 597 596 return NOTIFY_OK; 598 597 } 599 598 ··· 1100 1093 DRM_MODE_ENCODER_LVDS); 1101 1094 1102 1095 intel_encoder->enable = intel_enable_lvds; 1096 + intel_encoder->pre_enable = intel_pre_enable_lvds; 1103 1097 intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds; 1104 1098 intel_encoder->disable = intel_disable_lvds; 1105 1099 intel_encoder->get_hw_state = intel_lvds_get_hw_state;
+8 -5
drivers/gpu/drm/i915/intel_panel.c
··· 321 321 if (dev_priv->backlight_level == 0) 322 322 dev_priv->backlight_level = intel_panel_get_max_backlight(dev); 323 323 324 + dev_priv->backlight_enabled = true; 325 + intel_panel_actually_set_backlight(dev, dev_priv->backlight_level); 326 + 324 327 if (INTEL_INFO(dev)->gen >= 4) { 325 328 uint32_t reg, tmp; 326 329 ··· 359 356 } 360 357 361 358 set_level: 362 - /* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1. 363 - * BLC_PWM_CPU_CTL may be cleared to zero automatically when these 364 - * registers are set. 359 + /* Check the current backlight level and try to set again if it's zero. 360 + * On some machines, BLC_PWM_CPU_CTL is cleared to zero automatically 361 + * when BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1 are written. 365 362 */ 366 - dev_priv->backlight_enabled = true; 367 - intel_panel_actually_set_backlight(dev, dev_priv->backlight_level); 363 + if (!intel_panel_get_backlight(dev)) 364 + intel_panel_actually_set_backlight(dev, dev_priv->backlight_level); 368 365 } 369 366 370 367 static void intel_panel_init_backlight(struct drm_device *dev)
+17 -1
drivers/gpu/drm/i915/intel_pm.c
··· 2289 2289 i915_gem_object_unpin(ctx); 2290 2290 err_unref: 2291 2291 drm_gem_object_unreference(&ctx->base); 2292 - mutex_unlock(&dev->struct_mutex); 2293 2292 return NULL; 2294 2293 } 2295 2294 ··· 3583 3584 } 3584 3585 } 3585 3586 3587 + static void gen6_check_mch_setup(struct drm_device *dev) 3588 + { 3589 + struct drm_i915_private *dev_priv = dev->dev_private; 3590 + uint32_t tmp; 3591 + 3592 + tmp = I915_READ(MCH_SSKPD); 3593 + if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) { 3594 + DRM_INFO("Wrong MCH_SSKPD value: 0x%08x\n", tmp); 3595 + DRM_INFO("This can cause pipe underruns and display issues.\n"); 3596 + DRM_INFO("Please upgrade your BIOS to fix this.\n"); 3597 + } 3598 + } 3599 + 3586 3600 static void gen6_init_clock_gating(struct drm_device *dev) 3587 3601 { 3588 3602 struct drm_i915_private *dev_priv = dev->dev_private; ··· 3688 3676 I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI)); 3689 3677 3690 3678 cpt_init_clock_gating(dev); 3679 + 3680 + gen6_check_mch_setup(dev); 3691 3681 } 3692 3682 3693 3683 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) ··· 3875 3861 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 3876 3862 3877 3863 cpt_init_clock_gating(dev); 3864 + 3865 + gen6_check_mch_setup(dev); 3878 3866 } 3879 3867 3880 3868 static void valleyview_init_clock_gating(struct drm_device *dev)
+5 -1
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 318 318 * TLB invalidate requires a post-sync write. 319 319 */ 320 320 flags |= PIPE_CONTROL_QW_WRITE; 321 + flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; 321 322 322 323 /* Workaround: we must issue a pipe_control with CS-stall bit 323 324 * set before a pipe_control command that has the state cache ··· 332 331 333 332 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); 334 333 intel_ring_emit(ring, flags); 335 - intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); 334 + intel_ring_emit(ring, scratch_addr); 336 335 intel_ring_emit(ring, 0); 337 336 intel_ring_advance(ring); 338 337 ··· 467 466 pc->cpu_page = kmap(sg_page(obj->pages->sgl)); 468 467 if (pc->cpu_page == NULL) 469 468 goto err_unpin; 469 + 470 + DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n", 471 + ring->name, pc->gtt_offset); 470 472 471 473 pc->obj = obj; 472 474 ring->private = pc;
+19 -11
drivers/gpu/drm/i915/intel_sprite.c
··· 50 50 u32 sprctl, sprscale = 0; 51 51 unsigned long sprsurf_offset, linear_offset; 52 52 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); 53 + bool scaling_was_enabled = dev_priv->sprite_scaling_enabled; 53 54 54 55 sprctl = I915_READ(SPRCTL(pipe)); 55 56 ··· 90 89 sprctl |= SPRITE_TRICKLE_FEED_DISABLE; 91 90 sprctl |= SPRITE_ENABLE; 92 91 92 + if (IS_HASWELL(dev)) 93 + sprctl |= SPRITE_PIPE_CSC_ENABLE; 94 + 93 95 /* Sizes are 0 based */ 94 96 src_w--; 95 97 src_h--; ··· 107 103 * when scaling is disabled. 108 104 */ 109 105 if (crtc_w != src_w || crtc_h != src_h) { 110 - if (!dev_priv->sprite_scaling_enabled) { 111 - dev_priv->sprite_scaling_enabled = true; 106 + dev_priv->sprite_scaling_enabled |= 1 << pipe; 107 + 108 + if (!scaling_was_enabled) { 112 109 intel_update_watermarks(dev); 113 110 intel_wait_for_vblank(dev, pipe); 114 111 } 115 112 sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h; 116 - } else { 117 - if (dev_priv->sprite_scaling_enabled) { 118 - dev_priv->sprite_scaling_enabled = false; 119 - /* potentially re-enable LP watermarks */ 120 - intel_update_watermarks(dev); 121 - } 122 - } 113 + } else 114 + dev_priv->sprite_scaling_enabled &= ~(1 << pipe); 123 115 124 116 I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]); 125 117 I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x); ··· 141 141 I915_WRITE(SPRCTL(pipe), sprctl); 142 142 I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset); 143 143 POSTING_READ(SPRSURF(pipe)); 144 + 145 + /* potentially re-enable LP watermarks */ 146 + if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled) 147 + intel_update_watermarks(dev); 144 148 } 145 149 146 150 static void ··· 154 150 struct drm_i915_private *dev_priv = dev->dev_private; 155 151 struct intel_plane *intel_plane = to_intel_plane(plane); 156 152 int pipe = intel_plane->pipe; 153 + bool scaling_was_enabled = dev_priv->sprite_scaling_enabled; 157 154 158 155 I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & 
~SPRITE_ENABLE); 159 156 /* Can't leave the scaler enabled... */ ··· 164 159 I915_MODIFY_DISPBASE(SPRSURF(pipe), 0); 165 160 POSTING_READ(SPRSURF(pipe)); 166 161 167 - dev_priv->sprite_scaling_enabled = false; 168 - intel_update_watermarks(dev); 162 + dev_priv->sprite_scaling_enabled &= ~(1 << pipe); 163 + 164 + /* potentially re-enable LP watermarks */ 165 + if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled) 166 + intel_update_watermarks(dev); 169 167 } 170 168 171 169 static int
+7 -1
drivers/iommu/intel-iommu.c
··· 4253 4253 { 4254 4254 /* 4255 4255 * Mobile 4 Series Chipset neglects to set RWBF capability, 4256 - * but needs it: 4256 + * but needs it. Same seems to hold for the desktop versions. 4257 4257 */ 4258 4258 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n"); 4259 4259 rwbf_quirk = 1; 4260 4260 } 4261 4261 4262 4262 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf); 4263 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf); 4264 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf); 4265 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf); 4266 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf); 4267 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf); 4268 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf); 4263 4269 4264 4270 #define GGC 0x52 4265 4271 #define GGC_MEMORY_SIZE_MASK (0xf << 8)
+2 -1
include/drm/intel-gtt.h
··· 3 3 #ifndef _DRM_INTEL_GTT_H 4 4 #define _DRM_INTEL_GTT_H 5 5 6 - void intel_gtt_get(size_t *gtt_total, size_t *stolen_size); 6 + void intel_gtt_get(size_t *gtt_total, size_t *stolen_size, 7 + phys_addr_t *mappable_base, unsigned long *mappable_end); 7 8 8 9 int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, 9 10 struct agp_bridge_data *bridge);