Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-intel-fixes-2013-07-03' of git://people.freedesktop.org/~danvet/drm-intel into drm-next

Pile of fixes for 3.11. A bit large in patch count, but that's simply due
to two fixes being split up into really small parts. Also I've included a
few more vlv patches than I'd have included for other platforms. But since
vlv is officially supported for the first time only in 3.11 that shouldn't
result in unbearable risks.

Highlights:
- ghost eDP fixes for hsw from Paulo
- fix PCH detection in virtualized environments (Rui Guo)
- duct-tape dma sg construction when swiotlb is in use (Konrad), dupe with
a patch in your drm-fixes branch
- fix sdvo hotplug on i965g
- tune down a bunch of dmesg ERRORs which can be hit under normal
conditions
- detect invalid pitches for tiled scanout buffers (Chris)
- a pile of vlv fixes from Ville: rps improvements, fixes for the dpll
LPF, fixup the sprite mmio offsets
- fix context size on hsw (Ben)
- locking fixes for the hotplug code, specifically the storm handling
- fix get_config on CPT (Xiong Zhang)
- Fix the domain tracking when an unlocked seqno wait was interrupted
(Chris), this seems to explain tons of little corruption bugs in the
ddx. Chris also added a nice igt to exercise this.
- work around stack-corrupting vsnprintf in our error state dumper

* tag 'drm-intel-fixes-2013-07-03' of git://people.freedesktop.org/~danvet/drm-intel: (39 commits)
drm/i915: Don't try to tear down the stolen drm_mm if it's not there
drm/i915: Break up the large vsnprintf() in print_error_buffers()
drm/i915: Refactor the wait_rendering completion into a common routine
drm/i915: Only clear write-domains after a successful wait-seqno
drm/i915: correct intel_dp_get_config() function for DevCPT
drm/i915: fix hpd interrupt register locking
drm/i915: fold the no-irq check into intel_hpd_irq_handler
drm/i915: fold the queue_work into intel_hpd_irq_handler
drm/i915: fold the hpd_irq_setup call into intel_hpd_irq_handler
drm/i915: s/hotplug_irq_storm_detect/intel_hpd_irq_handler/
drm/i915: close tiny race in the ilk pcu even interrupt setup
drm/i915: fix locking around ironlake_enable|disable_display_irq
drm/i915: Fix context sizes on HSW
drm/i915: Fix VLV sprite register offsets
Revert "drm/i915: Don't use the HDMI port color range bit on Valleyview"
drm/i915: s/LFP/LPF in DPIO PLL register names
drm/i915: Fix VLV PLL LPF coefficients for DAC
drm/i915: Jump to at least RPe on VLV when increasing the GPU frequency
drm/i915: Don't increase the GPU frequency from the delayed VLV rps timer
drm/i915: GEN6_RP_INTERRUPT_LIMITS doesn't seem to exist on VLV
...

+497 -321
+84 -35
drivers/gpu/drm/i915/i915_debugfs.c
··· 647 647 return purgeable ? " purgeable" : ""; 648 648 } 649 649 650 - static void i915_error_vprintf(struct drm_i915_error_state_buf *e, 651 - const char *f, va_list args) 650 + static bool __i915_error_ok(struct drm_i915_error_state_buf *e) 652 651 { 653 - unsigned len; 654 652 655 653 if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) { 656 654 e->err = -ENOSPC; 657 - return; 655 + return false; 658 656 } 659 657 660 658 if (e->bytes == e->size - 1 || e->err) 661 - return; 659 + return false; 662 660 663 - /* Seek the first printf which is hits start position */ 664 - if (e->pos < e->start) { 665 - len = vsnprintf(NULL, 0, f, args); 666 - if (e->pos + len <= e->start) { 667 - e->pos += len; 668 - return; 669 - } 661 + return true; 662 + } 670 663 671 - /* First vsnprintf needs to fit in full for memmove*/ 672 - if (len >= e->size) { 673 - e->err = -EIO; 674 - return; 675 - } 664 + static bool __i915_error_seek(struct drm_i915_error_state_buf *e, 665 + unsigned len) 666 + { 667 + if (e->pos + len <= e->start) { 668 + e->pos += len; 669 + return false; 676 670 } 677 671 678 - len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args); 679 - if (len >= e->size - e->bytes) 680 - len = e->size - e->bytes - 1; 672 + /* First vsnprintf needs to fit in its entirety for memmove */ 673 + if (len >= e->size) { 674 + e->err = -EIO; 675 + return false; 676 + } 681 677 678 + return true; 679 + } 680 + 681 + static void __i915_error_advance(struct drm_i915_error_state_buf *e, 682 + unsigned len) 683 + { 682 684 /* If this is first printf in this window, adjust it so that 683 685 * start position matches start of the buffer 684 686 */ 687 + 685 688 if (e->pos < e->start) { 686 689 const size_t off = e->start - e->pos; 687 690 ··· 704 701 e->pos += len; 705 702 } 706 703 704 + static void i915_error_vprintf(struct drm_i915_error_state_buf *e, 705 + const char *f, va_list args) 706 + { 707 + unsigned len; 708 + 709 + if (!__i915_error_ok(e)) 710 + return; 711 + 
712 + /* Seek the first printf which is hits start position */ 713 + if (e->pos < e->start) { 714 + len = vsnprintf(NULL, 0, f, args); 715 + if (!__i915_error_seek(e, len)) 716 + return; 717 + } 718 + 719 + len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args); 720 + if (len >= e->size - e->bytes) 721 + len = e->size - e->bytes - 1; 722 + 723 + __i915_error_advance(e, len); 724 + } 725 + 726 + static void i915_error_puts(struct drm_i915_error_state_buf *e, 727 + const char *str) 728 + { 729 + unsigned len; 730 + 731 + if (!__i915_error_ok(e)) 732 + return; 733 + 734 + len = strlen(str); 735 + 736 + /* Seek the first printf which is hits start position */ 737 + if (e->pos < e->start) { 738 + if (!__i915_error_seek(e, len)) 739 + return; 740 + } 741 + 742 + if (len >= e->size - e->bytes) 743 + len = e->size - e->bytes - 1; 744 + memcpy(e->buf + e->bytes, str, len); 745 + 746 + __i915_error_advance(e, len); 747 + } 748 + 707 749 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...) 708 750 { 709 751 va_list args; ··· 759 711 } 760 712 761 713 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__) 714 + #define err_puts(e, s) i915_error_puts(e, s) 762 715 763 716 static void print_error_buffers(struct drm_i915_error_state_buf *m, 764 717 const char *name, ··· 769 720 err_printf(m, "%s [%d]:\n", name, count); 770 721 771 722 while (count--) { 772 - err_printf(m, " %08x %8u %02x %02x %x %x%s%s%s%s%s%s%s", 723 + err_printf(m, " %08x %8u %02x %02x %x %x", 773 724 err->gtt_offset, 774 725 err->size, 775 726 err->read_domains, 776 727 err->write_domain, 777 - err->rseqno, err->wseqno, 778 - pin_flag(err->pinned), 779 - tiling_flag(err->tiling), 780 - dirty_flag(err->dirty), 781 - purgeable_flag(err->purgeable), 782 - err->ring != -1 ? 
" " : "", 783 - ring_str(err->ring), 784 - cache_level_str(err->cache_level)); 728 + err->rseqno, err->wseqno); 729 + err_puts(m, pin_flag(err->pinned)); 730 + err_puts(m, tiling_flag(err->tiling)); 731 + err_puts(m, dirty_flag(err->dirty)); 732 + err_puts(m, purgeable_flag(err->purgeable)); 733 + err_puts(m, err->ring != -1 ? " " : ""); 734 + err_puts(m, ring_str(err->ring)); 735 + err_puts(m, cache_level_str(err->cache_level)); 785 736 786 737 if (err->name) 787 738 err_printf(m, " (name: %d)", err->name); 788 739 if (err->fence_reg != I915_FENCE_REG_NONE) 789 740 err_printf(m, " (fence: %d)", err->fence_reg); 790 741 791 - err_printf(m, "\n"); 742 + err_puts(m, "\n"); 792 743 err++; 793 744 } 794 745 } ··· 1532 1483 struct drm_device *dev = node->minor->dev; 1533 1484 struct drm_i915_private *dev_priv = dev->dev_private; 1534 1485 1535 - if (!IS_ULT(dev)) { 1486 + if (!HAS_IPS(dev)) { 1536 1487 seq_puts(m, "not supported\n"); 1537 1488 return 0; 1538 1489 } ··· 1911 1862 seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n", 1912 1863 vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_B)); 1913 1864 1914 - seq_printf(m, "DPIO_LFP_COEFF_A: 0x%08x\n", 1915 - vlv_dpio_read(dev_priv, _DPIO_LFP_COEFF_A)); 1916 - seq_printf(m, "DPIO_LFP_COEFF_B: 0x%08x\n", 1917 - vlv_dpio_read(dev_priv, _DPIO_LFP_COEFF_B)); 1865 + seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n", 1866 + vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_A)); 1867 + seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n", 1868 + vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_B)); 1918 1869 1919 1870 seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n", 1920 1871 vlv_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
+16 -2
drivers/gpu/drm/i915/i915_drv.c
··· 465 465 * make graphics device passthrough work easy for VMM, that only 466 466 * need to expose ISA bridge to let driver know the real hardware 467 467 * underneath. This is a requirement from virtualization team. 468 + * 469 + * In some virtualized environments (e.g. XEN), there is irrelevant 470 + * ISA bridge in the system. To work reliably, we should scan trhough 471 + * all the ISA bridge devices and check for the first match, instead 472 + * of only checking the first one. 468 473 */ 469 474 pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); 470 - if (pch) { 475 + while (pch) { 476 + struct pci_dev *curr = pch; 471 477 if (pch->vendor == PCI_VENDOR_ID_INTEL) { 472 478 unsigned short id; 473 479 id = pch->device & INTEL_PCH_DEVICE_ID_MASK; ··· 502 496 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); 503 497 WARN_ON(!IS_HASWELL(dev)); 504 498 WARN_ON(!IS_ULT(dev)); 499 + } else { 500 + goto check_next; 505 501 } 502 + pci_dev_put(pch); 503 + break; 506 504 } 507 - pci_dev_put(pch); 505 + check_next: 506 + pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr); 507 + pci_dev_put(curr); 508 508 } 509 + if (!pch) 510 + DRM_DEBUG_KMS("No PCH found?\n"); 509 511 } 510 512 511 513 bool i915_semaphore_is_enabled(struct drm_device *dev)
+2
drivers/gpu/drm/i915/i915_drv.h
··· 1474 1474 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) 1475 1475 #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) 1476 1476 1477 + #define HAS_IPS(dev) (IS_ULT(dev)) 1478 + 1477 1479 #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) 1478 1480 1479 1481 #define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
+36 -28
drivers/gpu/drm/i915/i915_gem.c
··· 1087 1087 interruptible, NULL); 1088 1088 } 1089 1089 1090 + static int 1091 + i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj, 1092 + struct intel_ring_buffer *ring) 1093 + { 1094 + i915_gem_retire_requests_ring(ring); 1095 + 1096 + /* Manually manage the write flush as we may have not yet 1097 + * retired the buffer. 1098 + * 1099 + * Note that the last_write_seqno is always the earlier of 1100 + * the two (read/write) seqno, so if we haved successfully waited, 1101 + * we know we have passed the last write. 1102 + */ 1103 + obj->last_write_seqno = 0; 1104 + obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; 1105 + 1106 + return 0; 1107 + } 1108 + 1090 1109 /** 1091 1110 * Ensures that all rendering to the object has completed and the object is 1092 1111 * safe to unbind from the GTT or access from the CPU. ··· 1126 1107 if (ret) 1127 1108 return ret; 1128 1109 1129 - i915_gem_retire_requests_ring(ring); 1130 - 1131 - /* Manually manage the write flush as we may have not yet 1132 - * retired the buffer. 1133 - */ 1134 - if (obj->last_write_seqno && 1135 - i915_seqno_passed(seqno, obj->last_write_seqno)) { 1136 - obj->last_write_seqno = 0; 1137 - obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; 1138 - } 1139 - 1140 - return 0; 1110 + return i915_gem_object_wait_rendering__tail(obj, ring); 1141 1111 } 1142 1112 1143 1113 /* A nonblocking variant of the above wait. This is a highly dangerous routine ··· 1162 1154 mutex_unlock(&dev->struct_mutex); 1163 1155 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL); 1164 1156 mutex_lock(&dev->struct_mutex); 1157 + if (ret) 1158 + return ret; 1165 1159 1166 - i915_gem_retire_requests_ring(ring); 1167 - 1168 - /* Manually manage the write flush as we may have not yet 1169 - * retired the buffer. 
1170 - */ 1171 - if (obj->last_write_seqno && 1172 - i915_seqno_passed(seqno, obj->last_write_seqno)) { 1173 - obj->last_write_seqno = 0; 1174 - obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; 1175 - } 1176 - 1177 - return ret; 1160 + return i915_gem_object_wait_rendering__tail(obj, ring); 1178 1161 } 1179 1162 1180 1163 /** ··· 1801 1802 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD; 1802 1803 gfp &= ~(__GFP_IO | __GFP_WAIT); 1803 1804 } 1804 - 1805 + #ifdef CONFIG_SWIOTLB 1806 + if (swiotlb_nr_tbl()) { 1807 + st->nents++; 1808 + sg_set_page(sg, page, PAGE_SIZE, 0); 1809 + sg = sg_next(sg); 1810 + continue; 1811 + } 1812 + #endif 1805 1813 if (!i || page_to_pfn(page) != last_pfn + 1) { 1806 1814 if (i) 1807 1815 sg = sg_next(sg); ··· 1819 1813 } 1820 1814 last_pfn = page_to_pfn(page); 1821 1815 } 1822 - 1823 - sg_mark_end(sg); 1816 + #ifdef CONFIG_SWIOTLB 1817 + if (!swiotlb_nr_tbl()) 1818 + #endif 1819 + sg_mark_end(sg); 1824 1820 obj->pages = st; 1825 1821 1826 1822 if (i915_gem_object_needs_bit17_swizzle(obj)) ··· 3111 3103 * before evicting everything in a vain attempt to find space. 3112 3104 */ 3113 3105 if (obj->base.size > gtt_max) { 3114 - DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%ld\n", 3106 + DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n", 3115 3107 obj->base.size, 3116 3108 map_and_fenceable ? "mappable" : "total", 3117 3109 gtt_max);
+1 -1
drivers/gpu/drm/i915/i915_gem_context.c
··· 113 113 case 7: 114 114 reg = I915_READ(GEN7_CXT_SIZE); 115 115 if (IS_HASWELL(dev)) 116 - ret = HSW_CXT_TOTAL_SIZE(reg) * 64; 116 + ret = HSW_CXT_TOTAL_SIZE; 117 117 else 118 118 ret = GEN7_CXT_TOTAL_SIZE(reg) * 64; 119 119 break;
+6 -3
drivers/gpu/drm/i915/i915_gem_stolen.c
··· 147 147 { 148 148 struct drm_i915_private *dev_priv = dev->dev_private; 149 149 150 - if (dev_priv->mm.stolen_base == 0) 150 + if (!drm_mm_initialized(&dev_priv->mm.stolen)) 151 151 return -ENODEV; 152 152 153 153 if (size < dev_priv->cfb_size) ··· 178 178 void i915_gem_cleanup_stolen(struct drm_device *dev) 179 179 { 180 180 struct drm_i915_private *dev_priv = dev->dev_private; 181 + 182 + if (!drm_mm_initialized(&dev_priv->mm.stolen)) 183 + return; 181 184 182 185 i915_gem_stolen_cleanup_compression(dev); 183 186 drm_mm_takedown(&dev_priv->mm.stolen); ··· 303 300 struct drm_i915_gem_object *obj; 304 301 struct drm_mm_node *stolen; 305 302 306 - if (dev_priv->mm.stolen_base == 0) 303 + if (!drm_mm_initialized(&dev_priv->mm.stolen)) 307 304 return NULL; 308 305 309 306 DRM_DEBUG_KMS("creating stolen object: size=%x\n", size); ··· 334 331 struct drm_i915_gem_object *obj; 335 332 struct drm_mm_node *stolen; 336 333 337 - if (dev_priv->mm.stolen_base == 0) 334 + if (!drm_mm_initialized(&dev_priv->mm.stolen)) 338 335 return NULL; 339 336 340 337 DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
+75 -62
drivers/gpu/drm/i915/i915_irq.c
··· 70 70 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 71 71 }; 72 72 73 - static const u32 hpd_status_i965[] = { 74 - [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 75 - [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I965, 76 - [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I965, 77 - [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 78 - [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 79 - [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 80 - }; 81 - 82 73 static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */ 83 74 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 84 75 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, ··· 79 88 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 80 89 }; 81 90 82 - static void ibx_hpd_irq_setup(struct drm_device *dev); 83 - static void i915_hpd_irq_setup(struct drm_device *dev); 84 - 85 91 /* For display hotplug interrupt */ 86 92 static void 87 93 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 88 94 { 95 + assert_spin_locked(&dev_priv->irq_lock); 96 + 89 97 if ((dev_priv->irq_mask & mask) != 0) { 90 98 dev_priv->irq_mask &= ~mask; 91 99 I915_WRITE(DEIMR, dev_priv->irq_mask); ··· 95 105 static void 96 106 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 97 107 { 108 + assert_spin_locked(&dev_priv->irq_lock); 109 + 98 110 if ((dev_priv->irq_mask & mask) != mask) { 99 111 dev_priv->irq_mask |= mask; 100 112 I915_WRITE(DEIMR, dev_priv->irq_mask); ··· 109 117 struct drm_i915_private *dev_priv = dev->dev_private; 110 118 struct intel_crtc *crtc; 111 119 enum pipe pipe; 120 + 121 + assert_spin_locked(&dev_priv->irq_lock); 112 122 113 123 for_each_pipe(pipe) { 114 124 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); ··· 702 708 703 709 mutex_lock(&dev_priv->rps.hw_lock); 704 710 705 - if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) 711 + if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 706 712 new_delay = dev_priv->rps.cur_delay + 1; 707 - else 713 + 714 + /* 715 + * For better performance, jump directly 716 + * to RPe if we're below it. 
717 + */ 718 + if (IS_VALLEYVIEW(dev_priv->dev) && 719 + dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay) 720 + new_delay = dev_priv->rps.rpe_delay; 721 + } else 708 722 new_delay = dev_priv->rps.cur_delay - 1; 709 723 710 724 /* sysfs frequency interfaces may have snuck in while servicing the 711 725 * interrupt 712 726 */ 713 - if (!(new_delay > dev_priv->rps.max_delay || 714 - new_delay < dev_priv->rps.min_delay)) { 727 + if (new_delay >= dev_priv->rps.min_delay && 728 + new_delay <= dev_priv->rps.max_delay) { 715 729 if (IS_VALLEYVIEW(dev_priv->dev)) 716 730 valleyview_set_rps(dev_priv->dev, new_delay); 717 731 else ··· 872 870 #define HPD_STORM_DETECT_PERIOD 1000 873 871 #define HPD_STORM_THRESHOLD 5 874 872 875 - static inline bool hotplug_irq_storm_detect(struct drm_device *dev, 876 - u32 hotplug_trigger, 877 - const u32 *hpd) 873 + static inline void intel_hpd_irq_handler(struct drm_device *dev, 874 + u32 hotplug_trigger, 875 + const u32 *hpd) 878 876 { 879 877 drm_i915_private_t *dev_priv = dev->dev_private; 880 - unsigned long irqflags; 881 878 int i; 882 - bool ret = false; 879 + bool storm_detected = false; 883 880 884 - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 881 + if (!hotplug_trigger) 882 + return; 885 883 884 + spin_lock(&dev_priv->irq_lock); 886 885 for (i = 1; i < HPD_NUM_PINS; i++) { 887 886 888 887 if (!(hpd[i] & hotplug_trigger) || ··· 900 897 dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED; 901 898 dev_priv->hpd_event_bits &= ~(1 << i); 902 899 DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i); 903 - ret = true; 900 + storm_detected = true; 904 901 } else { 905 902 dev_priv->hpd_stats[i].hpd_cnt++; 906 903 } 907 904 } 908 905 909 - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 906 + if (storm_detected) 907 + dev_priv->display.hpd_irq_setup(dev); 908 + spin_unlock(&dev_priv->irq_lock); 910 909 911 - return ret; 910 + queue_work(dev_priv->wq, 911 + &dev_priv->hotplug_work); 912 912 } 913 913 914 914 static 
void gmbus_irq_handler(struct drm_device *dev) ··· 1018 1012 1019 1013 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 1020 1014 hotplug_status); 1021 - if (hotplug_trigger) { 1022 - if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915)) 1023 - i915_hpd_irq_setup(dev); 1024 - queue_work(dev_priv->wq, 1025 - &dev_priv->hotplug_work); 1026 - } 1015 + 1016 + intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 1017 + 1027 1018 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 1028 1019 I915_READ(PORT_HOTPLUG_STAT); 1029 1020 } ··· 1046 1043 int pipe; 1047 1044 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1048 1045 1049 - if (hotplug_trigger) { 1050 - if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_ibx)) 1051 - ibx_hpd_irq_setup(dev); 1052 - queue_work(dev_priv->wq, &dev_priv->hotplug_work); 1053 - } 1046 + intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); 1047 + 1054 1048 if (pch_iir & SDE_AUDIO_POWER_MASK) { 1055 1049 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1056 1050 SDE_AUDIO_POWER_SHIFT); ··· 1148 1148 int pipe; 1149 1149 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 1150 1150 1151 - if (hotplug_trigger) { 1152 - if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_cpt)) 1153 - ibx_hpd_irq_setup(dev); 1154 - queue_work(dev_priv->wq, &dev_priv->hotplug_work); 1155 - } 1151 + intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); 1152 + 1156 1153 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 1157 1154 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 1158 1155 SDE_AUDIO_POWER_SHIFT_CPT); ··· 1215 1218 /* On Haswell, also mask ERR_INT because we don't want to risk 1216 1219 * generating "unclaimed register" interrupts from inside the interrupt 1217 1220 * handler. 
*/ 1218 - if (IS_HASWELL(dev)) 1221 + if (IS_HASWELL(dev)) { 1222 + spin_lock(&dev_priv->irq_lock); 1219 1223 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); 1224 + spin_unlock(&dev_priv->irq_lock); 1225 + } 1220 1226 1221 1227 gt_iir = I915_READ(GTIIR); 1222 1228 if (gt_iir) { ··· 1272 1272 ret = IRQ_HANDLED; 1273 1273 } 1274 1274 1275 - if (IS_HASWELL(dev) && ivb_can_enable_err_int(dev)) 1276 - ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); 1275 + if (IS_HASWELL(dev)) { 1276 + spin_lock(&dev_priv->irq_lock); 1277 + if (ivb_can_enable_err_int(dev)) 1278 + ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); 1279 + spin_unlock(&dev_priv->irq_lock); 1280 + } 1277 1281 1278 1282 I915_WRITE(DEIER, de_ier); 1279 1283 POSTING_READ(DEIER); ··· 2702 2698 2703 2699 static int ironlake_irq_postinstall(struct drm_device *dev) 2704 2700 { 2701 + unsigned long irqflags; 2702 + 2705 2703 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2706 2704 /* enable kind of interrupts always enabled */ 2707 2705 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | ··· 2717 2711 /* should always can generate irq */ 2718 2712 I915_WRITE(DEIIR, I915_READ(DEIIR)); 2719 2713 I915_WRITE(DEIMR, dev_priv->irq_mask); 2720 - I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK); 2714 + I915_WRITE(DEIER, display_mask | 2715 + DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT); 2721 2716 POSTING_READ(DEIER); 2722 2717 2723 2718 dev_priv->gt_irq_mask = ~0; ··· 2740 2733 ibx_irq_postinstall(dev); 2741 2734 2742 2735 if (IS_IRONLAKE_M(dev)) { 2743 - /* Clear & enable PCU event interrupts */ 2744 - I915_WRITE(DEIIR, DE_PCU_EVENT); 2745 - I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT); 2736 + /* Enable PCU event interrupts 2737 + * 2738 + * spinlocking not required here for correctness since interrupt 2739 + * setup is guaranteed to run in single-threaded context. But we 2740 + * need it to make the assert_spin_locked happy. 
*/ 2741 + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2746 2742 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 2743 + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2747 2744 } 2748 2745 2749 2746 return 0; ··· 3223 3212 3224 3213 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 3225 3214 hotplug_status); 3226 - if (hotplug_trigger) { 3227 - if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915)) 3228 - i915_hpd_irq_setup(dev); 3229 - queue_work(dev_priv->wq, 3230 - &dev_priv->hotplug_work); 3231 - } 3215 + 3216 + intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 3217 + 3232 3218 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 3233 3219 POSTING_READ(PORT_HOTPLUG_STAT); 3234 3220 } ··· 3377 3369 struct intel_encoder *intel_encoder; 3378 3370 u32 hotplug_en; 3379 3371 3372 + assert_spin_locked(&dev_priv->irq_lock); 3373 + 3380 3374 if (I915_HAS_HOTPLUG(dev)) { 3381 3375 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 3382 3376 hotplug_en &= ~HOTPLUG_INT_EN_MASK; ··· 3459 3449 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 3460 3450 u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ? 3461 3451 HOTPLUG_INT_STATUS_G4X : 3462 - HOTPLUG_INT_STATUS_I965); 3452 + HOTPLUG_INT_STATUS_I915); 3463 3453 3464 3454 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 3465 3455 hotplug_status); 3466 - if (hotplug_trigger) { 3467 - if (hotplug_irq_storm_detect(dev, hotplug_trigger, 3468 - IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i965)) 3469 - i915_hpd_irq_setup(dev); 3470 - queue_work(dev_priv->wq, 3471 - &dev_priv->hotplug_work); 3472 - } 3456 + 3457 + intel_hpd_irq_handler(dev, hotplug_trigger, 3458 + IS_G4X(dev) ? 
hpd_status_gen4 : hpd_status_i915); 3459 + 3473 3460 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 3474 3461 I915_READ(PORT_HOTPLUG_STAT); 3475 3462 } ··· 3662 3655 struct drm_i915_private *dev_priv = dev->dev_private; 3663 3656 struct drm_mode_config *mode_config = &dev->mode_config; 3664 3657 struct drm_connector *connector; 3658 + unsigned long irqflags; 3665 3659 int i; 3666 3660 3667 3661 for (i = 1; i < HPD_NUM_PINS; i++) { ··· 3675 3667 if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) 3676 3668 connector->polled = DRM_CONNECTOR_POLL_HPD; 3677 3669 } 3670 + 3671 + /* Interrupt setup is already guaranteed to be single-threaded, this is 3672 + * just to make the assert_spin_locked checks happy. */ 3673 + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3678 3674 if (dev_priv->display.hpd_irq_setup) 3679 3675 dev_priv->display.hpd_irq_setup(dev); 3676 + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3680 3677 }
+40 -42
drivers/gpu/drm/i915/i915_reg.h
··· 448 448 #define _DPIO_PLL_CML_B 0x806c 449 449 #define DPIO_PLL_CML(pipe) _PIPE(pipe, _DPIO_PLL_CML_A, _DPIO_PLL_CML_B) 450 450 451 - #define _DPIO_LFP_COEFF_A 0x8048 452 - #define _DPIO_LFP_COEFF_B 0x8068 453 - #define DPIO_LFP_COEFF(pipe) _PIPE(pipe, _DPIO_LFP_COEFF_A, _DPIO_LFP_COEFF_B) 451 + #define _DPIO_LPF_COEFF_A 0x8048 452 + #define _DPIO_LPF_COEFF_B 0x8068 453 + #define DPIO_LPF_COEFF(pipe) _PIPE(pipe, _DPIO_LPF_COEFF_A, _DPIO_LPF_COEFF_B) 454 454 455 455 #define DPIO_CALIBRATION 0x80ac 456 456 ··· 1718 1718 GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \ 1719 1719 GEN7_CXT_GT1_SIZE(ctx_reg) + \ 1720 1720 GEN7_CXT_VFSTATE_SIZE(ctx_reg)) 1721 - #define HSW_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 26) & 0x3f) 1722 - #define HSW_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 23) & 0x7) 1723 - #define HSW_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 15) & 0xff) 1724 - #define HSW_CXT_TOTAL_SIZE(ctx_reg) (HSW_CXT_POWER_SIZE(ctx_reg) + \ 1725 - HSW_CXT_RING_SIZE(ctx_reg) + \ 1726 - HSW_CXT_RENDER_SIZE(ctx_reg) + \ 1727 - GEN7_CXT_VFSTATE_SIZE(ctx_reg)) 1728 - 1721 + /* Haswell does have the CXT_SIZE register however it does not appear to be 1722 + * valid. Now, docs explain in dwords what is in the context object. The full 1723 + * size is 70720 bytes, however, the power context and execlist context will 1724 + * never be saved (power context is stored elsewhere, and execlists don't work 1725 + * on HSW) - so the final size is 66944 bytes, which rounds to 17 pages. 1726 + */ 1727 + #define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE) 1729 1728 1730 1729 /* 1731 1730 * Overlay regs ··· 1873 1874 /* SDVO is different across gen3/4 */ 1874 1875 #define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3) 1875 1876 #define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2) 1877 + /* 1878 + * Bspec seems to be seriously misleaded about the SDVO hpd bits on i965g/gm, 1879 + * since reality corrobates that they're the same as on gen3. But keep these 1880 + * bits here (and the comment!) 
to help any other lost wanderers back onto the 1881 + * right tracks. 1882 + */ 1876 1883 #define SDVOC_HOTPLUG_INT_STATUS_I965 (3 << 4) 1877 1884 #define SDVOB_HOTPLUG_INT_STATUS_I965 (3 << 2) 1878 1885 #define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7) ··· 1886 1881 #define HOTPLUG_INT_STATUS_G4X (CRT_HOTPLUG_INT_STATUS | \ 1887 1882 SDVOB_HOTPLUG_INT_STATUS_G4X | \ 1888 1883 SDVOC_HOTPLUG_INT_STATUS_G4X | \ 1889 - PORTB_HOTPLUG_INT_STATUS | \ 1890 - PORTC_HOTPLUG_INT_STATUS | \ 1891 - PORTD_HOTPLUG_INT_STATUS) 1892 - 1893 - #define HOTPLUG_INT_STATUS_I965 (CRT_HOTPLUG_INT_STATUS | \ 1894 - SDVOB_HOTPLUG_INT_STATUS_I965 | \ 1895 - SDVOC_HOTPLUG_INT_STATUS_I965 | \ 1896 1884 PORTB_HOTPLUG_INT_STATUS | \ 1897 1885 PORTC_HOTPLUG_INT_STATUS | \ 1898 1886 PORTD_HOTPLUG_INT_STATUS) ··· 3486 3488 #define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC) 3487 3489 #define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE) 3488 3490 3489 - #define _SPACNTR 0x72180 3491 + #define _SPACNTR (VLV_DISPLAY_BASE + 0x72180) 3490 3492 #define SP_ENABLE (1<<31) 3491 3493 #define SP_GEAMMA_ENABLE (1<<30) 3492 3494 #define SP_PIXFORMAT_MASK (0xf<<26) ··· 3505 3507 #define SP_YUV_ORDER_YVYU (2<<16) 3506 3508 #define SP_YUV_ORDER_VYUY (3<<16) 3507 3509 #define SP_TILED (1<<10) 3508 - #define _SPALINOFF 0x72184 3509 - #define _SPASTRIDE 0x72188 3510 - #define _SPAPOS 0x7218c 3511 - #define _SPASIZE 0x72190 3512 - #define _SPAKEYMINVAL 0x72194 3513 - #define _SPAKEYMSK 0x72198 3514 - #define _SPASURF 0x7219c 3515 - #define _SPAKEYMAXVAL 0x721a0 3516 - #define _SPATILEOFF 0x721a4 3517 - #define _SPACONSTALPHA 0x721a8 3518 - #define _SPAGAMC 0x721f4 3510 + #define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184) 3511 + #define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188) 3512 + #define _SPAPOS (VLV_DISPLAY_BASE + 0x7218c) 3513 + #define _SPASIZE (VLV_DISPLAY_BASE + 0x72190) 3514 + #define _SPAKEYMINVAL (VLV_DISPLAY_BASE + 0x72194) 3515 + #define _SPAKEYMSK (VLV_DISPLAY_BASE + 0x72198) 3516 + 
#define _SPASURF (VLV_DISPLAY_BASE + 0x7219c) 3517 + #define _SPAKEYMAXVAL (VLV_DISPLAY_BASE + 0x721a0) 3518 + #define _SPATILEOFF (VLV_DISPLAY_BASE + 0x721a4) 3519 + #define _SPACONSTALPHA (VLV_DISPLAY_BASE + 0x721a8) 3520 + #define _SPAGAMC (VLV_DISPLAY_BASE + 0x721f4) 3519 3521 3520 - #define _SPBCNTR 0x72280 3521 - #define _SPBLINOFF 0x72284 3522 - #define _SPBSTRIDE 0x72288 3523 - #define _SPBPOS 0x7228c 3524 - #define _SPBSIZE 0x72290 3525 - #define _SPBKEYMINVAL 0x72294 3526 - #define _SPBKEYMSK 0x72298 3527 - #define _SPBSURF 0x7229c 3528 - #define _SPBKEYMAXVAL 0x722a0 3529 - #define _SPBTILEOFF 0x722a4 3530 - #define _SPBCONSTALPHA 0x722a8 3531 - #define _SPBGAMC 0x722f4 3522 + #define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280) 3523 + #define _SPBLINOFF (VLV_DISPLAY_BASE + 0x72284) 3524 + #define _SPBSTRIDE (VLV_DISPLAY_BASE + 0x72288) 3525 + #define _SPBPOS (VLV_DISPLAY_BASE + 0x7228c) 3526 + #define _SPBSIZE (VLV_DISPLAY_BASE + 0x72290) 3527 + #define _SPBKEYMINVAL (VLV_DISPLAY_BASE + 0x72294) 3528 + #define _SPBKEYMSK (VLV_DISPLAY_BASE + 0x72298) 3529 + #define _SPBSURF (VLV_DISPLAY_BASE + 0x7229c) 3530 + #define _SPBKEYMAXVAL (VLV_DISPLAY_BASE + 0x722a0) 3531 + #define _SPBTILEOFF (VLV_DISPLAY_BASE + 0x722a4) 3532 + #define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8) 3533 + #define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4) 3532 3534 3533 3535 #define SPCNTR(pipe, plane) _PIPE(pipe * 2 + plane, _SPACNTR, _SPBCNTR) 3534 3536 #define SPLINOFF(pipe, plane) _PIPE(pipe * 2 + plane, _SPALINOFF, _SPBLINOFF)
+6 -1
drivers/gpu/drm/i915/intel_ddi.c
··· 1356 1356 intel_encoder->cloneable = false; 1357 1357 intel_encoder->hot_plug = intel_ddi_hot_plug; 1358 1358 1359 - intel_dp_init_connector(intel_dig_port, dp_connector); 1359 + if (!intel_dp_init_connector(intel_dig_port, dp_connector)) { 1360 + drm_encoder_cleanup(encoder); 1361 + kfree(intel_dig_port); 1362 + kfree(dp_connector); 1363 + return; 1364 + } 1360 1365 1361 1366 if (intel_encoder->type != INTEL_OUTPUT_EDP) { 1362 1367 hdmi_connector = kzalloc(sizeof(struct intel_connector),
+28 -10
drivers/gpu/drm/i915/intel_display.c
··· 3250 3250 /* IPS only exists on ULT machines and is tied to pipe A. */ 3251 3251 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc) 3252 3252 { 3253 - return IS_ULT(crtc->base.dev) && crtc->pipe == PIPE_A; 3253 + return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A; 3254 3254 } 3255 3255 3256 3256 static void hsw_enable_ips(struct intel_crtc *crtc) ··· 4069 4069 pipe_config->pipe_bpp = 8*3; 4070 4070 } 4071 4071 4072 - if (IS_HASWELL(dev)) 4072 + if (HAS_IPS(dev)) 4073 4073 hsw_compute_ips_config(crtc, pipe_config); 4074 4074 4075 4075 /* XXX: PCH clock sharing is done in ->mode_set, so make sure the old ··· 4404 4404 4405 4405 /* Set HBR and RBR LPF coefficients */ 4406 4406 if (crtc->config.port_clock == 162000 || 4407 + intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) || 4407 4408 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) 4408 - vlv_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 4409 + vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe), 4409 4410 0x005f0021); 4410 4411 else 4411 - vlv_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 4412 + vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe), 4412 4413 0x00d0000f); 4413 4414 4414 4415 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) || ··· 8754 8753 } 8755 8754 8756 8755 if (ret) { 8757 - DRM_ERROR("failed to set mode on [CRTC:%d], err = %d\n", 8758 - set->crtc->base.id, ret); 8756 + DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n", 8757 + set->crtc->base.id, ret); 8759 8758 fail: 8760 8759 intel_set_config_restore_state(dev, config); 8761 8760 ··· 9122 9121 struct drm_mode_fb_cmd2 *mode_cmd, 9123 9122 struct drm_i915_gem_object *obj) 9124 9123 { 9124 + int pitch_limit; 9125 9125 int ret; 9126 9126 9127 9127 if (obj->tiling_mode == I915_TILING_Y) { ··· 9136 9134 return -EINVAL; 9137 9135 } 9138 9136 9139 - /* FIXME <= Gen4 stride limits are bit unclear */ 9140 - if (mode_cmd->pitches[0] > 32768) { 9141 - DRM_DEBUG("pitch (%d) must be at less than 32768\n", 9142 - mode_cmd->pitches[0]); 
9137 + if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) { 9138 + pitch_limit = 32*1024; 9139 + } else if (INTEL_INFO(dev)->gen >= 4) { 9140 + if (obj->tiling_mode) 9141 + pitch_limit = 16*1024; 9142 + else 9143 + pitch_limit = 32*1024; 9144 + } else if (INTEL_INFO(dev)->gen >= 3) { 9145 + if (obj->tiling_mode) 9146 + pitch_limit = 8*1024; 9147 + else 9148 + pitch_limit = 16*1024; 9149 + } else 9150 + /* XXX DSPC is limited to 4k tiled */ 9151 + pitch_limit = 8*1024; 9152 + 9153 + if (mode_cmd->pitches[0] > pitch_limit) { 9154 + DRM_DEBUG("%s pitch (%d) must be at less than %d\n", 9155 + obj->tiling_mode ? "tiled" : "linear", 9156 + mode_cmd->pitches[0], pitch_limit); 9143 9157 return -EINVAL; 9144 9158 } 9145 9159
+130 -85
drivers/gpu/drm/i915/intel_dp.c
··· 1324 1324 struct intel_crtc_config *pipe_config) 1325 1325 { 1326 1326 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1327 - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 1328 1327 u32 tmp, flags = 0; 1328 + struct drm_device *dev = encoder->base.dev; 1329 + struct drm_i915_private *dev_priv = dev->dev_private; 1330 + enum port port = dp_to_dig_port(intel_dp)->port; 1331 + struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 1329 1332 1330 - tmp = I915_READ(intel_dp->output_reg); 1333 + if ((port == PORT_A) || !HAS_PCH_CPT(dev)) { 1334 + tmp = I915_READ(intel_dp->output_reg); 1335 + if (tmp & DP_SYNC_HS_HIGH) 1336 + flags |= DRM_MODE_FLAG_PHSYNC; 1337 + else 1338 + flags |= DRM_MODE_FLAG_NHSYNC; 1331 1339 1332 - if (tmp & DP_SYNC_HS_HIGH) 1333 - flags |= DRM_MODE_FLAG_PHSYNC; 1334 - else 1335 - flags |= DRM_MODE_FLAG_NHSYNC; 1340 + if (tmp & DP_SYNC_VS_HIGH) 1341 + flags |= DRM_MODE_FLAG_PVSYNC; 1342 + else 1343 + flags |= DRM_MODE_FLAG_NVSYNC; 1344 + } else { 1345 + tmp = I915_READ(TRANS_DP_CTL(crtc->pipe)); 1346 + if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH) 1347 + flags |= DRM_MODE_FLAG_PHSYNC; 1348 + else 1349 + flags |= DRM_MODE_FLAG_NHSYNC; 1336 1350 1337 - if (tmp & DP_SYNC_VS_HIGH) 1338 - flags |= DRM_MODE_FLAG_PVSYNC; 1339 - else 1340 - flags |= DRM_MODE_FLAG_NVSYNC; 1351 + if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH) 1352 + flags |= DRM_MODE_FLAG_PVSYNC; 1353 + else 1354 + flags |= DRM_MODE_FLAG_NVSYNC; 1355 + } 1341 1356 1342 1357 pipe_config->adjusted_mode.flags |= flags; 1343 1358 } ··· 2696 2681 } 2697 2682 2698 2683 static void 2699 - intel_dp_destroy(struct drm_connector *connector) 2684 + intel_dp_connector_destroy(struct drm_connector *connector) 2700 2685 { 2701 - struct intel_dp *intel_dp = intel_attached_dp(connector); 2702 2686 struct intel_connector *intel_connector = to_intel_connector(connector); 2703 2687 2704 2688 if (!IS_ERR_OR_NULL(intel_connector->edid)) 2705 2689 kfree(intel_connector->edid); 2706 
2690 2707 - if (is_edp(intel_dp)) 2691 + /* Can't call is_edp() since the encoder may have been destroyed 2692 + * already. */ 2693 + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) 2708 2694 intel_panel_fini(&intel_connector->panel); 2709 2695 2710 2696 drm_sysfs_connector_remove(connector); ··· 2739 2723 .detect = intel_dp_detect, 2740 2724 .fill_modes = drm_helper_probe_single_connector_modes, 2741 2725 .set_property = intel_dp_set_property, 2742 - .destroy = intel_dp_destroy, 2726 + .destroy = intel_dp_connector_destroy, 2743 2727 }; 2744 2728 2745 2729 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { ··· 2970 2954 I915_READ(pp_div_reg)); 2971 2955 } 2972 2956 2973 - void 2957 + static bool intel_edp_init_connector(struct intel_dp *intel_dp, 2958 + struct intel_connector *intel_connector) 2959 + { 2960 + struct drm_connector *connector = &intel_connector->base; 2961 + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2962 + struct drm_device *dev = intel_dig_port->base.base.dev; 2963 + struct drm_i915_private *dev_priv = dev->dev_private; 2964 + struct drm_display_mode *fixed_mode = NULL; 2965 + struct edp_power_seq power_seq = { 0 }; 2966 + bool has_dpcd; 2967 + struct drm_display_mode *scan; 2968 + struct edid *edid; 2969 + 2970 + if (!is_edp(intel_dp)) 2971 + return true; 2972 + 2973 + intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); 2974 + 2975 + /* Cache DPCD and EDID for edp. 
*/ 2976 + ironlake_edp_panel_vdd_on(intel_dp); 2977 + has_dpcd = intel_dp_get_dpcd(intel_dp); 2978 + ironlake_edp_panel_vdd_off(intel_dp, false); 2979 + 2980 + if (has_dpcd) { 2981 + if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) 2982 + dev_priv->no_aux_handshake = 2983 + intel_dp->dpcd[DP_MAX_DOWNSPREAD] & 2984 + DP_NO_AUX_HANDSHAKE_LINK_TRAINING; 2985 + } else { 2986 + /* if this fails, presume the device is a ghost */ 2987 + DRM_INFO("failed to retrieve link info, disabling eDP\n"); 2988 + return false; 2989 + } 2990 + 2991 + /* We now know it's not a ghost, init power sequence regs. */ 2992 + intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, 2993 + &power_seq); 2994 + 2995 + ironlake_edp_panel_vdd_on(intel_dp); 2996 + edid = drm_get_edid(connector, &intel_dp->adapter); 2997 + if (edid) { 2998 + if (drm_add_edid_modes(connector, edid)) { 2999 + drm_mode_connector_update_edid_property(connector, 3000 + edid); 3001 + drm_edid_to_eld(connector, edid); 3002 + } else { 3003 + kfree(edid); 3004 + edid = ERR_PTR(-EINVAL); 3005 + } 3006 + } else { 3007 + edid = ERR_PTR(-ENOENT); 3008 + } 3009 + intel_connector->edid = edid; 3010 + 3011 + /* prefer fixed mode from EDID if available */ 3012 + list_for_each_entry(scan, &connector->probed_modes, head) { 3013 + if ((scan->type & DRM_MODE_TYPE_PREFERRED)) { 3014 + fixed_mode = drm_mode_duplicate(dev, scan); 3015 + break; 3016 + } 3017 + } 3018 + 3019 + /* fallback to VBT if available for eDP */ 3020 + if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) { 3021 + fixed_mode = drm_mode_duplicate(dev, 3022 + dev_priv->vbt.lfp_lvds_vbt_mode); 3023 + if (fixed_mode) 3024 + fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; 3025 + } 3026 + 3027 + ironlake_edp_panel_vdd_off(intel_dp, false); 3028 + 3029 + intel_panel_init(&intel_connector->panel, fixed_mode); 3030 + intel_panel_setup_backlight(connector); 3031 + 3032 + return true; 3033 + } 3034 + 3035 + bool 2974 3036 intel_dp_init_connector(struct intel_digital_port *intel_dig_port, 
2975 3037 struct intel_connector *intel_connector) 2976 3038 { ··· 3057 2963 struct intel_encoder *intel_encoder = &intel_dig_port->base; 3058 2964 struct drm_device *dev = intel_encoder->base.dev; 3059 2965 struct drm_i915_private *dev_priv = dev->dev_private; 3060 - struct drm_display_mode *fixed_mode = NULL; 3061 - struct edp_power_seq power_seq = { 0 }; 3062 2966 enum port port = intel_dig_port->port; 3063 2967 const char *name = NULL; 3064 - int type; 2968 + int type, error; 3065 2969 3066 2970 /* Preserve the current hw state. */ 3067 2971 intel_dp->DP = I915_READ(intel_dp->output_reg); ··· 3157 3065 BUG(); 3158 3066 } 3159 3067 3160 - if (is_edp(intel_dp)) 3161 - intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); 3068 + error = intel_dp_i2c_init(intel_dp, intel_connector, name); 3069 + WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n", 3070 + error, port_name(port)); 3162 3071 3163 - intel_dp_i2c_init(intel_dp, intel_connector, name); 3164 - 3165 - /* Cache DPCD and EDID for edp. 
*/ 3166 - if (is_edp(intel_dp)) { 3167 - bool ret; 3168 - struct drm_display_mode *scan; 3169 - struct edid *edid; 3170 - 3171 - ironlake_edp_panel_vdd_on(intel_dp); 3172 - ret = intel_dp_get_dpcd(intel_dp); 3173 - ironlake_edp_panel_vdd_off(intel_dp, false); 3174 - 3175 - if (ret) { 3176 - if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) 3177 - dev_priv->no_aux_handshake = 3178 - intel_dp->dpcd[DP_MAX_DOWNSPREAD] & 3179 - DP_NO_AUX_HANDSHAKE_LINK_TRAINING; 3180 - } else { 3181 - /* if this fails, presume the device is a ghost */ 3182 - DRM_INFO("failed to retrieve link info, disabling eDP\n"); 3183 - intel_dp_encoder_destroy(&intel_encoder->base); 3184 - intel_dp_destroy(connector); 3185 - return; 3072 + if (!intel_edp_init_connector(intel_dp, intel_connector)) { 3073 + i2c_del_adapter(&intel_dp->adapter); 3074 + if (is_edp(intel_dp)) { 3075 + cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 3076 + mutex_lock(&dev->mode_config.mutex); 3077 + ironlake_panel_vdd_off_sync(intel_dp); 3078 + mutex_unlock(&dev->mode_config.mutex); 3186 3079 } 3187 - 3188 - /* We now know it's not a ghost, init power sequence regs. 
*/ 3189 - intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, 3190 - &power_seq); 3191 - 3192 - ironlake_edp_panel_vdd_on(intel_dp); 3193 - edid = drm_get_edid(connector, &intel_dp->adapter); 3194 - if (edid) { 3195 - if (drm_add_edid_modes(connector, edid)) { 3196 - drm_mode_connector_update_edid_property(connector, edid); 3197 - drm_edid_to_eld(connector, edid); 3198 - } else { 3199 - kfree(edid); 3200 - edid = ERR_PTR(-EINVAL); 3201 - } 3202 - } else { 3203 - edid = ERR_PTR(-ENOENT); 3204 - } 3205 - intel_connector->edid = edid; 3206 - 3207 - /* prefer fixed mode from EDID if available */ 3208 - list_for_each_entry(scan, &connector->probed_modes, head) { 3209 - if ((scan->type & DRM_MODE_TYPE_PREFERRED)) { 3210 - fixed_mode = drm_mode_duplicate(dev, scan); 3211 - break; 3212 - } 3213 - } 3214 - 3215 - /* fallback to VBT if available for eDP */ 3216 - if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) { 3217 - fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode); 3218 - if (fixed_mode) 3219 - fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; 3220 - } 3221 - 3222 - ironlake_edp_panel_vdd_off(intel_dp, false); 3223 - } 3224 - 3225 - if (is_edp(intel_dp)) { 3226 - intel_panel_init(&intel_connector->panel, fixed_mode); 3227 - intel_panel_setup_backlight(connector); 3080 + drm_sysfs_connector_remove(connector); 3081 + drm_connector_cleanup(connector); 3082 + return false; 3228 3083 } 3229 3084 3230 3085 intel_dp_add_properties(intel_dp, connector); ··· 3184 3145 u32 temp = I915_READ(PEG_BAND_GAP_DATA); 3185 3146 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); 3186 3147 } 3148 + 3149 + return true; 3187 3150 } 3188 3151 3189 3152 void ··· 3231 3190 intel_encoder->cloneable = false; 3232 3191 intel_encoder->hot_plug = intel_dp_hot_plug; 3233 3192 3234 - intel_dp_init_connector(intel_dig_port, intel_connector); 3193 + if (!intel_dp_init_connector(intel_dig_port, intel_connector)) { 3194 + drm_encoder_cleanup(encoder); 3195 + 
kfree(intel_dig_port); 3196 + kfree(intel_connector); 3197 + } 3235 3198 }
+3 -2
drivers/gpu/drm/i915/intel_drv.h
··· 141 141 bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe); 142 142 /* Reconstructs the equivalent mode flags for the current hardware 143 143 * state. This must be called _after_ display->get_pipe_config has 144 - * pre-filled the pipe config. */ 144 + * pre-filled the pipe config. Note that intel_encoder->base.crtc must 145 + * be set correctly before calling this function. */ 145 146 void (*get_config)(struct intel_encoder *, 146 147 struct intel_crtc_config *pipe_config); 147 148 int crtc_mask; ··· 587 586 extern bool intel_is_dual_link_lvds(struct drm_device *dev); 588 587 extern void intel_dp_init(struct drm_device *dev, int output_reg, 589 588 enum port port); 590 - extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port, 589 + extern bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port, 591 590 struct intel_connector *intel_connector); 592 591 extern void intel_dp_init_link_config(struct intel_dp *intel_dp); 593 592 extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
+1 -1
drivers/gpu/drm/i915/intel_hdmi.c
··· 602 602 u32 hdmi_val; 603 603 604 604 hdmi_val = SDVO_ENCODING_HDMI; 605 - if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev)) 605 + if (!HAS_PCH_SPLIT(dev)) 606 606 hdmi_val |= intel_hdmi->color_range; 607 607 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 608 608 hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
+4 -4
drivers/gpu/drm/i915/intel_opregion.c
··· 311 311 312 312 list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) { 313 313 if (i >= 8) { 314 - dev_printk(KERN_ERR, &dev->pdev->dev, 315 - "More than 8 outputs detected via ACPI\n"); 314 + dev_dbg(&dev->pdev->dev, 315 + "More than 8 outputs detected via ACPI\n"); 316 316 return; 317 317 } 318 318 status = ··· 338 338 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 339 339 int output_type = ACPI_OTHER_OUTPUT; 340 340 if (i >= 8) { 341 - dev_printk(KERN_ERR, &dev->pdev->dev, 342 - "More than 8 outputs in connector list\n"); 341 + dev_dbg(&dev->pdev->dev, 342 + "More than 8 outputs in connector list\n"); 343 343 return; 344 344 } 345 345 switch (connector->connector_type) {
+65 -45
drivers/gpu/drm/i915/intel_pm.c
··· 3069 3069 trace_intel_gpu_freq_change(val * 50); 3070 3070 } 3071 3071 3072 - void valleyview_set_rps(struct drm_device *dev, u8 val) 3072 + /* 3073 + * Wait until the previous freq change has completed, 3074 + * or the timeout elapsed, and then update our notion 3075 + * of the current GPU frequency. 3076 + */ 3077 + static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv) 3073 3078 { 3074 - struct drm_i915_private *dev_priv = dev->dev_private; 3075 3079 unsigned long timeout = jiffies + msecs_to_jiffies(10); 3076 - u32 limits = gen6_rps_limits(dev_priv, &val); 3077 3080 u32 pval; 3078 3081 3079 3082 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 3080 - WARN_ON(val > dev_priv->rps.max_delay); 3081 - WARN_ON(val < dev_priv->rps.min_delay); 3082 - 3083 - DRM_DEBUG_DRIVER("gpu freq request from %d to %d\n", 3084 - vlv_gpu_freq(dev_priv->mem_freq, 3085 - dev_priv->rps.cur_delay), 3086 - vlv_gpu_freq(dev_priv->mem_freq, val)); 3087 - 3088 - if (val == dev_priv->rps.cur_delay) 3089 - return; 3090 - 3091 - vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val); 3092 3083 3093 3084 do { 3094 3085 pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); ··· 3090 3099 udelay(10); 3091 3100 } while (pval & 1); 3092 3101 3093 - pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 3094 - if ((pval >> 8) != val) 3095 - DRM_DEBUG_DRIVER("punit overrode freq: %d requested, but got %d\n", 3096 - val, pval >> 8); 3102 + pval >>= 8; 3097 3103 3098 - /* Make sure we continue to get interrupts 3099 - * until we hit the minimum or maximum frequencies. 
3100 - */ 3101 - I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits); 3104 + if (pval != dev_priv->rps.cur_delay) 3105 + DRM_DEBUG_DRIVER("Punit overrode GPU freq: %d MHz (%u) requested, but got %d Mhz (%u)\n", 3106 + vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.cur_delay), 3107 + dev_priv->rps.cur_delay, 3108 + vlv_gpu_freq(dev_priv->mem_freq, pval), pval); 3102 3109 3103 - dev_priv->rps.cur_delay = pval >> 8; 3110 + dev_priv->rps.cur_delay = pval; 3111 + } 3112 + 3113 + void valleyview_set_rps(struct drm_device *dev, u8 val) 3114 + { 3115 + struct drm_i915_private *dev_priv = dev->dev_private; 3116 + 3117 + gen6_rps_limits(dev_priv, &val); 3118 + 3119 + WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 3120 + WARN_ON(val > dev_priv->rps.max_delay); 3121 + WARN_ON(val < dev_priv->rps.min_delay); 3122 + 3123 + vlv_update_rps_cur_delay(dev_priv); 3124 + 3125 + DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n", 3126 + vlv_gpu_freq(dev_priv->mem_freq, 3127 + dev_priv->rps.cur_delay), 3128 + dev_priv->rps.cur_delay, 3129 + vlv_gpu_freq(dev_priv->mem_freq, val), val); 3130 + 3131 + if (val == dev_priv->rps.cur_delay) 3132 + return; 3133 + 3134 + vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val); 3135 + 3136 + dev_priv->rps.cur_delay = val; 3104 3137 3105 3138 trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val)); 3106 3139 } ··· 3461 3446 * min freq available. 
3462 3447 */ 3463 3448 mutex_lock(&dev_priv->rps.hw_lock); 3464 - valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay); 3449 + if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay) 3450 + valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay); 3465 3451 mutex_unlock(&dev_priv->rps.hw_lock); 3466 3452 } 3467 3453 ··· 3512 3496 { 3513 3497 struct drm_i915_private *dev_priv = dev->dev_private; 3514 3498 struct intel_ring_buffer *ring; 3515 - u32 gtfifodbg, val, rpe; 3499 + u32 gtfifodbg, val; 3516 3500 int i; 3517 3501 3518 3502 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); ··· 3573 3557 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no"); 3574 3558 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); 3575 3559 3576 - DRM_DEBUG_DRIVER("current GPU freq: %d\n", 3577 - vlv_gpu_freq(dev_priv->mem_freq, (val >> 8) & 0xff)); 3578 3560 dev_priv->rps.cur_delay = (val >> 8) & 0xff; 3561 + DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n", 3562 + vlv_gpu_freq(dev_priv->mem_freq, 3563 + dev_priv->rps.cur_delay), 3564 + dev_priv->rps.cur_delay); 3579 3565 3580 3566 dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv); 3581 3567 dev_priv->rps.hw_max = dev_priv->rps.max_delay; 3582 - DRM_DEBUG_DRIVER("max GPU freq: %d\n", vlv_gpu_freq(dev_priv->mem_freq, 3583 - dev_priv->rps.max_delay)); 3568 + DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", 3569 + vlv_gpu_freq(dev_priv->mem_freq, 3570 + dev_priv->rps.max_delay), 3571 + dev_priv->rps.max_delay); 3584 3572 3585 - rpe = valleyview_rps_rpe_freq(dev_priv); 3586 - DRM_DEBUG_DRIVER("RPe GPU freq: %d\n", 3587 - vlv_gpu_freq(dev_priv->mem_freq, rpe)); 3588 - dev_priv->rps.rpe_delay = rpe; 3573 + dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv); 3574 + DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", 3575 + vlv_gpu_freq(dev_priv->mem_freq, 3576 + dev_priv->rps.rpe_delay), 3577 + dev_priv->rps.rpe_delay); 3589 3578 3590 - val = valleyview_rps_min_freq(dev_priv); 3591 - DRM_DEBUG_DRIVER("min GPU 
freq: %d\n", vlv_gpu_freq(dev_priv->mem_freq, 3592 - val)); 3593 - dev_priv->rps.min_delay = val; 3579 + dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv); 3580 + DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", 3581 + vlv_gpu_freq(dev_priv->mem_freq, 3582 + dev_priv->rps.min_delay), 3583 + dev_priv->rps.min_delay); 3594 3584 3595 - DRM_DEBUG_DRIVER("setting GPU freq to %d\n", 3596 - vlv_gpu_freq(dev_priv->mem_freq, rpe)); 3585 + DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n", 3586 + vlv_gpu_freq(dev_priv->mem_freq, 3587 + dev_priv->rps.rpe_delay), 3588 + dev_priv->rps.rpe_delay); 3597 3589 3598 3590 INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work); 3599 3591 3600 - valleyview_set_rps(dev_priv->dev, rpe); 3592 + valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay); 3601 3593 3602 3594 /* requires MSI enabled */ 3603 3595 I915_WRITE(GEN6_PMIER, GEN6_PM_RPS_EVENTS); ··· 4857 4833 /* WaDisableDopClockGating:vlv */ 4858 4834 I915_WRITE(GEN7_ROW_CHICKEN2, 4859 4835 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 4860 - 4861 - /* WaForceL3Serialization:vlv */ 4862 - I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & 4863 - ~L3SQ_URB_READ_CAM_MATCH_DISABLE); 4864 4836 4865 4837 /* This is required by WaCatErrorRejectionIssue:vlv */ 4866 4838 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,