Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-intel-next-2012-12-21' of git://people.freedesktop.org/~danvet/drm-intel into drm-next

Daniel writes:
- seqno wrap fixes and debug infrastructure from Mika Kuoppala and Chris
Wilson
- some leftover kill-agp on gen6+ patches from Ben
- hotplug improvements from Damien
- clear fb when allocated from stolen, avoids dirt on the fbcon (Chris)
- Stolen memory support from Chris Wilson, one of the many steps to get to
real fastboot support.
- Some DDI code cleanups from Paulo.
- Some refactorings around lvds and dp code.
- some random little bits & pieces

* tag 'drm-intel-next-2012-12-21' of git://people.freedesktop.org/~danvet/drm-intel: (93 commits)
drm/i915: Return the real error code from intel_set_mode()
drm/i915: Make GSM void
drm/i915: Move GSM mapping into dev_priv
drm/i915: Move even more gtt code to i915_gem_gtt
drm/i915: Make next_seqno debugs entry to use i915_gem_set_seqno
drm/i915: Introduce i915_gem_set_seqno()
drm/i915: Always clear semaphore mboxes on seqno wrap
drm/i915: Initialize hardware semaphore state on ring init
drm/i915: Introduce ring set_seqno
drm/i915: Missed conversion to gtt_pte_t
drm/i915: Bug on unsupported swizzled platforms
drm/i915: BUG() if fences are used on unsupported platform
drm/i915: fixup overlay stolen memory leak
drm/i915: clean up PIPECONF bpc #defines
drm/i915: add intel_dp_set_signal_levels
drm/i915: remove leftover display.update_wm assignment
drm/i915: check for the PCH when setting pch_transcoder
drm/i915: Clear the stolen fb before enabling
drm/i915: Access to snooped system memory through the GTT is incoherent
drm/i915: Remove stale comment about intel_dp_detect()
...

Conflicts:
drivers/gpu/drm/i915/intel_display.c

+1576 -1115
-1
drivers/char/agp/intel-gtt.c
··· 602 602 iounmap(intel_private.registers); 603 603 return -ENOMEM; 604 604 } 605 - intel_private.base.gtt = intel_private.gtt; 606 605 607 606 global_cache_flush(); /* FIXME: ? */ 608 607
+63 -33
drivers/gpu/drm/drm_mm.c
··· 102 102 } 103 103 EXPORT_SYMBOL(drm_mm_pre_get); 104 104 105 - static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node) 106 - { 107 - return hole_node->start + hole_node->size; 108 - } 109 - 110 - static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node) 111 - { 112 - struct drm_mm_node *next_node = 113 - list_entry(hole_node->node_list.next, struct drm_mm_node, 114 - node_list); 115 - 116 - return next_node->start; 117 - } 118 - 119 105 static void drm_mm_insert_helper(struct drm_mm_node *hole_node, 120 106 struct drm_mm_node *node, 121 107 unsigned long size, unsigned alignment, ··· 113 127 unsigned long adj_start = hole_start; 114 128 unsigned long adj_end = hole_end; 115 129 116 - BUG_ON(!hole_node->hole_follows || node->allocated); 130 + BUG_ON(node->allocated); 117 131 118 132 if (mm->color_adjust) 119 133 mm->color_adjust(hole_node, color, &adj_start, &adj_end); ··· 141 155 BUG_ON(node->start + node->size > adj_end); 142 156 143 157 node->hole_follows = 0; 144 - if (node->start + node->size < hole_end) { 158 + if (__drm_mm_hole_node_start(node) < hole_end) { 145 159 list_add(&node->hole_stack, &mm->hole_stack); 146 160 node->hole_follows = 1; 147 161 } 148 162 } 163 + 164 + struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm, 165 + unsigned long start, 166 + unsigned long size, 167 + bool atomic) 168 + { 169 + struct drm_mm_node *hole, *node; 170 + unsigned long end = start + size; 171 + unsigned long hole_start; 172 + unsigned long hole_end; 173 + 174 + drm_mm_for_each_hole(hole, mm, hole_start, hole_end) { 175 + if (hole_start > start || hole_end < end) 176 + continue; 177 + 178 + node = drm_mm_kmalloc(mm, atomic); 179 + if (unlikely(node == NULL)) 180 + return NULL; 181 + 182 + node->start = start; 183 + node->size = size; 184 + node->mm = mm; 185 + node->allocated = 1; 186 + 187 + INIT_LIST_HEAD(&node->hole_stack); 188 + list_add(&node->node_list, &hole->node_list); 189 + 190 + if (start == 
hole_start) { 191 + hole->hole_follows = 0; 192 + list_del_init(&hole->hole_stack); 193 + } 194 + 195 + node->hole_follows = 0; 196 + if (end != hole_end) { 197 + list_add(&node->hole_stack, &mm->hole_stack); 198 + node->hole_follows = 1; 199 + } 200 + 201 + return node; 202 + } 203 + 204 + WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size); 205 + return NULL; 206 + } 207 + EXPORT_SYMBOL(drm_mm_create_block); 149 208 150 209 struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node, 151 210 unsigned long size, ··· 282 251 BUG_ON(node->start + node->size > end); 283 252 284 253 node->hole_follows = 0; 285 - if (node->start + node->size < hole_end) { 254 + if (__drm_mm_hole_node_start(node) < hole_end) { 286 255 list_add(&node->hole_stack, &mm->hole_stack); 287 256 node->hole_follows = 1; 288 257 } ··· 356 325 list_entry(node->node_list.prev, struct drm_mm_node, node_list); 357 326 358 327 if (node->hole_follows) { 359 - BUG_ON(drm_mm_hole_node_start(node) 360 - == drm_mm_hole_node_end(node)); 328 + BUG_ON(__drm_mm_hole_node_start(node) == 329 + __drm_mm_hole_node_end(node)); 361 330 list_del(&node->hole_stack); 362 331 } else 363 - BUG_ON(drm_mm_hole_node_start(node) 364 - != drm_mm_hole_node_end(node)); 332 + BUG_ON(__drm_mm_hole_node_start(node) != 333 + __drm_mm_hole_node_end(node)); 334 + 365 335 366 336 if (!prev_node->hole_follows) { 367 337 prev_node->hole_follows = 1; ··· 420 388 { 421 389 struct drm_mm_node *entry; 422 390 struct drm_mm_node *best; 391 + unsigned long adj_start; 392 + unsigned long adj_end; 423 393 unsigned long best_size; 424 394 425 395 BUG_ON(mm->scanned_blocks); ··· 429 395 best = NULL; 430 396 best_size = ~0UL; 431 397 432 - list_for_each_entry(entry, &mm->hole_stack, hole_stack) { 433 - unsigned long adj_start = drm_mm_hole_node_start(entry); 434 - unsigned long adj_end = drm_mm_hole_node_end(entry); 435 - 398 + drm_mm_for_each_hole(entry, mm, adj_start, adj_end) { 436 399 if (mm->color_adjust) { 437 400 
mm->color_adjust(entry, color, &adj_start, &adj_end); 438 401 if (adj_end <= adj_start) 439 402 continue; 440 403 } 441 404 442 - BUG_ON(!entry->hole_follows); 443 405 if (!check_free_hole(adj_start, adj_end, size, alignment)) 444 406 continue; 445 407 ··· 462 432 { 463 433 struct drm_mm_node *entry; 464 434 struct drm_mm_node *best; 435 + unsigned long adj_start; 436 + unsigned long adj_end; 465 437 unsigned long best_size; 466 438 467 439 BUG_ON(mm->scanned_blocks); ··· 471 439 best = NULL; 472 440 best_size = ~0UL; 473 441 474 - list_for_each_entry(entry, &mm->hole_stack, hole_stack) { 475 - unsigned long adj_start = drm_mm_hole_node_start(entry) < start ? 476 - start : drm_mm_hole_node_start(entry); 477 - unsigned long adj_end = drm_mm_hole_node_end(entry) > end ? 478 - end : drm_mm_hole_node_end(entry); 479 - 480 - BUG_ON(!entry->hole_follows); 442 + drm_mm_for_each_hole(entry, mm, adj_start, adj_end) { 443 + if (adj_start < start) 444 + adj_start = start; 445 + if (adj_end > end) 446 + adj_end = end; 481 447 482 448 if (mm->color_adjust) { 483 449 mm->color_adjust(entry, color, &adj_start, &adj_end);
+88 -7
drivers/gpu/drm/i915/i915_debugfs.c
··· 102 102 static void 103 103 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) 104 104 { 105 - seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s", 105 + seq_printf(m, "%p: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s", 106 106 &obj->base, 107 107 get_pin_flag(obj), 108 108 get_tiling_flag(obj), ··· 124 124 if (obj->gtt_space != NULL) 125 125 seq_printf(m, " (gtt offset: %08x, size: %08x)", 126 126 obj->gtt_offset, (unsigned int)obj->gtt_space->size); 127 + if (obj->stolen) 128 + seq_printf(m, " (stolen: %08lx)", obj->stolen->start); 127 129 if (obj->pin_mappable || obj->fault_mappable) { 128 130 char s[3], *t = s; 129 131 if (obj->pin_mappable) ··· 389 387 struct intel_ring_buffer *ring) 390 388 { 391 389 if (ring->get_seqno) { 392 - seq_printf(m, "Current sequence (%s): %d\n", 390 + seq_printf(m, "Current sequence (%s): %u\n", 393 391 ring->name, ring->get_seqno(ring, false)); 394 392 } 395 393 } ··· 546 544 struct drm_device *dev = node->minor->dev; 547 545 drm_i915_private_t *dev_priv = dev->dev_private; 548 546 struct intel_ring_buffer *ring; 549 - const volatile u32 __iomem *hws; 547 + const u32 *hws; 550 548 int i; 551 549 552 550 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; 553 - hws = (volatile u32 __iomem *)ring->status_page.page_addr; 551 + hws = ring->status_page.page_addr; 554 552 if (hws == NULL) 555 553 return 0; 556 554 ··· 610 608 seq_printf(m, "%s [%d]:\n", name, count); 611 609 612 610 while (count--) { 613 - seq_printf(m, " %08x %8u %04x %04x %x %x%s%s%s%s%s%s%s", 611 + seq_printf(m, " %08x %8u %02x %02x %x %x%s%s%s%s%s%s%s", 614 612 err->gtt_offset, 615 613 err->size, 616 614 err->read_domains, ··· 841 839 .write = i915_error_state_write, 842 840 .llseek = default_llseek, 843 841 .release = i915_error_state_release, 842 + }; 843 + 844 + static ssize_t 845 + i915_next_seqno_read(struct file *filp, 846 + char __user *ubuf, 847 + size_t max, 848 + loff_t *ppos) 849 + { 850 + struct drm_device *dev = 
filp->private_data; 851 + drm_i915_private_t *dev_priv = dev->dev_private; 852 + char buf[80]; 853 + int len; 854 + int ret; 855 + 856 + ret = mutex_lock_interruptible(&dev->struct_mutex); 857 + if (ret) 858 + return ret; 859 + 860 + len = snprintf(buf, sizeof(buf), 861 + "next_seqno : 0x%x\n", 862 + dev_priv->next_seqno); 863 + 864 + mutex_unlock(&dev->struct_mutex); 865 + 866 + if (len > sizeof(buf)) 867 + len = sizeof(buf); 868 + 869 + return simple_read_from_buffer(ubuf, max, ppos, buf, len); 870 + } 871 + 872 + static ssize_t 873 + i915_next_seqno_write(struct file *filp, 874 + const char __user *ubuf, 875 + size_t cnt, 876 + loff_t *ppos) 877 + { 878 + struct drm_device *dev = filp->private_data; 879 + char buf[20]; 880 + u32 val = 1; 881 + int ret; 882 + 883 + if (cnt > 0) { 884 + if (cnt > sizeof(buf) - 1) 885 + return -EINVAL; 886 + 887 + if (copy_from_user(buf, ubuf, cnt)) 888 + return -EFAULT; 889 + buf[cnt] = 0; 890 + 891 + ret = kstrtouint(buf, 0, &val); 892 + if (ret < 0) 893 + return ret; 894 + } 895 + 896 + ret = mutex_lock_interruptible(&dev->struct_mutex); 897 + if (ret) 898 + return ret; 899 + 900 + ret = i915_gem_set_seqno(dev, val); 901 + 902 + mutex_unlock(&dev->struct_mutex); 903 + 904 + return ret ?: cnt; 905 + } 906 + 907 + static const struct file_operations i915_next_seqno_fops = { 908 + .owner = THIS_MODULE, 909 + .open = simple_open, 910 + .read = i915_next_seqno_read, 911 + .write = i915_next_seqno_write, 912 + .llseek = default_llseek, 844 913 }; 845 914 846 915 static int i915_rstdby_delays(struct seq_file *m, void *unused) ··· 1624 1551 return 0; 1625 1552 } 1626 1553 1627 - ret = mutex_lock_interruptible(&dev->mode_config.mutex); 1554 + ret = mutex_lock_interruptible(&dev_priv->dpio_lock); 1628 1555 if (ret) 1629 1556 return ret; 1630 1557 ··· 1653 1580 seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n", 1654 1581 intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE)); 1655 1582 1656 - mutex_unlock(&dev->mode_config.mutex); 1583 + 
mutex_unlock(&dev_priv->dpio_lock); 1657 1584 1658 1585 return 0; 1659 1586 } ··· 2178 2105 if (ret) 2179 2106 return ret; 2180 2107 2108 + ret = i915_debugfs_create(minor->debugfs_root, minor, 2109 + "i915_next_seqno", 2110 + &i915_next_seqno_fops); 2111 + if (ret) 2112 + return ret; 2113 + 2181 2114 return drm_debugfs_create_files(i915_debugfs_list, 2182 2115 I915_DEBUGFS_ENTRIES, 2183 2116 minor->debugfs_root, minor); ··· 2206 2127 drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops, 2207 2128 1, minor); 2208 2129 drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops, 2130 + 1, minor); 2131 + drm_debugfs_remove_files((struct drm_info_list *) &i915_next_seqno_fops, 2209 2132 1, minor); 2210 2133 } 2211 2134
+34 -16
drivers/gpu/drm/i915/i915_dma.c
··· 1297 1297 if (ret) 1298 1298 goto cleanup_vga_switcheroo; 1299 1299 1300 + ret = drm_irq_install(dev); 1301 + if (ret) 1302 + goto cleanup_gem_stolen; 1303 + 1304 + /* Important: The output setup functions called by modeset_init need 1305 + * working irqs for e.g. gmbus and dp aux transfers. */ 1300 1306 intel_modeset_init(dev); 1301 1307 1302 1308 ret = i915_gem_init(dev); 1303 1309 if (ret) 1304 - goto cleanup_gem_stolen; 1305 - 1306 - intel_modeset_gem_init(dev); 1310 + goto cleanup_irq; 1307 1311 1308 1312 INIT_WORK(&dev_priv->console_resume_work, intel_console_resume); 1309 1313 1310 - ret = drm_irq_install(dev); 1311 - if (ret) 1312 - goto cleanup_gem; 1314 + intel_modeset_gem_init(dev); 1313 1315 1314 1316 /* Always safe in the mode setting case. */ 1315 1317 /* FIXME: do pre/post-mode set stuff in core KMS code */ ··· 1319 1317 1320 1318 ret = intel_fbdev_init(dev); 1321 1319 if (ret) 1322 - goto cleanup_irq; 1320 + goto cleanup_gem; 1321 + 1322 + /* Only enable hotplug handling once the fbdev is fully set up. */ 1323 + intel_hpd_init(dev); 1324 + 1325 + /* 1326 + * Some ports require correctly set-up hpd registers for detection to 1327 + * work properly (leading to ghost connected connector status), e.g. VGA 1328 + * on gm45. Hence we can only set up the initial fbdev config after hpd 1329 + * irqs are fully enabled. Now we should scan for the initial config 1330 + * only once hotplug handling is enabled, but due to screwed-up locking 1331 + * around kms/fbdev init we can't protect the fdbev initial config 1332 + * scanning against hotplug events. Hence do this first and ignore the 1333 + * tiny window where we will loose hotplug notifactions. 1334 + */ 1335 + intel_fbdev_initial_config(dev); 1336 + 1337 + /* Only enable hotplug handling once the fbdev is fully set up. 
*/ 1338 + dev_priv->enable_hotplug_processing = true; 1323 1339 1324 1340 drm_kms_helper_poll_init(dev); 1325 1341 ··· 1346 1326 1347 1327 return 0; 1348 1328 1349 - cleanup_irq: 1350 - drm_irq_uninstall(dev); 1351 1329 cleanup_gem: 1352 1330 mutex_lock(&dev->struct_mutex); 1353 1331 i915_gem_cleanup_ringbuffer(dev); 1354 1332 mutex_unlock(&dev->struct_mutex); 1355 1333 i915_gem_cleanup_aliasing_ppgtt(dev); 1334 + cleanup_irq: 1335 + drm_irq_uninstall(dev); 1356 1336 cleanup_gem_stolen: 1357 1337 i915_gem_cleanup_stolen(dev); 1358 1338 cleanup_vga_switcheroo: ··· 1602 1582 spin_lock_init(&dev_priv->irq_lock); 1603 1583 spin_lock_init(&dev_priv->error_lock); 1604 1584 spin_lock_init(&dev_priv->rps.lock); 1605 - spin_lock_init(&dev_priv->dpio_lock); 1585 + mutex_init(&dev_priv->dpio_lock); 1606 1586 1607 1587 mutex_init(&dev_priv->rps.hw_lock); 1608 1588 ··· 1633 1613 /* Must be done after probing outputs */ 1634 1614 intel_opregion_init(dev); 1635 1615 acpi_video_register(); 1636 - 1637 - setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, 1638 - (unsigned long) dev); 1639 1616 1640 1617 if (IS_GEN5(dev)) 1641 1618 intel_gpu_ips_init(dev_priv); ··· 1740 1723 mutex_unlock(&dev->struct_mutex); 1741 1724 i915_gem_cleanup_aliasing_ppgtt(dev); 1742 1725 i915_gem_cleanup_stolen(dev); 1743 - drm_mm_takedown(&dev_priv->mm.stolen); 1744 - 1745 - intel_cleanup_overlay(dev); 1746 1726 1747 1727 if (!I915_NEED_GFX_HWS(dev)) 1748 1728 i915_free_hws(dev); ··· 1752 1738 intel_teardown_mchbar(dev); 1753 1739 1754 1740 destroy_workqueue(dev_priv->wq); 1741 + pm_qos_remove_request(&dev_priv->pm_qos); 1742 + 1743 + if (dev_priv->slab) 1744 + kmem_cache_destroy(dev_priv->slab); 1755 1745 1756 1746 pci_dev_put(dev_priv->bridge_dev); 1757 1747 kfree(dev->dev_private);
+2
drivers/gpu/drm/i915/i915_drv.c
··· 565 565 intel_modeset_init_hw(dev); 566 566 intel_modeset_setup_hw_state(dev, false); 567 567 drm_irq_install(dev); 568 + intel_hpd_init(dev); 568 569 } 569 570 570 571 intel_opregion_init(dev); ··· 871 870 872 871 drm_irq_uninstall(dev); 873 872 drm_irq_install(dev); 873 + intel_hpd_init(dev); 874 874 } else { 875 875 mutex_unlock(&dev->struct_mutex); 876 876 }
+70 -12
drivers/gpu/drm/i915/i915_drv.h
··· 30 30 #ifndef _I915_DRV_H_ 31 31 #define _I915_DRV_H_ 32 32 33 + #include <uapi/drm/i915_drm.h> 34 + 33 35 #include "i915_reg.h" 34 36 #include "intel_bios.h" 35 37 #include "intel_ringbuffer.h" ··· 42 40 #include <linux/backlight.h> 43 41 #include <linux/intel-iommu.h> 44 42 #include <linux/kref.h> 43 + #include <linux/pm_qos.h> 45 44 46 45 /* General customization: 47 46 */ ··· 86 83 }; 87 84 #define port_name(p) ((p) + 'A') 88 85 89 - #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) 86 + #define I915_GEM_GPU_DOMAINS \ 87 + (I915_GEM_DOMAIN_RENDER | \ 88 + I915_GEM_DOMAIN_SAMPLER | \ 89 + I915_GEM_DOMAIN_COMMAND | \ 90 + I915_GEM_DOMAIN_INSTRUCTION | \ 91 + I915_GEM_DOMAIN_VERTEX) 90 92 91 93 #define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++) 92 94 ··· 108 100 int fp1_reg; 109 101 }; 110 102 #define I915_NUM_PLLS 2 103 + 104 + /* Used by dp and fdi links */ 105 + struct intel_link_m_n { 106 + uint32_t tu; 107 + uint32_t gmch_m; 108 + uint32_t gmch_n; 109 + uint32_t link_m; 110 + uint32_t link_n; 111 + }; 112 + 113 + void intel_link_compute_m_n(int bpp, int nlanes, 114 + int pixel_clock, int link_clock, 115 + struct intel_link_m_n *m_n); 111 116 112 117 struct intel_ddi_plls { 113 118 int spll_refcount; ··· 297 276 struct drm_i915_gem_object *obj); 298 277 int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb, 299 278 int x, int y); 279 + void (*hpd_irq_setup)(struct drm_device *dev); 300 280 /* clock updates for mode set */ 301 281 /* cursor updates */ 302 282 /* render clock increase/decrease */ ··· 599 577 struct mutex hw_lock; 600 578 }; 601 579 580 + /* defined intel_pm.c */ 581 + extern spinlock_t mchdev_lock; 582 + 602 583 struct intel_ilk_power_mgmt { 603 584 u8 cur_delay; 604 585 u8 min_delay; ··· 644 619 645 620 typedef struct drm_i915_private { 646 621 struct drm_device *dev; 622 + struct kmem_cache *slab; 647 623 648 624 const struct intel_device_info *info; 649 625 ··· 659 633 /** 
forcewake_count is protected by gt_lock */ 660 634 unsigned forcewake_count; 661 635 /** gt_lock is also taken in irq contexts. */ 662 - struct spinlock gt_lock; 636 + spinlock_t gt_lock; 663 637 664 638 struct intel_gmbus gmbus[GMBUS_NUM_PORTS]; 639 + 665 640 666 641 /** gmbus_mutex protects against concurrent usage of the single hw gmbus 667 642 * controller on different i2c buses. */ ··· 673 646 */ 674 647 uint32_t gpio_mmio_base; 675 648 649 + wait_queue_head_t gmbus_wait_queue; 650 + 676 651 struct pci_dev *bridge_dev; 677 652 struct intel_ring_buffer ring[I915_NUM_RINGS]; 678 - uint32_t next_seqno; 653 + uint32_t last_seqno, next_seqno; 679 654 680 655 drm_dma_handle_t *status_page_dmah; 681 656 struct resource mch_res; ··· 687 658 /* protects the irq masks */ 688 659 spinlock_t irq_lock; 689 660 661 + /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */ 662 + struct pm_qos_request pm_qos; 663 + 690 664 /* DPIO indirect register protection */ 691 - spinlock_t dpio_lock; 665 + struct mutex dpio_lock; 692 666 693 667 /** Cached value of IMR to avoid reads in updating the bitfield */ 694 668 u32 pipestat[2]; ··· 701 669 702 670 u32 hotplug_supported_mask; 703 671 struct work_struct hotplug_work; 672 + bool enable_hotplug_processing; 704 673 705 674 int num_pipe; 706 675 int num_pch_pll; ··· 743 710 unsigned int display_clock_mode:1; 744 711 int lvds_ssc_freq; 745 712 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ 746 - unsigned int lvds_val; /* used for checking LVDS channel mode */ 747 713 struct { 748 714 int rate; 749 715 int lanes; ··· 803 771 unsigned long gtt_start; 804 772 unsigned long gtt_mappable_end; 805 773 unsigned long gtt_end; 774 + unsigned long stolen_base; /* limited to low memory (32-bit) */ 775 + 776 + /** "Graphics Stolen Memory" holds the global PTEs */ 777 + void __iomem *gsm; 806 778 807 779 struct io_mapping *gtt_mapping; 808 780 phys_addr_t gtt_base_addr; ··· 979 943 I915_CACHE_LLC_MLC, /* gen6+, 
in docs at least! */ 980 944 }; 981 945 946 + #define I915_GTT_RESERVED ((struct drm_mm_node *)0x1) 947 + 982 948 struct drm_i915_gem_object_ops { 983 949 /* Interface between the GEM object and its backing storage. 984 950 * get_pages() is called once prior to the use of the associated set ··· 1006 968 1007 969 /** Current space allocated to this object in the GTT, if any. */ 1008 970 struct drm_mm_node *gtt_space; 971 + /** Stolen memory for this object, instead of being backed by shmem. */ 972 + struct drm_mm_node *stolen; 1009 973 struct list_head gtt_list; 1010 974 1011 975 /** This object's place on the active/inactive lists */ ··· 1178 1138 1179 1139 struct drm_i915_file_private { 1180 1140 struct { 1181 - struct spinlock lock; 1141 + spinlock_t lock; 1182 1142 struct list_head request_list; 1183 1143 } mm; 1184 1144 struct idr context_idr; ··· 1263 1223 #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) 1264 1224 1265 1225 #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) 1226 + 1227 + #define HAS_DDI(dev) (IS_HASWELL(dev)) 1266 1228 1267 1229 #define INTEL_PCH_DEVICE_ID_MASK 0xff00 1268 1230 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 ··· 1362 1320 void i915_handle_error(struct drm_device *dev, bool wedged); 1363 1321 1364 1322 extern void intel_irq_init(struct drm_device *dev); 1323 + extern void intel_hpd_init(struct drm_device *dev); 1365 1324 extern void intel_gt_init(struct drm_device *dev); 1366 1325 extern void intel_gt_reset(struct drm_device *dev); 1367 1326 ··· 1431 1388 int i915_gem_wait_ioctl(struct drm_device *dev, void *data, 1432 1389 struct drm_file *file_priv); 1433 1390 void i915_gem_load(struct drm_device *dev); 1391 + void *i915_gem_object_alloc(struct drm_device *dev); 1392 + void i915_gem_object_free(struct drm_i915_gem_object *obj); 1434 1393 int i915_gem_init_object(struct drm_gem_object *obj); 1435 1394 void i915_gem_object_init(struct drm_i915_gem_object *obj, 1436 1395 const struct drm_i915_gem_object_ops *ops); 1437 
1396 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, 1438 1397 size_t size); 1439 1398 void i915_gem_free_object(struct drm_gem_object *obj); 1399 + 1440 1400 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, 1441 1401 uint32_t alignment, 1442 1402 bool map_and_fenceable, ··· 1497 1451 return (int32_t)(seq1 - seq2) >= 0; 1498 1452 } 1499 1453 1500 - extern int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); 1501 - 1454 + int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); 1455 + int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno); 1502 1456 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj); 1503 1457 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); 1504 1458 ··· 1605 1559 enum i915_cache_level cache_level); 1606 1560 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj); 1607 1561 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj); 1608 - void i915_gem_init_global_gtt(struct drm_device *dev, 1609 - unsigned long start, 1610 - unsigned long mappable_end, 1611 - unsigned long end); 1562 + void i915_gem_init_global_gtt(struct drm_device *dev); 1563 + void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start, 1564 + unsigned long mappable_end, unsigned long end); 1612 1565 int i915_gem_gtt_init(struct drm_device *dev); 1613 1566 void i915_gem_gtt_fini(struct drm_device *dev); 1614 1567 static inline void i915_gem_chipset_flush(struct drm_device *dev) ··· 1627 1582 1628 1583 /* i915_gem_stolen.c */ 1629 1584 int i915_gem_init_stolen(struct drm_device *dev); 1585 + int i915_gem_stolen_setup_compression(struct drm_device *dev, int size); 1586 + void i915_gem_stolen_cleanup_compression(struct drm_device *dev); 1630 1587 void i915_gem_cleanup_stolen(struct drm_device *dev); 1588 + struct drm_i915_gem_object * 1589 + i915_gem_object_create_stolen(struct drm_device *dev, u32 size); 1590 + 
void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj); 1631 1591 1632 1592 /* i915_gem_tiling.c */ 1593 + inline static bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) 1594 + { 1595 + drm_i915_private_t *dev_priv = obj->base.dev->dev_private; 1596 + 1597 + return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && 1598 + obj->tiling_mode != I915_TILING_NONE; 1599 + } 1600 + 1633 1601 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); 1634 1602 void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj); 1635 1603 void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
+85 -98
drivers/gpu/drm/i915/i915_gem.c
··· 163 163 return -ENODEV; 164 164 165 165 mutex_lock(&dev->struct_mutex); 166 - i915_gem_init_global_gtt(dev, args->gtt_start, 167 - args->gtt_end, args->gtt_end); 166 + i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end, 167 + args->gtt_end); 168 168 mutex_unlock(&dev->struct_mutex); 169 169 170 170 return 0; ··· 192 192 return 0; 193 193 } 194 194 195 + void *i915_gem_object_alloc(struct drm_device *dev) 196 + { 197 + struct drm_i915_private *dev_priv = dev->dev_private; 198 + return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO); 199 + } 200 + 201 + void i915_gem_object_free(struct drm_i915_gem_object *obj) 202 + { 203 + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 204 + kmem_cache_free(dev_priv->slab, obj); 205 + } 206 + 195 207 static int 196 208 i915_gem_create(struct drm_file *file, 197 209 struct drm_device *dev, ··· 227 215 if (ret) { 228 216 drm_gem_object_release(&obj->base); 229 217 i915_gem_info_remove_obj(dev->dev_private, obj->base.size); 230 - kfree(obj); 218 + i915_gem_object_free(obj); 231 219 return ret; 232 220 } 233 221 ··· 269 257 270 258 return i915_gem_create(file, dev, 271 259 args->size, &args->handle); 272 - } 273 - 274 - static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) 275 - { 276 - drm_i915_private_t *dev_priv = obj->base.dev->dev_private; 277 - 278 - return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && 279 - obj->tiling_mode != I915_TILING_NONE; 280 260 } 281 261 282 262 static inline int ··· 411 407 loff_t offset; 412 408 int shmem_page_offset, page_length, ret = 0; 413 409 int obj_do_bit17_swizzling, page_do_bit17_swizzling; 414 - int hit_slowpath = 0; 415 410 int prefaulted = 0; 416 411 int needs_clflush = 0; 417 412 struct scatterlist *sg; ··· 472 469 if (ret == 0) 473 470 goto next_page; 474 471 475 - hit_slowpath = 1; 476 472 mutex_unlock(&dev->struct_mutex); 477 473 478 474 if (!prefaulted) { ··· 503 501 504 502 out: 505 503 
i915_gem_object_unpin_pages(obj); 506 - 507 - if (hit_slowpath) { 508 - /* Fixup: Kill any reinstated backing storage pages */ 509 - if (obj->madv == __I915_MADV_PURGED) 510 - i915_gem_object_truncate(obj); 511 - } 512 504 513 505 return ret; 514 506 } ··· 834 838 i915_gem_object_unpin_pages(obj); 835 839 836 840 if (hit_slowpath) { 837 - /* Fixup: Kill any reinstated backing storage pages */ 838 - if (obj->madv == __I915_MADV_PURGED) 839 - i915_gem_object_truncate(obj); 840 - /* and flush dirty cachelines in case the object isn't in the cpu write 841 - * domain anymore. */ 842 - if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) { 841 + /* 842 + * Fixup: Flush cpu caches in case we didn't flush the dirty 843 + * cachelines in-line while writing and the object moved 844 + * out of the cpu write domain while we've dropped the lock. 845 + */ 846 + if (!needs_clflush_after && 847 + obj->base.write_domain != I915_GEM_DOMAIN_CPU) { 843 848 i915_gem_clflush_object(obj); 844 849 i915_gem_chipset_flush(dev); 845 850 } ··· 1340 1343 goto out; 1341 1344 1342 1345 trace_i915_gem_object_fault(obj, page_offset, true, write); 1346 + 1347 + /* Access to snoopable pages through the GTT is incoherent. */ 1348 + if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) { 1349 + ret = -EINVAL; 1350 + goto unlock; 1351 + } 1343 1352 1344 1353 /* Now bind it into the GTT if needed */ 1345 1354 ret = i915_gem_object_pin(obj, 0, true, false); ··· 1936 1933 } 1937 1934 1938 1935 static int 1939 - i915_gem_handle_seqno_wrap(struct drm_device *dev) 1936 + i915_gem_init_seqno(struct drm_device *dev, u32 seqno) 1940 1937 { 1941 1938 struct drm_i915_private *dev_priv = dev->dev_private; 1942 1939 struct intel_ring_buffer *ring; 1943 1940 int ret, i, j; 1944 1941 1945 - /* The hardware uses various monotonic 32-bit counters, if we 1946 - * detect that they will wraparound we need to idle the GPU 1947 - * and reset those counters. 
1948 - */ 1949 - ret = 0; 1942 + /* Carefully retire all requests without writing to the rings */ 1950 1943 for_each_ring(ring, dev_priv, i) { 1951 - for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++) 1952 - ret |= ring->sync_seqno[j] != 0; 1944 + ret = intel_ring_idle(ring); 1945 + if (ret) 1946 + return ret; 1953 1947 } 1954 - if (ret == 0) 1955 - return ret; 1956 - 1957 - ret = i915_gpu_idle(dev); 1958 - if (ret) 1959 - return ret; 1960 - 1961 1948 i915_gem_retire_requests(dev); 1949 + 1950 + /* Finally reset hw state */ 1962 1951 for_each_ring(ring, dev_priv, i) { 1952 + intel_ring_init_seqno(ring, seqno); 1953 + 1963 1954 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++) 1964 1955 ring->sync_seqno[j] = 0; 1965 1956 } 1957 + 1958 + return 0; 1959 + } 1960 + 1961 + int i915_gem_set_seqno(struct drm_device *dev, u32 seqno) 1962 + { 1963 + struct drm_i915_private *dev_priv = dev->dev_private; 1964 + int ret; 1965 + 1966 + if (seqno == 0) 1967 + return -EINVAL; 1968 + 1969 + /* HWS page needs to be set less than what we 1970 + * will inject to ring 1971 + */ 1972 + ret = i915_gem_init_seqno(dev, seqno - 1); 1973 + if (ret) 1974 + return ret; 1975 + 1976 + /* Carefully set the last_seqno value so that wrap 1977 + * detection still works 1978 + */ 1979 + dev_priv->next_seqno = seqno; 1980 + dev_priv->last_seqno = seqno - 1; 1981 + if (dev_priv->last_seqno == 0) 1982 + dev_priv->last_seqno--; 1966 1983 1967 1984 return 0; 1968 1985 } ··· 1994 1971 1995 1972 /* reserve 0 for non-seqno */ 1996 1973 if (dev_priv->next_seqno == 0) { 1997 - int ret = i915_gem_handle_seqno_wrap(dev); 1974 + int ret = i915_gem_init_seqno(dev, 0); 1998 1975 if (ret) 1999 1976 return ret; 2000 1977 2001 1978 dev_priv->next_seqno = 1; 2002 1979 } 2003 1980 2004 - *seqno = dev_priv->next_seqno++; 1981 + *seqno = dev_priv->last_seqno = dev_priv->next_seqno++; 2005 1982 return 0; 2006 1983 } 2007 1984 ··· 2671 2648 case 4: i965_write_fence_reg(dev, reg, obj); break; 2672 2649 case 3: 
i915_write_fence_reg(dev, reg, obj); break; 2673 2650 case 2: i830_write_fence_reg(dev, reg, obj); break; 2674 - default: break; 2651 + default: BUG(); 2675 2652 } 2676 2653 } 2677 2654 ··· 2846 2823 2847 2824 /* On non-LLC machines we have to be careful when putting differing 2848 2825 * types of snoopable memory together to avoid the prefetcher 2849 - * crossing memory domains and dieing. 2826 + * crossing memory domains and dying. 2850 2827 */ 2851 2828 if (HAS_LLC(dev)) 2852 2829 return true; ··· 3721 3698 { 3722 3699 struct drm_i915_gem_object *obj; 3723 3700 struct address_space *mapping; 3724 - u32 mask; 3701 + gfp_t mask; 3725 3702 3726 - obj = kzalloc(sizeof(*obj), GFP_KERNEL); 3703 + obj = i915_gem_object_alloc(dev); 3727 3704 if (obj == NULL) 3728 3705 return NULL; 3729 3706 3730 3707 if (drm_gem_object_init(dev, &obj->base, size) != 0) { 3731 - kfree(obj); 3708 + i915_gem_object_free(obj); 3732 3709 return NULL; 3733 3710 } 3734 3711 ··· 3800 3777 obj->pages_pin_count = 0; 3801 3778 i915_gem_object_put_pages(obj); 3802 3779 i915_gem_object_free_mmap_offset(obj); 3780 + i915_gem_object_release_stolen(obj); 3803 3781 3804 3782 BUG_ON(obj->pages); 3805 3783 ··· 3811 3787 i915_gem_info_remove_obj(dev_priv, obj->base.size); 3812 3788 3813 3789 kfree(obj->bit_17); 3814 - kfree(obj); 3790 + i915_gem_object_free(obj); 3815 3791 } 3816 3792 3817 3793 int ··· 3907 3883 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL); 3908 3884 if (IS_GEN6(dev)) 3909 3885 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB)); 3910 - else 3886 + else if (IS_GEN7(dev)) 3911 3887 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); 3888 + else 3889 + BUG(); 3912 3890 } 3913 3891 3914 3892 static bool ··· 3945 3919 3946 3920 i915_gem_init_swizzling(dev); 3947 3921 3922 + dev_priv->next_seqno = dev_priv->last_seqno = (u32)~0 - 0x1000; 3923 + 3948 3924 ret = intel_init_render_ring_buffer(dev); 3949 3925 if (ret) 3950 3926 return ret; ··· 3962 3934 if (ret) 
3963 3935 goto cleanup_bsd_ring; 3964 3936 } 3965 - 3966 - dev_priv->next_seqno = 1; 3967 3937 3968 3938 /* 3969 3939 * XXX: There was some w/a described somewhere suggesting loading ··· 3979 3953 return ret; 3980 3954 } 3981 3955 3982 - static bool 3983 - intel_enable_ppgtt(struct drm_device *dev) 3984 - { 3985 - if (i915_enable_ppgtt >= 0) 3986 - return i915_enable_ppgtt; 3987 - 3988 - #ifdef CONFIG_INTEL_IOMMU 3989 - /* Disable ppgtt on SNB if VT-d is on. */ 3990 - if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) 3991 - return false; 3992 - #endif 3993 - 3994 - return true; 3995 - } 3996 - 3997 3956 int i915_gem_init(struct drm_device *dev) 3998 3957 { 3999 3958 struct drm_i915_private *dev_priv = dev->dev_private; 4000 - unsigned long gtt_size, mappable_size; 4001 3959 int ret; 4002 3960 4003 - gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT; 4004 - mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; 4005 - 4006 3961 mutex_lock(&dev->struct_mutex); 4007 - if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) { 4008 - /* PPGTT pdes are stolen from global gtt ptes, so shrink the 4009 - * aperture accordingly when using aliasing ppgtt. */ 4010 - gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE; 4011 - 4012 - i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size); 4013 - 4014 - ret = i915_gem_init_aliasing_ppgtt(dev); 4015 - if (ret) { 4016 - mutex_unlock(&dev->struct_mutex); 4017 - return ret; 4018 - } 4019 - } else { 4020 - /* Let GEM Manage all of the aperture. 4021 - * 4022 - * However, leave one page at the end still bound to the scratch 4023 - * page. There are a number of places where the hardware 4024 - * apparently prefetches past the end of the object, and we've 4025 - * seen multiple hangs with the GPU head pointer stuck in a 4026 - * batchbuffer bound at the last page of the aperture. One page 4027 - * should be enough to keep any prefetching inside of the 4028 - * aperture. 
4029 - */ 4030 - i915_gem_init_global_gtt(dev, 0, mappable_size, 4031 - gtt_size); 4032 - } 4033 - 3962 + i915_gem_init_global_gtt(dev); 4034 3963 ret = i915_gem_init_hw(dev); 4035 3964 mutex_unlock(&dev->struct_mutex); 4036 3965 if (ret) { ··· 4086 4105 void 4087 4106 i915_gem_load(struct drm_device *dev) 4088 4107 { 4089 - int i; 4090 4108 drm_i915_private_t *dev_priv = dev->dev_private; 4109 + int i; 4110 + 4111 + dev_priv->slab = 4112 + kmem_cache_create("i915_gem_object", 4113 + sizeof(struct drm_i915_gem_object), 0, 4114 + SLAB_HWCACHE_ALIGN, 4115 + NULL); 4091 4116 4092 4117 INIT_LIST_HEAD(&dev_priv->mm.active_list); 4093 4118 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+2 -3
drivers/gpu/drm/i915/i915_gem_dmabuf.c
··· 281 281 if (IS_ERR(attach)) 282 282 return ERR_CAST(attach); 283 283 284 - 285 - obj = kzalloc(sizeof(*obj), GFP_KERNEL); 284 + obj = i915_gem_object_alloc(dev); 286 285 if (obj == NULL) { 287 286 ret = -ENOMEM; 288 287 goto fail_detach; ··· 289 290 290 291 ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size); 291 292 if (ret) { 292 - kfree(obj); 293 + i915_gem_object_free(obj); 293 294 goto fail_detach; 294 295 } 295 296
-53
drivers/gpu/drm/i915/i915_gem_execbuffer.c
··· 150 150 reloc->write_domain); 151 151 return ret; 152 152 } 153 - if (unlikely(reloc->write_domain && target_obj->pending_write_domain && 154 - reloc->write_domain != target_obj->pending_write_domain)) { 155 - DRM_DEBUG("Write domain conflict: " 156 - "obj %p target %d offset %d " 157 - "new %08x old %08x\n", 158 - obj, reloc->target_handle, 159 - (int) reloc->offset, 160 - reloc->write_domain, 161 - target_obj->pending_write_domain); 162 - return ret; 163 - } 164 153 165 154 target_obj->pending_read_domains |= reloc->read_domains; 166 155 target_obj->pending_write_domain |= reloc->write_domain; ··· 591 602 } 592 603 593 604 static int 594 - i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips) 595 - { 596 - u32 plane, flip_mask; 597 - int ret; 598 - 599 - /* Check for any pending flips. As we only maintain a flip queue depth 600 - * of 1, we can simply insert a WAIT for the next display flip prior 601 - * to executing the batch and avoid stalling the CPU. 602 - */ 603 - 604 - for (plane = 0; flips >> plane; plane++) { 605 - if (((flips >> plane) & 1) == 0) 606 - continue; 607 - 608 - if (plane) 609 - flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; 610 - else 611 - flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; 612 - 613 - ret = intel_ring_begin(ring, 2); 614 - if (ret) 615 - return ret; 616 - 617 - intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); 618 - intel_ring_emit(ring, MI_NOOP); 619 - intel_ring_advance(ring); 620 - } 621 - 622 - return 0; 623 - } 624 - 625 - static int 626 605 i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, 627 606 struct list_head *objects) 628 607 { 629 608 struct drm_i915_gem_object *obj; 630 609 uint32_t flush_domains = 0; 631 - uint32_t flips = 0; 632 610 int ret; 633 611 634 612 list_for_each_entry(obj, objects, exec_list) { ··· 606 650 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) 607 651 i915_gem_clflush_object(obj); 608 652 609 - if (obj->base.pending_write_domain) 610 - flips |= 
atomic_read(&obj->pending_flip); 611 - 612 653 flush_domains |= obj->base.write_domain; 613 - } 614 - 615 - if (flips) { 616 - ret = i915_gem_execbuffer_wait_for_flips(ring, flips); 617 - if (ret) 618 - return ret; 619 654 } 620 655 621 656 if (flush_domains & I915_GEM_DOMAIN_CPU)
+93 -15
drivers/gpu/drm/i915/i915_gem_gtt.c
··· 282 282 uint32_t pd_offset; 283 283 struct intel_ring_buffer *ring; 284 284 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 285 - uint32_t __iomem *pd_addr; 285 + gtt_pte_t __iomem *pd_addr; 286 286 uint32_t pd_entry; 287 287 int i; 288 288 ··· 290 290 return; 291 291 292 292 293 - pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t); 293 + pd_addr = (gtt_pte_t __iomem*)dev_priv->mm.gsm + ppgtt->pd_offset/sizeof(gtt_pte_t); 294 294 for (i = 0; i < ppgtt->num_pd_entries; i++) { 295 295 dma_addr_t pt_addr; 296 296 ··· 367 367 { 368 368 struct drm_i915_private *dev_priv = dev->dev_private; 369 369 gtt_pte_t scratch_pte; 370 - gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry; 370 + gtt_pte_t __iomem *gtt_base = (gtt_pte_t __iomem *) dev_priv->mm.gsm + first_entry; 371 371 const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry; 372 372 int i; 373 373 ··· 432 432 struct scatterlist *sg = st->sgl; 433 433 const int first_entry = obj->gtt_space->start >> PAGE_SHIFT; 434 434 const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry; 435 - gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry; 435 + gtt_pte_t __iomem *gtt_entries = 436 + (gtt_pte_t __iomem *)dev_priv->mm.gsm + first_entry; 436 437 int unused, i = 0; 437 438 unsigned int len, m = 0; 438 439 dma_addr_t addr; ··· 526 525 } 527 526 } 528 527 529 - void i915_gem_init_global_gtt(struct drm_device *dev, 530 - unsigned long start, 531 - unsigned long mappable_end, 532 - unsigned long end) 528 + void i915_gem_setup_global_gtt(struct drm_device *dev, 529 + unsigned long start, 530 + unsigned long mappable_end, 531 + unsigned long end) 533 532 { 534 533 drm_i915_private_t *dev_priv = dev->dev_private; 534 + struct drm_mm_node *entry; 535 + struct drm_i915_gem_object *obj; 536 + unsigned long hole_start, hole_end; 535 537 536 - /* Substract the guard page ... */ 538 + /* Subtract the guard page ... 
*/ 537 539 drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE); 538 540 if (!HAS_LLC(dev)) 539 541 dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust; 542 + 543 + /* Mark any preallocated objects as occupied */ 544 + list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { 545 + DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n", 546 + obj->gtt_offset, obj->base.size); 547 + 548 + BUG_ON(obj->gtt_space != I915_GTT_RESERVED); 549 + obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space, 550 + obj->gtt_offset, 551 + obj->base.size, 552 + false); 553 + obj->has_global_gtt_mapping = 1; 554 + } 540 555 541 556 dev_priv->mm.gtt_start = start; 542 557 dev_priv->mm.gtt_mappable_end = mappable_end; ··· 560 543 dev_priv->mm.gtt_total = end - start; 561 544 dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start; 562 545 563 - /* ... but ensure that we clear the entire range. */ 564 - i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE); 546 + /* Clear any non-preallocated blocks */ 547 + drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space, 548 + hole_start, hole_end) { 549 + DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", 550 + hole_start, hole_end); 551 + i915_ggtt_clear_range(dev, 552 + hole_start / PAGE_SIZE, 553 + (hole_end-hole_start) / PAGE_SIZE); 554 + } 555 + 556 + /* And finally clear the reserved guard page */ 557 + i915_ggtt_clear_range(dev, end / PAGE_SIZE - 1, 1); 558 + } 559 + 560 + static bool 561 + intel_enable_ppgtt(struct drm_device *dev) 562 + { 563 + if (i915_enable_ppgtt >= 0) 564 + return i915_enable_ppgtt; 565 + 566 + #ifdef CONFIG_INTEL_IOMMU 567 + /* Disable ppgtt on SNB if VT-d is on. 
*/ 568 + if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) 569 + return false; 570 + #endif 571 + 572 + return true; 573 + } 574 + 575 + void i915_gem_init_global_gtt(struct drm_device *dev) 576 + { 577 + struct drm_i915_private *dev_priv = dev->dev_private; 578 + unsigned long gtt_size, mappable_size; 579 + int ret; 580 + 581 + gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT; 582 + mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; 583 + 584 + if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) { 585 + /* PPGTT pdes are stolen from global gtt ptes, so shrink the 586 + * aperture accordingly when using aliasing ppgtt. */ 587 + gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE; 588 + 589 + i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); 590 + 591 + ret = i915_gem_init_aliasing_ppgtt(dev); 592 + if (ret) { 593 + mutex_unlock(&dev->struct_mutex); 594 + return; 595 + } 596 + } else { 597 + /* Let GEM Manage all of the aperture. 598 + * 599 + * However, leave one page at the end still bound to the scratch 600 + * page. There are a number of places where the hardware 601 + * apparently prefetches past the end of the object, and we've 602 + * seen multiple hangs with the GPU head pointer stuck in a 603 + * batchbuffer bound at the last page of the aperture. One page 604 + * should be enough to keep any prefetching inside of the 605 + * aperture. 
606 + */ 607 + i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); 608 + } 565 609 } 566 610 567 611 static int setup_scratch_page(struct drm_device *dev) ··· 752 674 goto err_out; 753 675 } 754 676 755 - dev_priv->mm.gtt->gtt = ioremap_wc(gtt_bus_addr, 756 - dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t)); 757 - if (!dev_priv->mm.gtt->gtt) { 677 + dev_priv->mm.gsm = ioremap_wc(gtt_bus_addr, 678 + dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t)); 679 + if (!dev_priv->mm.gsm) { 758 680 DRM_ERROR("Failed to map the gtt page table\n"); 759 681 teardown_scratch_page(dev); 760 682 ret = -ENOMEM; ··· 778 700 void i915_gem_gtt_fini(struct drm_device *dev) 779 701 { 780 702 struct drm_i915_private *dev_priv = dev->dev_private; 781 - iounmap(dev_priv->mm.gtt->gtt); 703 + iounmap(dev_priv->mm.gsm); 782 704 teardown_scratch_page(dev); 783 705 if (INTEL_INFO(dev)->gen < 6) 784 706 intel_gmch_remove();
+217 -96
drivers/gpu/drm/i915/i915_gem_stolen.c
··· 42 42 * for is a boon. 43 43 */ 44 44 45 - #define PTE_ADDRESS_MASK 0xfffff000 46 - #define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */ 47 - #define PTE_MAPPING_TYPE_UNCACHED (0 << 1) 48 - #define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */ 49 - #define PTE_MAPPING_TYPE_CACHED (3 << 1) 50 - #define PTE_MAPPING_TYPE_MASK (3 << 1) 51 - #define PTE_VALID (1 << 0) 52 - 53 - /** 54 - * i915_stolen_to_phys - take an offset into stolen memory and turn it into 55 - * a physical one 56 - * @dev: drm device 57 - * @offset: address to translate 58 - * 59 - * Some chip functions require allocations from stolen space and need the 60 - * physical address of the memory in question. 61 - */ 62 - static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset) 45 + static unsigned long i915_stolen_to_physical(struct drm_device *dev) 63 46 { 64 47 struct drm_i915_private *dev_priv = dev->dev_private; 65 48 struct pci_dev *pdev = dev_priv->bridge_dev; 66 49 u32 base; 67 50 68 - #if 0 69 51 /* On the machines I have tested the Graphics Base of Stolen Memory 70 - * is unreliable, so compute the base by subtracting the stolen memory 71 - * from the Top of Low Usable DRAM which is where the BIOS places 72 - * the graphics stolen memory. 52 + * is unreliable, so on those compute the base by subtracting the 53 + * stolen memory from the Top of Low Usable DRAM which is where the 54 + * BIOS places the graphics stolen memory. 55 + * 56 + * On gen2, the layout is slightly different with the Graphics Segment 57 + * immediately following Top of Memory (or Top of Usable DRAM). Note 58 + * it appears that TOUD is only reported by 865g, so we just use the 59 + * top of memory as determined by the e820 probe. 60 + * 61 + * XXX gen2 requires an unavailable symbol and 945gm fails with 62 + * its value of TOLUD. 
73 63 */ 74 - if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) { 75 - /* top 32bits are reserved = 0 */ 64 + base = 0; 65 + if (INTEL_INFO(dev)->gen >= 6) { 66 + /* Read Base Data of Stolen Memory Register (BDSM) directly. 67 + * Note that there is also a MCHBAR miror at 0x1080c0 or 68 + * we could use device 2:0x5c instead. 69 + */ 70 + pci_read_config_dword(pdev, 0xB0, &base); 71 + base &= ~4095; /* lower bits used for locking register */ 72 + } else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) { 73 + /* Read Graphics Base of Stolen Memory directly */ 76 74 pci_read_config_dword(pdev, 0xA4, &base); 77 - } else { 78 - /* XXX presume 8xx is the same as i915 */ 79 - pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base); 80 - } 81 - #else 82 - if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) { 83 - u16 val; 84 - pci_read_config_word(pdev, 0xb0, &val); 85 - base = val >> 4 << 20; 86 - } else { 75 + #if 0 76 + } else if (IS_GEN3(dev)) { 87 77 u8 val; 78 + /* Stolen is immediately below Top of Low Usable DRAM */ 88 79 pci_read_config_byte(pdev, 0x9c, &val); 89 80 base = val >> 3 << 27; 90 - } 91 - base -= dev_priv->mm.gtt->stolen_size; 81 + base -= dev_priv->mm.gtt->stolen_size; 82 + } else { 83 + /* Stolen is immediately above Top of Memory */ 84 + base = max_low_pfn_mapped << PAGE_SHIFT; 92 85 #endif 86 + } 93 87 94 - return base + offset; 88 + return base; 95 89 } 96 90 97 - static void i915_warn_stolen(struct drm_device *dev) 98 - { 99 - DRM_INFO("not enough stolen space for compressed buffer, disabling\n"); 100 - DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n"); 101 - } 102 - 103 - static void i915_setup_compression(struct drm_device *dev, int size) 91 + static int i915_setup_compression(struct drm_device *dev, int size) 104 92 { 105 93 struct drm_i915_private *dev_priv = dev->dev_private; 106 94 struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb); 107 - unsigned long cfb_base; 108 - unsigned long ll_base = 0; 
109 95 110 - /* Just in case the BIOS is doing something questionable. */ 111 - intel_disable_fbc(dev); 112 - 113 - compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0); 96 + /* Try to over-allocate to reduce reallocations and fragmentation */ 97 + compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, 98 + size <<= 1, 4096, 0); 99 + if (!compressed_fb) 100 + compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, 101 + size >>= 1, 4096, 0); 114 102 if (compressed_fb) 115 103 compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); 116 104 if (!compressed_fb) 117 105 goto err; 118 106 119 - cfb_base = i915_stolen_to_phys(dev, compressed_fb->start); 120 - if (!cfb_base) 121 - goto err_fb; 122 - 123 - if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) { 107 + if (HAS_PCH_SPLIT(dev)) 108 + I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start); 109 + else if (IS_GM45(dev)) { 110 + I915_WRITE(DPFC_CB_BASE, compressed_fb->start); 111 + } else { 124 112 compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen, 125 113 4096, 4096, 0); 126 114 if (compressed_llb) ··· 117 129 if (!compressed_llb) 118 130 goto err_fb; 119 131 120 - ll_base = i915_stolen_to_phys(dev, compressed_llb->start); 121 - if (!ll_base) 122 - goto err_llb; 123 - } 132 + dev_priv->compressed_llb = compressed_llb; 124 133 125 - dev_priv->cfb_size = size; 134 + I915_WRITE(FBC_CFB_BASE, 135 + dev_priv->mm.stolen_base + compressed_fb->start); 136 + I915_WRITE(FBC_LL_BASE, 137 + dev_priv->mm.stolen_base + compressed_llb->start); 138 + } 126 139 127 140 dev_priv->compressed_fb = compressed_fb; 128 - if (HAS_PCH_SPLIT(dev)) 129 - I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start); 130 - else if (IS_GM45(dev)) { 131 - I915_WRITE(DPFC_CB_BASE, compressed_fb->start); 132 - } else { 133 - I915_WRITE(FBC_CFB_BASE, cfb_base); 134 - I915_WRITE(FBC_LL_BASE, ll_base); 135 - dev_priv->compressed_llb = compressed_llb; 136 - } 141 + dev_priv->cfb_size = size; 137 142 138 - DRM_DEBUG_KMS("FBC base 0x%08lx, ll 
base 0x%08lx, size %dM\n", 139 - cfb_base, ll_base, size >> 20); 140 - return; 143 + DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n", 144 + size); 141 145 142 - err_llb: 143 - drm_mm_put_block(compressed_llb); 146 + return 0; 147 + 144 148 err_fb: 145 149 drm_mm_put_block(compressed_fb); 146 150 err: 147 - dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; 148 - i915_warn_stolen(dev); 151 + return -ENOSPC; 149 152 } 150 153 151 - static void i915_cleanup_compression(struct drm_device *dev) 154 + int i915_gem_stolen_setup_compression(struct drm_device *dev, int size) 152 155 { 153 156 struct drm_i915_private *dev_priv = dev->dev_private; 154 157 155 - drm_mm_put_block(dev_priv->compressed_fb); 158 + if (dev_priv->mm.stolen_base == 0) 159 + return -ENODEV; 160 + 161 + if (size < dev_priv->cfb_size) 162 + return 0; 163 + 164 + /* Release any current block */ 165 + i915_gem_stolen_cleanup_compression(dev); 166 + 167 + return i915_setup_compression(dev, size); 168 + } 169 + 170 + void i915_gem_stolen_cleanup_compression(struct drm_device *dev) 171 + { 172 + struct drm_i915_private *dev_priv = dev->dev_private; 173 + 174 + if (dev_priv->cfb_size == 0) 175 + return; 176 + 177 + if (dev_priv->compressed_fb) 178 + drm_mm_put_block(dev_priv->compressed_fb); 179 + 156 180 if (dev_priv->compressed_llb) 157 181 drm_mm_put_block(dev_priv->compressed_llb); 182 + 183 + dev_priv->cfb_size = 0; 158 184 } 159 185 160 186 void i915_gem_cleanup_stolen(struct drm_device *dev) 161 187 { 162 - if (I915_HAS_FBC(dev) && i915_powersave) 163 - i915_cleanup_compression(dev); 188 + struct drm_i915_private *dev_priv = dev->dev_private; 189 + 190 + i915_gem_stolen_cleanup_compression(dev); 191 + drm_mm_takedown(&dev_priv->mm.stolen); 164 192 } 165 193 166 194 int i915_gem_init_stolen(struct drm_device *dev) 167 195 { 168 196 struct drm_i915_private *dev_priv = dev->dev_private; 169 - unsigned long prealloc_size = dev_priv->mm.gtt->stolen_size; 197 + 198 + 
dev_priv->mm.stolen_base = i915_stolen_to_physical(dev); 199 + if (dev_priv->mm.stolen_base == 0) 200 + return 0; 201 + 202 + DRM_DEBUG_KMS("found %d bytes of stolen memory at %08lx\n", 203 + dev_priv->mm.gtt->stolen_size, dev_priv->mm.stolen_base); 170 204 171 205 /* Basic memrange allocator for stolen space */ 172 - drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size); 173 - 174 - /* Try to set up FBC with a reasonable compressed buffer size */ 175 - if (I915_HAS_FBC(dev) && i915_powersave) { 176 - int cfb_size; 177 - 178 - /* Leave 1M for line length buffer & misc. */ 179 - 180 - /* Try to get a 32M buffer... */ 181 - if (prealloc_size > (36*1024*1024)) 182 - cfb_size = 32*1024*1024; 183 - else /* fall back to 7/8 of the stolen space */ 184 - cfb_size = prealloc_size * 7 / 8; 185 - i915_setup_compression(dev, cfb_size); 186 - } 206 + drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->mm.gtt->stolen_size); 187 207 188 208 return 0; 209 + } 210 + 211 + static struct sg_table * 212 + i915_pages_create_for_stolen(struct drm_device *dev, 213 + u32 offset, u32 size) 214 + { 215 + struct drm_i915_private *dev_priv = dev->dev_private; 216 + struct sg_table *st; 217 + struct scatterlist *sg; 218 + 219 + DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size); 220 + BUG_ON(offset > dev_priv->mm.gtt->stolen_size - size); 221 + 222 + /* We hide that we have no struct page backing our stolen object 223 + * by wrapping the contiguous physical allocation with a fake 224 + * dma mapping in a single scatterlist. 
225 + */ 226 + 227 + st = kmalloc(sizeof(*st), GFP_KERNEL); 228 + if (st == NULL) 229 + return NULL; 230 + 231 + if (sg_alloc_table(st, 1, GFP_KERNEL)) { 232 + kfree(st); 233 + return NULL; 234 + } 235 + 236 + sg = st->sgl; 237 + sg->offset = offset; 238 + sg->length = size; 239 + 240 + sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset; 241 + sg_dma_len(sg) = size; 242 + 243 + return st; 244 + } 245 + 246 + static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj) 247 + { 248 + BUG(); 249 + return -EINVAL; 250 + } 251 + 252 + static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj) 253 + { 254 + /* Should only be called during free */ 255 + sg_free_table(obj->pages); 256 + kfree(obj->pages); 257 + } 258 + 259 + static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = { 260 + .get_pages = i915_gem_object_get_pages_stolen, 261 + .put_pages = i915_gem_object_put_pages_stolen, 262 + }; 263 + 264 + static struct drm_i915_gem_object * 265 + _i915_gem_object_create_stolen(struct drm_device *dev, 266 + struct drm_mm_node *stolen) 267 + { 268 + struct drm_i915_gem_object *obj; 269 + 270 + obj = i915_gem_object_alloc(dev); 271 + if (obj == NULL) 272 + return NULL; 273 + 274 + if (drm_gem_private_object_init(dev, &obj->base, stolen->size)) 275 + goto cleanup; 276 + 277 + i915_gem_object_init(obj, &i915_gem_object_stolen_ops); 278 + 279 + obj->pages = i915_pages_create_for_stolen(dev, 280 + stolen->start, stolen->size); 281 + if (obj->pages == NULL) 282 + goto cleanup; 283 + 284 + obj->has_dma_mapping = true; 285 + obj->pages_pin_count = 1; 286 + obj->stolen = stolen; 287 + 288 + obj->base.write_domain = I915_GEM_DOMAIN_GTT; 289 + obj->base.read_domains = I915_GEM_DOMAIN_GTT; 290 + obj->cache_level = I915_CACHE_NONE; 291 + 292 + return obj; 293 + 294 + cleanup: 295 + i915_gem_object_free(obj); 296 + return NULL; 297 + } 298 + 299 + struct drm_i915_gem_object * 300 + i915_gem_object_create_stolen(struct 
drm_device *dev, u32 size) 301 + { 302 + struct drm_i915_private *dev_priv = dev->dev_private; 303 + struct drm_i915_gem_object *obj; 304 + struct drm_mm_node *stolen; 305 + 306 + if (dev_priv->mm.stolen_base == 0) 307 + return NULL; 308 + 309 + DRM_DEBUG_KMS("creating stolen object: size=%x\n", size); 310 + if (size == 0) 311 + return NULL; 312 + 313 + stolen = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0); 314 + if (stolen) 315 + stolen = drm_mm_get_block(stolen, size, 4096); 316 + if (stolen == NULL) 317 + return NULL; 318 + 319 + obj = _i915_gem_object_create_stolen(dev, stolen); 320 + if (obj) 321 + return obj; 322 + 323 + drm_mm_put_block(stolen); 324 + return NULL; 325 + } 326 + 327 + void 328 + i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) 329 + { 330 + if (obj->stolen) { 331 + drm_mm_put_block(obj->stolen); 332 + obj->stolen = NULL; 333 + } 189 334 }
+12
drivers/gpu/drm/i915/i915_gem_tiling.c
··· 396 396 /* we have to maintain this existing ABI... */ 397 397 args->stride = obj->stride; 398 398 args->tiling_mode = obj->tiling_mode; 399 + 400 + /* Try to preallocate memory required to save swizzling on put-pages */ 401 + if (i915_gem_object_needs_bit17_swizzle(obj)) { 402 + if (obj->bit_17 == NULL) { 403 + obj->bit_17 = kmalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT) * 404 + sizeof(long), GFP_KERNEL); 405 + } 406 + } else { 407 + kfree(obj->bit_17); 408 + obj->bit_17 = NULL; 409 + } 410 + 399 411 drm_gem_object_unreference(&obj->base); 400 412 mutex_unlock(&dev->struct_mutex); 401 413
+121 -41
drivers/gpu/drm/i915/i915_irq.c
··· 287 287 struct drm_mode_config *mode_config = &dev->mode_config; 288 288 struct intel_encoder *encoder; 289 289 290 + /* HPD irq before everything is fully set up. */ 291 + if (!dev_priv->enable_hotplug_processing) 292 + return; 293 + 290 294 mutex_lock(&mode_config->mutex); 291 295 DRM_DEBUG_KMS("running encoder hotplug functions\n"); 292 296 ··· 303 299 /* Just fire off a uevent and let userspace tell us what to do */ 304 300 drm_helper_hpd_irq_event(dev); 305 301 } 306 - 307 - /* defined intel_pm.c */ 308 - extern spinlock_t mchdev_lock; 309 302 310 303 static void ironlake_handle_rps_change(struct drm_device *dev) 311 304 { ··· 525 524 queue_work(dev_priv->wq, &dev_priv->rps.work); 526 525 } 527 526 527 + static void gmbus_irq_handler(struct drm_device *dev) 528 + { 529 + struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; 530 + 531 + wake_up_all(&dev_priv->gmbus_wait_queue); 532 + } 533 + 534 + static void dp_aux_irq_handler(struct drm_device *dev) 535 + { 536 + struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; 537 + 538 + wake_up_all(&dev_priv->gmbus_wait_queue); 539 + } 540 + 528 541 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 529 542 { 530 543 struct drm_device *dev = (struct drm_device *) arg; ··· 548 533 unsigned long irqflags; 549 534 int pipe; 550 535 u32 pipe_stats[I915_MAX_PIPES]; 551 - bool blc_event; 552 536 553 537 atomic_inc(&dev_priv->irq_received); 554 538 ··· 604 590 I915_READ(PORT_HOTPLUG_STAT); 605 591 } 606 592 607 - if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 608 - blc_event = true; 593 + if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 594 + gmbus_irq_handler(dev); 609 595 610 596 if (pm_iir & GEN6_PM_DEFERRED_EVENTS) 611 597 gen6_queue_rps_work(dev_priv, pm_iir); ··· 632 618 (pch_iir & SDE_AUDIO_POWER_MASK) >> 633 619 SDE_AUDIO_POWER_SHIFT); 634 620 621 + if (pch_iir & SDE_AUX_MASK) 622 + dp_aux_irq_handler(dev); 623 + 635 624 if (pch_iir & SDE_GMBUS) 636 - 
DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); 625 + gmbus_irq_handler(dev); 637 626 638 627 if (pch_iir & SDE_AUDIO_HDCP_MASK) 639 628 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); ··· 679 662 SDE_AUDIO_POWER_SHIFT_CPT); 680 663 681 664 if (pch_iir & SDE_AUX_MASK_CPT) 682 - DRM_DEBUG_DRIVER("AUX channel interrupt\n"); 665 + dp_aux_irq_handler(dev); 683 666 684 667 if (pch_iir & SDE_GMBUS_CPT) 685 - DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); 668 + gmbus_irq_handler(dev); 686 669 687 670 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 688 671 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); ··· 720 703 721 704 de_iir = I915_READ(DEIIR); 722 705 if (de_iir) { 706 + if (de_iir & DE_AUX_CHANNEL_A_IVB) 707 + dp_aux_irq_handler(dev); 708 + 723 709 if (de_iir & DE_GSE_IVB) 724 710 intel_opregion_gse_intr(dev); 725 711 ··· 778 758 struct drm_device *dev = (struct drm_device *) arg; 779 759 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 780 760 int ret = IRQ_NONE; 781 - u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; 761 + u32 de_iir, gt_iir, de_ier, pm_iir; 782 762 783 763 atomic_inc(&dev_priv->irq_received); 784 764 ··· 789 769 790 770 de_iir = I915_READ(DEIIR); 791 771 gt_iir = I915_READ(GTIIR); 792 - pch_iir = I915_READ(SDEIIR); 793 772 pm_iir = I915_READ(GEN6_PMIIR); 794 773 795 - if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && 796 - (!IS_GEN6(dev) || pm_iir == 0)) 774 + if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0)) 797 775 goto done; 798 776 799 777 ret = IRQ_HANDLED; ··· 800 782 ilk_gt_irq_handler(dev, dev_priv, gt_iir); 801 783 else 802 784 snb_gt_irq_handler(dev, dev_priv, gt_iir); 785 + 786 + if (de_iir & DE_AUX_CHANNEL_A) 787 + dp_aux_irq_handler(dev); 803 788 804 789 if (de_iir & DE_GSE) 805 790 intel_opregion_gse_intr(dev); ··· 825 804 826 805 /* check event from PCH */ 827 806 if (de_iir & DE_PCH_EVENT) { 807 + u32 pch_iir = I915_READ(SDEIIR); 808 + 828 809 if (HAS_PCH_CPT(dev)) 829 810 cpt_irq_handler(dev, pch_iir); 830 811 else 
831 812 ibx_irq_handler(dev, pch_iir); 813 + 814 + /* should clear PCH hotplug event before clear CPU irq */ 815 + I915_WRITE(SDEIIR, pch_iir); 832 816 } 833 817 834 818 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) ··· 842 816 if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) 843 817 gen6_queue_rps_work(dev_priv, pm_iir); 844 818 845 - /* should clear PCH hotplug event before clear CPU irq */ 846 - I915_WRITE(SDEIIR, pch_iir); 847 819 I915_WRITE(GTIIR, gt_iir); 848 820 I915_WRITE(DEIIR, de_iir); 849 821 I915_WRITE(GEN6_PMIIR, pm_iir); ··· 952 928 reloc_offset); 953 929 memcpy_fromio(d, s, PAGE_SIZE); 954 930 io_mapping_unmap_atomic(s); 931 + } else if (src->stolen) { 932 + unsigned long offset; 933 + 934 + offset = dev_priv->mm.stolen_base; 935 + offset += src->stolen->start; 936 + offset += i << PAGE_SHIFT; 937 + 938 + memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE); 955 939 } else { 956 940 struct page *page; 957 941 void *s; ··· 1106 1074 error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); 1107 1075 break; 1108 1076 1077 + default: 1078 + BUG(); 1109 1079 } 1110 1080 } 1111 1081 ··· 1888 1854 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1889 1855 /* enable kind of interrupts always enabled */ 1890 1856 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 1891 - DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; 1857 + DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 1858 + DE_AUX_CHANNEL_A; 1892 1859 u32 render_irqs; 1893 1860 u32 hotplug_mask; 1894 1861 ··· 1923 1888 hotplug_mask = (SDE_CRT_HOTPLUG_CPT | 1924 1889 SDE_PORTB_HOTPLUG_CPT | 1925 1890 SDE_PORTC_HOTPLUG_CPT | 1926 - SDE_PORTD_HOTPLUG_CPT); 1891 + SDE_PORTD_HOTPLUG_CPT | 1892 + SDE_GMBUS_CPT | 1893 + SDE_AUX_MASK_CPT); 1927 1894 } else { 1928 1895 hotplug_mask = (SDE_CRT_HOTPLUG | 1929 1896 SDE_PORTB_HOTPLUG | 1930 1897 SDE_PORTC_HOTPLUG | 1931 1898 SDE_PORTD_HOTPLUG | 1899 + SDE_GMBUS | 1932 1900 SDE_AUX_MASK); 1933 1901 } 1934 1902 ··· 1962 1924 
DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB | 1963 1925 DE_PLANEC_FLIP_DONE_IVB | 1964 1926 DE_PLANEB_FLIP_DONE_IVB | 1965 - DE_PLANEA_FLIP_DONE_IVB; 1927 + DE_PLANEA_FLIP_DONE_IVB | 1928 + DE_AUX_CHANNEL_A_IVB; 1966 1929 u32 render_irqs; 1967 1930 u32 hotplug_mask; 1968 1931 ··· 1992 1953 hotplug_mask = (SDE_CRT_HOTPLUG_CPT | 1993 1954 SDE_PORTB_HOTPLUG_CPT | 1994 1955 SDE_PORTC_HOTPLUG_CPT | 1995 - SDE_PORTD_HOTPLUG_CPT); 1956 + SDE_PORTD_HOTPLUG_CPT | 1957 + SDE_GMBUS_CPT | 1958 + SDE_AUX_MASK_CPT); 1996 1959 dev_priv->pch_irq_mask = ~hotplug_mask; 1997 1960 1998 1961 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); ··· 2011 1970 { 2012 1971 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2013 1972 u32 enable_mask; 2014 - u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 2015 1973 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; 2016 1974 u32 render_irqs; 2017 1975 u16 msid; ··· 2039 1999 msid |= (1<<14); 2040 2000 pci_write_config_word(dev_priv->dev->pdev, 0x98, msid); 2041 2001 2002 + I915_WRITE(PORT_HOTPLUG_EN, 0); 2003 + POSTING_READ(PORT_HOTPLUG_EN); 2004 + 2042 2005 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 2043 2006 I915_WRITE(VLV_IER, enable_mask); 2044 2007 I915_WRITE(VLV_IIR, 0xffffffff); ··· 2050 2007 POSTING_READ(VLV_IER); 2051 2008 2052 2009 i915_enable_pipestat(dev_priv, 0, pipestat_enable); 2010 + i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 2053 2011 i915_enable_pipestat(dev_priv, 1, pipestat_enable); 2054 2012 2055 2013 I915_WRITE(VLV_IIR, 0xffffffff); ··· 2071 2027 #endif 2072 2028 2073 2029 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 2030 + 2031 + return 0; 2032 + } 2033 + 2034 + static void valleyview_hpd_irq_setup(struct drm_device *dev) 2035 + { 2036 + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2037 + u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 2038 + 2074 2039 /* Note HDMI and DP share bits */ 2075 2040 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) 
2076 2041 hotplug_en |= HDMIB_HOTPLUG_INT_EN; ··· 2097 2044 } 2098 2045 2099 2046 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 2100 - 2101 - return 0; 2102 2047 } 2103 2048 2104 2049 static void valleyview_irq_uninstall(struct drm_device *dev) ··· 2326 2275 I915_USER_INTERRUPT; 2327 2276 2328 2277 if (I915_HAS_HOTPLUG(dev)) { 2278 + I915_WRITE(PORT_HOTPLUG_EN, 0); 2279 + POSTING_READ(PORT_HOTPLUG_EN); 2280 + 2329 2281 /* Enable in IER... */ 2330 2282 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 2331 2283 /* and unmask in IMR */ ··· 2339 2285 I915_WRITE(IER, enable_mask); 2340 2286 POSTING_READ(IER); 2341 2287 2288 + intel_opregion_enable_asle(dev); 2289 + 2290 + return 0; 2291 + } 2292 + 2293 + static void i915_hpd_irq_setup(struct drm_device *dev) 2294 + { 2295 + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2296 + u32 hotplug_en; 2297 + 2342 2298 if (I915_HAS_HOTPLUG(dev)) { 2343 - u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 2299 + hotplug_en = I915_READ(PORT_HOTPLUG_EN); 2344 2300 2345 2301 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) 2346 2302 hotplug_en |= HDMIB_HOTPLUG_INT_EN; ··· 2371 2307 2372 2308 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 2373 2309 } 2374 - 2375 - intel_opregion_enable_asle(dev); 2376 - 2377 - return 0; 2378 2310 } 2379 2311 2380 2312 static irqreturn_t i915_irq_handler(int irq, void *arg) ··· 2530 2470 static int i965_irq_postinstall(struct drm_device *dev) 2531 2471 { 2532 2472 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2533 - u32 hotplug_en; 2534 2473 u32 enable_mask; 2535 2474 u32 error_mask; 2536 2475 ··· 2550 2491 2551 2492 dev_priv->pipestat[0] = 0; 2552 2493 dev_priv->pipestat[1] = 0; 2494 + i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 2553 2495 2554 2496 /* 2555 2497 * Enable some error detection, note the instruction error mask ··· 2570 2510 I915_WRITE(IMR, dev_priv->irq_mask); 2571 2511 I915_WRITE(IER, enable_mask); 2572 2512 POSTING_READ(IER); 
2513 + 2514 + I915_WRITE(PORT_HOTPLUG_EN, 0); 2515 + POSTING_READ(PORT_HOTPLUG_EN); 2516 + 2517 + intel_opregion_enable_asle(dev); 2518 + 2519 + return 0; 2520 + } 2521 + 2522 + static void i965_hpd_irq_setup(struct drm_device *dev) 2523 + { 2524 + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2525 + u32 hotplug_en; 2573 2526 2574 2527 /* Note HDMI and DP share hotplug bits */ 2575 2528 hotplug_en = 0; ··· 2618 2545 /* Ignore TV since it's buggy */ 2619 2546 2620 2547 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 2621 - 2622 - intel_opregion_enable_asle(dev); 2623 - 2624 - return 0; 2625 2548 } 2626 2549 2627 2550 static irqreturn_t i965_irq_handler(int irq, void *arg) ··· 2713 2644 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 2714 2645 intel_opregion_asle_intr(dev); 2715 2646 2647 + if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 2648 + gmbus_irq_handler(dev); 2649 + 2716 2650 /* With MSI, interrupts are only generated when iir 2717 2651 * transitions from zero to nonzero. 
If another bit got 2718 2652 * set while we were handling the existing iir bits, then ··· 2771 2699 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 2772 2700 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 2773 2701 2702 + setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, 2703 + (unsigned long) dev); 2704 + 2705 + pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); 2706 + 2774 2707 dev->driver->get_vblank_counter = i915_get_vblank_counter; 2775 2708 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 2776 2709 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { ··· 2796 2719 dev->driver->irq_uninstall = valleyview_irq_uninstall; 2797 2720 dev->driver->enable_vblank = valleyview_enable_vblank; 2798 2721 dev->driver->disable_vblank = valleyview_disable_vblank; 2799 - } else if (IS_IVYBRIDGE(dev)) { 2722 + dev_priv->display.hpd_irq_setup = valleyview_hpd_irq_setup; 2723 + } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { 2800 2724 /* Share pre & uninstall handlers with ILK/SNB */ 2801 - dev->driver->irq_handler = ivybridge_irq_handler; 2802 - dev->driver->irq_preinstall = ironlake_irq_preinstall; 2803 - dev->driver->irq_postinstall = ivybridge_irq_postinstall; 2804 - dev->driver->irq_uninstall = ironlake_irq_uninstall; 2805 - dev->driver->enable_vblank = ivybridge_enable_vblank; 2806 - dev->driver->disable_vblank = ivybridge_disable_vblank; 2807 - } else if (IS_HASWELL(dev)) { 2808 - /* Share interrupts handling with IVB */ 2809 2725 dev->driver->irq_handler = ivybridge_irq_handler; 2810 2726 dev->driver->irq_preinstall = ironlake_irq_preinstall; 2811 2727 dev->driver->irq_postinstall = ivybridge_irq_postinstall; ··· 2823 2753 dev->driver->irq_postinstall = i915_irq_postinstall; 2824 2754 dev->driver->irq_uninstall = i915_irq_uninstall; 2825 2755 dev->driver->irq_handler = i915_irq_handler; 2756 + dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 2826 2757 } else { 2827 2758 
dev->driver->irq_preinstall = i965_irq_preinstall; 2828 2759 dev->driver->irq_postinstall = i965_irq_postinstall; 2829 2760 dev->driver->irq_uninstall = i965_irq_uninstall; 2830 2761 dev->driver->irq_handler = i965_irq_handler; 2762 + dev_priv->display.hpd_irq_setup = i965_hpd_irq_setup; 2831 2763 } 2832 2764 dev->driver->enable_vblank = i915_enable_vblank; 2833 2765 dev->driver->disable_vblank = i915_disable_vblank; 2834 2766 } 2767 + } 2768 + 2769 + void intel_hpd_init(struct drm_device *dev) 2770 + { 2771 + struct drm_i915_private *dev_priv = dev->dev_private; 2772 + 2773 + if (dev_priv->display.hpd_irq_setup) 2774 + dev_priv->display.hpd_irq_setup(dev); 2835 2775 }
+18 -40
drivers/gpu/drm/i915/i915_reg.h
··· 142 142 #define VGA_MSR_CGA_MODE (1<<0) 143 143 144 144 #define VGA_SR_INDEX 0x3c4 145 + #define SR01 1 145 146 #define VGA_SR_DATA 0x3c5 146 147 147 148 #define VGA_AR_INDEX 0x3c0 ··· 941 940 #define DPLL_LOCK_VLV (1<<15) 942 941 #define DPLL_INTEGRATED_CLOCK_VLV (1<<13) 943 942 944 - #define SRX_INDEX 0x3c4 945 - #define SRX_DATA 0x3c5 946 - #define SR01 1 947 - #define SR01_SCREEN_OFF (1<<5) 948 - 949 - #define PPCR 0x61204 950 - #define PPCR_ON (1<<0) 951 - 952 - #define DVOB 0x61140 953 - #define DVOB_ON (1<<31) 954 - #define DVOC 0x61160 955 - #define DVOC_ON (1<<31) 956 - #define LVDS 0x61180 957 - #define LVDS_ON (1<<31) 958 - 959 - /* Scratch pad debug 0 reg: 960 - */ 961 943 #define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000 962 944 /* 963 945 * The i830 generation, in LVDS mode, defines P1 as the bit number set within ··· 1877 1893 #define PFIT_SCALING_PILLAR (2 << 26) 1878 1894 #define PFIT_SCALING_LETTER (3 << 26) 1879 1895 #define PFIT_PGM_RATIOS 0x61234 1880 - #define PFIT_VERT_SCALE_MASK 0xfff00000 1881 - #define PFIT_HORIZ_SCALE_MASK 0x0000fff0 1882 1896 /* Pre-965 */ 1883 1897 #define PFIT_VERT_SCALE_SHIFT 20 1884 1898 #define PFIT_VERT_SCALE_MASK 0xfff00000 ··· 2650 2668 #define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */ 2651 2669 #define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */ 2652 2670 #define PIPECONF_CXSR_DOWNCLOCK (1<<16) 2653 - #define PIPECONF_BPP_MASK (0x000000e0) 2654 - #define PIPECONF_BPP_8 (0<<5) 2655 - #define PIPECONF_BPP_10 (1<<5) 2656 - #define PIPECONF_BPP_6 (2<<5) 2657 - #define PIPECONF_BPP_12 (3<<5) 2671 + #define PIPECONF_BPC_MASK (0x7 << 5) 2672 + #define PIPECONF_8BPC (0<<5) 2673 + #define PIPECONF_10BPC (1<<5) 2674 + #define PIPECONF_6BPC (2<<5) 2675 + #define PIPECONF_12BPC (3<<5) 2658 2676 #define PIPECONF_DITHER_EN (1<<4) 2659 2677 #define PIPECONF_DITHER_TYPE_MASK (0x0000000c) 2660 2678 #define PIPECONF_DITHER_TYPE_SP (0<<2) ··· 2698 2716 #define 
PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ 2699 2717 #define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1) 2700 2718 #define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0) 2701 - #define PIPE_BPC_MASK (7 << 5) /* Ironlake */ 2702 - #define PIPE_8BPC (0 << 5) 2703 - #define PIPE_10BPC (1 << 5) 2704 - #define PIPE_6BPC (2 << 5) 2705 - #define PIPE_12BPC (3 << 5) 2706 2719 2707 2720 #define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC) 2708 2721 #define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF) ··· 3555 3578 #define PORTD_PULSE_DURATION_6ms (2 << 18) 3556 3579 #define PORTD_PULSE_DURATION_100ms (3 << 18) 3557 3580 #define PORTD_PULSE_DURATION_MASK (3 << 18) 3558 - #define PORTD_HOTPLUG_NO_DETECT (0) 3559 - #define PORTD_HOTPLUG_SHORT_DETECT (1 << 16) 3560 - #define PORTD_HOTPLUG_LONG_DETECT (1 << 17) 3581 + #define PORTD_HOTPLUG_STATUS_MASK (0x3 << 16) 3582 + #define PORTD_HOTPLUG_NO_DETECT (0 << 16) 3583 + #define PORTD_HOTPLUG_SHORT_DETECT (1 << 16) 3584 + #define PORTD_HOTPLUG_LONG_DETECT (2 << 16) 3561 3585 #define PORTC_HOTPLUG_ENABLE (1 << 12) 3562 3586 #define PORTC_PULSE_DURATION_2ms (0) 3563 3587 #define PORTC_PULSE_DURATION_4_5ms (1 << 10) 3564 3588 #define PORTC_PULSE_DURATION_6ms (2 << 10) 3565 3589 #define PORTC_PULSE_DURATION_100ms (3 << 10) 3566 3590 #define PORTC_PULSE_DURATION_MASK (3 << 10) 3567 - #define PORTC_HOTPLUG_NO_DETECT (0) 3568 - #define PORTC_HOTPLUG_SHORT_DETECT (1 << 8) 3569 - #define PORTC_HOTPLUG_LONG_DETECT (1 << 9) 3591 + #define PORTC_HOTPLUG_STATUS_MASK (0x3 << 8) 3592 + #define PORTC_HOTPLUG_NO_DETECT (0 << 8) 3593 + #define PORTC_HOTPLUG_SHORT_DETECT (1 << 8) 3594 + #define PORTC_HOTPLUG_LONG_DETECT (2 << 8) 3570 3595 #define PORTB_HOTPLUG_ENABLE (1 << 4) 3571 3596 #define PORTB_PULSE_DURATION_2ms (0) 3572 3597 #define PORTB_PULSE_DURATION_4_5ms (1 << 2) 3573 3598 #define PORTB_PULSE_DURATION_6ms (2 << 2) 3574 3599 #define PORTB_PULSE_DURATION_100ms (3 << 2) 3575 3600 #define PORTB_PULSE_DURATION_MASK (3 << 2) 
3576 - #define PORTB_HOTPLUG_NO_DETECT (0) 3577 - #define PORTB_HOTPLUG_SHORT_DETECT (1 << 0) 3578 - #define PORTB_HOTPLUG_LONG_DETECT (1 << 1) 3601 + #define PORTB_HOTPLUG_STATUS_MASK (0x3 << 0) 3602 + #define PORTB_HOTPLUG_NO_DETECT (0 << 0) 3603 + #define PORTB_HOTPLUG_SHORT_DETECT (1 << 0) 3604 + #define PORTB_HOTPLUG_LONG_DETECT (2 << 0) 3579 3605 3580 3606 #define PCH_GPIOA 0xc5010 3581 3607 #define PCH_GPIOB 0xc5014 ··· 3797 3817 #define TRANS_FSYNC_DELAY_HB2 (1<<27) 3798 3818 #define TRANS_FSYNC_DELAY_HB3 (2<<27) 3799 3819 #define TRANS_FSYNC_DELAY_HB4 (3<<27) 3800 - #define TRANS_DP_AUDIO_ONLY (1<<26) 3801 - #define TRANS_DP_VIDEO_AUDIO (0<<26) 3802 3820 #define TRANS_INTERLACE_MASK (7<<21) 3803 3821 #define TRANS_PROGRESSIVE (0<<21) 3804 3822 #define TRANS_INTERLACED (3<<21)
+1 -1
drivers/gpu/drm/i915/intel_crt.c
··· 776 776 777 777 crt->base.disable = intel_disable_crt; 778 778 crt->base.enable = intel_enable_crt; 779 - if (IS_HASWELL(dev)) 779 + if (HAS_DDI(dev)) 780 780 crt->base.get_hw_state = intel_ddi_get_hw_state; 781 781 else 782 782 crt->base.get_hw_state = intel_crt_get_hw_state;
+13 -11
drivers/gpu/drm/i915/intel_ddi.c
··· 84 84 * in either FDI or DP modes only, as HDMI connections will work with both 85 85 * of those 86 86 */ 87 - void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, bool use_fdi_mode) 87 + static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, 88 + bool use_fdi_mode) 88 89 { 89 90 struct drm_i915_private *dev_priv = dev->dev_private; 90 91 u32 reg; ··· 115 114 { 116 115 int port; 117 116 118 - if (IS_HASWELL(dev)) { 119 - for (port = PORT_A; port < PORT_E; port++) 120 - intel_prepare_ddi_buffers(dev, port, false); 117 + if (!HAS_DDI(dev)) 118 + return; 121 119 122 - /* DDI E is the suggested one to work in FDI mode, so program is as such by 123 - * default. It will have to be re-programmed in case a digital DP output 124 - * will be detected on it 125 - */ 126 - intel_prepare_ddi_buffers(dev, PORT_E, true); 127 - } 120 + for (port = PORT_A; port < PORT_E; port++) 121 + intel_prepare_ddi_buffers(dev, port, false); 122 + 123 + /* DDI E is the suggested one to work in FDI mode, so program is as such 124 + * by default. It will have to be re-programmed in case a digital DP 125 + * output will be detected on it 126 + */ 127 + intel_prepare_ddi_buffers(dev, PORT_E, true); 128 128 } 129 129 130 130 static const long hsw_ddi_buf_ctl_values[] = { ··· 1071 1069 if (port == PORT_A) 1072 1070 cpu_transcoder = TRANSCODER_EDP; 1073 1071 else 1074 - cpu_transcoder = pipe; 1072 + cpu_transcoder = (enum transcoder) pipe; 1075 1073 1076 1074 tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); 1077 1075
+185 -463
drivers/gpu/drm/i915/intel_display.c
··· 416 416 417 417 u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg) 418 418 { 419 - unsigned long flags; 420 - u32 val = 0; 419 + WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock)); 421 420 422 - spin_lock_irqsave(&dev_priv->dpio_lock, flags); 423 421 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { 424 422 DRM_ERROR("DPIO idle wait timed out\n"); 425 - goto out_unlock; 423 + return 0; 426 424 } 427 425 428 426 I915_WRITE(DPIO_REG, reg); ··· 428 430 DPIO_BYTE); 429 431 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { 430 432 DRM_ERROR("DPIO read wait timed out\n"); 431 - goto out_unlock; 433 + return 0; 432 434 } 433 - val = I915_READ(DPIO_DATA); 434 435 435 - out_unlock: 436 - spin_unlock_irqrestore(&dev_priv->dpio_lock, flags); 437 - return val; 436 + return I915_READ(DPIO_DATA); 438 437 } 439 438 440 439 static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg, 441 440 u32 val) 442 441 { 443 - unsigned long flags; 442 + WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock)); 444 443 445 - spin_lock_irqsave(&dev_priv->dpio_lock, flags); 446 444 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { 447 445 DRM_ERROR("DPIO idle wait timed out\n"); 448 - goto out_unlock; 446 + return; 449 447 } 450 448 451 449 I915_WRITE(DPIO_DATA, val); ··· 450 456 DPIO_BYTE); 451 457 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) 452 458 DRM_ERROR("DPIO write wait timed out\n"); 453 - 454 - out_unlock: 455 - spin_unlock_irqrestore(&dev_priv->dpio_lock, flags); 456 459 } 457 460 458 461 static void vlv_init_dpio(struct drm_device *dev) ··· 463 472 POSTING_READ(DPIO_CTL); 464 473 } 465 474 466 - static int intel_dual_link_lvds_callback(const struct dmi_system_id *id) 467 - { 468 - DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident); 469 - return 1; 470 - } 471 - 472 - static const struct dmi_system_id intel_dual_link_lvds[] = { 473 - { 474 - .callback = 
intel_dual_link_lvds_callback, 475 - .ident = "Apple MacBook Pro (Core i5/i7 Series)", 476 - .matches = { 477 - DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), 478 - DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"), 479 - }, 480 - }, 481 - { } /* terminating entry */ 482 - }; 483 - 484 - static bool is_dual_link_lvds(struct drm_i915_private *dev_priv, 485 - unsigned int reg) 486 - { 487 - unsigned int val; 488 - 489 - /* use the module option value if specified */ 490 - if (i915_lvds_channel_mode > 0) 491 - return i915_lvds_channel_mode == 2; 492 - 493 - if (dmi_check_system(intel_dual_link_lvds)) 494 - return true; 495 - 496 - if (dev_priv->lvds_val) 497 - val = dev_priv->lvds_val; 498 - else { 499 - /* BIOS should set the proper LVDS register value at boot, but 500 - * in reality, it doesn't set the value when the lid is closed; 501 - * we need to check "the value to be set" in VBT when LVDS 502 - * register is uninitialized. 503 - */ 504 - val = I915_READ(reg); 505 - if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED))) 506 - val = dev_priv->bios_lvds_val; 507 - dev_priv->lvds_val = val; 508 - } 509 - return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP; 510 - } 511 - 512 475 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, 513 476 int refclk) 514 477 { 515 478 struct drm_device *dev = crtc->dev; 516 - struct drm_i915_private *dev_priv = dev->dev_private; 517 479 const intel_limit_t *limit; 518 480 519 481 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 520 - if (is_dual_link_lvds(dev_priv, PCH_LVDS)) { 482 + if (intel_is_dual_link_lvds(dev)) { 521 483 /* LVDS dual channel */ 522 484 if (refclk == 100000) 523 485 limit = &intel_limits_ironlake_dual_lvds_100m; ··· 494 550 static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) 495 551 { 496 552 struct drm_device *dev = crtc->dev; 497 - struct drm_i915_private *dev_priv = dev->dev_private; 498 553 const intel_limit_t *limit; 499 554 500 555 if (intel_pipe_has_type(crtc, 
INTEL_OUTPUT_LVDS)) { 501 - if (is_dual_link_lvds(dev_priv, LVDS)) 556 + if (intel_is_dual_link_lvds(dev)) 502 557 /* LVDS with dual channel */ 503 558 limit = &intel_limits_g4x_dual_channel_lvds; 504 559 else ··· 629 686 630 687 { 631 688 struct drm_device *dev = crtc->dev; 632 - struct drm_i915_private *dev_priv = dev->dev_private; 633 689 intel_clock_t clock; 634 690 int err = target; 635 691 636 - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 637 - (I915_READ(LVDS)) != 0) { 692 + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 638 693 /* 639 - * For LVDS, if the panel is on, just rely on its current 640 - * settings for dual-channel. We haven't figured out how to 641 - * reliably set up different single/dual channel state, if we 642 - * even can. 694 + * For LVDS just rely on its current settings for dual-channel. 695 + * We haven't figured out how to reliably set up different 696 + * single/dual channel state, if we even can. 643 697 */ 644 - if (is_dual_link_lvds(dev_priv, LVDS)) 698 + if (intel_is_dual_link_lvds(dev)) 645 699 clock.p2 = limit->p2.p2_fast; 646 700 else 647 701 clock.p2 = limit->p2.p2_slow; ··· 691 751 intel_clock_t *best_clock) 692 752 { 693 753 struct drm_device *dev = crtc->dev; 694 - struct drm_i915_private *dev_priv = dev->dev_private; 695 754 intel_clock_t clock; 696 755 int max_n; 697 756 bool found; ··· 705 766 lvds_reg = PCH_LVDS; 706 767 else 707 768 lvds_reg = LVDS; 708 - if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) == 709 - LVDS_CLKB_POWER_UP) 769 + if (intel_is_dual_link_lvds(dev)) 710 770 clock.p2 = limit->p2.p2_fast; 711 771 else 712 772 clock.p2 = limit->p2.p2_slow; ··· 985 1047 } 986 1048 } 987 1049 1050 + /* 1051 + * ibx_digital_port_connected - is the specified port connected? 1052 + * @dev_priv: i915 private structure 1053 + * @port: the port to test 1054 + * 1055 + * Returns true if @port is connected, false otherwise. 
1056 + */ 1057 + bool ibx_digital_port_connected(struct drm_i915_private *dev_priv, 1058 + struct intel_digital_port *port) 1059 + { 1060 + u32 bit; 1061 + 1062 + if (HAS_PCH_IBX(dev_priv->dev)) { 1063 + switch(port->port) { 1064 + case PORT_B: 1065 + bit = SDE_PORTB_HOTPLUG; 1066 + break; 1067 + case PORT_C: 1068 + bit = SDE_PORTC_HOTPLUG; 1069 + break; 1070 + case PORT_D: 1071 + bit = SDE_PORTD_HOTPLUG; 1072 + break; 1073 + default: 1074 + return true; 1075 + } 1076 + } else { 1077 + switch(port->port) { 1078 + case PORT_B: 1079 + bit = SDE_PORTB_HOTPLUG_CPT; 1080 + break; 1081 + case PORT_C: 1082 + bit = SDE_PORTC_HOTPLUG_CPT; 1083 + break; 1084 + case PORT_D: 1085 + bit = SDE_PORTD_HOTPLUG_CPT; 1086 + break; 1087 + default: 1088 + return true; 1089 + } 1090 + } 1091 + 1092 + return I915_READ(SDEISR) & bit; 1093 + } 1094 + 988 1095 static const char *state_string(bool enabled) 989 1096 { 990 1097 return enabled ? "on" : "off"; ··· 1108 1125 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1109 1126 pipe); 1110 1127 1111 - if (IS_HASWELL(dev_priv->dev)) { 1112 - /* On Haswell, DDI is used instead of FDI_TX_CTL */ 1128 + if (HAS_DDI(dev_priv->dev)) { 1129 + /* DDI does not have a specific FDI_TX register */ 1113 1130 reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); 1114 1131 val = I915_READ(reg); 1115 1132 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE); ··· 1153 1170 return; 1154 1171 1155 1172 /* On Haswell, DDI ports are responsible for the FDI PLL setup */ 1156 - if (IS_HASWELL(dev_priv->dev)) 1173 + if (HAS_DDI(dev_priv->dev)) 1157 1174 return; 1158 1175 1159 1176 reg = FDI_TX_CTL(pipe); ··· 1492 1509 intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, 1493 1510 enum intel_sbi_destination destination) 1494 1511 { 1495 - unsigned long flags; 1496 1512 u32 tmp; 1497 1513 1498 - spin_lock_irqsave(&dev_priv->dpio_lock, flags); 1499 - if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) { 1514 + 
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock)); 1515 + 1516 + if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 1517 + 100)) { 1500 1518 DRM_ERROR("timeout waiting for SBI to become ready\n"); 1501 - goto out_unlock; 1519 + return; 1502 1520 } 1503 1521 1504 1522 I915_WRITE(SBI_ADDR, (reg << 16)); ··· 1514 1530 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, 1515 1531 100)) { 1516 1532 DRM_ERROR("timeout waiting for SBI to complete write transaction\n"); 1517 - goto out_unlock; 1533 + return; 1518 1534 } 1519 - 1520 - out_unlock: 1521 - spin_unlock_irqrestore(&dev_priv->dpio_lock, flags); 1522 1535 } 1523 1536 1524 1537 static u32 1525 1538 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg, 1526 1539 enum intel_sbi_destination destination) 1527 1540 { 1528 - unsigned long flags; 1529 1541 u32 value = 0; 1542 + WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock)); 1530 1543 1531 - spin_lock_irqsave(&dev_priv->dpio_lock, flags); 1532 - if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) { 1544 + if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 1545 + 100)) { 1533 1546 DRM_ERROR("timeout waiting for SBI to become ready\n"); 1534 - goto out_unlock; 1547 + return 0; 1535 1548 } 1536 1549 1537 1550 I915_WRITE(SBI_ADDR, (reg << 16)); ··· 1542 1561 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, 1543 1562 100)) { 1544 1563 DRM_ERROR("timeout waiting for SBI to complete read transaction\n"); 1545 - goto out_unlock; 1564 + return 0; 1546 1565 } 1547 1566 1548 - value = I915_READ(SBI_DATA); 1549 - 1550 - out_unlock: 1551 - spin_unlock_irqrestore(&dev_priv->dpio_lock, flags); 1552 - return value; 1567 + return I915_READ(SBI_DATA); 1553 1568 } 1554 1569 1555 1570 /** ··· 1677 1700 * make the BPC in transcoder be consistent with 1678 1701 * that in pipeconf reg. 
1679 1702 */ 1680 - val &= ~PIPE_BPC_MASK; 1681 - val |= pipeconf_val & PIPE_BPC_MASK; 1703 + val &= ~PIPECONF_BPC_MASK; 1704 + val |= pipeconf_val & PIPECONF_BPC_MASK; 1682 1705 } 1683 1706 1684 1707 val &= ~TRANS_INTERLACE_MASK; ··· 1705 1728 BUG_ON(dev_priv->info->gen < 5); 1706 1729 1707 1730 /* FDI must be feeding us bits for PCH ports */ 1708 - assert_fdi_tx_enabled(dev_priv, cpu_transcoder); 1731 + assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder); 1709 1732 assert_fdi_rx_enabled(dev_priv, TRANSCODER_A); 1710 1733 1711 1734 /* Workaround: set timing override bit. */ ··· 1793 1816 { 1794 1817 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1795 1818 pipe); 1796 - enum transcoder pch_transcoder; 1819 + enum pipe pch_transcoder; 1797 1820 int reg; 1798 1821 u32 val; 1799 1822 1800 - if (IS_HASWELL(dev_priv->dev)) 1823 + if (HAS_PCH_LPT(dev_priv->dev)) 1801 1824 pch_transcoder = TRANSCODER_A; 1802 1825 else 1803 1826 pch_transcoder = pipe; ··· 1813 1836 if (pch_port) { 1814 1837 /* if driving the PCH, we need FDI enabled */ 1815 1838 assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder); 1816 - assert_fdi_tx_pll_enabled(dev_priv, cpu_transcoder); 1839 + assert_fdi_tx_pll_enabled(dev_priv, 1840 + (enum pipe) cpu_transcoder); 1817 1841 } 1818 1842 /* FIXME: assert CPU port conditions for SNB+ */ 1819 1843 } ··· 2328 2350 return 0; 2329 2351 } 2330 2352 2331 - static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock) 2332 - { 2333 - struct drm_device *dev = crtc->dev; 2334 - struct drm_i915_private *dev_priv = dev->dev_private; 2335 - u32 dpa_ctl; 2336 - 2337 - DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock); 2338 - dpa_ctl = I915_READ(DP_A); 2339 - dpa_ctl &= ~DP_PLL_FREQ_MASK; 2340 - 2341 - if (clock < 200000) { 2342 - u32 temp; 2343 - dpa_ctl |= DP_PLL_FREQ_160MHZ; 2344 - /* workaround for 160Mhz: 2345 - 1) program 0x4600c bits 15:0 = 0x8124 2346 - 2) program 0x46010 bit 0 = 1 2347 - 3) program 0x46034 bit 24 = 1 
2348 - 4) program 0x64000 bit 14 = 1 2349 - */ 2350 - temp = I915_READ(0x4600c); 2351 - temp &= 0xffff0000; 2352 - I915_WRITE(0x4600c, temp | 0x8124); 2353 - 2354 - temp = I915_READ(0x46010); 2355 - I915_WRITE(0x46010, temp | 1); 2356 - 2357 - temp = I915_READ(0x46034); 2358 - I915_WRITE(0x46034, temp | (1 << 24)); 2359 - } else { 2360 - dpa_ctl |= DP_PLL_FREQ_270MHZ; 2361 - } 2362 - I915_WRITE(DP_A, dpa_ctl); 2363 - 2364 - POSTING_READ(DP_A); 2365 - udelay(500); 2366 - } 2367 - 2368 2353 static void intel_fdi_normal_train(struct drm_crtc *crtc) 2369 2354 { 2370 2355 struct drm_device *dev = crtc->dev; ··· 2756 2815 temp = I915_READ(reg); 2757 2816 temp &= ~((0x7 << 19) | (0x7 << 16)); 2758 2817 temp |= (intel_crtc->fdi_lanes - 1) << 19; 2759 - temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; 2818 + temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 2760 2819 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); 2761 2820 2762 2821 POSTING_READ(reg); ··· 2769 2828 POSTING_READ(reg); 2770 2829 udelay(200); 2771 2830 2772 - /* On Haswell, the PLL configuration for ports and pipes is handled 2773 - * separately, as part of DDI setup */ 2774 - if (!IS_HASWELL(dev)) { 2775 - /* Enable CPU FDI TX PLL, always on for Ironlake */ 2776 - reg = FDI_TX_CTL(pipe); 2777 - temp = I915_READ(reg); 2778 - if ((temp & FDI_TX_PLL_ENABLE) == 0) { 2779 - I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); 2831 + /* Enable CPU FDI TX PLL, always on for Ironlake */ 2832 + reg = FDI_TX_CTL(pipe); 2833 + temp = I915_READ(reg); 2834 + if ((temp & FDI_TX_PLL_ENABLE) == 0) { 2835 + I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); 2780 2836 2781 - POSTING_READ(reg); 2782 - udelay(100); 2783 - } 2837 + POSTING_READ(reg); 2838 + udelay(100); 2784 2839 } 2785 2840 } 2786 2841 ··· 2826 2889 reg = FDI_RX_CTL(pipe); 2827 2890 temp = I915_READ(reg); 2828 2891 temp &= ~(0x7 << 16); 2829 - temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; 2892 + temp |= (I915_READ(PIPECONF(pipe)) & 
PIPECONF_BPC_MASK) << 11; 2830 2893 I915_WRITE(reg, temp & ~FDI_RX_ENABLE); 2831 2894 2832 2895 POSTING_READ(reg); ··· 2855 2918 } 2856 2919 /* BPC in FDI rx is consistent with that in PIPECONF */ 2857 2920 temp &= ~(0x07 << 16); 2858 - temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; 2921 + temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 2859 2922 I915_WRITE(reg, temp); 2860 2923 2861 2924 POSTING_READ(reg); ··· 2928 2991 struct drm_i915_private *dev_priv = dev->dev_private; 2929 2992 u32 divsel, phaseinc, auxdiv, phasedir = 0; 2930 2993 u32 temp; 2994 + 2995 + mutex_lock(&dev_priv->dpio_lock); 2931 2996 2932 2997 /* It is necessary to ungate the pixclk gate prior to programming 2933 2998 * the divisors, and gate it back when it is done. ··· 3005 3066 udelay(24); 3006 3067 3007 3068 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE); 3069 + 3070 + mutex_unlock(&dev_priv->dpio_lock); 3008 3071 } 3009 3072 3010 3073 /* ··· 3087 3146 if (HAS_PCH_CPT(dev) && 3088 3147 (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || 3089 3148 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { 3090 - u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5; 3149 + u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; 3091 3150 reg = TRANS_DP_CTL(pipe); 3092 3151 temp = I915_READ(reg); 3093 3152 temp &= ~(TRANS_DP_PORT_SEL_MASK | ··· 3564 3623 3565 3624 /* Stop saying we're using TRANSCODER_EDP because some other CRTC might 3566 3625 * start using it. 
*/ 3567 - intel_crtc->cpu_transcoder = intel_crtc->pipe; 3626 + intel_crtc->cpu_transcoder = (enum transcoder) intel_crtc->pipe; 3568 3627 3569 3628 intel_ddi_put_crtc_pll(crtc); 3570 3629 } ··· 3953 4012 return 133000; 3954 4013 } 3955 4014 3956 - struct fdi_m_n { 3957 - u32 tu; 3958 - u32 gmch_m; 3959 - u32 gmch_n; 3960 - u32 link_m; 3961 - u32 link_n; 3962 - }; 3963 - 3964 4015 static void 3965 - fdi_reduce_ratio(u32 *num, u32 *den) 4016 + intel_reduce_ratio(uint32_t *num, uint32_t *den) 3966 4017 { 3967 4018 while (*num > 0xffffff || *den > 0xffffff) { 3968 4019 *num >>= 1; ··· 3962 4029 } 3963 4030 } 3964 4031 3965 - static void 3966 - ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock, 3967 - int link_clock, struct fdi_m_n *m_n) 4032 + void 4033 + intel_link_compute_m_n(int bits_per_pixel, int nlanes, 4034 + int pixel_clock, int link_clock, 4035 + struct intel_link_m_n *m_n) 3968 4036 { 3969 - m_n->tu = 64; /* default size */ 3970 - 3971 - /* BUG_ON(pixel_clock > INT_MAX / 36); */ 4037 + m_n->tu = 64; 3972 4038 m_n->gmch_m = bits_per_pixel * pixel_clock; 3973 4039 m_n->gmch_n = link_clock * nlanes * 8; 3974 - fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); 3975 - 4040 + intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); 3976 4041 m_n->link_m = pixel_clock; 3977 4042 m_n->link_n = link_clock; 3978 - fdi_reduce_ratio(&m_n->link_m, &m_n->link_n); 4043 + intel_reduce_ratio(&m_n->link_m, &m_n->link_n); 3979 4044 } 3980 4045 3981 4046 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) ··· 4220 4289 } 4221 4290 } 4222 4291 4223 - static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock, 4224 - struct drm_display_mode *adjusted_mode) 4225 - { 4226 - struct drm_device *dev = crtc->dev; 4227 - struct drm_i915_private *dev_priv = dev->dev_private; 4228 - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4229 - int pipe = intel_crtc->pipe; 4230 - u32 temp; 4231 - 4232 - temp = I915_READ(LVDS); 4233 - temp |= 
LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; 4234 - if (pipe == 1) { 4235 - temp |= LVDS_PIPEB_SELECT; 4236 - } else { 4237 - temp &= ~LVDS_PIPEB_SELECT; 4238 - } 4239 - /* set the corresponsding LVDS_BORDER bit */ 4240 - temp |= dev_priv->lvds_border_bits; 4241 - /* Set the B0-B3 data pairs corresponding to whether we're going to 4242 - * set the DPLLs for dual-channel mode or not. 4243 - */ 4244 - if (clock->p2 == 7) 4245 - temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; 4246 - else 4247 - temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); 4248 - 4249 - /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) 4250 - * appropriately here, but we need to look more thoroughly into how 4251 - * panels behave in the two modes. 4252 - */ 4253 - /* set the dithering flag on LVDS as needed */ 4254 - if (INTEL_INFO(dev)->gen >= 4) { 4255 - if (dev_priv->lvds_dither) 4256 - temp |= LVDS_ENABLE_DITHER; 4257 - else 4258 - temp &= ~LVDS_ENABLE_DITHER; 4259 - } 4260 - temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); 4261 - if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) 4262 - temp |= LVDS_HSYNC_POLARITY; 4263 - if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) 4264 - temp |= LVDS_VSYNC_POLARITY; 4265 - I915_WRITE(LVDS, temp); 4266 - } 4267 - 4268 4292 static void vlv_update_pll(struct drm_crtc *crtc, 4269 4293 struct drm_display_mode *mode, 4270 4294 struct drm_display_mode *adjusted_mode, ··· 4234 4348 u32 bestn, bestm1, bestm2, bestp1, bestp2; 4235 4349 bool is_sdvo; 4236 4350 u32 temp; 4351 + 4352 + mutex_lock(&dev_priv->dpio_lock); 4237 4353 4238 4354 is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) || 4239 4355 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI); ··· 4320 4432 temp |= (1 << 21); 4321 4433 intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp); 4322 4434 } 4435 + 4436 + mutex_unlock(&dev_priv->dpio_lock); 4323 4437 } 4324 4438 4325 4439 static void i9xx_update_pll(struct drm_crtc *crtc, ··· 4333 4443 struct drm_device *dev = crtc->dev; 4334 4444 
struct drm_i915_private *dev_priv = dev->dev_private; 4335 4445 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4446 + struct intel_encoder *encoder; 4336 4447 int pipe = intel_crtc->pipe; 4337 4448 u32 dpll; 4338 4449 bool is_sdvo; ··· 4402 4511 POSTING_READ(DPLL(pipe)); 4403 4512 udelay(150); 4404 4513 4405 - /* The LVDS pin pair needs to be on before the DPLLs are enabled. 4406 - * This is an exception to the general rule that mode_set doesn't turn 4407 - * things on. 4408 - */ 4409 - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 4410 - intel_update_lvds(crtc, clock, adjusted_mode); 4514 + for_each_encoder_on_crtc(dev, crtc, encoder) 4515 + if (encoder->pre_pll_enable) 4516 + encoder->pre_pll_enable(encoder); 4411 4517 4412 4518 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) 4413 4519 intel_dp_set_m_n(crtc, mode, adjusted_mode); ··· 4443 4555 struct drm_device *dev = crtc->dev; 4444 4556 struct drm_i915_private *dev_priv = dev->dev_private; 4445 4557 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4558 + struct intel_encoder *encoder; 4446 4559 int pipe = intel_crtc->pipe; 4447 4560 u32 dpll; 4448 4561 ··· 4477 4588 POSTING_READ(DPLL(pipe)); 4478 4589 udelay(150); 4479 4590 4480 - /* The LVDS pin pair needs to be on before the DPLLs are enabled. 4481 - * This is an exception to the general rule that mode_set doesn't turn 4482 - * things on. 
4483 - */ 4484 - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 4485 - intel_update_lvds(crtc, clock, adjusted_mode); 4591 + for_each_encoder_on_crtc(dev, crtc, encoder) 4592 + if (encoder->pre_pll_enable) 4593 + encoder->pre_pll_enable(encoder); 4486 4594 4487 4595 I915_WRITE(DPLL(pipe), dpll); 4488 4596 ··· 4669 4783 } 4670 4784 4671 4785 /* default to 8bpc */ 4672 - pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN); 4786 + pipeconf &= ~(PIPECONF_BPC_MASK | PIPECONF_DITHER_EN); 4673 4787 if (is_dp) { 4674 4788 if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { 4675 - pipeconf |= PIPECONF_BPP_6 | 4789 + pipeconf |= PIPECONF_6BPC | 4676 4790 PIPECONF_DITHER_EN | 4677 4791 PIPECONF_DITHER_TYPE_SP; 4678 4792 } ··· 4680 4794 4681 4795 if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) { 4682 4796 if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { 4683 - pipeconf |= PIPECONF_BPP_6 | 4797 + pipeconf |= PIPECONF_6BPC | 4684 4798 PIPECONF_ENABLE | 4685 4799 I965_PIPECONF_ACTIVE; 4686 4800 } ··· 5063 5177 5064 5178 val = I915_READ(PIPECONF(pipe)); 5065 5179 5066 - val &= ~PIPE_BPC_MASK; 5180 + val &= ~PIPECONF_BPC_MASK; 5067 5181 switch (intel_crtc->bpp) { 5068 5182 case 18: 5069 - val |= PIPE_6BPC; 5183 + val |= PIPECONF_6BPC; 5070 5184 break; 5071 5185 case 24: 5072 - val |= PIPE_8BPC; 5186 + val |= PIPECONF_8BPC; 5073 5187 break; 5074 5188 case 30: 5075 - val |= PIPE_10BPC; 5189 + val |= PIPECONF_10BPC; 5076 5190 break; 5077 5191 case 36: 5078 - val |= PIPE_12BPC; 5192 + val |= PIPECONF_12BPC; 5079 5193 break; 5080 5194 default: 5081 5195 /* Case prevented by intel_choose_pipe_bpp_dither. 
*/ ··· 5286 5400 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5287 5401 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; 5288 5402 struct intel_encoder *intel_encoder, *edp_encoder = NULL; 5289 - struct fdi_m_n m_n = {0}; 5403 + struct intel_link_m_n m_n = {0}; 5290 5404 int target_clock, pixel_multiplier, lane, link_bw; 5291 5405 bool is_dp = false, is_cpu_edp = false; 5292 5406 ··· 5338 5452 5339 5453 if (pixel_multiplier > 1) 5340 5454 link_bw *= pixel_multiplier; 5341 - ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, 5342 - &m_n); 5455 + intel_link_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, &m_n); 5343 5456 5344 5457 I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m); 5345 5458 I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n); ··· 5391 5506 if (is_lvds) { 5392 5507 if ((intel_panel_use_ssc(dev_priv) && 5393 5508 dev_priv->lvds_ssc_freq == 100) || 5394 - (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP) 5509 + intel_is_dual_link_lvds(dev)) 5395 5510 factor = 25; 5396 5511 } else if (is_sdvo && is_tv) 5397 5512 factor = 20; ··· 5466 5581 bool ok, has_reduced_clock = false; 5467 5582 bool is_lvds = false, is_dp = false, is_cpu_edp = false; 5468 5583 struct intel_encoder *encoder; 5469 - u32 temp; 5470 5584 int ret; 5471 5585 bool dither, fdi_config_ok; 5472 5586 ··· 5529 5645 } else 5530 5646 intel_put_pch_pll(intel_crtc); 5531 5647 5532 - /* The LVDS pin pair needs to be on before the DPLLs are enabled. 5533 - * This is an exception to the general rule that mode_set doesn't turn 5534 - * things on. 
5535 - */ 5536 - if (is_lvds) { 5537 - temp = I915_READ(PCH_LVDS); 5538 - temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; 5539 - if (HAS_PCH_CPT(dev)) { 5540 - temp &= ~PORT_TRANS_SEL_MASK; 5541 - temp |= PORT_TRANS_SEL_CPT(pipe); 5542 - } else { 5543 - if (pipe == 1) 5544 - temp |= LVDS_PIPEB_SELECT; 5545 - else 5546 - temp &= ~LVDS_PIPEB_SELECT; 5547 - } 5548 - 5549 - /* set the corresponsding LVDS_BORDER bit */ 5550 - temp |= dev_priv->lvds_border_bits; 5551 - /* Set the B0-B3 data pairs corresponding to whether we're going to 5552 - * set the DPLLs for dual-channel mode or not. 5553 - */ 5554 - if (clock.p2 == 7) 5555 - temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; 5556 - else 5557 - temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); 5558 - 5559 - /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) 5560 - * appropriately here, but we need to look more thoroughly into how 5561 - * panels behave in the two modes. 5562 - */ 5563 - temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); 5564 - if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) 5565 - temp |= LVDS_HSYNC_POLARITY; 5566 - if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) 5567 - temp |= LVDS_VSYNC_POLARITY; 5568 - I915_WRITE(PCH_LVDS, temp); 5569 - } 5570 - 5571 - if (is_dp && !is_cpu_edp) { 5648 + if (is_dp && !is_cpu_edp) 5572 5649 intel_dp_set_m_n(crtc, mode, adjusted_mode); 5573 - } else { 5574 - /* For non-DP output, clear any trans DP clock recovery setting.*/ 5575 - I915_WRITE(TRANSDATA_M1(pipe), 0); 5576 - I915_WRITE(TRANSDATA_N1(pipe), 0); 5577 - I915_WRITE(TRANSDPLINK_M1(pipe), 0); 5578 - I915_WRITE(TRANSDPLINK_N1(pipe), 0); 5579 - } 5650 + 5651 + for_each_encoder_on_crtc(dev, crtc, encoder) 5652 + if (encoder->pre_pll_enable) 5653 + encoder->pre_pll_enable(encoder); 5580 5654 5581 5655 if (intel_crtc->pch_pll) { 5582 5656 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll); ··· 5569 5727 5570 5728 fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc); 5571 5729 5572 - if (is_cpu_edp) 
5573 - ironlake_set_pll_edp(crtc, adjusted_mode->clock); 5574 - 5575 5730 ironlake_set_pipeconf(crtc, adjusted_mode, dither); 5576 5731 5577 5732 intel_wait_for_vblank(dev, pipe); ··· 5598 5759 int pipe = intel_crtc->pipe; 5599 5760 int plane = intel_crtc->plane; 5600 5761 int num_connectors = 0; 5601 - intel_clock_t clock, reduced_clock; 5602 - u32 dpll = 0, fp = 0, fp2 = 0; 5603 - bool ok, has_reduced_clock = false; 5604 - bool is_lvds = false, is_dp = false, is_cpu_edp = false; 5762 + bool is_dp = false, is_cpu_edp = false; 5605 5763 struct intel_encoder *encoder; 5606 - u32 temp; 5607 5764 int ret; 5608 5765 bool dither; 5609 5766 5610 5767 for_each_encoder_on_crtc(dev, crtc, encoder) { 5611 5768 switch (encoder->type) { 5612 - case INTEL_OUTPUT_LVDS: 5613 - is_lvds = true; 5614 - break; 5615 5769 case INTEL_OUTPUT_DISPLAYPORT: 5616 5770 is_dp = true; 5617 5771 break; ··· 5638 5806 if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock)) 5639 5807 return -EINVAL; 5640 5808 5641 - if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { 5642 - ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock, 5643 - &has_reduced_clock, 5644 - &reduced_clock); 5645 - if (!ok) { 5646 - DRM_ERROR("Couldn't find PLL settings for mode!\n"); 5647 - return -EINVAL; 5648 - } 5649 - } 5650 - 5651 5809 /* Ensure that the cursor is valid for the new mode before changing... 
*/ 5652 5810 intel_crtc_update_cursor(crtc, true); 5653 5811 5654 5812 /* determine panel color depth */ 5655 5813 dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp, 5656 5814 adjusted_mode); 5657 - if (is_lvds && dev_priv->lvds_dither) 5658 - dither = true; 5659 5815 5660 5816 DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); 5661 5817 drm_mode_debug_printmodeline(mode); 5662 5818 5663 - if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { 5664 - fp = clock.n << 16 | clock.m1 << 8 | clock.m2; 5665 - if (has_reduced_clock) 5666 - fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | 5667 - reduced_clock.m2; 5668 - 5669 - dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, 5670 - fp); 5671 - 5672 - /* CPU eDP is the only output that doesn't need a PCH PLL of its 5673 - * own on pre-Haswell/LPT generation */ 5674 - if (!is_cpu_edp) { 5675 - struct intel_pch_pll *pll; 5676 - 5677 - pll = intel_get_pch_pll(intel_crtc, dpll, fp); 5678 - if (pll == NULL) { 5679 - DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n", 5680 - pipe); 5681 - return -EINVAL; 5682 - } 5683 - } else 5684 - intel_put_pch_pll(intel_crtc); 5685 - 5686 - /* The LVDS pin pair needs to be on before the DPLLs are 5687 - * enabled. This is an exception to the general rule that 5688 - * mode_set doesn't turn things on. 5689 - */ 5690 - if (is_lvds) { 5691 - temp = I915_READ(PCH_LVDS); 5692 - temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; 5693 - if (HAS_PCH_CPT(dev)) { 5694 - temp &= ~PORT_TRANS_SEL_MASK; 5695 - temp |= PORT_TRANS_SEL_CPT(pipe); 5696 - } else { 5697 - if (pipe == 1) 5698 - temp |= LVDS_PIPEB_SELECT; 5699 - else 5700 - temp &= ~LVDS_PIPEB_SELECT; 5701 - } 5702 - 5703 - /* set the corresponsding LVDS_BORDER bit */ 5704 - temp |= dev_priv->lvds_border_bits; 5705 - /* Set the B0-B3 data pairs corresponding to whether 5706 - * we're going to set the DPLLs for dual-channel mode or 5707 - * not. 
5708 - */ 5709 - if (clock.p2 == 7) 5710 - temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; 5711 - else 5712 - temp &= ~(LVDS_B0B3_POWER_UP | 5713 - LVDS_CLKB_POWER_UP); 5714 - 5715 - /* It would be nice to set 24 vs 18-bit mode 5716 - * (LVDS_A3_POWER_UP) appropriately here, but we need to 5717 - * look more thoroughly into how panels behave in the 5718 - * two modes. 5719 - */ 5720 - temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); 5721 - if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) 5722 - temp |= LVDS_HSYNC_POLARITY; 5723 - if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) 5724 - temp |= LVDS_VSYNC_POLARITY; 5725 - I915_WRITE(PCH_LVDS, temp); 5726 - } 5727 - } 5728 - 5729 - if (is_dp && !is_cpu_edp) { 5819 + if (is_dp && !is_cpu_edp) 5730 5820 intel_dp_set_m_n(crtc, mode, adjusted_mode); 5731 - } else { 5732 - if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { 5733 - /* For non-DP output, clear any trans DP clock recovery 5734 - * setting.*/ 5735 - I915_WRITE(TRANSDATA_M1(pipe), 0); 5736 - I915_WRITE(TRANSDATA_N1(pipe), 0); 5737 - I915_WRITE(TRANSDPLINK_M1(pipe), 0); 5738 - I915_WRITE(TRANSDPLINK_N1(pipe), 0); 5739 - } 5740 - } 5741 5821 5742 5822 intel_crtc->lowfreq_avail = false; 5743 - if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { 5744 - if (intel_crtc->pch_pll) { 5745 - I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll); 5746 - 5747 - /* Wait for the clocks to stabilize. */ 5748 - POSTING_READ(intel_crtc->pch_pll->pll_reg); 5749 - udelay(150); 5750 - 5751 - /* The pixel multiplier can only be updated once the 5752 - * DPLL is enabled and the clocks are stable. 5753 - * 5754 - * So write it again. 
5755 - */ 5756 - I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll); 5757 - } 5758 - 5759 - if (intel_crtc->pch_pll) { 5760 - if (is_lvds && has_reduced_clock && i915_powersave) { 5761 - I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2); 5762 - intel_crtc->lowfreq_avail = true; 5763 - } else { 5764 - I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp); 5765 - } 5766 - } 5767 - } 5768 5823 5769 5824 intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); 5770 5825 5771 5826 if (!is_dp || is_cpu_edp) 5772 5827 ironlake_set_m_n(crtc, mode, adjusted_mode); 5773 - 5774 - if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) 5775 - if (is_cpu_edp) 5776 - ironlake_set_pll_edp(crtc, adjusted_mode->clock); 5777 5828 5778 5829 haswell_set_pipeconf(crtc, adjusted_mode, dither); 5779 5830 ··· 6474 6759 return false; 6475 6760 } 6476 6761 6477 - if (!intel_set_mode(crtc, mode, 0, 0, fb)) { 6762 + if (intel_set_mode(crtc, mode, 0, 0, fb)) { 6478 6763 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); 6479 6764 if (old->release_fb) 6480 6765 old->release_fb->funcs->destroy(old->release_fb); ··· 6824 7109 6825 7110 obj = work->old_fb_obj; 6826 7111 6827 - atomic_clear_mask(1 << intel_crtc->plane, 6828 - &obj->pending_flip.counter); 6829 7112 wake_up(&dev_priv->pending_flip_queue); 6830 7113 6831 7114 queue_work(dev_priv->wq, &work->work); ··· 7187 7474 7188 7475 work->enable_stall_check = true; 7189 7476 7190 - /* Block clients from rendering to the new back buffer until 7191 - * the flip occurs and the object is no longer visible. 
7192 - */ 7193 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); 7194 7477 atomic_inc(&intel_crtc->unpin_work_count); 7195 7478 7196 7479 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj); ··· 7203 7494 7204 7495 cleanup_pending: 7205 7496 atomic_dec(&intel_crtc->unpin_work_count); 7206 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); 7207 7497 drm_gem_object_unreference(&work->old_fb_obj->base); 7208 7498 drm_gem_object_unreference(&obj->base); 7209 7499 mutex_unlock(&dev->struct_mutex); ··· 7612 7904 } 7613 7905 } 7614 7906 7615 - bool intel_set_mode(struct drm_crtc *crtc, 7616 - struct drm_display_mode *mode, 7617 - int x, int y, struct drm_framebuffer *fb) 7907 + int intel_set_mode(struct drm_crtc *crtc, 7908 + struct drm_display_mode *mode, 7909 + int x, int y, struct drm_framebuffer *fb) 7618 7910 { 7619 7911 struct drm_device *dev = crtc->dev; 7620 7912 drm_i915_private_t *dev_priv = dev->dev_private; 7621 - struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode; 7913 + struct drm_display_mode *adjusted_mode, *saved_mode, *saved_hwmode; 7622 7914 struct intel_crtc *intel_crtc; 7623 7915 unsigned disable_pipes, prepare_pipes, modeset_pipes; 7624 - bool ret = true; 7916 + int ret = 0; 7917 + 7918 + saved_mode = kmalloc(2 * sizeof(*saved_mode), GFP_KERNEL); 7919 + if (!saved_mode) 7920 + return -ENOMEM; 7921 + saved_hwmode = saved_mode + 1; 7625 7922 7626 7923 intel_modeset_affected_pipes(crtc, &modeset_pipes, 7627 7924 &prepare_pipes, &disable_pipes); ··· 7637 7924 for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc) 7638 7925 intel_crtc_disable(&intel_crtc->base); 7639 7926 7640 - saved_hwmode = crtc->hwmode; 7641 - saved_mode = crtc->mode; 7927 + *saved_hwmode = crtc->hwmode; 7928 + *saved_mode = crtc->mode; 7642 7929 7643 7930 /* Hack: Because we don't (yet) support global modeset on multiple 7644 7931 * crtcs, we don't keep track of the new mode for more than one crtc. 
··· 7649 7936 if (modeset_pipes) { 7650 7937 adjusted_mode = intel_modeset_adjusted_mode(crtc, mode); 7651 7938 if (IS_ERR(adjusted_mode)) { 7652 - return false; 7939 + ret = PTR_ERR(adjusted_mode); 7940 + goto out; 7653 7941 } 7654 7942 } 7655 7943 ··· 7676 7962 * on the DPLL. 7677 7963 */ 7678 7964 for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) { 7679 - ret = !intel_crtc_mode_set(&intel_crtc->base, 7680 - mode, adjusted_mode, 7681 - x, y, fb); 7682 - if (!ret) 7683 - goto done; 7965 + ret = intel_crtc_mode_set(&intel_crtc->base, 7966 + mode, adjusted_mode, 7967 + x, y, fb); 7968 + if (ret) 7969 + goto done; 7684 7970 } 7685 7971 7686 7972 /* Now enable the clocks, plane, pipe, and connectors that we set up. */ ··· 7701 7987 /* FIXME: add subpixel order */ 7702 7988 done: 7703 7989 drm_mode_destroy(dev, adjusted_mode); 7704 - if (!ret && crtc->enabled) { 7705 - crtc->hwmode = saved_hwmode; 7706 - crtc->mode = saved_mode; 7990 + if (ret && crtc->enabled) { 7991 + crtc->hwmode = *saved_hwmode; 7992 + crtc->mode = *saved_mode; 7707 7993 } else { 7708 7994 intel_modeset_check_state(dev); 7709 7995 } 7710 7996 7997 + out: 7998 + kfree(saved_mode); 7711 7999 return ret; 8000 + } 8001 + 8002 + void intel_crtc_restore_mode(struct drm_crtc *crtc) 8003 + { 8004 + intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb); 7712 8005 } 7713 8006 7714 8007 #undef for_each_intel_crtc_masked ··· 7983 8262 drm_mode_debug_printmodeline(set->mode); 7984 8263 } 7985 8264 7986 - if (!intel_set_mode(set->crtc, set->mode, 7987 - set->x, set->y, set->fb)) { 7988 - DRM_ERROR("failed to set mode on [CRTC:%d]\n", 7989 - set->crtc->base.id); 7990 - ret = -EINVAL; 8265 + ret = intel_set_mode(set->crtc, set->mode, 8266 + set->x, set->y, set->fb); 8267 + if (ret) { 8268 + DRM_ERROR("failed to set mode on [CRTC:%d], err = %d\n", 8269 + set->crtc->base.id, ret); 7991 8270 goto fail; 7992 8271 } 7993 8272 } else if (config->fb_changed) { ··· 8004 8283 8005 8284 /* Try to restore 
the config */ 8006 8285 if (config->mode_changed && 8007 - !intel_set_mode(save_set.crtc, save_set.mode, 8008 - save_set.x, save_set.y, save_set.fb)) 8286 + intel_set_mode(save_set.crtc, save_set.mode, 8287 + save_set.x, save_set.y, save_set.fb)) 8009 8288 DRM_ERROR("failed to restore config after modeset failure\n"); 8010 8289 8011 8290 out_config: ··· 8024 8303 8025 8304 static void intel_cpu_pll_init(struct drm_device *dev) 8026 8305 { 8027 - if (IS_HASWELL(dev)) 8306 + if (HAS_DDI(dev)) 8028 8307 intel_ddi_pll_init(dev); 8029 8308 } 8030 8309 ··· 8160 8439 I915_WRITE(PFIT_CONTROL, 0); 8161 8440 } 8162 8441 8163 - if (!(IS_HASWELL(dev) && 8164 - (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES))) 8442 + if (!(HAS_DDI(dev) && (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES))) 8165 8443 intel_crt_init(dev); 8166 8444 8167 - if (IS_HASWELL(dev)) { 8445 + if (HAS_DDI(dev)) { 8168 8446 int found; 8169 8447 8170 8448 /* Haswell uses DDI functions to detect digital outputs */ ··· 8406 8686 struct drm_i915_private *dev_priv = dev->dev_private; 8407 8687 8408 8688 /* We always want a DPMS function */ 8409 - if (IS_HASWELL(dev)) { 8689 + if (HAS_DDI(dev)) { 8410 8690 dev_priv->display.crtc_mode_set = haswell_crtc_mode_set; 8411 8691 dev_priv->display.crtc_enable = haswell_crtc_enable; 8412 8692 dev_priv->display.crtc_disable = haswell_crtc_disable; ··· 8468 8748 } else if (IS_HASWELL(dev)) { 8469 8749 dev_priv->display.fdi_link_train = hsw_fdi_link_train; 8470 8750 dev_priv->display.write_eld = haswell_write_eld; 8471 - } else 8472 - dev_priv->display.update_wm = NULL; 8751 + } 8473 8752 } else if (IS_G4X(dev)) { 8474 8753 dev_priv->display.write_eld = g4x_write_eld; 8475 8754 } ··· 8701 8982 /* Just disable it once at startup */ 8702 8983 i915_disable_vga(dev); 8703 8984 intel_setup_outputs(dev); 8985 + 8986 + /* Just in case the BIOS is doing something questionable. 
*/ 8987 + intel_disable_fbc(dev); 8704 8988 } 8705 8989 8706 8990 static void ··· 8914 9192 struct intel_encoder *encoder; 8915 9193 struct intel_connector *connector; 8916 9194 8917 - if (IS_HASWELL(dev)) { 9195 + if (HAS_DDI(dev)) { 8918 9196 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); 8919 9197 8920 9198 if (tmp & TRANS_DDI_FUNC_ENABLE) { ··· 8955 9233 crtc->active ? "enabled" : "disabled"); 8956 9234 } 8957 9235 8958 - if (IS_HASWELL(dev)) 9236 + if (HAS_DDI(dev)) 8959 9237 intel_ddi_setup_hw_pll_state(dev); 8960 9238 8961 9239 list_for_each_entry(encoder, &dev->mode_config.encoder_list, ··· 9006 9284 9007 9285 if (force_restore) { 9008 9286 for_each_pipe(pipe) { 9009 - crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 9010 - intel_set_mode(&crtc->base, &crtc->base.mode, 9011 - crtc->base.x, crtc->base.y, crtc->base.fb); 9287 + intel_crtc_restore_mode(dev_priv->pipe_to_crtc_mapping[pipe]); 9012 9288 } 9013 9289 9014 9290 i915_redisable_vga(dev); ··· 9070 9350 flush_scheduled_work(); 9071 9351 9072 9352 drm_mode_config_cleanup(dev); 9353 + 9354 + intel_cleanup_overlay(dev); 9073 9355 } 9074 9356 9075 9357 /*
+164 -129
drivers/gpu/drm/i915/intel_dp.c
··· 148 148 return max_link_bw; 149 149 } 150 150 151 - static int 152 - intel_dp_link_clock(uint8_t link_bw) 153 - { 154 - if (link_bw == DP_LINK_BW_2_7) 155 - return 270000; 156 - else 157 - return 162000; 158 - } 159 - 160 151 /* 161 152 * The units on the numbers in the next two are... bizarre. Examples will 162 153 * make it clearer; this one parallels an example in the eDP spec. ··· 182 191 struct drm_display_mode *mode, 183 192 bool adjust_mode) 184 193 { 185 - int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); 194 + int max_link_clock = 195 + drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp)); 186 196 int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd); 187 197 int max_rate, mode_rate; 188 198 ··· 322 330 } 323 331 } 324 332 333 + static uint32_t 334 + intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq) 335 + { 336 + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 337 + struct drm_device *dev = intel_dig_port->base.base.dev; 338 + struct drm_i915_private *dev_priv = dev->dev_private; 339 + uint32_t ch_ctl = intel_dp->output_reg + 0x10; 340 + uint32_t status; 341 + bool done; 342 + 343 + if (IS_HASWELL(dev)) { 344 + switch (intel_dig_port->port) { 345 + case PORT_A: 346 + ch_ctl = DPA_AUX_CH_CTL; 347 + break; 348 + case PORT_B: 349 + ch_ctl = PCH_DPB_AUX_CH_CTL; 350 + break; 351 + case PORT_C: 352 + ch_ctl = PCH_DPC_AUX_CH_CTL; 353 + break; 354 + case PORT_D: 355 + ch_ctl = PCH_DPD_AUX_CH_CTL; 356 + break; 357 + default: 358 + BUG(); 359 + } 360 + } 361 + 362 + #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0) 363 + if (has_aux_irq) 364 + done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 10); 365 + else 366 + done = wait_for_atomic(C, 10) == 0; 367 + if (!done) 368 + DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n", 369 + has_aux_irq); 370 + #undef C 371 + 372 + return status; 373 + } 374 + 325 375 static int 326 376 
intel_dp_aux_ch(struct intel_dp *intel_dp, 327 377 uint8_t *send, int send_bytes, ··· 375 341 struct drm_i915_private *dev_priv = dev->dev_private; 376 342 uint32_t ch_ctl = output_reg + 0x10; 377 343 uint32_t ch_data = ch_ctl + 4; 378 - int i; 379 - int recv_bytes; 344 + int i, ret, recv_bytes; 380 345 uint32_t status; 381 346 uint32_t aux_clock_divider; 382 347 int try, precharge; 348 + bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev); 349 + 350 + /* dp aux is extremely sensitive to irq latency, hence request the 351 + * lowest possible wakeup latency and so prevent the cpu from going into 352 + * deep sleep states. 353 + */ 354 + pm_qos_update_request(&dev_priv->pm_qos, 0); 383 355 384 356 if (IS_HASWELL(dev)) { 385 357 switch (intel_dig_port->port) { ··· 419 379 * clock divider. 420 380 */ 421 381 if (is_cpu_edp(intel_dp)) { 422 - if (IS_HASWELL(dev)) 382 + if (HAS_DDI(dev)) 423 383 aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1; 424 384 else if (IS_VALLEYVIEW(dev)) 425 385 aux_clock_divider = 100; ··· 439 399 440 400 /* Try to wait for any previous AUX channel activity */ 441 401 for (try = 0; try < 3; try++) { 442 - status = I915_READ(ch_ctl); 402 + status = I915_READ_NOTRACE(ch_ctl); 443 403 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) 444 404 break; 445 405 msleep(1); ··· 448 408 if (try == 3) { 449 409 WARN(1, "dp_aux_ch not started status 0x%08x\n", 450 410 I915_READ(ch_ctl)); 451 - return -EBUSY; 411 + ret = -EBUSY; 412 + goto out; 452 413 } 453 414 454 415 /* Must try at least 3 times according to DP spec */ ··· 462 421 /* Send the command and wait for it to complete */ 463 422 I915_WRITE(ch_ctl, 464 423 DP_AUX_CH_CTL_SEND_BUSY | 424 + (has_aux_irq ? 
DP_AUX_CH_CTL_INTERRUPT : 0) | 465 425 DP_AUX_CH_CTL_TIME_OUT_400us | 466 426 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 467 427 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | ··· 470 428 DP_AUX_CH_CTL_DONE | 471 429 DP_AUX_CH_CTL_TIME_OUT_ERROR | 472 430 DP_AUX_CH_CTL_RECEIVE_ERROR); 473 - for (;;) { 474 - status = I915_READ(ch_ctl); 475 - if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) 476 - break; 477 - udelay(100); 478 - } 431 + 432 + status = intel_dp_aux_wait_done(intel_dp, has_aux_irq); 479 433 480 434 /* Clear done status and any errors */ 481 435 I915_WRITE(ch_ctl, ··· 489 451 490 452 if ((status & DP_AUX_CH_CTL_DONE) == 0) { 491 453 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status); 492 - return -EBUSY; 454 + ret = -EBUSY; 455 + goto out; 493 456 } 494 457 495 458 /* Check for timeout or receive error. ··· 498 459 */ 499 460 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { 500 461 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status); 501 - return -EIO; 462 + ret = -EIO; 463 + goto out; 502 464 } 503 465 504 466 /* Timeouts occur when the device isn't connected, so they're 505 467 * "normal" -- don't fill the kernel log with these */ 506 468 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { 507 469 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status); 508 - return -ETIMEDOUT; 470 + ret = -ETIMEDOUT; 471 + goto out; 509 472 } 510 473 511 474 /* Unload any bytes sent back from the other side */ ··· 520 479 unpack_aux(I915_READ(ch_data + i), 521 480 recv + i, recv_bytes - i); 522 481 523 - return recv_bytes; 482 + ret = recv_bytes; 483 + out: 484 + pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE); 485 + 486 + return ret; 524 487 } 525 488 526 489 /* Write data to the aux channel in native mode */ ··· 767 722 768 723 for (clock = 0; clock <= max_clock; clock++) { 769 724 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { 770 - int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), 
lane_count); 725 + int link_bw_clock = 726 + drm_dp_bw_code_to_link_rate(bws[clock]); 727 + int link_avail = intel_dp_max_data_rate(link_bw_clock, 728 + lane_count); 771 729 772 730 if (mode_rate <= link_avail) { 773 731 intel_dp->link_bw = bws[clock]; 774 732 intel_dp->lane_count = lane_count; 775 - adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); 733 + adjusted_mode->clock = link_bw_clock; 776 734 DRM_DEBUG_KMS("DP link bw %02x lane " 777 735 "count %d clock %d bpp %d\n", 778 736 intel_dp->link_bw, intel_dp->lane_count, ··· 790 742 return false; 791 743 } 792 744 793 - struct intel_dp_m_n { 794 - uint32_t tu; 795 - uint32_t gmch_m; 796 - uint32_t gmch_n; 797 - uint32_t link_m; 798 - uint32_t link_n; 799 - }; 800 - 801 - static void 802 - intel_reduce_ratio(uint32_t *num, uint32_t *den) 803 - { 804 - while (*num > 0xffffff || *den > 0xffffff) { 805 - *num >>= 1; 806 - *den >>= 1; 807 - } 808 - } 809 - 810 - static void 811 - intel_dp_compute_m_n(int bpp, 812 - int nlanes, 813 - int pixel_clock, 814 - int link_clock, 815 - struct intel_dp_m_n *m_n) 816 - { 817 - m_n->tu = 64; 818 - m_n->gmch_m = (pixel_clock * bpp) >> 3; 819 - m_n->gmch_n = link_clock * nlanes; 820 - intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); 821 - m_n->link_m = pixel_clock; 822 - m_n->link_n = link_clock; 823 - intel_reduce_ratio(&m_n->link_m, &m_n->link_n); 824 - } 825 - 826 745 void 827 746 intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, 828 747 struct drm_display_mode *adjusted_mode) ··· 800 785 struct drm_i915_private *dev_priv = dev->dev_private; 801 786 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 802 787 int lane_count = 4; 803 - struct intel_dp_m_n m_n; 788 + struct intel_link_m_n m_n; 804 789 int pipe = intel_crtc->pipe; 805 790 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; 806 791 ··· 823 808 * the number of bytes_per_pixel post-LUT, which we always 824 809 * set up for 8-bits of R/G/B, or 3 bytes total. 
825 810 */ 826 - intel_dp_compute_m_n(intel_crtc->bpp, lane_count, 827 - mode->clock, adjusted_mode->clock, &m_n); 811 + intel_link_compute_m_n(intel_crtc->bpp, lane_count, 812 + mode->clock, adjusted_mode->clock, &m_n); 828 813 829 814 if (IS_HASWELL(dev)) { 830 815 I915_WRITE(PIPE_DATA_M1(cpu_transcoder), ··· 864 849 (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { 865 850 intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; 866 851 } 852 + } 853 + 854 + static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock) 855 + { 856 + struct drm_device *dev = crtc->dev; 857 + struct drm_i915_private *dev_priv = dev->dev_private; 858 + u32 dpa_ctl; 859 + 860 + DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock); 861 + dpa_ctl = I915_READ(DP_A); 862 + dpa_ctl &= ~DP_PLL_FREQ_MASK; 863 + 864 + if (clock < 200000) { 865 + /* For a long time we've carried around a ILK-DevA w/a for the 866 + * 160MHz clock. If we're really unlucky, it's still required. 867 + */ 868 + DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n"); 869 + dpa_ctl |= DP_PLL_FREQ_160MHZ; 870 + } else { 871 + dpa_ctl |= DP_PLL_FREQ_270MHZ; 872 + } 873 + 874 + I915_WRITE(DP_A, dpa_ctl); 875 + 876 + POSTING_READ(DP_A); 877 + udelay(500); 867 878 } 868 879 869 880 static void ··· 991 950 } else { 992 951 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 993 952 } 953 + 954 + if (is_cpu_edp(intel_dp)) 955 + ironlake_set_pll_edp(crtc, adjusted_mode->clock); 994 956 } 995 957 996 958 #define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) ··· 1587 1543 } 1588 1544 1589 1545 static uint32_t 1590 - intel_dp_signal_levels(uint8_t train_set) 1546 + intel_gen4_signal_levels(uint8_t train_set) 1591 1547 { 1592 1548 uint32_t signal_levels = 0; 1593 1549 ··· 1685 1641 1686 1642 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */ 1687 1643 static uint32_t 1688 - intel_dp_signal_levels_hsw(uint8_t train_set) 1644 + intel_hsw_signal_levels(uint8_t 
train_set) 1689 1645 { 1690 1646 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 1691 1647 DP_TRAIN_PRE_EMPHASIS_MASK); ··· 1715 1671 "0x%x\n", signal_levels); 1716 1672 return DDI_BUF_EMP_400MV_0DB_HSW; 1717 1673 } 1674 + } 1675 + 1676 + /* Properly updates "DP" with the correct signal levels. */ 1677 + static void 1678 + intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP) 1679 + { 1680 + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1681 + struct drm_device *dev = intel_dig_port->base.base.dev; 1682 + uint32_t signal_levels, mask; 1683 + uint8_t train_set = intel_dp->train_set[0]; 1684 + 1685 + if (IS_HASWELL(dev)) { 1686 + signal_levels = intel_hsw_signal_levels(train_set); 1687 + mask = DDI_BUF_EMP_MASK; 1688 + } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { 1689 + signal_levels = intel_gen7_edp_signal_levels(train_set); 1690 + mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB; 1691 + } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { 1692 + signal_levels = intel_gen6_edp_signal_levels(train_set); 1693 + mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB; 1694 + } else { 1695 + signal_levels = intel_gen4_signal_levels(train_set); 1696 + mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK; 1697 + } 1698 + 1699 + DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels); 1700 + 1701 + *DP = (*DP & ~mask) | signal_levels; 1718 1702 } 1719 1703 1720 1704 static bool ··· 1863 1791 int voltage_tries, loop_tries; 1864 1792 uint32_t DP = intel_dp->DP; 1865 1793 1866 - if (IS_HASWELL(dev)) 1794 + if (HAS_DDI(dev)) 1867 1795 intel_ddi_prepare_link_retrain(encoder); 1868 1796 1869 1797 /* Write the link configuration data */ ··· 1881 1809 for (;;) { 1882 1810 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 1883 1811 uint8_t link_status[DP_LINK_STATUS_SIZE]; 1884 - uint32_t signal_levels; 1885 1812 1886 - if (IS_HASWELL(dev)) { 1887 - signal_levels = intel_dp_signal_levels_hsw( 1888 - 
intel_dp->train_set[0]); 1889 - DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels; 1890 - } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { 1891 - signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]); 1892 - DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels; 1893 - } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { 1894 - signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); 1895 - DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1896 - } else { 1897 - signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]); 1898 - DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1899 - } 1900 - DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", 1901 - signal_levels); 1813 + intel_dp_set_signal_levels(intel_dp, &DP); 1902 1814 1903 1815 /* Set training pattern 1 */ 1904 1816 if (!intel_dp_set_link_train(intel_dp, DP, ··· 1938 1882 void 1939 1883 intel_dp_complete_link_train(struct intel_dp *intel_dp) 1940 1884 { 1941 - struct drm_device *dev = intel_dp_to_dev(intel_dp); 1942 1885 bool channel_eq = false; 1943 1886 int tries, cr_tries; 1944 1887 uint32_t DP = intel_dp->DP; ··· 1947 1892 cr_tries = 0; 1948 1893 channel_eq = false; 1949 1894 for (;;) { 1950 - /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 1951 - uint32_t signal_levels; 1952 1895 uint8_t link_status[DP_LINK_STATUS_SIZE]; 1953 1896 1954 1897 if (cr_tries > 5) { ··· 1955 1902 break; 1956 1903 } 1957 1904 1958 - if (IS_HASWELL(dev)) { 1959 - signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]); 1960 - DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels; 1961 - } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { 1962 - signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]); 1963 - DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels; 1964 - } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { 1965 - signal_levels = 
intel_gen6_edp_signal_levels(intel_dp->train_set[0]); 1966 - DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1967 - } else { 1968 - signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]); 1969 - DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1970 - } 1905 + intel_dp_set_signal_levels(intel_dp, &DP); 1971 1906 1972 1907 /* channel eq pattern */ 1973 1908 if (!intel_dp_set_link_train(intel_dp, DP, ··· 2005 1964 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2006 1965 struct drm_device *dev = intel_dig_port->base.base.dev; 2007 1966 struct drm_i915_private *dev_priv = dev->dev_private; 1967 + struct intel_crtc *intel_crtc = 1968 + to_intel_crtc(intel_dig_port->base.base.crtc); 2008 1969 uint32_t DP = intel_dp->DP; 2009 1970 2010 1971 /* ··· 2024 1981 * intel_ddi_prepare_link_retrain will take care of redoing the link 2025 1982 * train. 2026 1983 */ 2027 - if (IS_HASWELL(dev)) 1984 + if (HAS_DDI(dev)) 2028 1985 return; 2029 1986 2030 1987 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)) ··· 2041 1998 } 2042 1999 POSTING_READ(intel_dp->output_reg); 2043 2000 2044 - msleep(17); 2001 + /* We don't really know why we're doing this */ 2002 + intel_wait_for_vblank(dev, intel_crtc->pipe); 2045 2003 2046 2004 if (HAS_PCH_IBX(dev) && 2047 2005 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { ··· 2062 2018 /* Changes to enable or select take place the vblank 2063 2019 * after being written. 2064 2020 */ 2065 - if (crtc == NULL) { 2066 - /* We can arrive here never having been attached 2067 - * to a CRTC, for instance, due to inheriting 2068 - * random state from the BIOS. 2069 - * 2070 - * If the pipe is not running, play safe and 2071 - * wait for the clocks to stabilise before 2072 - * continuing. 2073 - */ 2021 + if (WARN_ON(crtc == NULL)) { 2022 + /* We should never try to disable a port without a crtc 2023 + * attached. For paranoia keep the code around for a 2024 + * bit. 
*/ 2074 2025 POSTING_READ(intel_dp->output_reg); 2075 2026 msleep(50); 2076 2027 } else 2077 - intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe); 2028 + intel_wait_for_vblank(dev, intel_crtc->pipe); 2078 2029 } 2079 2030 2080 2031 DP &= ~DP_AUDIO_OUTPUT_ENABLE; ··· 2081 2042 static bool 2082 2043 intel_dp_get_dpcd(struct intel_dp *intel_dp) 2083 2044 { 2045 + char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3]; 2046 + 2084 2047 if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd, 2085 2048 sizeof(intel_dp->dpcd)) == 0) 2086 2049 return false; /* aux transfer failed */ 2050 + 2051 + hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd), 2052 + 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false); 2053 + DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump); 2087 2054 2088 2055 if (intel_dp->dpcd[DP_DPCD_REV] == 0) 2089 2056 return false; /* DPCD not present */ ··· 2251 2206 ironlake_dp_detect(struct intel_dp *intel_dp) 2252 2207 { 2253 2208 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2209 + struct drm_i915_private *dev_priv = dev->dev_private; 2210 + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2254 2211 enum drm_connector_status status; 2255 2212 2256 2213 /* Can't disconnect eDP, but you can close the lid... */ ··· 2262 2215 status = connector_status_connected; 2263 2216 return status; 2264 2217 } 2218 + 2219 + if (!ibx_digital_port_connected(dev_priv, intel_dig_port)) 2220 + return connector_status_disconnected; 2265 2221 2266 2222 return intel_dp_detect_dpcd(intel_dp); 2267 2223 } ··· 2340 2290 return intel_ddc_get_modes(connector, adapter); 2341 2291 } 2342 2292 2343 - 2344 - /** 2345 - * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection. 2346 - * 2347 - * \return true if DP port is connected. 2348 - * \return false if DP port is disconnected. 
2349 - */ 2350 2293 static enum drm_connector_status 2351 2294 intel_dp_detect(struct drm_connector *connector, bool force) 2352 2295 { ··· 2349 2306 struct drm_device *dev = connector->dev; 2350 2307 enum drm_connector_status status; 2351 2308 struct edid *edid = NULL; 2352 - char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3]; 2353 2309 2354 2310 intel_dp->has_audio = false; 2355 2311 ··· 2356 2314 status = ironlake_dp_detect(intel_dp); 2357 2315 else 2358 2316 status = g4x_dp_detect(intel_dp); 2359 - 2360 - hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd), 2361 - 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false); 2362 - DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump); 2363 2317 2364 2318 if (status != connector_status_connected) 2365 2319 return status; ··· 2483 2445 return -EINVAL; 2484 2446 2485 2447 done: 2486 - if (intel_encoder->base.crtc) { 2487 - struct drm_crtc *crtc = intel_encoder->base.crtc; 2488 - intel_set_mode(crtc, &crtc->mode, 2489 - crtc->x, crtc->y, crtc->fb); 2490 - } 2448 + if (intel_encoder->base.crtc) 2449 + intel_crtc_restore_mode(intel_encoder->base.crtc); 2491 2450 2492 2451 return 0; 2493 2452 } ··· 2777 2742 intel_connector_attach_encoder(intel_connector, intel_encoder); 2778 2743 drm_sysfs_connector_add(connector); 2779 2744 2780 - if (IS_HASWELL(dev)) 2745 + if (HAS_DDI(dev)) 2781 2746 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 2782 2747 else 2783 2748 intel_connector->get_hw_state = intel_connector_get_hw_state;
+9 -2
drivers/gpu/drm/i915/intel_drv.h
··· 153 153 bool cloneable; 154 154 bool connectors_active; 155 155 void (*hot_plug)(struct intel_encoder *); 156 + void (*pre_pll_enable)(struct intel_encoder *); 156 157 void (*pre_enable)(struct intel_encoder *); 157 158 void (*enable)(struct intel_encoder *); 158 159 void (*disable)(struct intel_encoder *); ··· 444 443 extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj); 445 444 extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj); 446 445 extern bool intel_lvds_init(struct drm_device *dev); 446 + extern bool intel_is_dual_link_lvds(struct drm_device *dev); 447 447 extern void intel_dp_init(struct drm_device *dev, int output_reg, 448 448 enum port port); 449 449 extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port, ··· 504 502 bool mode_changed; 505 503 }; 506 504 507 - extern bool intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, 508 - int x, int y, struct drm_framebuffer *old_fb); 505 + extern int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, 506 + int x, int y, struct drm_framebuffer *old_fb); 509 507 extern void intel_modeset_disable(struct drm_device *dev); 508 + extern void intel_crtc_restore_mode(struct drm_crtc *crtc); 510 509 extern void intel_crtc_load_lut(struct drm_crtc *crtc); 511 510 extern void intel_crtc_update_dpms(struct drm_crtc *crtc); 512 511 extern void intel_encoder_noop(struct drm_encoder *encoder); ··· 548 545 { 549 546 return container_of(intel_hdmi, struct intel_digital_port, hdmi); 550 547 } 548 + 549 + bool ibx_digital_port_connected(struct drm_i915_private *dev_priv, 550 + struct intel_digital_port *port); 551 551 552 552 extern void intel_connector_attach_encoder(struct intel_connector *connector, 553 553 struct intel_encoder *encoder); ··· 595 589 struct drm_mode_fb_cmd2 *mode_cmd, 596 590 struct drm_i915_gem_object *obj); 597 591 extern int intel_fbdev_init(struct drm_device *dev); 592 + extern void intel_fbdev_initial_config(struct 
drm_device *dev); 598 593 extern void intel_fbdev_fini(struct drm_device *dev); 599 594 extern void intel_fbdev_set_suspend(struct drm_device *dev, int state); 600 595 extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
+19 -2
drivers/gpu/drm/i915/intel_fb.c
··· 83 83 84 84 size = mode_cmd.pitches[0] * mode_cmd.height; 85 85 size = ALIGN(size, PAGE_SIZE); 86 - obj = i915_gem_alloc_object(dev, size); 86 + obj = i915_gem_object_create_stolen(dev, size); 87 + if (obj == NULL) 88 + obj = i915_gem_alloc_object(dev, size); 87 89 if (!obj) { 88 90 DRM_ERROR("failed to allocate framebuffer\n"); 89 91 ret = -ENOMEM; ··· 154 152 155 153 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); 156 154 drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height); 155 + 156 + /* If the object is shmemfs backed, it will have given us zeroed pages. 157 + * If the object is stolen however, it will be full of whatever 158 + * garbage was left in there. 159 + */ 160 + if (ifbdev->ifb.obj->stolen) 161 + memset_io(info->screen_base, 0, info->screen_size); 157 162 158 163 /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ 159 164 ··· 250 241 } 251 242 252 243 drm_fb_helper_single_add_all_connectors(&ifbdev->helper); 253 - drm_fb_helper_initial_config(&ifbdev->helper, 32); 244 + 254 245 return 0; 246 + } 247 + 248 + void intel_fbdev_initial_config(struct drm_device *dev) 249 + { 250 + drm_i915_private_t *dev_priv = dev->dev_private; 251 + 252 + /* Due to peculiar init order wrt to hpd handling this is separate. */ 253 + drm_fb_helper_initial_config(&dev_priv->fbdev->helper, 32); 255 254 } 256 255 257 256 void intel_fbdev_fini(struct drm_device *dev)
+11 -9
drivers/gpu/drm/i915/intel_hdmi.c
··· 48 48 struct drm_i915_private *dev_priv = dev->dev_private; 49 49 uint32_t enabled_bits; 50 50 51 - enabled_bits = IS_HASWELL(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE; 51 + enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE; 52 52 53 53 WARN(I915_READ(intel_hdmi->sdvox_reg) & enabled_bits, 54 54 "HDMI port enabled, expecting disabled\n"); ··· 793 793 static enum drm_connector_status 794 794 intel_hdmi_detect(struct drm_connector *connector, bool force) 795 795 { 796 + struct drm_device *dev = connector->dev; 796 797 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); 797 798 struct intel_digital_port *intel_dig_port = 798 799 hdmi_to_dig_port(intel_hdmi); 799 800 struct intel_encoder *intel_encoder = &intel_dig_port->base; 800 - struct drm_i915_private *dev_priv = connector->dev->dev_private; 801 + struct drm_i915_private *dev_priv = dev->dev_private; 801 802 struct edid *edid; 802 803 enum drm_connector_status status = connector_status_disconnected; 803 804 804 - if (IS_G4X(connector->dev) && !g4x_hdmi_connected(intel_hdmi)) 805 + 806 + if (IS_G4X(dev) && !g4x_hdmi_connected(intel_hdmi)) 805 807 return status; 808 + else if (HAS_PCH_SPLIT(dev) && 809 + !ibx_digital_port_connected(dev_priv, intel_dig_port)) 810 + return status; 806 811 807 812 intel_hdmi->has_hdmi_sink = false; 808 813 intel_hdmi->has_audio = false; ··· 917 912 return -EINVAL; 918 913 919 914 done: 920 - if (intel_dig_port->base.base.crtc) { 921 - struct drm_crtc *crtc = intel_dig_port->base.base.crtc; 922 - intel_set_mode(crtc, &crtc->mode, 923 - crtc->x, crtc->y, crtc->fb); 924 - } 915 + if (intel_dig_port->base.base.crtc) 916 + intel_crtc_restore_mode(intel_dig_port->base.base.crtc); 925 917 926 918 return 0; 927 919 } ··· 1015 1013 intel_hdmi->set_infoframes = cpt_set_infoframes; 1016 1014 } 1017 1015 1018 - if (IS_HASWELL(dev)) 1016 + if (HAS_DDI(dev)) 1019 1017 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 1020 1018 else 1021 1019 
intel_connector->get_hw_state = intel_connector_get_hw_state;
+76 -25
drivers/gpu/drm/i915/intel_i2c.c
··· 63 63 { 64 64 struct drm_i915_private *dev_priv = dev->dev_private; 65 65 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0); 66 + I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0); 66 67 } 67 68 68 69 static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable) ··· 203 202 algo->data = bus; 204 203 } 205 204 205 + #define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 4) 206 + static int 207 + gmbus_wait_hw_status(struct drm_i915_private *dev_priv, 208 + u32 gmbus2_status, 209 + u32 gmbus4_irq_en) 210 + { 211 + int i; 212 + int reg_offset = dev_priv->gpio_mmio_base; 213 + u32 gmbus2 = 0; 214 + DEFINE_WAIT(wait); 215 + 216 + /* Important: The hw handles only the first bit, so set only one! Since 217 + * we also need to check for NAKs besides the hw ready/idle signal, we 218 + * need to wake up periodically and check that ourselves. */ 219 + I915_WRITE(GMBUS4 + reg_offset, gmbus4_irq_en); 220 + 221 + for (i = 0; i < msecs_to_jiffies(50) + 1; i++) { 222 + prepare_to_wait(&dev_priv->gmbus_wait_queue, &wait, 223 + TASK_UNINTERRUPTIBLE); 224 + 225 + gmbus2 = I915_READ_NOTRACE(GMBUS2 + reg_offset); 226 + if (gmbus2 & (GMBUS_SATOER | gmbus2_status)) 227 + break; 228 + 229 + schedule_timeout(1); 230 + } 231 + finish_wait(&dev_priv->gmbus_wait_queue, &wait); 232 + 233 + I915_WRITE(GMBUS4 + reg_offset, 0); 234 + 235 + if (gmbus2 & GMBUS_SATOER) 236 + return -ENXIO; 237 + if (gmbus2 & gmbus2_status) 238 + return 0; 239 + return -ETIMEDOUT; 240 + } 241 + 242 + static int 243 + gmbus_wait_idle(struct drm_i915_private *dev_priv) 244 + { 245 + int ret; 246 + int reg_offset = dev_priv->gpio_mmio_base; 247 + 248 + #define C ((I915_READ_NOTRACE(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0) 249 + 250 + if (!HAS_GMBUS_IRQ(dev_priv->dev)) 251 + return wait_for(C, 10); 252 + 253 + /* Important: The hw handles only the first bit, so set only one! 
*/ 254 + I915_WRITE(GMBUS4 + reg_offset, GMBUS_IDLE_EN); 255 + 256 + ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 10); 257 + 258 + I915_WRITE(GMBUS4 + reg_offset, 0); 259 + 260 + if (ret) 261 + return 0; 262 + else 263 + return -ETIMEDOUT; 264 + #undef C 265 + } 266 + 206 267 static int 207 268 gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, 208 269 u32 gmbus1_index) ··· 282 219 while (len) { 283 220 int ret; 284 221 u32 val, loop = 0; 285 - u32 gmbus2; 286 222 287 - ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) & 288 - (GMBUS_SATOER | GMBUS_HW_RDY), 289 - 50); 223 + ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY, 224 + GMBUS_HW_RDY_EN); 290 225 if (ret) 291 - return -ETIMEDOUT; 292 - if (gmbus2 & GMBUS_SATOER) 293 - return -ENXIO; 226 + return ret; 294 227 295 228 val = I915_READ(GMBUS3 + reg_offset); 296 229 do { ··· 320 261 GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); 321 262 while (len) { 322 263 int ret; 323 - u32 gmbus2; 324 264 325 265 val = loop = 0; 326 266 do { ··· 328 270 329 271 I915_WRITE(GMBUS3 + reg_offset, val); 330 272 331 - ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) & 332 - (GMBUS_SATOER | GMBUS_HW_RDY), 333 - 50); 273 + ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY, 274 + GMBUS_HW_RDY_EN); 334 275 if (ret) 335 - return -ETIMEDOUT; 336 - if (gmbus2 & GMBUS_SATOER) 337 - return -ENXIO; 276 + return ret; 338 277 } 339 278 return 0; 340 279 } ··· 400 345 I915_WRITE(GMBUS0 + reg_offset, bus->reg0); 401 346 402 347 for (i = 0; i < num; i++) { 403 - u32 gmbus2; 404 - 405 348 if (gmbus_is_index_read(msgs, i, num)) { 406 349 ret = gmbus_xfer_index_read(dev_priv, &msgs[i]); 407 350 i += 1; /* set i to the index of the read xfer */ ··· 414 361 if (ret == -ENXIO) 415 362 goto clear_err; 416 363 417 - ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) & 418 - (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 419 - 50); 364 + ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE, 365 + GMBUS_HW_WAIT_EN); 
366 + if (ret == -ENXIO) 367 + goto clear_err; 420 368 if (ret) 421 369 goto timeout; 422 - if (gmbus2 & GMBUS_SATOER) 423 - goto clear_err; 424 370 } 425 371 426 372 /* Generate a STOP condition on the bus. Note that gmbus can't generata ··· 432 380 * We will re-enable it at the start of the next xfer, 433 381 * till then let it sleep. 434 382 */ 435 - if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0, 436 - 10)) { 383 + if (gmbus_wait_idle(dev_priv)) { 437 384 DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n", 438 385 adapter->name); 439 386 ret = -ETIMEDOUT; ··· 456 405 * it's slow responding and only answers on the 2nd retry. 457 406 */ 458 407 ret = -ENXIO; 459 - if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0, 460 - 10)) { 408 + if (gmbus_wait_idle(dev_priv)) { 461 409 DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n", 462 410 adapter->name); 463 411 ret = -ETIMEDOUT; ··· 519 469 dev_priv->gpio_mmio_base = 0; 520 470 521 471 mutex_init(&dev_priv->gmbus_mutex); 472 + init_waitqueue_head(&dev_priv->gmbus_wait_queue); 522 473 523 474 for (i = 0; i < GMBUS_NUM_PORTS; i++) { 524 475 struct intel_gmbus *bus = &dev_priv->gmbus[i];
+145 -20
drivers/gpu/drm/i915/intel_lvds.c
··· 52 52 u32 pfit_control; 53 53 u32 pfit_pgm_ratios; 54 54 bool pfit_dirty; 55 + bool is_dual_link; 56 + u32 reg; 55 57 56 58 struct intel_lvds_connector *attached_connector; 57 59 }; ··· 73 71 { 74 72 struct drm_device *dev = encoder->base.dev; 75 73 struct drm_i915_private *dev_priv = dev->dev_private; 76 - u32 lvds_reg, tmp; 74 + struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 75 + u32 tmp; 77 76 78 - if (HAS_PCH_SPLIT(dev)) { 79 - lvds_reg = PCH_LVDS; 80 - } else { 81 - lvds_reg = LVDS; 82 - } 83 - 84 - tmp = I915_READ(lvds_reg); 77 + tmp = I915_READ(lvds_encoder->reg); 85 78 86 79 if (!(tmp & LVDS_PORT_EN)) 87 80 return false; ··· 89 92 return true; 90 93 } 91 94 95 + /* The LVDS pin pair needs to be on before the DPLLs are enabled. 96 + * This is an exception to the general rule that mode_set doesn't turn 97 + * things on. 98 + */ 99 + static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder) 100 + { 101 + struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 102 + struct drm_device *dev = encoder->base.dev; 103 + struct drm_i915_private *dev_priv = dev->dev_private; 104 + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 105 + struct drm_display_mode *fixed_mode = 106 + lvds_encoder->attached_connector->base.panel.fixed_mode; 107 + int pipe = intel_crtc->pipe; 108 + u32 temp; 109 + 110 + temp = I915_READ(lvds_encoder->reg); 111 + temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; 112 + 113 + if (HAS_PCH_CPT(dev)) { 114 + temp &= ~PORT_TRANS_SEL_MASK; 115 + temp |= PORT_TRANS_SEL_CPT(pipe); 116 + } else { 117 + if (pipe == 1) { 118 + temp |= LVDS_PIPEB_SELECT; 119 + } else { 120 + temp &= ~LVDS_PIPEB_SELECT; 121 + } 122 + } 123 + 124 + /* set the corresponsding LVDS_BORDER bit */ 125 + temp |= dev_priv->lvds_border_bits; 126 + /* Set the B0-B3 data pairs corresponding to whether we're going to 127 + * set the DPLLs for dual-channel mode or not. 
128 + */ 129 + if (lvds_encoder->is_dual_link) 130 + temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; 131 + else 132 + temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); 133 + 134 + /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) 135 + * appropriately here, but we need to look more thoroughly into how 136 + * panels behave in the two modes. 137 + */ 138 + 139 + /* Set the dithering flag on LVDS as needed, note that there is no 140 + * special lvds dither control bit on pch-split platforms, dithering is 141 + * only controlled through the PIPECONF reg. */ 142 + if (INTEL_INFO(dev)->gen == 4) { 143 + if (dev_priv->lvds_dither) 144 + temp |= LVDS_ENABLE_DITHER; 145 + else 146 + temp &= ~LVDS_ENABLE_DITHER; 147 + } 148 + temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); 149 + if (fixed_mode->flags & DRM_MODE_FLAG_NHSYNC) 150 + temp |= LVDS_HSYNC_POLARITY; 151 + if (fixed_mode->flags & DRM_MODE_FLAG_NVSYNC) 152 + temp |= LVDS_VSYNC_POLARITY; 153 + 154 + I915_WRITE(lvds_encoder->reg, temp); 155 + } 156 + 92 157 /** 93 158 * Sets the power state for the panel. 
94 159 */ ··· 160 101 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 161 102 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 162 103 struct drm_i915_private *dev_priv = dev->dev_private; 163 - u32 ctl_reg, lvds_reg, stat_reg; 104 + u32 ctl_reg, stat_reg; 164 105 165 106 if (HAS_PCH_SPLIT(dev)) { 166 107 ctl_reg = PCH_PP_CONTROL; 167 - lvds_reg = PCH_LVDS; 168 108 stat_reg = PCH_PP_STATUS; 169 109 } else { 170 110 ctl_reg = PP_CONTROL; 171 - lvds_reg = LVDS; 172 111 stat_reg = PP_STATUS; 173 112 } 174 113 175 - I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); 114 + I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) | LVDS_PORT_EN); 176 115 177 116 if (lvds_encoder->pfit_dirty) { 178 117 /* ··· 189 132 } 190 133 191 134 I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); 192 - POSTING_READ(lvds_reg); 135 + POSTING_READ(lvds_encoder->reg); 193 136 if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000)) 194 137 DRM_ERROR("timed out waiting for panel to power on\n"); 195 138 ··· 201 144 struct drm_device *dev = encoder->base.dev; 202 145 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 203 146 struct drm_i915_private *dev_priv = dev->dev_private; 204 - u32 ctl_reg, lvds_reg, stat_reg; 147 + u32 ctl_reg, stat_reg; 205 148 206 149 if (HAS_PCH_SPLIT(dev)) { 207 150 ctl_reg = PCH_PP_CONTROL; 208 - lvds_reg = PCH_LVDS; 209 151 stat_reg = PCH_PP_STATUS; 210 152 } else { 211 153 ctl_reg = PP_CONTROL; 212 - lvds_reg = LVDS; 213 154 stat_reg = PP_STATUS; 214 155 } 215 156 ··· 222 167 lvds_encoder->pfit_dirty = true; 223 168 } 224 169 225 - I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); 226 - POSTING_READ(lvds_reg); 170 + I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN); 171 + POSTING_READ(lvds_encoder->reg); 227 172 } 228 173 229 174 static int intel_lvds_mode_valid(struct drm_connector *connector, ··· 646 591 * If the CRTC is enabled, the 
display will be changed 647 592 * according to the new panel fitting mode. 648 593 */ 649 - intel_set_mode(crtc, &crtc->mode, 650 - crtc->x, crtc->y, crtc->fb); 594 + intel_crtc_restore_mode(crtc); 651 595 } 652 596 } 653 597 ··· 957 903 return false; 958 904 } 959 905 906 + static int intel_dual_link_lvds_callback(const struct dmi_system_id *id) 907 + { 908 + DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident); 909 + return 1; 910 + } 911 + 912 + static const struct dmi_system_id intel_dual_link_lvds[] = { 913 + { 914 + .callback = intel_dual_link_lvds_callback, 915 + .ident = "Apple MacBook Pro (Core i5/i7 Series)", 916 + .matches = { 917 + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), 918 + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"), 919 + }, 920 + }, 921 + { } /* terminating entry */ 922 + }; 923 + 924 + bool intel_is_dual_link_lvds(struct drm_device *dev) 925 + { 926 + struct intel_encoder *encoder; 927 + struct intel_lvds_encoder *lvds_encoder; 928 + 929 + list_for_each_entry(encoder, &dev->mode_config.encoder_list, 930 + base.head) { 931 + if (encoder->type == INTEL_OUTPUT_LVDS) { 932 + lvds_encoder = to_lvds_encoder(&encoder->base); 933 + 934 + return lvds_encoder->is_dual_link; 935 + } 936 + } 937 + 938 + return false; 939 + } 940 + 941 + static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder) 942 + { 943 + struct drm_device *dev = lvds_encoder->base.base.dev; 944 + unsigned int val; 945 + struct drm_i915_private *dev_priv = dev->dev_private; 946 + 947 + /* use the module option value if specified */ 948 + if (i915_lvds_channel_mode > 0) 949 + return i915_lvds_channel_mode == 2; 950 + 951 + if (dmi_check_system(intel_dual_link_lvds)) 952 + return true; 953 + 954 + /* BIOS should set the proper LVDS register value at boot, but 955 + * in reality, it doesn't set the value when the lid is closed; 956 + * we need to check "the value to be set" in VBT when LVDS 957 + * register is uninitialized. 
958 + */ 959 + val = I915_READ(lvds_encoder->reg); 960 + if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED))) 961 + val = dev_priv->bios_lvds_val; 962 + 963 + return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP; 964 + } 965 + 960 966 static bool intel_lvds_supported(struct drm_device *dev) 961 967 { 962 968 /* With the introduction of the PCH we gained a dedicated ··· 1102 988 DRM_MODE_ENCODER_LVDS); 1103 989 1104 990 intel_encoder->enable = intel_enable_lvds; 991 + intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds; 1105 992 intel_encoder->disable = intel_disable_lvds; 1106 993 intel_encoder->get_hw_state = intel_lvds_get_hw_state; 1107 994 intel_connector->get_hw_state = intel_connector_get_hw_state; ··· 1123 1008 connector->display_info.subpixel_order = SubPixelHorizontalRGB; 1124 1009 connector->interlace_allowed = false; 1125 1010 connector->doublescan_allowed = false; 1011 + 1012 + if (HAS_PCH_SPLIT(dev)) { 1013 + lvds_encoder->reg = PCH_LVDS; 1014 + } else { 1015 + lvds_encoder->reg = LVDS; 1016 + } 1126 1017 1127 1018 /* create the scaling mode property */ 1128 1019 drm_mode_create_scaling_mode_property(dev); ··· 1230 1109 goto failed; 1231 1110 1232 1111 out: 1112 + lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder); 1113 + DRM_DEBUG_KMS("detected %s-link lvds configuration\n", 1114 + lvds_encoder->is_dual_link ? "dual" : "single"); 1115 + 1233 1116 /* 1234 1117 * Unlock registers and just 1235 1118 * leave them unlocked
-1
drivers/gpu/drm/i915/intel_modes.c
··· 28 28 #include <linux/fb.h> 29 29 #include <drm/drm_edid.h> 30 30 #include <drm/drmP.h> 31 - #include <drm/drm_edid.h> 32 31 #include "intel_drv.h" 33 32 #include "i915_drv.h" 34 33
+4 -2
drivers/gpu/drm/i915/intel_overlay.c
··· 1333 1333 1334 1334 overlay->dev = dev; 1335 1335 1336 - reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE); 1337 - if (!reg_bo) 1336 + reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE); 1337 + if (reg_bo == NULL) 1338 + reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE); 1339 + if (reg_bo == NULL) 1338 1340 goto out_free; 1339 1341 overlay->reg_bo = reg_bo; 1340 1342
+9 -6
drivers/gpu/drm/i915/intel_pm.c
··· 440 440 dev_priv->no_fbc_reason = FBC_MODULE_PARAM; 441 441 goto out_disable; 442 442 } 443 - if (intel_fb->obj->base.size > dev_priv->cfb_size) { 444 - DRM_DEBUG_KMS("framebuffer too large, disabling " 445 - "compression\n"); 446 - dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; 447 - goto out_disable; 448 - } 449 443 if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) || 450 444 (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) { 451 445 DRM_DEBUG_KMS("mode incompatible with compression, " ··· 472 478 /* If the kernel debugger is active, always disable compression */ 473 479 if (in_dbg_master()) 474 480 goto out_disable; 481 + 482 + if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) { 483 + DRM_INFO("not enough stolen space for compressed buffer (need %zd bytes), disabling\n", intel_fb->obj->base.size); 484 + DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n"); 485 + DRM_DEBUG_KMS("framebuffer too large, disabling compression\n"); 486 + dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; 487 + goto out_disable; 488 + } 475 489 476 490 /* If the scanout has not changed, don't modify the FBC settings. 477 491 * Note that we make the fundamental assumption that the fb->obj ··· 528 526 DRM_DEBUG_KMS("unsupported config, disabling FBC\n"); 529 527 intel_disable_fbc(dev); 530 528 } 529 + i915_gem_stolen_cleanup_compression(dev); 531 530 } 532 531 533 532 static void i915_pineview_get_mem_freq(struct drm_device *dev)
+82 -19
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 601 601 return 0; 602 602 } 603 603 604 + static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev, 605 + u32 seqno) 606 + { 607 + struct drm_i915_private *dev_priv = dev->dev_private; 608 + return dev_priv->last_seqno < seqno; 609 + } 610 + 604 611 /** 605 612 * intel_ring_sync - sync the waiter to the signaller on seqno 606 613 * ··· 638 631 if (ret) 639 632 return ret; 640 633 641 - intel_ring_emit(waiter, 642 - dw1 | signaller->semaphore_register[waiter->id]); 643 - intel_ring_emit(waiter, seqno); 644 - intel_ring_emit(waiter, 0); 645 - intel_ring_emit(waiter, MI_NOOP); 634 + /* If seqno wrap happened, omit the wait with no-ops */ 635 + if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) { 636 + intel_ring_emit(waiter, 637 + dw1 | 638 + signaller->semaphore_register[waiter->id]); 639 + intel_ring_emit(waiter, seqno); 640 + intel_ring_emit(waiter, 0); 641 + intel_ring_emit(waiter, MI_NOOP); 642 + } else { 643 + intel_ring_emit(waiter, MI_NOOP); 644 + intel_ring_emit(waiter, MI_NOOP); 645 + intel_ring_emit(waiter, MI_NOOP); 646 + intel_ring_emit(waiter, MI_NOOP); 647 + } 646 648 intel_ring_advance(waiter); 647 649 648 650 return 0; ··· 732 716 return intel_read_status_page(ring, I915_GEM_HWS_INDEX); 733 717 } 734 718 719 + static void 720 + ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno) 721 + { 722 + intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno); 723 + } 724 + 735 725 static u32 736 726 pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) 737 727 { 738 728 struct pipe_control *pc = ring->private; 739 729 return pc->cpu_page[0]; 730 + } 731 + 732 + static void 733 + pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno) 734 + { 735 + struct pipe_control *pc = ring->private; 736 + pc->cpu_page[0] = seqno; 740 737 } 741 738 742 739 static bool ··· 1181 1152 return ret; 1182 1153 } 1183 1154 1184 - obj = i915_gem_alloc_object(dev, ring->size); 1155 + obj = NULL; 1156 + if (!HAS_LLC(dev)) 1157 
+ obj = i915_gem_object_create_stolen(dev, ring->size); 1158 + if (obj == NULL) 1159 + obj = i915_gem_alloc_object(dev, ring->size); 1185 1160 if (obj == NULL) { 1186 1161 DRM_ERROR("Failed to allocate ringbuffer\n"); 1187 1162 ret = -ENOMEM; ··· 1222 1189 ring->effective_size = ring->size; 1223 1190 if (IS_I830(ring->dev) || IS_845G(ring->dev)) 1224 1191 ring->effective_size -= 128; 1192 + 1193 + intel_ring_init_seqno(ring, dev_priv->last_seqno); 1225 1194 1226 1195 return 0; 1227 1196 ··· 1433 1398 return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request); 1434 1399 } 1435 1400 1401 + static int __intel_ring_begin(struct intel_ring_buffer *ring, 1402 + int bytes) 1403 + { 1404 + int ret; 1405 + 1406 + if (unlikely(ring->tail + bytes > ring->effective_size)) { 1407 + ret = intel_wrap_ring_buffer(ring); 1408 + if (unlikely(ret)) 1409 + return ret; 1410 + } 1411 + 1412 + if (unlikely(ring->space < bytes)) { 1413 + ret = ring_wait_for_space(ring, bytes); 1414 + if (unlikely(ret)) 1415 + return ret; 1416 + } 1417 + 1418 + ring->space -= bytes; 1419 + return 0; 1420 + } 1421 + 1436 1422 int intel_ring_begin(struct intel_ring_buffer *ring, 1437 1423 int num_dwords) 1438 1424 { 1439 1425 drm_i915_private_t *dev_priv = ring->dev->dev_private; 1440 - int n = 4*num_dwords; 1441 1426 int ret; 1442 1427 1443 1428 ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible); ··· 1469 1414 if (ret) 1470 1415 return ret; 1471 1416 1472 - if (unlikely(ring->tail + n > ring->effective_size)) { 1473 - ret = intel_wrap_ring_buffer(ring); 1474 - if (unlikely(ret)) 1475 - return ret; 1417 + return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t)); 1418 + } 1419 + 1420 + void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno) 1421 + { 1422 + struct drm_i915_private *dev_priv = ring->dev->dev_private; 1423 + 1424 + BUG_ON(ring->outstanding_lazy_request); 1425 + 1426 + if (INTEL_INFO(ring->dev)->gen >= 6) { 1427 + 
I915_WRITE(RING_SYNC_0(ring->mmio_base), 0); 1428 + I915_WRITE(RING_SYNC_1(ring->mmio_base), 0); 1476 1429 } 1477 1430 1478 - if (unlikely(ring->space < n)) { 1479 - ret = ring_wait_for_space(ring, n); 1480 - if (unlikely(ret)) 1481 - return ret; 1482 - } 1483 - 1484 - ring->space -= n; 1485 - return 0; 1431 + ring->set_seqno(ring, seqno); 1486 1432 } 1487 1433 1488 1434 void intel_ring_advance(struct intel_ring_buffer *ring) ··· 1648 1592 ring->irq_put = gen6_ring_put_irq; 1649 1593 ring->irq_enable_mask = GT_USER_INTERRUPT; 1650 1594 ring->get_seqno = gen6_ring_get_seqno; 1595 + ring->set_seqno = ring_set_seqno; 1651 1596 ring->sync_to = gen6_ring_sync; 1652 1597 ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID; 1653 1598 ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV; ··· 1659 1602 ring->add_request = pc_render_add_request; 1660 1603 ring->flush = gen4_render_ring_flush; 1661 1604 ring->get_seqno = pc_render_get_seqno; 1605 + ring->set_seqno = pc_render_set_seqno; 1662 1606 ring->irq_get = gen5_ring_get_irq; 1663 1607 ring->irq_put = gen5_ring_put_irq; 1664 1608 ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY; ··· 1670 1612 else 1671 1613 ring->flush = gen4_render_ring_flush; 1672 1614 ring->get_seqno = ring_get_seqno; 1615 + ring->set_seqno = ring_set_seqno; 1673 1616 if (IS_GEN2(dev)) { 1674 1617 ring->irq_get = i8xx_ring_get_irq; 1675 1618 ring->irq_put = i8xx_ring_put_irq; ··· 1742 1683 else 1743 1684 ring->flush = gen4_render_ring_flush; 1744 1685 ring->get_seqno = ring_get_seqno; 1686 + ring->set_seqno = ring_set_seqno; 1745 1687 if (IS_GEN2(dev)) { 1746 1688 ring->irq_get = i8xx_ring_get_irq; 1747 1689 ring->irq_put = i8xx_ring_put_irq; ··· 1803 1743 ring->flush = gen6_ring_flush; 1804 1744 ring->add_request = gen6_add_request; 1805 1745 ring->get_seqno = gen6_ring_get_seqno; 1746 + ring->set_seqno = ring_set_seqno; 1806 1747 ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT; 1807 1748 ring->irq_get = gen6_ring_get_irq; 1808 1749 
ring->irq_put = gen6_ring_put_irq; ··· 1819 1758 ring->flush = bsd_ring_flush; 1820 1759 ring->add_request = i9xx_add_request; 1821 1760 ring->get_seqno = ring_get_seqno; 1761 + ring->set_seqno = ring_set_seqno; 1822 1762 if (IS_GEN5(dev)) { 1823 1763 ring->irq_enable_mask = GT_BSD_USER_INTERRUPT; 1824 1764 ring->irq_get = gen5_ring_get_irq; ··· 1849 1787 ring->flush = blt_ring_flush; 1850 1788 ring->add_request = gen6_add_request; 1851 1789 ring->get_seqno = gen6_ring_get_seqno; 1790 + ring->set_seqno = ring_set_seqno; 1852 1791 ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT; 1853 1792 ring->irq_get = gen6_ring_get_irq; 1854 1793 ring->irq_put = gen6_ring_put_irq;
+10 -1
drivers/gpu/drm/i915/intel_ringbuffer.h
··· 90 90 */ 91 91 u32 (*get_seqno)(struct intel_ring_buffer *ring, 92 92 bool lazy_coherency); 93 + void (*set_seqno)(struct intel_ring_buffer *ring, 94 + u32 seqno); 93 95 int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, 94 96 u32 offset, u32 length, 95 97 unsigned flags); ··· 180 178 return ring->status_page.page_addr[reg]; 181 179 } 182 180 181 + static inline void 182 + intel_write_status_page(struct intel_ring_buffer *ring, 183 + int reg, u32 value) 184 + { 185 + ring->status_page.page_addr[reg] = value; 186 + } 187 + 183 188 /** 184 189 * Reads a dword out of the status page, which is written to from the command 185 190 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or ··· 217 208 } 218 209 void intel_ring_advance(struct intel_ring_buffer *ring); 219 210 int __must_check intel_ring_idle(struct intel_ring_buffer *ring); 220 - 211 + void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno); 221 212 int intel_ring_flush_all_caches(struct intel_ring_buffer *ring); 222 213 int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring); 223 214
+2 -5
drivers/gpu/drm/i915/intel_sdvo.c
··· 1997 1997 1998 1998 1999 1999 done: 2000 - if (intel_sdvo->base.base.crtc) { 2001 - struct drm_crtc *crtc = intel_sdvo->base.base.crtc; 2002 - intel_set_mode(crtc, &crtc->mode, 2003 - crtc->x, crtc->y, crtc->fb); 2004 - } 2000 + if (intel_sdvo->base.base.crtc) 2001 + intel_crtc_restore_mode(intel_sdvo->base.base.crtc); 2005 2002 2006 2003 return 0; 2007 2004 #undef CHECK_PROPERTY
+1 -2
drivers/gpu/drm/i915/intel_tv.c
··· 1479 1479 } 1480 1480 1481 1481 if (changed && crtc) 1482 - intel_set_mode(crtc, &crtc->mode, 1483 - crtc->x, crtc->y, crtc->fb); 1482 + intel_crtc_restore_mode(crtc); 1484 1483 out: 1485 1484 return ret; 1486 1485 }
+40
include/drm/drm_mm.h
··· 89 89 { 90 90 return mm->hole_stack.next; 91 91 } 92 + 93 + static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_node) 94 + { 95 + return hole_node->start + hole_node->size; 96 + } 97 + 98 + static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node) 99 + { 100 + BUG_ON(!hole_node->hole_follows); 101 + return __drm_mm_hole_node_start(hole_node); 102 + } 103 + 104 + static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node) 105 + { 106 + return list_entry(hole_node->node_list.next, 107 + struct drm_mm_node, node_list)->start; 108 + } 109 + 110 + static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node) 111 + { 112 + return __drm_mm_hole_node_end(hole_node); 113 + } 114 + 92 115 #define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \ 93 116 &(mm)->head_node.node_list, \ 94 117 node_list) ··· 122 99 entry != NULL; entry = next, \ 123 100 next = entry ? list_entry(entry->node_list.next, \ 124 101 struct drm_mm_node, node_list) : NULL) \ 102 + 103 + /* Note that we need to unroll list_for_each_entry in order to inline 104 + * setting hole_start and hole_end on each iteration and keep the 105 + * macro sane. 106 + */ 107 + #define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \ 108 + for (entry = list_entry((mm)->hole_stack.next, struct drm_mm_node, hole_stack); \ 109 + &entry->hole_stack != &(mm)->hole_stack ? 
\ 110 + hole_start = drm_mm_hole_node_start(entry), \ 111 + hole_end = drm_mm_hole_node_end(entry), \ 112 + 1 : 0; \ 113 + entry = list_entry(entry->hole_stack.next, struct drm_mm_node, hole_stack)) 114 + 125 115 /* 126 116 * Basic range manager support (drm_mm.c) 127 117 */ 118 + extern struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm, 119 + unsigned long start, 120 + unsigned long size, 121 + bool atomic); 128 122 extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node, 129 123 unsigned long size, 130 124 unsigned alignment,
-2
include/drm/intel-gtt.h
··· 18 18 /* Share the scratch page dma with ppgtts. */ 19 19 dma_addr_t scratch_page_dma; 20 20 struct page *scratch_page; 21 - /* for ppgtt PDE access */ 22 - u32 __iomem *gtt; 23 21 /* needed for ioremap in drm/i915 */ 24 22 phys_addr_t gma_bus_addr; 25 23 } *intel_gtt_get(void);