Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'drm-intel-next-2013-02-01' of git://people.freedesktop.org/~danvet/drm-intel into drm-next

Daniel writes:
"Probably the last feature pull for 3.9, there's some fixes outstanding
thought that I'd like to sneak in. And maybe 3.8 takes a bit longer ...
Anyway, highlights of this pull:
- Kill the horrible IS_DISPLAYREG hack to handle the mmio offset movements
on vlv, big thanks to Ville.
- Dynamic power well support for Haswell, shaves away a bit of power when only
using the eDP port on pipe A (Paulo). Plus unclaimed register fixes
uncovered by this.
- Clarifications of the gpu hang/reset state transitions, hopefully fixing
a few spurious -EIO deaths in userspace.
- Haswell ELD fixes.
- Some more (pp)gtt cleanups from Ben.
- A few smaller things all over.

Plus all the stuff from the previous rather small pull request:
- Broadcast RGB improvements and reduced color range fixes from Ville.
- Ben is on a "kill legacy gtt code for good" spree, first pile of patches
included.
- No-relocs and bo lut improvements for faster execbuf from Chris.
- Some refactorings from Imre."

* tag 'drm-intel-next-2013-02-01' of git://people.freedesktop.org/~danvet/drm-intel: (101 commits)
GPU/i915: Fix acpi_bus_get_device() check in drivers/gpu/drm/i915/intel_opregion.c
drm/i915: Set the SR01 "screen off" bit in i915_redisable_vga() too
drm/i915: Kill IS_DISPLAYREG()
drm/i915: Introduce i915_vgacntrl_reg()
drm/i915: gen6_gmch_remove can be static
drm/i915: dynamic Haswell display power well support
drm/i915: check the power down well on assert_pipe()
drm/i915: don't send DP "idle" pattern before "normal" on HSW PORT_A
drm/i915: don't run hsw power well code on !hsw
drm/i915: kill cargo-culted locking from power well code
drm/i915: Only run idle processing from i915_gem_retire_requests_worker
drm/i915: Fix CAGF for HSW
drm/i915: Reclaim GTT space for failed PPGTT
drm/i915: remove intel_gtt structure
drm/i915: Add probe and remove to the gtt ops
drm/i915: extract hw ppgtt setup/cleanup code
drm/i915: pte_encode is gen6+
drm/i915: vfuncs for ppgtt
drm/i915: vfuncs for gtt_clear_range/insert_entries
drm/i915: Error state should print /sys/kernel/debug
...

+2290 -1767
+35 -52
drivers/char/agp/intel-gtt.c
···
 };
 
 static struct _intel_private {
-	struct intel_gtt base;
 	const struct intel_gtt_driver *driver;
 	struct pci_dev *pcidev;	/* device one */
 	struct pci_dev *bridge_dev;
···
 	struct resource ifp_resource;
 	int resource_valid;
 	struct page *scratch_page;
+	phys_addr_t scratch_page_dma;
 	int refcount;
+	/* Whether i915 needs to use the dmar apis or not. */
+	unsigned int needs_dmar : 1;
+	phys_addr_t gma_bus_addr;
+	/* Size of memory reserved for graphics by the BIOS */
+	unsigned int stolen_size;
+	/* Total number of gtt entries. */
+	unsigned int gtt_total_entries;
+	/* Part of the gtt that is mappable by the cpu, for those chips where
+	 * this is not the full gtt. */
+	unsigned int gtt_mappable_entries;
 } intel_private;
 
 #define INTEL_GTT_GEN	intel_private.driver->gen
···
 	get_page(page);
 	set_pages_uc(page, 1);
 
-	if (intel_private.base.needs_dmar) {
+	if (intel_private.needs_dmar) {
 		dma_addr = pci_map_page(intel_private.pcidev, page, 0,
 					PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 		if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
 			return -EINVAL;
 
-		intel_private.base.scratch_page_dma = dma_addr;
+		intel_private.scratch_page_dma = dma_addr;
 	} else
-		intel_private.base.scratch_page_dma = page_to_phys(page);
+		intel_private.scratch_page_dma = page_to_phys(page);
 
 	intel_private.scratch_page = page;
 
···
 		/* On previous hardware, the GTT size was just what was
 		 * required to map the aperture.
 		 */
-		return intel_private.base.gtt_mappable_entries;
+		return intel_private.gtt_mappable_entries;
 	}
 }
···
 static void intel_gtt_teardown_scratch_page(void)
 {
 	set_pages_wb(intel_private.scratch_page, 1);
-	pci_unmap_page(intel_private.pcidev, intel_private.base.scratch_page_dma,
+	pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
 		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 	put_page(intel_private.scratch_page);
 	__free_page(intel_private.scratch_page);
···
 	if (ret != 0)
 		return ret;
 
-	intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
-	intel_private.base.gtt_total_entries = intel_gtt_total_entries();
+	intel_private.gtt_mappable_entries = intel_gtt_mappable_entries();
+	intel_private.gtt_total_entries = intel_gtt_total_entries();
 
 	/* save the PGETBL reg for resume */
 	intel_private.PGETBL_save =
···
 
 	dev_info(&intel_private.bridge_dev->dev,
 		 "detected gtt size: %dK total, %dK mappable\n",
-		 intel_private.base.gtt_total_entries * 4,
-		 intel_private.base.gtt_mappable_entries * 4);
+		 intel_private.gtt_total_entries * 4,
+		 intel_private.gtt_mappable_entries * 4);
 
-	gtt_map_size = intel_private.base.gtt_total_entries * 4;
+	gtt_map_size = intel_private.gtt_total_entries * 4;
 
 	intel_private.gtt = NULL;
 	if (INTEL_GTT_GEN < 6 && INTEL_GTT_GEN > 2)
···
 
 	global_cache_flush();	/* FIXME: ? */
 
-	intel_private.base.stolen_size = intel_gtt_stolen_size();
+	intel_private.stolen_size = intel_gtt_stolen_size();
 
-	intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
+	intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
 
 	ret = intel_gtt_setup_scratch_page();
 	if (ret != 0) {
···
 	pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
 			      &gma_addr);
 
-	intel_private.base.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
+	intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
 
 	return 0;
 }
···
 	unsigned int aper_size;
 	int i;
 
-	aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
-		    / MB(1);
+	aper_size = (intel_private.gtt_mappable_entries << PAGE_SHIFT) / MB(1);
 
 	for (i = 0; i < num_sizes; i++) {
 		if (aper_size == intel_fake_agp_sizes[i].size) {
···
 		return -EIO;
 
 	intel_private.clear_fake_agp = true;
-	agp_bridge->gart_bus_addr = intel_private.base.gma_bus_addr;
+	agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
 
 	return 0;
 }
···
 {
 	int ret = -EINVAL;
 
-	if (intel_private.base.do_idle_maps)
-		return -ENODEV;
-
 	if (intel_private.clear_fake_agp) {
-		int start = intel_private.base.stolen_size / PAGE_SIZE;
-		int end = intel_private.base.gtt_mappable_entries;
+		int start = intel_private.stolen_size / PAGE_SIZE;
+		int end = intel_private.gtt_mappable_entries;
 		intel_gtt_clear_range(start, end - start);
 		intel_private.clear_fake_agp = false;
 	}
···
 	if (mem->page_count == 0)
 		goto out;
 
-	if (pg_start + mem->page_count > intel_private.base.gtt_total_entries)
+	if (pg_start + mem->page_count > intel_private.gtt_total_entries)
 		goto out_err;
 
 	if (type != mem->type)
···
 	if (!mem->is_flushed)
 		global_cache_flush();
 
-	if (intel_private.base.needs_dmar) {
+	if (intel_private.needs_dmar) {
 		struct sg_table st;
 
 		ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
···
 	unsigned int i;
 
 	for (i = first_entry; i < (first_entry + num_entries); i++) {
-		intel_private.driver->write_entry(intel_private.base.scratch_page_dma,
+		intel_private.driver->write_entry(intel_private.scratch_page_dma,
 						  i, 0);
 	}
 	readl(intel_private.gtt+i-1);
···
 	if (mem->page_count == 0)
 		return 0;
 
-	if (intel_private.base.do_idle_maps)
-		return -ENODEV;
-
 	intel_gtt_clear_range(pg_start, mem->page_count);
 
-	if (intel_private.base.needs_dmar) {
+	if (intel_private.needs_dmar) {
 		intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
 		mem->sg_list = NULL;
 		mem->num_sg = 0;
···
 	writel(addr | pte_flags, intel_private.gtt + entry);
 }
 
-/* Certain Gen5 chipsets require require idling the GPU before
- * unmapping anything from the GTT when VT-d is enabled.
- */
-static inline int needs_idle_maps(void)
-{
-#ifdef CONFIG_INTEL_IOMMU
-	const unsigned short gpu_devid = intel_private.pcidev->device;
-
-	/* Query intel_iommu to see if we need the workaround. Presumably that
-	 * was loaded first.
-	 */
-	if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
-	     gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
-	     intel_iommu_gfx_mapped)
-		return 1;
-#endif
-	return 0;
-}
 
 static int i9xx_setup(void)
 {
···
 		intel_private.gtt_bus_addr = reg_addr + KB(512);
 		break;
 	}
-
-	if (needs_idle_maps())
-		intel_private.base.do_idle_maps = 1;
 
 	intel_i9xx_setup_flush();
 
···
 }
 EXPORT_SYMBOL(intel_gmch_probe);
 
-struct intel_gtt *intel_gtt_get(void)
+void intel_gtt_get(size_t *gtt_total, size_t *stolen_size)
 {
-	return &intel_private.base;
+	*gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
+	*stolen_size = intel_private.stolen_size;
 }
 EXPORT_SYMBOL(intel_gtt_get);
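The intel_gtt_get() rework at the end of this file is the user-visible half of dropping struct intel_gtt: i915 now asks for the sizes instead of holding a pointer into intel-gtt's private state. A minimal caller sketch, assuming intel_gmch_probe() has already succeeded (the example_ name is hypothetical, not from this diff):

/* Hypothetical caller of the new two-out-param interface. */
static int example_setup_gtt(size_t *gtt_total, size_t *stolen)
{
	/* Sizes are reported by value; no more peeking at
	 * intel_private.base from outside intel-gtt. */
	intel_gtt_get(gtt_total, stolen);
	return 0;
}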
+33
drivers/gpu/drm/drm_edid.c
···
 #define VIDEO_BLOCK     0x02
 #define VENDOR_BLOCK    0x03
 #define SPEAKER_BLOCK	0x04
+#define VIDEO_CAPABILITY_BLOCK	0x07
 #define EDID_BASIC_AUDIO	(1 << 6)
 #define EDID_CEA_YCRCB444	(1 << 5)
 #define EDID_CEA_YCRCB422	(1 << 4)
+#define EDID_CEA_VCDB_QS	(1 << 6)
 
 /**
  * Search EDID for CEA extension block.
···
 	return has_audio;
 }
 EXPORT_SYMBOL(drm_detect_monitor_audio);
+
+/**
+ * drm_rgb_quant_range_selectable - is RGB quantization range selectable?
+ *
+ * Check whether the monitor reports the RGB quantization range selection
+ * as supported. The AVI infoframe can then be used to inform the monitor
+ * which quantization range (full or limited) is used.
+ */
+bool drm_rgb_quant_range_selectable(struct edid *edid)
+{
+	u8 *edid_ext;
+	int i, start, end;
+
+	edid_ext = drm_find_cea_extension(edid);
+	if (!edid_ext)
+		return false;
+
+	if (cea_db_offsets(edid_ext, &start, &end))
+		return false;
+
+	for_each_cea_db(edid_ext, i, start, end) {
+		if (cea_db_tag(&edid_ext[i]) == VIDEO_CAPABILITY_BLOCK &&
+		    cea_db_payload_len(&edid_ext[i]) == 2) {
+			DRM_DEBUG_KMS("CEA VCDB 0x%02x\n", edid_ext[i + 2]);
+			return edid_ext[i + 2] & EDID_CEA_VCDB_QS;
+		}
+	}
+
+	return false;
+}
+EXPORT_SYMBOL(drm_rgb_quant_range_selectable);
 
 /**
  * drm_add_display_info - pull display info out if present
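drm_rgb_quant_range_selectable() reports whether the sink's CEA Video Capability Data Block advertises the QS bit, i.e. whether the source may pick the RGB quantization range via the AVI infoframe. A hedged sketch of how an encoder could combine it with the Broadcast RGB property (the INTEL_BROADCAST_RGB_* values come from the i915_drv.h hunk below; the helper itself is illustrative, not part of this diff):

/* Illustrative policy helper, not part of this diff. */
static bool example_want_limited_range(struct edid *edid, int broadcast_rgb)
{
	switch (broadcast_rgb) {
	case INTEL_BROADCAST_RGB_AUTO:
		/* Only go limited range automatically when the sink lets
		 * us signal the range through the AVI infoframe. */
		return drm_rgb_quant_range_selectable(edid);
	case INTEL_BROADCAST_RGB_LIMITED:
		return true;
	default:
		return false; /* full range */
	}
}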
+1
drivers/gpu/drm/i915/Makefile
···
 	  i915_gem_tiling.o \
 	  i915_sysfs.o \
 	  i915_trace_points.o \
+	  i915_ums.o \
 	  intel_display.o \
 	  intel_crt.o \
 	  intel_lvds.o \
+120 -11
drivers/gpu/drm/i915/i915_debugfs.c
···
 	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
 		   count, size);
 
-	seq_printf(m, "%zu [%zu] gtt total\n",
-		   dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);
+	seq_printf(m, "%zu [%lu] gtt total\n",
+		   dev_priv->gtt.total,
+		   dev_priv->gtt.mappable_end - dev_priv->gtt.start);
 
 	mutex_unlock(&dev->struct_mutex);
 
···
 
 	error_priv->dev = dev;
 
-	spin_lock_irqsave(&dev_priv->error_lock, flags);
-	error_priv->error = dev_priv->first_error;
+	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+	error_priv->error = dev_priv->gpu_error.first_error;
 	if (error_priv->error)
 		kref_get(&error_priv->error->ref);
-	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
 
 	return single_open(file, i915_error_state, error_priv);
 }
···
 	u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 	u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
 	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-	u32 rpstat;
+	u32 rpstat, cagf;
 	u32 rpupei, rpcurup, rpprevup;
 	u32 rpdownei, rpcurdown, rpprevdown;
 	int max_freq;
···
 	rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
 	rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
 	rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
+	if (IS_HASWELL(dev))
+		cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
+	else
+		cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
+	cagf *= GT_FREQUENCY_MULTIPLIER;
 
 	gen6_gt_force_wake_put(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
···
 		   gt_perf_status & 0xff);
 	seq_printf(m, "Render p-state limit: %d\n",
 		   rp_state_limits & 0xff);
-	seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
-					GEN6_CAGF_SHIFT) * GT_FREQUENCY_MULTIPLIER);
+	seq_printf(m, "CAGF: %dMHz\n", cagf);
 	seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
 		   GEN6_CURICONT_MASK);
 	seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
···
 
 	len = snprintf(buf, sizeof(buf),
 		       "wedged : %d\n",
-		       atomic_read(&dev_priv->mm.wedged));
+		       atomic_read(&dev_priv->gpu_error.reset_counter));
 
 	if (len > sizeof(buf))
 		len = sizeof(buf);
···
 	int len;
 
 	len = snprintf(buf, sizeof(buf),
-		       "0x%08x\n", dev_priv->stop_rings);
+		       "0x%08x\n", dev_priv->gpu_error.stop_rings);
 
 	if (len > sizeof(buf))
 		len = sizeof(buf);
···
 	if (ret)
 		return ret;
 
-	dev_priv->stop_rings = val;
+	dev_priv->gpu_error.stop_rings = val;
 	mutex_unlock(&dev->struct_mutex);
 
 	return cnt;
···
 	.open = simple_open,
 	.read = i915_ring_stop_read,
 	.write = i915_ring_stop_write,
+	.llseek = default_llseek,
+};
+
+#define DROP_UNBOUND 0x1
+#define DROP_BOUND 0x2
+#define DROP_RETIRE 0x4
+#define DROP_ACTIVE 0x8
+#define DROP_ALL (DROP_UNBOUND | \
+		  DROP_BOUND | \
+		  DROP_RETIRE | \
+		  DROP_ACTIVE)
+static ssize_t
+i915_drop_caches_read(struct file *filp,
+		      char __user *ubuf,
+		      size_t max,
+		      loff_t *ppos)
+{
+	char buf[20];
+	int len;
+
+	len = snprintf(buf, sizeof(buf), "0x%08x\n", DROP_ALL);
+	if (len > sizeof(buf))
+		len = sizeof(buf);
+
+	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_drop_caches_write(struct file *filp,
+		       const char __user *ubuf,
+		       size_t cnt,
+		       loff_t *ppos)
+{
+	struct drm_device *dev = filp->private_data;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj, *next;
+	char buf[20];
+	int val = 0, ret;
+
+	if (cnt > 0) {
+		if (cnt > sizeof(buf) - 1)
+			return -EINVAL;
+
+		if (copy_from_user(buf, ubuf, cnt))
+			return -EFAULT;
+		buf[cnt] = 0;
+
+		val = simple_strtoul(buf, NULL, 0);
+	}
+
+	DRM_DEBUG_DRIVER("Dropping caches: 0x%08x\n", val);
+
+	/* No need to check and wait for gpu resets, only libdrm auto-restarts
+	 * on ioctls on -EAGAIN. */
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	if (val & DROP_ACTIVE) {
+		ret = i915_gpu_idle(dev);
+		if (ret)
+			goto unlock;
+	}
+
+	if (val & (DROP_RETIRE | DROP_ACTIVE))
+		i915_gem_retire_requests(dev);
+
+	if (val & DROP_BOUND) {
+		list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list)
+			if (obj->pin_count == 0) {
+				ret = i915_gem_object_unbind(obj);
+				if (ret)
+					goto unlock;
+			}
+	}
+
+	if (val & DROP_UNBOUND) {
+		list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
+			if (obj->pages_pin_count == 0) {
+				ret = i915_gem_object_put_pages(obj);
+				if (ret)
+					goto unlock;
+			}
+	}
+
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret ?: cnt;
+}
+
+static const struct file_operations i915_drop_caches_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = i915_drop_caches_read,
+	.write = i915_drop_caches_write,
 	.llseek = default_llseek,
 };
 
···
 		return ret;
 
 	ret = i915_debugfs_create(minor->debugfs_root, minor,
+				  "i915_gem_drop_caches",
+				  &i915_drop_caches_fops);
+	if (ret)
+		return ret;
+
+	ret = i915_debugfs_create(minor->debugfs_root, minor,
 				  "i915_error_state",
 				  &i915_error_state_fops);
 	if (ret)
···
 	drm_debugfs_remove_files((struct drm_info_list *) &i915_min_freq_fops,
 				 1, minor);
 	drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
+				 1, minor);
+	drm_debugfs_remove_files((struct drm_info_list *) &i915_drop_caches_fops,
 				 1, minor);
 	drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
 				 1, minor);
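The new i915_gem_drop_caches file takes a bitmask of the DROP_* flags above, so tests can force the driver into a known state. A userspace sketch, assuming debugfs is mounted at the usual place and the device is DRM minor 0:

/* Userspace sketch; the path and minor number are assumptions. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* 0xf == DROP_UNBOUND | DROP_BOUND | DROP_RETIRE | DROP_ACTIVE */
	const char *val = "0xf";
	int fd = open("/sys/kernel/debug/dri/0/i915_gem_drop_caches",
		      O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror("write");
	close(fd);
	return 0;
}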
+24 -19
drivers/gpu/drm/i915/i915_dma.c
···
 	case I915_PARAM_HAS_PINNED_BATCHES:
 		value = 1;
 		break;
+	case I915_PARAM_HAS_EXEC_NO_RELOC:
+		value = 1;
+		break;
+	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
+		value = 1;
+		break;
 	default:
 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 				 param->param);
···
 	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
 
 	dev_priv->dri1.gfx_hws_cpu_addr =
-		ioremap_wc(dev_priv->mm.gtt_base_addr + hws->addr, 4096);
+		ioremap_wc(dev_priv->gtt.mappable_base + hws->addr, 4096);
 	if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
 		i915_dma_cleanup(dev);
 		ring->status_page.gfx_addr = 0;
···
 	if (!ap)
 		return;
 
-	ap->ranges[0].base = dev_priv->mm.gtt->gma_bus_addr;
-	ap->ranges[0].size =
-		dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+	ap->ranges[0].base = dev_priv->gtt.mappable_base;
+	ap->ranges[0].size = dev_priv->gtt.mappable_end - dev_priv->gtt.start;
+
 	primary =
 		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
 
···
 		goto put_gmch;
 	}
 
-	aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
-	dev_priv->mm.gtt_base_addr = dev_priv->mm.gtt->gma_bus_addr;
+	aperture_size = dev_priv->gtt.mappable_end;
 
-	dev_priv->mm.gtt_mapping =
-		io_mapping_create_wc(dev_priv->mm.gtt_base_addr,
+	dev_priv->gtt.mappable =
+		io_mapping_create_wc(dev_priv->gtt.mappable_base,
 				     aperture_size);
-	if (dev_priv->mm.gtt_mapping == NULL) {
+	if (dev_priv->gtt.mappable == NULL) {
 		ret = -EIO;
 		goto out_rmmap;
 	}
 
-	i915_mtrr_setup(dev_priv, dev_priv->mm.gtt_base_addr,
+	i915_mtrr_setup(dev_priv, dev_priv->gtt.mappable_base,
 			aperture_size);
 
 	/* The i915 workqueue is primarily used for batched retirement of
···
 		pci_enable_msi(dev->pdev);
 
 	spin_lock_init(&dev_priv->irq_lock);
-	spin_lock_init(&dev_priv->error_lock);
+	spin_lock_init(&dev_priv->gpu_error.lock);
 	spin_lock_init(&dev_priv->rps.lock);
 	mutex_init(&dev_priv->dpio_lock);
 
···
 out_mtrrfree:
 	if (dev_priv->mm.gtt_mtrr >= 0) {
 		mtrr_del(dev_priv->mm.gtt_mtrr,
-			 dev_priv->mm.gtt_base_addr,
+			 dev_priv->gtt.mappable_base,
 			 aperture_size);
 		dev_priv->mm.gtt_mtrr = -1;
 	}
-	io_mapping_free(dev_priv->mm.gtt_mapping);
+	io_mapping_free(dev_priv->gtt.mappable);
 out_rmmap:
 	pci_iounmap(dev->pdev, dev_priv->regs);
 put_gmch:
-	i915_gem_gtt_fini(dev);
+	dev_priv->gtt.gtt_remove(dev);
 put_bridge:
 	pci_dev_put(dev_priv->bridge_dev);
 free_priv:
···
 	/* Cancel the retire work handler, which should be idle now. */
 	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
 
-	io_mapping_free(dev_priv->mm.gtt_mapping);
+	io_mapping_free(dev_priv->gtt.mappable);
 	if (dev_priv->mm.gtt_mtrr >= 0) {
 		mtrr_del(dev_priv->mm.gtt_mtrr,
-			 dev_priv->mm.gtt_base_addr,
-			 dev_priv->mm.gtt->gtt_mappable_entries * PAGE_SIZE);
+			 dev_priv->gtt.mappable_base,
+			 dev_priv->gtt.mappable_end);
 		dev_priv->mm.gtt_mtrr = -1;
 	}
 
···
 	}
 
 	/* Free error state after interrupts are fully disabled. */
-	del_timer_sync(&dev_priv->hangcheck_timer);
-	cancel_work_sync(&dev_priv->error_work);
+	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
+	cancel_work_sync(&dev_priv->gpu_error.work);
 	i915_destroy_error_state(dev);
 
 	if (dev->pdev->msi_enabled)
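dev_priv->gtt.mappable is the io_mapping for the CPU-visible part of the aperture that used to live in dev_priv->mm.gtt_mapping. A minimal sketch of how such a mapping gets used (the helper name is made up; the io_mapping_* calls are the stock kernel API):

/* Hypothetical helper: write one dword through the mappable aperture. */
static void example_poke_aperture(struct drm_i915_private *dev_priv,
				  unsigned long offset, u32 value)
{
	void __iomem *vaddr;

	/* Temporarily kmap a single page of the CPU-mappable region. */
	vaddr = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
					 offset & PAGE_MASK);
	iowrite32(value, vaddr + (offset & ~PAGE_MASK));
	io_mapping_unmap_atomic(vaddr);
}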
+9 -107
drivers/gpu/drm/i915/i915_drv.c
···
 	.has_bsd_ring = 1,
 	.has_blt_ring = 1,
 	.is_valleyview = 1,
+	.display_mmio_offset = VLV_DISPLAY_BASE,
 };
 
 static const struct intel_device_info intel_valleyview_d_info = {
···
 	.has_bsd_ring = 1,
 	.has_blt_ring = 1,
 	.is_valleyview = 1,
+	.display_mmio_offset = VLV_DISPLAY_BASE,
 };
 
 static const struct intel_device_info intel_haswell_d_info = {
···
 static int i915_drm_freeze(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	intel_set_power_well(dev, true);
 
 	drm_kms_helper_poll_disable(dev);
 
···
 	}
 
 	/* Also reset the gpu hangman. */
-	if (dev_priv->stop_rings) {
+	if (dev_priv->gpu_error.stop_rings) {
 		DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
-		dev_priv->stop_rings = 0;
+		dev_priv->gpu_error.stop_rings = 0;
 		if (ret == -ENODEV) {
 			DRM_ERROR("Reset not implemented, but ignoring "
 				  "error for simulated gpu hangs\n");
···
 	i915_gem_reset(dev);
 
 	ret = -ENODEV;
-	if (get_seconds() - dev_priv->last_gpu_reset < 5)
+	if (get_seconds() - dev_priv->gpu_error.last_reset < 5)
 		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
 	else
 		ret = intel_gpu_reset(dev);
 
-	dev_priv->last_gpu_reset = get_seconds();
+	dev_priv->gpu_error.last_reset = get_seconds();
 	if (ret) {
 		DRM_ERROR("Failed to reset chip.\n");
 		mutex_unlock(&dev->struct_mutex);
···
 	((HAS_FORCE_WAKE((dev_priv)->dev)) && \
 	 ((reg) < 0x40000) &&            \
 	 ((reg) != FORCEWAKE))
-
-static bool IS_DISPLAYREG(u32 reg)
-{
-	/*
-	 * This should make it easier to transition modules over to the
-	 * new register block scheme, since we can do it incrementally.
-	 */
-	if (reg >= VLV_DISPLAY_BASE)
-		return false;
-
-	if (reg >= RENDER_RING_BASE &&
-	    reg < RENDER_RING_BASE + 0xff)
-		return false;
-	if (reg >= GEN6_BSD_RING_BASE &&
-	    reg < GEN6_BSD_RING_BASE + 0xff)
-		return false;
-	if (reg >= BLT_RING_BASE &&
-	    reg < BLT_RING_BASE + 0xff)
-		return false;
-
-	if (reg == PGTBL_ER)
-		return false;
-
-	if (reg >= IPEIR_I965 &&
-	    reg < HWSTAM)
-		return false;
-
-	if (reg == MI_MODE)
-		return false;
-
-	if (reg == GFX_MODE_GEN7)
-		return false;
-
-	if (reg == RENDER_HWS_PGA_GEN7 ||
-	    reg == BSD_HWS_PGA_GEN7 ||
-	    reg == BLT_HWS_PGA_GEN7)
-		return false;
-
-	if (reg == GEN6_BSD_SLEEP_PSMI_CONTROL ||
-	    reg == GEN6_BSD_RNCID)
-		return false;
-
-	if (reg == GEN6_BLITTER_ECOSKPD)
-		return false;
-
-	if (reg >= 0x4000c &&
-	    reg <= 0x4002c)
-		return false;
-
-	if (reg >= 0x4f000 &&
-	    reg <= 0x4f08f)
-		return false;
-
-	if (reg >= 0x4f100 &&
-	    reg <= 0x4f11f)
-		return false;
-
-	if (reg >= VLV_MASTER_IER &&
-	    reg <= GEN6_PMIER)
-		return false;
-
-	if (reg >= FENCE_REG_SANDYBRIDGE_0 &&
-	    reg < (FENCE_REG_SANDYBRIDGE_0 + (16*8)))
-		return false;
-
-	if (reg >= VLV_IIR_RW &&
-	    reg <= VLV_ISR)
-		return false;
-
-	if (reg == FORCEWAKE_VLV ||
-	    reg == FORCEWAKE_ACK_VLV)
-		return false;
-
-	if (reg == GEN6_GDRST)
-		return false;
-
-	switch (reg) {
-	case _3D_CHICKEN3:
-	case IVB_CHICKEN3:
-	case GEN7_COMMON_SLICE_CHICKEN1:
-	case GEN7_L3CNTLREG1:
-	case GEN7_L3_CHICKEN_MODE_REGISTER:
-	case GEN7_ROW_CHICKEN2:
-	case GEN7_L3SQCREG4:
-	case GEN7_SQ_CHICKEN_MBCUNIT_CONFIG:
-	case GEN7_HALF_SLICE_CHICKEN1:
-	case GEN6_MBCTL:
-	case GEN6_UCGCTL2:
-		return false;
-	default:
-		break;
-	}
-
-	return true;
-}
-
 static void
 ilk_dummy_write(struct drm_i915_private *dev_priv)
 {
···
 		if (dev_priv->forcewake_count == 0) \
 			dev_priv->gt.force_wake_put(dev_priv); \
 		spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
-	} else if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
-		val = read##y(dev_priv->regs + reg + 0x180000); \
 	} else { \
 		val = read##y(dev_priv->regs + reg); \
 	} \
···
 		DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
 		I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
 	} \
-	if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
-		write##y(val, dev_priv->regs + reg + 0x180000); \
-	} else { \
-		write##y(val, dev_priv->regs + reg); \
-	} \
+	write##y(val, dev_priv->regs + reg); \
 	if (unlikely(__fifo_ret)) { \
 		gen6_gt_check_fifodbg(dev_priv); \
 	} \
+238 -141
drivers/gpu/drm/i915/i915_drv.h
···
 	DEV_INFO_FLAG(has_llc)
 
 struct intel_device_info {
+	u32 display_mmio_offset;
 	u8 gen;
 	u8 is_mobile:1;
 	u8 is_i85x:1;
···
 	u8 has_llc:1;
 };
 
+enum i915_cache_level {
+	I915_CACHE_NONE = 0,
+	I915_CACHE_LLC,
+	I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
+};
+
+/* The Graphics Translation Table is the way in which GEN hardware translates a
+ * Graphics Virtual Address into a Physical Address. In addition to the normal
+ * collateral associated with any va->pa translations GEN hardware also has a
+ * portion of the GTT which can be mapped by the CPU and remain both coherent
+ * and correct (in cases like swizzling). That region is referred to as GMADR in
+ * the spec.
+ */
+struct i915_gtt {
+	unsigned long start;		/* Start offset of used GTT */
+	size_t total;			/* Total size GTT can map */
+	size_t stolen_size;		/* Total size of stolen memory */
+
+	unsigned long mappable_end;	/* End offset that we can CPU map */
+	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
+	phys_addr_t mappable_base;	/* PA of our GMADR */
+
+	/** "Graphics Stolen Memory" holds the global PTEs */
+	void __iomem *gsm;
+
+	bool do_idle_maps;
+	dma_addr_t scratch_page_dma;
+	struct page *scratch_page;
+
+	/* global gtt ops */
+	int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
+			 size_t *stolen);
+	void (*gtt_remove)(struct drm_device *dev);
+	void (*gtt_clear_range)(struct drm_device *dev,
+				unsigned int first_entry,
+				unsigned int num_entries);
+	void (*gtt_insert_entries)(struct drm_device *dev,
+				   struct sg_table *st,
+				   unsigned int pg_start,
+				   enum i915_cache_level cache_level);
+};
+#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)
+
 #define I915_PPGTT_PD_ENTRIES 512
 #define I915_PPGTT_PT_ENTRIES 1024
 struct i915_hw_ppgtt {
···
 	uint32_t pd_offset;
 	dma_addr_t *pt_dma_addr;
 	dma_addr_t scratch_page_dma_addr;
+
+	/* pte functions, mirroring the interface of the global gtt. */
+	void (*clear_range)(struct i915_hw_ppgtt *ppgtt,
+			    unsigned int first_entry,
+			    unsigned int num_entries);
+	void (*insert_entries)(struct i915_hw_ppgtt *ppgtt,
+			       struct sg_table *st,
+			       unsigned int pg_start,
+			       enum i915_cache_level cache_level);
+	void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
 };
 
 
···
 	struct work_struct error_work;
 };
 
+struct i915_gem_mm {
+	/** Memory allocator for GTT stolen memory */
+	struct drm_mm stolen;
+	/** Memory allocator for GTT */
+	struct drm_mm gtt_space;
+	/** List of all objects in gtt_space. Used to restore gtt
+	 *  mappings on resume */
+	struct list_head bound_list;
+	/**
+	 * List of objects which are not bound to the GTT (thus
+	 * are idle and not used by the GPU) but still have
+	 * (presumably uncached) pages still attached.
+	 */
+	struct list_head unbound_list;
+
+	/** Usable portion of the GTT for GEM */
+	unsigned long stolen_base; /* limited to low memory (32-bit) */
+
+	int gtt_mtrr;
+
+	/** PPGTT used for aliasing the PPGTT with the GTT */
+	struct i915_hw_ppgtt *aliasing_ppgtt;
+
+	struct shrinker inactive_shrinker;
+	bool shrinker_no_lock_stealing;
+
+	/**
+	 * List of objects currently involved in rendering.
+	 *
+	 * Includes buffers having the contents of their GPU caches
+	 * flushed, not necessarily primitives. last_rendering_seqno
+	 * represents when the rendering involved will be completed.
+	 *
+	 * A reference is held on the buffer while on this list.
+	 */
+	struct list_head active_list;
+
+	/**
+	 * LRU list of objects which are not in the ringbuffer and
+	 * are ready to unbind, but are still in the GTT.
+	 *
+	 * last_rendering_seqno is 0 while an object is in this list.
+	 *
+	 * A reference is not held on the buffer while on this list,
+	 * as merely being GTT-bound shouldn't prevent its being
+	 * freed, and we'll pull it off the list in the free path.
+	 */
+	struct list_head inactive_list;
+
+	/** LRU list of objects with fence regs on them. */
+	struct list_head fence_list;
+
+	/**
+	 * We leave the user IRQ off as much as possible,
+	 * but this means that requests will finish and never
+	 * be retired once the system goes idle. Set a timer to
+	 * fire periodically while the ring is running. When it
+	 * fires, go retire requests.
+	 */
+	struct delayed_work retire_work;
+
+	/**
+	 * Are we in a non-interruptible section of code like
+	 * modesetting?
+	 */
+	bool interruptible;
+
+	/**
+	 * Flag if the X Server, and thus DRM, is not currently in
+	 * control of the device.
+	 *
+	 * This is set between LeaveVT and EnterVT. It needs to be
+	 * replaced with a semaphore. It also needs to be
+	 * transitioned away from for kernel modesetting.
+	 */
+	int suspended;
+
+	/** Bit 6 swizzling required for X tiling */
+	uint32_t bit_6_swizzle_x;
+	/** Bit 6 swizzling required for Y tiling */
+	uint32_t bit_6_swizzle_y;
+
+	/* storage for physical objects */
+	struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
+
+	/* accounting, useful for userland debugging */
+	size_t object_memory;
+	u32 object_count;
+};
+
+struct i915_gpu_error {
+	/* For hangcheck timer */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
+#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
+	struct timer_list hangcheck_timer;
+	int hangcheck_count;
+	uint32_t last_acthd[I915_NUM_RINGS];
+	uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
+
+	/* For reset and error_state handling. */
+	spinlock_t lock;
+	/* Protected by the above dev->gpu_error.lock. */
+	struct drm_i915_error_state *first_error;
+	struct work_struct work;
+
+	unsigned long last_reset;
+
+	/**
+	 * State variable and reset counter controlling the reset flow
+	 *
+	 * Upper bits are for the reset counter. This counter is used by the
+	 * wait_seqno code to race-free noticed that a reset event happened and
+	 * that it needs to restart the entire ioctl (since most likely the
+	 * seqno it waited for won't ever signal anytime soon).
+	 *
+	 * This is important for lock-free wait paths, where no contended lock
+	 * naturally enforces the correct ordering between the bail-out of the
+	 * waiter and the gpu reset work code.
+	 *
+	 * Lowest bit controls the reset state machine: Set means a reset is in
+	 * progress. This state will (presuming we don't have any bugs) decay
+	 * into either unset (successful reset) or the special WEDGED value (hw
+	 * terminally sour). All waiters on the reset_queue will be woken when
+	 * that happens.
+	 */
+	atomic_t reset_counter;
+
+	/**
+	 * Special values/flags for reset_counter
+	 *
+	 * Note that the code relies on
+	 * 	I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
+	 * being true.
+	 */
+#define I915_RESET_IN_PROGRESS_FLAG	1
+#define I915_WEDGED			0xffffffff
+
+	/**
+	 * Waitqueue to signal when the reset has completed. Used by clients
+	 * that wait for dev_priv->mm.wedged to settle.
+	 */
+	wait_queue_head_t reset_queue;
+
+	/* For gpu hang simulation. */
+	unsigned int stop_rings;
+};
+
 typedef struct drm_i915_private {
 	struct drm_device *dev;
 	struct kmem_cache *slab;
···
 	u32 pipestat[2];
 	u32 irq_mask;
 	u32 gt_irq_mask;
-	u32 pch_irq_mask;
 
 	u32 hotplug_supported_mask;
 	struct work_struct hotplug_work;
···
 
 	int num_pipe;
 	int num_pch_pll;
-
-	/* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
-#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
-	struct timer_list hangcheck_timer;
-	int hangcheck_count;
-	uint32_t last_acthd[I915_NUM_RINGS];
-	uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
-
-	unsigned int stop_rings;
 
 	unsigned long cfb_size;
 	unsigned int cfb_fb;
···
 
 	unsigned int fsb_freq, mem_freq, is_ddr3;
 
-	spinlock_t error_lock;
-	/* Protected by dev->error_lock. */
-	struct drm_i915_error_state *first_error;
-	struct work_struct error_work;
-	struct completion error_completion;
 	struct workqueue_struct *wq;
 
 	/* Display functions */
···
 	/* Register state */
 	bool modeset_on_lid;
 
-	struct {
-		/** Bridge to intel-gtt-ko */
-		struct intel_gtt *gtt;
-		/** Memory allocator for GTT stolen memory */
-		struct drm_mm stolen;
-		/** Memory allocator for GTT */
-		struct drm_mm gtt_space;
-		/** List of all objects in gtt_space. Used to restore gtt
-		 *  mappings on resume */
-		struct list_head bound_list;
-		/**
-		 * List of objects which are not bound to the GTT (thus
-		 * are idle and not used by the GPU) but still have
-		 * (presumably uncached) pages still attached.
-		 */
-		struct list_head unbound_list;
+	struct i915_gtt gtt;
 
-		/** Usable portion of the GTT for GEM */
-		unsigned long gtt_start;
-		unsigned long gtt_mappable_end;
-		unsigned long gtt_end;
-		unsigned long stolen_base; /* limited to low memory (32-bit) */
-
-		/** "Graphics Stolen Memory" holds the global PTEs */
-		void __iomem *gsm;
-
-		struct io_mapping *gtt_mapping;
-		phys_addr_t gtt_base_addr;
-		int gtt_mtrr;
-
-		/** PPGTT used for aliasing the PPGTT with the GTT */
-		struct i915_hw_ppgtt *aliasing_ppgtt;
-
-		struct shrinker inactive_shrinker;
-		bool shrinker_no_lock_stealing;
-
-		/**
-		 * List of objects currently involved in rendering.
-		 *
-		 * Includes buffers having the contents of their GPU caches
-		 * flushed, not necessarily primitives. last_rendering_seqno
-		 * represents when the rendering involved will be completed.
-		 *
-		 * A reference is held on the buffer while on this list.
-		 */
-		struct list_head active_list;
-
-		/**
-		 * LRU list of objects which are not in the ringbuffer and
-		 * are ready to unbind, but are still in the GTT.
-		 *
-		 * last_rendering_seqno is 0 while an object is in this list.
-		 *
-		 * A reference is not held on the buffer while on this list,
-		 * as merely being GTT-bound shouldn't prevent its being
-		 * freed, and we'll pull it off the list in the free path.
-		 */
-		struct list_head inactive_list;
-
-		/** LRU list of objects with fence regs on them. */
-		struct list_head fence_list;
-
-		/**
-		 * We leave the user IRQ off as much as possible,
-		 * but this means that requests will finish and never
-		 * be retired once the system goes idle. Set a timer to
-		 * fire periodically while the ring is running. When it
-		 * fires, go retire requests.
-		 */
-		struct delayed_work retire_work;
-
-		/**
-		 * Are we in a non-interruptible section of code like
-		 * modesetting?
-		 */
-		bool interruptible;
-
-		/**
-		 * Flag if the X Server, and thus DRM, is not currently in
-		 * control of the device.
-		 *
-		 * This is set between LeaveVT and EnterVT. It needs to be
-		 * replaced with a semaphore. It also needs to be
-		 * transitioned away from for kernel modesetting.
-		 */
-		int suspended;
-
-		/**
-		 * Flag if the hardware appears to be wedged.
-		 *
-		 * This is set when attempts to idle the device timeout.
-		 * It prevents command submission from occurring and makes
-		 * every pending request fail
-		 */
-		atomic_t wedged;
-
-		/** Bit 6 swizzling required for X tiling */
-		uint32_t bit_6_swizzle_x;
-		/** Bit 6 swizzling required for Y tiling */
-		uint32_t bit_6_swizzle_y;
-
-		/* storage for physical objects */
-		struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
-
-		/* accounting, useful for userland debugging */
-		size_t gtt_total;
-		size_t mappable_gtt_total;
-		size_t object_memory;
-		u32 object_count;
-	} mm;
+	struct i915_gem_mm mm;
 
 	/* Kernel Modesetting */
 
···
 	struct drm_mm_node *compressed_fb;
 	struct drm_mm_node *compressed_llb;
 
-	unsigned long last_gpu_reset;
+	struct i915_gpu_error gpu_error;
 
 	/* list of fbdev register on this device */
 	struct intel_fbdev *fbdev;
···
 	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
 	HDMI_AUDIO_AUTO,		/* trust EDID */
 	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
-};
-
-enum i915_cache_level {
-	I915_CACHE_NONE = 0,
-	I915_CACHE_LLC,
-	I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
 };
 
 #define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)
···
 			bool nonblocking);
 void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
···
 
 void i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
-int __must_check i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
 				      bool interruptible);
+static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
+{
+	return unlikely(atomic_read(&error->reset_counter)
+			& I915_RESET_IN_PROGRESS_FLAG);
+}
+
+static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
+{
+	return atomic_read(&error->reset_counter) == I915_WEDGED;
+}
 
 void i915_gem_reset(struct drm_device *dev);
 void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
···
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
 uint32_t
-i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
-				    uint32_t size,
-				    int tiling_mode);
+i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
+uint32_t
+i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
+			   int tiling_mode, bool fenced);
 
 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 				    enum i915_cache_level cache_level);
···
 			 struct drm_file *file);
 
 /* i915_gem_gtt.c */
-int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
 void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
 void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
 			    struct drm_i915_gem_object *obj,
···
 void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
 			       unsigned long mappable_end, unsigned long end);
 int i915_gem_gtt_init(struct drm_device *dev);
-void i915_gem_gtt_fini(struct drm_device *dev);
 static inline void i915_gem_chipset_flush(struct drm_device *dev)
 {
 	if (INTEL_INFO(dev)->gen < 6)
···
 extern int i915_save_state(struct drm_device *dev);
 extern int i915_restore_state(struct drm_device *dev);
 
-/* i915_suspend.c */
-extern int i915_save_state(struct drm_device *dev);
-extern int i915_restore_state(struct drm_device *dev);
+/* i915_ums.c */
+void i915_save_display_reg(struct drm_device *dev);
+void i915_restore_display_reg(struct drm_device *dev);
 
 /* i915_sysfs.c */
 void i915_setup_sysfs(struct drm_device *dev_priv);
···
 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
 extern void intel_modeset_setup_hw_state(struct drm_device *dev,
 					 bool force_restore);
+extern void i915_redisable_vga(struct drm_device *dev);
 extern bool intel_fbc_enabled(struct drm_device *dev);
 extern void intel_disable_fbc(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
···
 #define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
 #define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
 
+/* "Broadcast RGB" property */
+#define INTEL_BROADCAST_RGB_AUTO 0
+#define INTEL_BROADCAST_RGB_FULL 1
+#define INTEL_BROADCAST_RGB_LIMITED 2
+
+static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
+{
+	if (HAS_PCH_SPLIT(dev))
+		return CPU_VGACNTRL;
+	else if (IS_VALLEYVIEW(dev))
+		return VLV_VGACNTRL;
+	else
+		return VGACNTRL;
+}
 
 #endif
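The reset_counter comment above is the core of the hang/reset clarification. The calling convention it implies, sketched as a hypothetical wrapper (this mirrors what the i915_gem.c hunks below do in the nonblocking wait paths):

/* Sketch, not from the diff: sample the counter while still holding the
 * lock that protects the seqno, wait unlocked, and let __wait_seqno()
 * return -EAGAIN if the counter moved (i.e. a reset fired) meanwhile. */
static int example_wait_unlocked(struct drm_device *dev,
				 struct intel_ring_buffer *ring, u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned reset_counter =
		atomic_read(&dev_priv->gpu_error.reset_counter);
	int ret;

	mutex_unlock(&dev->struct_mutex);
	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
	mutex_lock(&dev->struct_mutex);

	return ret;
}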
+140 -145
drivers/gpu/drm/i915/i915_gem.c
···
 }
 
 static int
-i915_gem_wait_for_error(struct drm_device *dev)
+i915_gem_wait_for_error(struct i915_gpu_error *error)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct completion *x = &dev_priv->error_completion;
-	unsigned long flags;
 	int ret;
 
-	if (!atomic_read(&dev_priv->mm.wedged))
+#define EXIT_COND (!i915_reset_in_progress(error))
+	if (EXIT_COND)
 		return 0;
+
+	/* GPU is already declared terminally dead, give up. */
+	if (i915_terminally_wedged(error))
+		return -EIO;
 
 	/*
 	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
 	 * userspace. If it takes that long something really bad is going on and
 	 * we should simply try to bail out and fail as gracefully as possible.
 	 */
-	ret = wait_for_completion_interruptible_timeout(x, 10*HZ);
+	ret = wait_event_interruptible_timeout(error->reset_queue,
+					       EXIT_COND,
+					       10*HZ);
 	if (ret == 0) {
 		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
 		return -EIO;
 	} else if (ret < 0) {
 		return ret;
 	}
+#undef EXIT_COND
 
-	if (atomic_read(&dev_priv->mm.wedged)) {
-		/* GPU is hung, bump the completion count to account for
-		 * the token we just consumed so that we never hit zero and
-		 * end up waiting upon a subsequent completion event that
-		 * will never happen.
-		 */
-		spin_lock_irqsave(&x->wait.lock, flags);
-		x->done++;
-		spin_unlock_irqrestore(&x->wait.lock, flags);
-	}
 	return 0;
 }
 
 int i915_mutex_lock_interruptible(struct drm_device *dev)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
-	ret = i915_gem_wait_for_error(dev);
+	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
 	if (ret)
 		return ret;
 
···
 i915_gem_init_ioctl(struct drm_device *dev, void *data,
 		    struct drm_file *file)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_init *args = data;
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
···
 	mutex_lock(&dev->struct_mutex);
 	i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
 				  args->gtt_end);
+	dev_priv->gtt.mappable_end = args->gtt_end;
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
···
 		pinned += obj->gtt_space->size;
 	mutex_unlock(&dev->struct_mutex);
 
-	args->aper_size = dev_priv->mm.gtt_total;
+	args->aper_size = dev_priv->gtt.total;
 	args->aper_available_size = args->aper_size - pinned;
 
 	return 0;
···
 		 * source page isn't available. Return the error and we'll
 		 * retry in the slow path.
 		 */
-		if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
+		if (fast_user_write(dev_priv->gtt.mappable, page_base,
 				    page_offset, user_data, page_length)) {
 			ret = -EFAULT;
 			goto out_unpin;
···
 }
 
 int
-i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+i915_gem_check_wedge(struct i915_gpu_error *error,
 		     bool interruptible)
 {
-	if (atomic_read(&dev_priv->mm.wedged)) {
-		struct completion *x = &dev_priv->error_completion;
-		bool recovery_complete;
-		unsigned long flags;
-
-		/* Give the error handler a chance to run. */
-		spin_lock_irqsave(&x->wait.lock, flags);
-		recovery_complete = x->done > 0;
-		spin_unlock_irqrestore(&x->wait.lock, flags);
-
+	if (i915_reset_in_progress(error)) {
 		/* Non-interruptible callers can't handle -EAGAIN, hence return
 		 * -EIO unconditionally for these. */
 		if (!interruptible)
 			return -EIO;
 
-		/* Recovery complete, but still wedged means reset failure. */
-		if (recovery_complete)
+		/* Recovery complete, but the reset failed ... */
+		if (i915_terminally_wedged(error))
 			return -EIO;
 
 		return -EAGAIN;
···
  * __wait_seqno - wait until execution of seqno has finished
  * @ring: the ring expected to report seqno
  * @seqno: duh!
+ * @reset_counter: reset sequence associated with the given seqno
  * @interruptible: do an interruptible wait (normally yes)
  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
+ *
+ * Note: It is of utmost importance that the passed in seqno and reset_counter
+ * values have been read by the caller in an smp safe manner. Where read-side
+ * locks are involved, it is sufficient to read the reset_counter before
+ * unlocking the lock that protects the seqno. For lockless tricks, the
+ * reset_counter _must_ be read before, and an appropriate smp_rmb must be
+ * inserted.
  *
  * Returns 0 if the seqno was found within the alloted time. Else returns the
  * errno with remaining time filled in timeout argument.
  */
 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
+			unsigned reset_counter,
 			bool interruptible, struct timespec *timeout)
 {
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
···
 
 #define EXIT_COND \
 	(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
-	 atomic_read(&dev_priv->mm.wedged))
+	 i915_reset_in_progress(&dev_priv->gpu_error) || \
+	 reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
 	do {
 		if (interruptible)
 			end = wait_event_interruptible_timeout(ring->irq_queue,
···
 			end = wait_event_timeout(ring->irq_queue, EXIT_COND,
 						 timeout_jiffies);
 
-		ret = i915_gem_check_wedge(dev_priv, interruptible);
+		/* We need to check whether any gpu reset happened in between
+		 * the caller grabbing the seqno and now ... */
+		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+			end = -EAGAIN;
+
+		/* ... but upgrade the -EGAIN to an -EIO if the gpu is truely
+		 * gone. */
+		ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
 		if (ret)
 			end = ret;
 	} while (end == 0 && wait_forever);
···
 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 	BUG_ON(seqno == 0);
 
-	ret = i915_gem_check_wedge(dev_priv, interruptible);
+	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
 	if (ret)
 		return ret;
 
···
 	if (ret)
 		return ret;
 
-	return __wait_seqno(ring, seqno, interruptible, NULL);
+	return __wait_seqno(ring, seqno,
+			    atomic_read(&dev_priv->gpu_error.reset_counter),
+			    interruptible, NULL);
 }
 
 /**
···
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring = obj->ring;
+	unsigned reset_counter;
 	u32 seqno;
 	int ret;
 
···
 	if (seqno == 0)
 		return 0;
 
-	ret = i915_gem_check_wedge(dev_priv, true);
+	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
 	if (ret)
 		return ret;
 
···
 	if (ret)
 		return ret;
 
+	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 	mutex_unlock(&dev->struct_mutex);
-	ret = __wait_seqno(ring, seqno, true, NULL);
+	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
 	mutex_lock(&dev->struct_mutex);
 
 	i915_gem_retire_requests_ring(ring);
···
 
 	obj->fault_mappable = true;
 
-	pfn = ((dev_priv->mm.gtt_base_addr + obj->gtt_offset) >> PAGE_SHIFT) +
+	pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) +
 		page_offset;
 
 	/* Finally, remap it using the new GTT offset */
···
 		/* If this -EIO is due to a gpu hang, give the reset code a
 		 * chance to clean up the mess. Otherwise return the proper
 		 * SIGBUS. */
-		if (!atomic_read(&dev_priv->mm.wedged))
+		if (i915_terminally_wedged(&dev_priv->gpu_error))
 			return VM_FAULT_SIGBUS;
 	case -EAGAIN:
 		/* Give the error handler a chance to run and move the
···
 	obj->fault_mappable = false;
 }
 
-static uint32_t
+uint32_t
 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
 {
 	uint32_t gtt_size;
···
  * Return the required GTT alignment for an object, taking into account
  * potential fence register mapping.
  */
-static uint32_t
-i915_gem_get_gtt_alignment(struct drm_device *dev,
-			   uint32_t size,
-			   int tiling_mode)
+uint32_t
+i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
+			   int tiling_mode, bool fenced)
 {
 	/*
 	 * Minimum alignment is 4k (GTT page size), but might be greater
 	 * if a fence register is needed for the object.
 	 */
-	if (INTEL_INFO(dev)->gen >= 4 ||
+	if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
 	    tiling_mode == I915_TILING_NONE)
 		return 4096;
 
 	/*
 	 * Previous chips need to be aligned to the size of the smallest
 	 * fence register that can contain the object.
-	 */
-	return i915_gem_get_gtt_size(dev, size, tiling_mode);
-}
-
-/**
- * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
- *					 unfenced object
- * @dev: the device
- * @size: size of the object
- * @tiling_mode: tiling mode of the object
- *
- * Return the required GTT alignment for an object, only taking into account
- * unfenced tiled surface requirements.
- */
-uint32_t
-i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
-				    uint32_t size,
-				    int tiling_mode)
-{
-	/*
-	 * Minimum alignment is 4k (GTT page size) for sane hw.
-	 */
-	if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
-	    tiling_mode == I915_TILING_NONE)
-		return 4096;
-
-	/* Previous hardware however needs to be aligned to a power-of-two
-	 * tile height. The simplest method for determining this is to reuse
-	 * the power-of-tile object size.
 	 */
 	return i915_gem_get_gtt_size(dev, size, tiling_mode);
 }
···
 		goto unlock;
 	}
 
-	if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
+	if (obj->base.size > dev_priv->gtt.mappable_end) {
 		ret = -E2BIG;
 		goto out;
 	}
···
 	kfree(obj->pages);
 }
 
-static int
+int
 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 {
 	const struct drm_i915_gem_object_ops *ops = obj->ops;
···
 	if (obj->pages)
 		return 0;
 
+	if (obj->madv != I915_MADV_WILLNEED) {
+		DRM_ERROR("Attempting to obtain a purgeable object\n");
+		return -EINVAL;
+	}
+
 	BUG_ON(obj->pages_pin_count);
 
 	ret = ops->get_pages(obj);
···
 
 	BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
 	BUG_ON(!obj->active);
-
-	if (obj->pin_count) /* are we a framebuffer? */
-		intel_mark_fb_idle(obj);
 
 	list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
···
 
 	if (!dev_priv->mm.suspended) {
 		if (i915_enable_hangcheck) {
-			mod_timer(&dev_priv->hangcheck_timer,
+			mod_timer(&dev_priv->gpu_error.hangcheck_timer,
 				  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
 		}
 		if (was_empty) {
···
 int
 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 {
+	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_wait *args = data;
 	struct drm_i915_gem_object *obj;
 	struct intel_ring_buffer *ring = NULL;
 	struct timespec timeout_stack, *timeout = NULL;
+	unsigned reset_counter;
 	u32 seqno = 0;
 	int ret = 0;
···
 	}
 
 	drm_gem_object_unreference(&obj->base);
+	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 	mutex_unlock(&dev->struct_mutex);
 
-	ret = __wait_seqno(ring, seqno, true, timeout);
+	ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
 	if (timeout) {
 		WARN_ON(!timespec_valid(timeout));
 		args->timeout_ns = timespec_to_ns(timeout);
···
 {
 	u32 old_write_domain, old_read_domains;
 
-	/* Act a barrier for all accesses through the GTT */
-	mb();
-
 	/* Force a pagefault for domain tracking on next user access */
 	i915_gem_release_mmap(obj);
 
 	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
 		return;
+
+	/* Wait for any direct GTT access to complete */
+	mb();
 
 	old_read_domains = obj->base.read_domains;
 	old_write_domain = obj->base.write_domain;
···
 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 {
 	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
-	int ret = 0;
+	int ret;
 
 	if (obj->gtt_space == NULL)
 		return 0;
···
 	return 0;
 }
 
-static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
-					struct drm_i915_gem_object *obj)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint64_t val;
-
-	if (obj) {
-		u32 size = obj->gtt_space->size;
-
-		val = (uint64_t)((obj->gtt_offset + size - 4096) &
-				 0xfffff000) << 32;
-		val |= obj->gtt_offset & 0xfffff000;
-		val |= (uint64_t)((obj->stride / 128) - 1) <<
-			SANDYBRIDGE_FENCE_PITCH_SHIFT;
-
-		if (obj->tiling_mode == I915_TILING_Y)
-			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
-		val |= I965_FENCE_REG_VALID;
-	} else
-		val = 0;
-
-	I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
-	POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
-}
-
 static void i965_write_fence_reg(struct drm_device *dev, int reg,
 				 struct drm_i915_gem_object *obj)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	int fence_reg;
+	int fence_pitch_shift;
 	uint64_t val;
+
+	if (INTEL_INFO(dev)->gen >= 6) {
+		fence_reg = FENCE_REG_SANDYBRIDGE_0;
+		fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
+	} else {
+		fence_reg = FENCE_REG_965_0;
+		fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
+	}
 
 	if (obj) {
 		u32 size = obj->gtt_space->size;
···
 		val = (uint64_t)((obj->gtt_offset + size - 4096) &
 				 0xfffff000) << 32;
 		val |= obj->gtt_offset & 0xfffff000;
-		val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
+		val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
 		if (obj->tiling_mode == I915_TILING_Y)
 			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
 		val |= I965_FENCE_REG_VALID;
 	} else
 		val = 0;
 
-	I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
-	POSTING_READ(FENCE_REG_965_0 + reg * 8);
+	fence_reg += reg * 8;
+	I915_WRITE64(fence_reg, val);
+	POSTING_READ(fence_reg);
 }
 
 static void i915_write_fence_reg(struct drm_device *dev, int reg,
···
 	POSTING_READ(FENCE_REG_830_0 + reg * 4);
 }
 
+inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
+{
+	return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
+}
+
 static void i915_gem_write_fence(struct drm_device *dev, int reg,
 				 struct drm_i915_gem_object *obj)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Ensure that all CPU reads are completed before installing a fence
+	 * and all writes before removing the fence.
+	 */
+	if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
+		mb();
+
 	switch (INTEL_INFO(dev)->gen) {
 	case 7:
-	case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
+	case 6:
 	case 5:
 	case 4: i965_write_fence_reg(dev, reg, obj); break;
 	case 3: i915_write_fence_reg(dev, reg, obj); break;
 	case 2: i830_write_fence_reg(dev, reg, obj); break;
 	default: BUG();
 	}
+
+	/* And similarly be paranoid that no direct access to this region
+	 * is reordered to before the fence is installed.
+	 */
+	if (i915_gem_object_needs_mb(obj))
+		mb();
 }
 
 static inline int fence_number(struct drm_i915_private *dev_priv,
···
 }
 
 static int
-i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
+i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
 {
 	if (obj->last_fenced_seqno) {
 		int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
···
 
 		obj->last_fenced_seqno = 0;
 	}
-
-	/* Ensure that all CPU reads are completed before installing a fence
-	 * and all writes before removing the fence.
-	 */
-	if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
-		mb();
 
 	obj->fenced_gpu_access = false;
 	return 0;
···
 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	int ret;
 
-	ret = i915_gem_object_flush_fence(obj);
+	ret = i915_gem_object_wait_fence(obj);
 	if (ret)
 		return ret;
 
···
 	 * will need to serialise the write to the associated fence register?
 	 */
 	if (obj->fence_dirty) {
-		ret = i915_gem_object_flush_fence(obj);
+		ret = i915_gem_object_wait_fence(obj);
 		if (ret)
 			return ret;
 	}
···
 	if (reg->obj) {
 		struct drm_i915_gem_object *old = reg->obj;
 
-		ret = i915_gem_object_flush_fence(old);
+		ret = i915_gem_object_wait_fence(old);
 		if (ret)
 			return ret;
 
···
 	bool mappable, fenceable;
 	int ret;
 
-	if (obj->madv != I915_MADV_WILLNEED) {
-		DRM_ERROR("Attempting to bind a purgeable object\n");
-		return -EINVAL;
-	}
-
 	fence_size = i915_gem_get_gtt_size(dev,
 					   obj->base.size,
 					   obj->tiling_mode);
 	fence_alignment = i915_gem_get_gtt_alignment(dev,
 						     obj->base.size,
-						     obj->tiling_mode);
+						     obj->tiling_mode, true);
 	unfenced_alignment =
-		i915_gem_get_unfenced_gtt_alignment(dev,
+		i915_gem_get_gtt_alignment(dev,
 						    obj->base.size,
-						    obj->tiling_mode);
+						    obj->tiling_mode, false);
 
 	if (alignment == 0)
 		alignment = map_and_fenceable ? fence_alignment :
···
 	 * before evicting everything in a vain attempt to find space.
 	 */
 	if (obj->base.size >
-	    (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
+	    (map_and_fenceable ? dev_priv->gtt.mappable_end : dev_priv->gtt.total)) {
 		DRM_ERROR("Attempting to bind an object larger than the aperture\n");
 		return -E2BIG;
 	}
···
 	if (map_and_fenceable)
 		ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
 							  size, alignment, obj->cache_level,
-							  0, dev_priv->mm.gtt_mappable_end);
+							  0, dev_priv->gtt.mappable_end);
 	else
 		ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
 						 size, alignment, obj->cache_level);
···
 		(node->start & (fence_alignment - 1)) == 0;
 
 	mappable =
-		obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
+		obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
 
 	obj->map_and_fenceable = mappable && fenceable;
···
 		return ret;
 
 	i915_gem_object_flush_cpu_write_domain(obj);
+
+	/* Serialise direct access to this object with the barriers for
+	 * coherent writes from the GPU, by effectively invalidating the
+	 * GTT domain upon first access.
3136 + */ 3137 + if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) 3138 + mb(); 3111 3139 3112 3140 old_write_domain = obj->base.write_domain; 3113 3141 old_read_domains = obj->base.read_domains; ··· 3422 3436 unsigned long recent_enough = jiffies - msecs_to_jiffies(20); 3423 3437 struct drm_i915_gem_request *request; 3424 3438 struct intel_ring_buffer *ring = NULL; 3439 + unsigned reset_counter; 3425 3440 u32 seqno = 0; 3426 3441 int ret; 3427 3442 3428 - if (atomic_read(&dev_priv->mm.wedged)) 3429 - return -EIO; 3443 + ret = i915_gem_wait_for_error(&dev_priv->gpu_error); 3444 + if (ret) 3445 + return ret; 3446 + 3447 + ret = i915_gem_check_wedge(&dev_priv->gpu_error, false); 3448 + if (ret) 3449 + return ret; 3430 3450 3431 3451 spin_lock(&file_priv->mm.lock); 3432 3452 list_for_each_entry(request, &file_priv->mm.request_list, client_list) { ··· 3442 3450 ring = request->ring; 3443 3451 seqno = request->seqno; 3444 3452 } 3453 + reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); 3445 3454 spin_unlock(&file_priv->mm.lock); 3446 3455 3447 3456 if (seqno == 0) 3448 3457 return 0; 3449 3458 3450 - ret = __wait_seqno(ring, seqno, true, NULL); 3459 + ret = __wait_seqno(ring, seqno, reset_counter, true, NULL); 3451 3460 if (ret == 0) 3452 3461 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); 3453 3462 ··· 3846 3853 * And not confound mm.suspended! 3847 3854 */ 3848 3855 dev_priv->mm.suspended = 1; 3849 - del_timer_sync(&dev_priv->hangcheck_timer); 3856 + del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); 3850 3857 3851 3858 i915_kernel_lost_context(dev); 3852 3859 i915_gem_cleanup_ringbuffer(dev); ··· 3946 3953 3947 3954 i915_gem_init_swizzling(dev); 3948 3955 3949 - dev_priv->next_seqno = dev_priv->last_seqno = (u32)~0 - 0x1000; 3950 - 3951 3956 ret = intel_init_render_ring_buffer(dev); 3952 3957 if (ret) 3953 3958 return ret; ··· 3961 3970 if (ret) 3962 3971 goto cleanup_bsd_ring; 3963 3972 } 3973 + 3974 + ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000)); 3975 + if (ret) 3976 + return ret; 3964 3977 3965 3978 /* 3966 3979 * XXX: There was some w/a described somewhere suggesting loading ··· 4023 4028 if (drm_core_check_feature(dev, DRIVER_MODESET)) 4024 4029 return 0; 4025 4030 4026 - if (atomic_read(&dev_priv->mm.wedged)) { 4031 + if (i915_reset_in_progress(&dev_priv->gpu_error)) { 4027 4032 DRM_ERROR("Reenabling wedged hardware, good luck\n"); 4028 - atomic_set(&dev_priv->mm.wedged, 0); 4033 + atomic_set(&dev_priv->gpu_error.reset_counter, 0); 4029 4034 } 4030 4035 4031 4036 mutex_lock(&dev->struct_mutex); ··· 4109 4114 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); 4110 4115 INIT_DELAYED_WORK(&dev_priv->mm.retire_work, 4111 4116 i915_gem_retire_work_handler); 4112 - init_completion(&dev_priv->error_completion); 4117 + init_waitqueue_head(&dev_priv->gpu_error.reset_queue); 4113 4118 4114 4119 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ 4115 4120 if (IS_GEN3(dev)) {
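A recurring pattern in the i915_gem.c hunks above: waiters now read gpu_error.reset_counter while still holding struct_mutex and hand that sample to __wait_seqno(), so a reset completing while they slept shows up as a counter mismatch instead of a spurious -EIO. A minimal standalone sketch of that handshake follows; the names (gpu_error_sketch, wait_seqno_sketch) and the single-shot polling shape are illustrative, not driver code.

#include <stdatomic.h>
#include <errno.h>
#include <stdbool.h>

/* Stand-in for dev_priv->gpu_error in this sketch. */
struct gpu_error_sketch {
	atomic_uint reset_counter;
};

/* Wrap-safe seqno comparison, in the spirit of i915_seqno_passed(). */
static bool seqno_passed_sketch(unsigned int completed, unsigned int seqno)
{
	return (int)(completed - seqno) >= 0;
}

/* The caller samples reset_counter under the lock, drops the lock, then
 * waits.  If the counter moved in the meantime, a reset (or a hang being
 * declared) invalidated the request state: return -EAGAIN so the ioctl
 * restarts instead of reporting a bogus error. */
static int wait_seqno_sketch(struct gpu_error_sketch *error,
			     unsigned int sampled_reset_counter,
			     unsigned int completed, unsigned int seqno)
{
	if (atomic_load(&error->reset_counter) != sampled_reset_counter)
		return -EAGAIN;

	return seqno_passed_sketch(completed, seqno) ? 0 : -EBUSY;
}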
+1 -1
drivers/gpu/drm/i915/i915_gem_evict.c
··· 80 80 if (mappable) 81 81 drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, 82 82 min_size, alignment, cache_level, 83 - 0, dev_priv->mm.gtt_mappable_end); 83 + 0, dev_priv->gtt.mappable_end); 84 84 else 85 85 drm_mm_init_scan(&dev_priv->mm.gtt_space, 86 86 min_size, alignment, cache_level);
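The one-line change above is the same consolidation visible throughout this pull: GTT bookkeeping moves out of loose dev_priv->mm fields into a single structure. A rough sketch of its shape, inferred only from the fields this diff touches (the real struct i915_gtt also carries the probe/remove and clear/insert function pointers shown in i915_gem_gtt.c below):

#include <stddef.h>

struct i915_gtt_sketch {
	unsigned long start;        /* first offset handed to the allocator */
	size_t total;               /* total size of the global GTT */
	unsigned long mappable_end; /* end of the CPU-mappable aperture */
	size_t stolen_size;         /* memory reserved for graphics by the BIOS */
	void *gsm;                  /* ioremapped global GTT page table */
};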
+163 -117
drivers/gpu/drm/i915/i915_gem_execbuffer.c
··· 34 34 #include <linux/dma_remapping.h> 35 35 36 36 struct eb_objects { 37 + struct list_head objects; 37 38 int and; 38 - struct hlist_head buckets[0]; 39 + union { 40 + struct drm_i915_gem_object *lut[0]; 41 + struct hlist_head buckets[0]; 42 + }; 39 43 }; 40 44 41 45 static struct eb_objects * 42 - eb_create(int size) 46 + eb_create(struct drm_i915_gem_execbuffer2 *args) 43 47 { 44 - struct eb_objects *eb; 45 - int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; 46 - BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head))); 47 - while (count > size) 48 - count >>= 1; 49 - eb = kzalloc(count*sizeof(struct hlist_head) + 50 - sizeof(struct eb_objects), 51 - GFP_KERNEL); 52 - if (eb == NULL) 53 - return eb; 48 + struct eb_objects *eb = NULL; 54 49 55 - eb->and = count - 1; 50 + if (args->flags & I915_EXEC_HANDLE_LUT) { 51 + int size = args->buffer_count; 52 + size *= sizeof(struct drm_i915_gem_object *); 53 + size += sizeof(struct eb_objects); 54 + eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); 55 + } 56 + 57 + if (eb == NULL) { 58 + int size = args->buffer_count; 59 + int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; 60 + BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head))); 61 + while (count > 2*size) 62 + count >>= 1; 63 + eb = kzalloc(count*sizeof(struct hlist_head) + 64 + sizeof(struct eb_objects), 65 + GFP_TEMPORARY); 66 + if (eb == NULL) 67 + return eb; 68 + 69 + eb->and = count - 1; 70 + } else 71 + eb->and = -args->buffer_count; 72 + 73 + INIT_LIST_HEAD(&eb->objects); 56 74 return eb; 57 75 } 58 76 59 77 static void 60 78 eb_reset(struct eb_objects *eb) 61 79 { 62 - memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head)); 80 + if (eb->and >= 0) 81 + memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head)); 63 82 } 64 83 65 - static void 66 - eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj) 84 + static int 85 + eb_lookup_objects(struct eb_objects *eb, 86 + struct drm_i915_gem_exec_object2 *exec, 87 + const struct drm_i915_gem_execbuffer2 *args, 88 + struct drm_file *file) 67 89 { 68 - hlist_add_head(&obj->exec_node, 69 - &eb->buckets[obj->exec_handle & eb->and]); 90 + int i; 91 + 92 + spin_lock(&file->table_lock); 93 + for (i = 0; i < args->buffer_count; i++) { 94 + struct drm_i915_gem_object *obj; 95 + 96 + obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle)); 97 + if (obj == NULL) { 98 + spin_unlock(&file->table_lock); 99 + DRM_DEBUG("Invalid object handle %d at index %d\n", 100 + exec[i].handle, i); 101 + return -ENOENT; 102 + } 103 + 104 + if (!list_empty(&obj->exec_list)) { 105 + spin_unlock(&file->table_lock); 106 + DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n", 107 + obj, exec[i].handle, i); 108 + return -EINVAL; 109 + } 110 + 111 + drm_gem_object_reference(&obj->base); 112 + list_add_tail(&obj->exec_list, &eb->objects); 113 + 114 + obj->exec_entry = &exec[i]; 115 + if (eb->and < 0) { 116 + eb->lut[i] = obj; 117 + } else { 118 + uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? 
i : exec[i].handle; 119 + obj->exec_handle = handle; 120 + hlist_add_head(&obj->exec_node, 121 + &eb->buckets[handle & eb->and]); 122 + } 123 + } 124 + spin_unlock(&file->table_lock); 125 + 126 + return 0; 70 127 } 71 128 72 129 static struct drm_i915_gem_object * 73 130 eb_get_object(struct eb_objects *eb, unsigned long handle) 74 131 { 75 - struct hlist_head *head; 76 - struct hlist_node *node; 77 - struct drm_i915_gem_object *obj; 132 + if (eb->and < 0) { 133 + if (handle >= -eb->and) 134 + return NULL; 135 + return eb->lut[handle]; 136 + } else { 137 + struct hlist_head *head; 138 + struct hlist_node *node; 78 139 79 - head = &eb->buckets[handle & eb->and]; 80 - hlist_for_each(node, head) { 81 - obj = hlist_entry(node, struct drm_i915_gem_object, exec_node); 82 - if (obj->exec_handle == handle) 83 - return obj; 140 + head = &eb->buckets[handle & eb->and]; 141 + hlist_for_each(node, head) { 142 + struct drm_i915_gem_object *obj; 143 + 144 + obj = hlist_entry(node, struct drm_i915_gem_object, exec_node); 145 + if (obj->exec_handle == handle) 146 + return obj; 147 + } 148 + return NULL; 84 149 } 85 - 86 - return NULL; 87 150 } 88 151 89 152 static void 90 153 eb_destroy(struct eb_objects *eb) 91 154 { 155 + while (!list_empty(&eb->objects)) { 156 + struct drm_i915_gem_object *obj; 157 + 158 + obj = list_first_entry(&eb->objects, 159 + struct drm_i915_gem_object, 160 + exec_list); 161 + list_del_init(&obj->exec_list); 162 + drm_gem_object_unreference(&obj->base); 163 + } 92 164 kfree(eb); 93 165 } 94 166 ··· 281 209 282 210 /* Map the page containing the relocation we're going to perform. */ 283 211 reloc->offset += obj->gtt_offset; 284 - reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, 212 + reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, 285 213 reloc->offset & PAGE_MASK); 286 214 reloc_entry = (uint32_t __iomem *) 287 215 (reloc_page + (reloc->offset & ~PAGE_MASK)); ··· 360 288 361 289 static int 362 290 i915_gem_execbuffer_relocate(struct drm_device *dev, 363 - struct eb_objects *eb, 364 - struct list_head *objects) 291 + struct eb_objects *eb) 365 292 { 366 293 struct drm_i915_gem_object *obj; 367 294 int ret = 0; ··· 373 302 * lockdep complains vehemently. 
374 303 */ 375 304 pagefault_disable(); 376 - list_for_each_entry(obj, objects, exec_list) { 305 + list_for_each_entry(obj, &eb->objects, exec_list) { 377 306 ret = i915_gem_execbuffer_relocate_object(obj, eb); 378 307 if (ret) 379 308 break; ··· 395 324 396 325 static int 397 326 i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, 398 - struct intel_ring_buffer *ring) 327 + struct intel_ring_buffer *ring, 328 + bool *need_reloc) 399 329 { 400 330 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 401 331 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; ··· 437 365 obj->has_aliasing_ppgtt_mapping = 1; 438 366 } 439 367 440 - entry->offset = obj->gtt_offset; 368 + if (entry->offset != obj->gtt_offset) { 369 + entry->offset = obj->gtt_offset; 370 + *need_reloc = true; 371 + } 372 + 373 + if (entry->flags & EXEC_OBJECT_WRITE) { 374 + obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER; 375 + obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER; 376 + } 377 + 378 + if (entry->flags & EXEC_OBJECT_NEEDS_GTT && 379 + !obj->has_global_gtt_mapping) 380 + i915_gem_gtt_bind_object(obj, obj->cache_level); 381 + 441 382 return 0; 442 383 } 443 384 ··· 476 391 static int 477 392 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, 478 393 struct drm_file *file, 479 - struct list_head *objects) 394 + struct list_head *objects, 395 + bool *need_relocs) 480 396 { 481 397 struct drm_i915_gem_object *obj; 482 398 struct list_head ordered_objects; ··· 505 419 else 506 420 list_move_tail(&obj->exec_list, &ordered_objects); 507 421 508 - obj->base.pending_read_domains = 0; 422 + obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND; 509 423 obj->base.pending_write_domain = 0; 510 424 obj->pending_fenced_gpu_access = false; 511 425 } ··· 545 459 (need_mappable && !obj->map_and_fenceable)) 546 460 ret = i915_gem_object_unbind(obj); 547 461 else 548 - ret = i915_gem_execbuffer_reserve_object(obj, ring); 462 + ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs); 549 463 if (ret) 550 464 goto err; 551 465 } ··· 555 469 if (obj->gtt_space) 556 470 continue; 557 471 558 - ret = i915_gem_execbuffer_reserve_object(obj, ring); 472 + ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs); 559 473 if (ret) 560 474 goto err; 561 475 } ··· 575 489 576 490 static int 577 491 i915_gem_execbuffer_relocate_slow(struct drm_device *dev, 492 + struct drm_i915_gem_execbuffer2 *args, 578 493 struct drm_file *file, 579 494 struct intel_ring_buffer *ring, 580 - struct list_head *objects, 581 495 struct eb_objects *eb, 582 - struct drm_i915_gem_exec_object2 *exec, 583 - int count) 496 + struct drm_i915_gem_exec_object2 *exec) 584 497 { 585 498 struct drm_i915_gem_relocation_entry *reloc; 586 499 struct drm_i915_gem_object *obj; 500 + bool need_relocs; 587 501 int *reloc_offset; 588 502 int i, total, ret; 503 + int count = args->buffer_count; 589 504 590 505 /* We may process another execbuffer during the unlock... 
*/ 591 - while (!list_empty(objects)) { 592 - obj = list_first_entry(objects, 506 + while (!list_empty(&eb->objects)) { 507 + obj = list_first_entry(&eb->objects, 593 508 struct drm_i915_gem_object, 594 509 exec_list); 595 510 list_del_init(&obj->exec_list); ··· 637 550 638 551 /* reacquire the objects */ 639 552 eb_reset(eb); 640 - for (i = 0; i < count; i++) { 641 - obj = to_intel_bo(drm_gem_object_lookup(dev, file, 642 - exec[i].handle)); 643 - if (&obj->base == NULL) { 644 - DRM_DEBUG("Invalid object handle %d at index %d\n", 645 - exec[i].handle, i); 646 - ret = -ENOENT; 647 - goto err; 648 - } 649 - 650 - list_add_tail(&obj->exec_list, objects); 651 - obj->exec_handle = exec[i].handle; 652 - obj->exec_entry = &exec[i]; 653 - eb_add_object(eb, obj); 654 - } 655 - 656 - ret = i915_gem_execbuffer_reserve(ring, file, objects); 553 + ret = eb_lookup_objects(eb, exec, args, file); 657 554 if (ret) 658 555 goto err; 659 556 660 - list_for_each_entry(obj, objects, exec_list) { 557 + need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; 558 + ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs); 559 + if (ret) 560 + goto err; 561 + 562 + list_for_each_entry(obj, &eb->objects, exec_list) { 661 563 int offset = obj->exec_entry - exec; 662 564 ret = i915_gem_execbuffer_relocate_object_slow(obj, eb, 663 565 reloc + reloc_offset[offset]); ··· 700 624 static bool 701 625 i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) 702 626 { 627 + if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS) 628 + return false; 629 + 703 630 return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0; 704 631 } 705 632 ··· 716 637 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; 717 638 int length; /* limited by fault_in_pages_readable() */ 718 639 640 + if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS) 641 + return -EINVAL; 642 + 719 643 /* First check for malicious input causing overflow */ 720 644 if (exec[i].relocation_count > 721 645 INT_MAX / sizeof(struct drm_i915_gem_relocation_entry)) ··· 726 644 727 645 length = exec[i].relocation_count * 728 646 sizeof(struct drm_i915_gem_relocation_entry); 729 - if (!access_ok(VERIFY_READ, ptr, length)) 730 - return -EFAULT; 731 - 732 647 /* we may also need to update the presumed offsets */ 733 648 if (!access_ok(VERIFY_WRITE, ptr, length)) 734 649 return -EFAULT; ··· 747 668 u32 old_read = obj->base.read_domains; 748 669 u32 old_write = obj->base.write_domain; 749 670 750 - obj->base.read_domains = obj->base.pending_read_domains; 751 671 obj->base.write_domain = obj->base.pending_write_domain; 672 + if (obj->base.write_domain == 0) 673 + obj->base.pending_read_domains |= obj->base.read_domains; 674 + obj->base.read_domains = obj->base.pending_read_domains; 752 675 obj->fenced_gpu_access = obj->pending_fenced_gpu_access; 753 676 754 677 i915_gem_object_move_to_active(obj, ring); ··· 809 728 struct drm_i915_gem_exec_object2 *exec) 810 729 { 811 730 drm_i915_private_t *dev_priv = dev->dev_private; 812 - struct list_head objects; 813 731 struct eb_objects *eb; 814 732 struct drm_i915_gem_object *batch_obj; 815 733 struct drm_clip_rect *cliprects = NULL; 816 734 struct intel_ring_buffer *ring; 817 735 u32 ctx_id = i915_execbuffer2_get_context_id(*args); 818 736 u32 exec_start, exec_len; 819 - u32 mask; 820 - u32 flags; 737 + u32 mask, flags; 821 738 int ret, mode, i; 739 + bool need_relocs; 822 740 823 - if (!i915_gem_check_execbuffer(args)) { 824 - DRM_DEBUG("execbuf with invalid offset/length\n"); 741 + if 
(!i915_gem_check_execbuffer(args)) 825 742 return -EINVAL; 826 - } 827 743 828 744 ret = validate_exec_list(exec, args->buffer_count); 829 745 if (ret) ··· 941 863 goto pre_mutex_err; 942 864 } 943 865 944 - eb = eb_create(args->buffer_count); 866 + eb = eb_create(args); 945 867 if (eb == NULL) { 946 868 mutex_unlock(&dev->struct_mutex); 947 869 ret = -ENOMEM; ··· 949 871 } 950 872 951 873 /* Look up object handles */ 952 - INIT_LIST_HEAD(&objects); 953 - for (i = 0; i < args->buffer_count; i++) { 954 - struct drm_i915_gem_object *obj; 955 - 956 - obj = to_intel_bo(drm_gem_object_lookup(dev, file, 957 - exec[i].handle)); 958 - if (&obj->base == NULL) { 959 - DRM_DEBUG("Invalid object handle %d at index %d\n", 960 - exec[i].handle, i); 961 - /* prevent error path from reading uninitialized data */ 962 - ret = -ENOENT; 963 - goto err; 964 - } 965 - 966 - if (!list_empty(&obj->exec_list)) { 967 - DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n", 968 - obj, exec[i].handle, i); 969 - ret = -EINVAL; 970 - goto err; 971 - } 972 - 973 - list_add_tail(&obj->exec_list, &objects); 974 - obj->exec_handle = exec[i].handle; 975 - obj->exec_entry = &exec[i]; 976 - eb_add_object(eb, obj); 977 - } 874 + ret = eb_lookup_objects(eb, exec, args, file); 875 + if (ret) 876 + goto err; 978 877 979 878 /* take note of the batch buffer before we might reorder the lists */ 980 - batch_obj = list_entry(objects.prev, 879 + batch_obj = list_entry(eb->objects.prev, 981 880 struct drm_i915_gem_object, 982 881 exec_list); 983 882 984 883 /* Move the objects en-masse into the GTT, evicting if necessary. */ 985 - ret = i915_gem_execbuffer_reserve(ring, file, &objects); 884 + need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; 885 + ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs); 986 886 if (ret) 987 887 goto err; 988 888 989 889 /* The objects are in their final locations, apply the relocations. 
*/ 990 - ret = i915_gem_execbuffer_relocate(dev, eb, &objects); 890 + if (need_relocs) 891 + ret = i915_gem_execbuffer_relocate(dev, eb); 991 892 if (ret) { 992 893 if (ret == -EFAULT) { 993 - ret = i915_gem_execbuffer_relocate_slow(dev, file, ring, 994 - &objects, eb, 995 - exec, 996 - args->buffer_count); 894 + ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring, 895 + eb, exec); 997 896 BUG_ON(!mutex_is_locked(&dev->struct_mutex)); 998 897 } 999 898 if (ret) ··· 992 937 if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping) 993 938 i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level); 994 939 995 - ret = i915_gem_execbuffer_move_to_gpu(ring, &objects); 940 + ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects); 996 941 if (ret) 997 942 goto err; 998 943 ··· 1046 991 1047 992 trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags); 1048 993 1049 - i915_gem_execbuffer_move_to_active(&objects, ring); 994 + i915_gem_execbuffer_move_to_active(&eb->objects, ring); 1050 995 i915_gem_execbuffer_retire_commands(dev, file, ring); 1051 996 1052 997 err: 1053 998 eb_destroy(eb); 1054 - while (!list_empty(&objects)) { 1055 - struct drm_i915_gem_object *obj; 1056 - 1057 - obj = list_first_entry(&objects, 1058 - struct drm_i915_gem_object, 1059 - exec_list); 1060 - list_del_init(&obj->exec_list); 1061 - drm_gem_object_unreference(&obj->base); 1062 - } 1063 999 1064 1000 mutex_unlock(&dev->struct_mutex); 1065 1001 ··· 1159 1113 } 1160 1114 1161 1115 exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count, 1162 - GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); 1116 + GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); 1163 1117 if (exec2_list == NULL) 1164 1118 exec2_list = drm_malloc_ab(sizeof(*exec2_list), 1165 1119 args->buffer_count);
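Worth calling out in the eb_objects rework above: with the new I915_EXEC_HANDLE_LUT flag userspace promises that buffer i in the exec list is referenced as handle i, so eb_get_object() shrinks from a hash-bucket walk to a bounds-checked array index, and a negative 'and' both marks LUT mode and records the table size. A self-contained restatement of that lookup, with simplified types:

#include <stddef.h>

struct gem_object;                 /* opaque in this sketch */

struct eb_sketch {
	int and;                   /* >= 0: hash mask; < 0: -(nr of LUT slots) */
	struct gem_object **lut;   /* direct table, used when and < 0 */
};

static struct gem_object *eb_get_object_sketch(struct eb_sketch *eb,
					       unsigned long handle)
{
	if (eb->and < 0) {
		/* LUT mode: the handle is an index chosen by userspace. */
		if (handle >= (unsigned long)-eb->and)
			return NULL;
		return eb->lut[handle];
	}

	/* Hash mode would walk eb->buckets[handle & eb->and]; elided here. */
	return NULL;
}

Folding the object list into eb_objects also lets eb_destroy() drop the references, which is why the hand-rolled cleanup loop disappears from the tail of do_execbuffer().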
+353 -295
drivers/gpu/drm/i915/i915_gem_gtt.c
··· 44 44 #define GEN6_PTE_CACHE_LLC_MLC (3 << 1) 45 45 #define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) 46 46 47 - static inline gtt_pte_t pte_encode(struct drm_device *dev, 48 - dma_addr_t addr, 49 - enum i915_cache_level level) 47 + static inline gtt_pte_t gen6_pte_encode(struct drm_device *dev, 48 + dma_addr_t addr, 49 + enum i915_cache_level level) 50 50 { 51 51 gtt_pte_t pte = GEN6_PTE_VALID; 52 52 pte |= GEN6_PTE_ADDR_ENCODE(addr); ··· 77 77 } 78 78 79 79 /* PPGTT support for Sandybdrige/Gen6 and later */ 80 - static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, 80 + static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, 81 81 unsigned first_entry, 82 82 unsigned num_entries) 83 83 { ··· 87 87 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; 88 88 unsigned last_pte, i; 89 89 90 - scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr, 91 - I915_CACHE_LLC); 90 + scratch_pte = gen6_pte_encode(ppgtt->dev, 91 + ppgtt->scratch_page_dma_addr, 92 + I915_CACHE_LLC); 92 93 93 94 while (num_entries) { 94 95 last_pte = first_pte + num_entries; ··· 109 108 } 110 109 } 111 110 112 - int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) 113 - { 114 - struct drm_i915_private *dev_priv = dev->dev_private; 115 - struct i915_hw_ppgtt *ppgtt; 116 - unsigned first_pd_entry_in_global_pt; 117 - int i; 118 - int ret = -ENOMEM; 119 - 120 - /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024 121 - * entries. For aliasing ppgtt support we just steal them at the end for 122 - * now. */ 123 - first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES; 124 - 125 - ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); 126 - if (!ppgtt) 127 - return ret; 128 - 129 - ppgtt->dev = dev; 130 - ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES; 131 - ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries, 132 - GFP_KERNEL); 133 - if (!ppgtt->pt_pages) 134 - goto err_ppgtt; 135 - 136 - for (i = 0; i < ppgtt->num_pd_entries; i++) { 137 - ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL); 138 - if (!ppgtt->pt_pages[i]) 139 - goto err_pt_alloc; 140 - } 141 - 142 - if (dev_priv->mm.gtt->needs_dmar) { 143 - ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) 144 - *ppgtt->num_pd_entries, 145 - GFP_KERNEL); 146 - if (!ppgtt->pt_dma_addr) 147 - goto err_pt_alloc; 148 - 149 - for (i = 0; i < ppgtt->num_pd_entries; i++) { 150 - dma_addr_t pt_addr; 151 - 152 - pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 153 - 0, 4096, 154 - PCI_DMA_BIDIRECTIONAL); 155 - 156 - if (pci_dma_mapping_error(dev->pdev, 157 - pt_addr)) { 158 - ret = -EIO; 159 - goto err_pd_pin; 160 - 161 - } 162 - ppgtt->pt_dma_addr[i] = pt_addr; 163 - } 164 - } 165 - 166 - ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma; 167 - 168 - i915_ppgtt_clear_range(ppgtt, 0, 169 - ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES); 170 - 171 - ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t); 172 - 173 - dev_priv->mm.aliasing_ppgtt = ppgtt; 174 - 175 - return 0; 176 - 177 - err_pd_pin: 178 - if (ppgtt->pt_dma_addr) { 179 - for (i--; i >= 0; i--) 180 - pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i], 181 - 4096, PCI_DMA_BIDIRECTIONAL); 182 - } 183 - err_pt_alloc: 184 - kfree(ppgtt->pt_dma_addr); 185 - for (i = 0; i < ppgtt->num_pd_entries; i++) { 186 - if (ppgtt->pt_pages[i]) 187 - __free_page(ppgtt->pt_pages[i]); 188 - } 189 - kfree(ppgtt->pt_pages); 190 - err_ppgtt: 191 - kfree(ppgtt); 192 - 193 - return ret; 194 - } 195 - 196 - void 
i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev) 197 - { 198 - struct drm_i915_private *dev_priv = dev->dev_private; 199 - struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 200 - int i; 201 - 202 - if (!ppgtt) 203 - return; 204 - 205 - if (ppgtt->pt_dma_addr) { 206 - for (i = 0; i < ppgtt->num_pd_entries; i++) 207 - pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i], 208 - 4096, PCI_DMA_BIDIRECTIONAL); 209 - } 210 - 211 - kfree(ppgtt->pt_dma_addr); 212 - for (i = 0; i < ppgtt->num_pd_entries; i++) 213 - __free_page(ppgtt->pt_pages[i]); 214 - kfree(ppgtt->pt_pages); 215 - kfree(ppgtt); 216 - } 217 - 218 - static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt, 219 - const struct sg_table *pages, 220 - unsigned first_entry, 221 - enum i915_cache_level cache_level) 111 + static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt, 112 + struct sg_table *pages, 113 + unsigned first_entry, 114 + enum i915_cache_level cache_level) 222 115 { 223 116 gtt_pte_t *pt_vaddr; 224 117 unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES; ··· 132 237 133 238 for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) { 134 239 page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT); 135 - pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr, 136 - cache_level); 240 + pt_vaddr[j] = gen6_pte_encode(ppgtt->dev, page_addr, 241 + cache_level); 137 242 138 243 /* grab the next page */ 139 244 if (++m == segment_len) { ··· 153 258 } 154 259 } 155 260 261 + static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt) 262 + { 263 + int i; 264 + 265 + if (ppgtt->pt_dma_addr) { 266 + for (i = 0; i < ppgtt->num_pd_entries; i++) 267 + pci_unmap_page(ppgtt->dev->pdev, 268 + ppgtt->pt_dma_addr[i], 269 + 4096, PCI_DMA_BIDIRECTIONAL); 270 + } 271 + 272 + kfree(ppgtt->pt_dma_addr); 273 + for (i = 0; i < ppgtt->num_pd_entries; i++) 274 + __free_page(ppgtt->pt_pages[i]); 275 + kfree(ppgtt->pt_pages); 276 + kfree(ppgtt); 277 + } 278 + 279 + static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) 280 + { 281 + struct drm_device *dev = ppgtt->dev; 282 + struct drm_i915_private *dev_priv = dev->dev_private; 283 + unsigned first_pd_entry_in_global_pt; 284 + int i; 285 + int ret = -ENOMEM; 286 + 287 + /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024 288 + * entries. For aliasing ppgtt support we just steal them at the end for 289 + * now. 
*/ 290 + first_pd_entry_in_global_pt = 291 + gtt_total_entries(dev_priv->gtt) - I915_PPGTT_PD_ENTRIES; 292 + 293 + ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES; 294 + ppgtt->clear_range = gen6_ppgtt_clear_range; 295 + ppgtt->insert_entries = gen6_ppgtt_insert_entries; 296 + ppgtt->cleanup = gen6_ppgtt_cleanup; 297 + ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries, 298 + GFP_KERNEL); 299 + if (!ppgtt->pt_pages) 300 + return -ENOMEM; 301 + 302 + for (i = 0; i < ppgtt->num_pd_entries; i++) { 303 + ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL); 304 + if (!ppgtt->pt_pages[i]) 305 + goto err_pt_alloc; 306 + } 307 + 308 + ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) *ppgtt->num_pd_entries, 309 + GFP_KERNEL); 310 + if (!ppgtt->pt_dma_addr) 311 + goto err_pt_alloc; 312 + 313 + for (i = 0; i < ppgtt->num_pd_entries; i++) { 314 + dma_addr_t pt_addr; 315 + 316 + pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096, 317 + PCI_DMA_BIDIRECTIONAL); 318 + 319 + if (pci_dma_mapping_error(dev->pdev, pt_addr)) { 320 + ret = -EIO; 321 + goto err_pd_pin; 322 + 323 + } 324 + ppgtt->pt_dma_addr[i] = pt_addr; 325 + } 326 + 327 + ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma; 328 + 329 + ppgtt->clear_range(ppgtt, 0, 330 + ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES); 331 + 332 + ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t); 333 + 334 + return 0; 335 + 336 + err_pd_pin: 337 + if (ppgtt->pt_dma_addr) { 338 + for (i--; i >= 0; i--) 339 + pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i], 340 + 4096, PCI_DMA_BIDIRECTIONAL); 341 + } 342 + err_pt_alloc: 343 + kfree(ppgtt->pt_dma_addr); 344 + for (i = 0; i < ppgtt->num_pd_entries; i++) { 345 + if (ppgtt->pt_pages[i]) 346 + __free_page(ppgtt->pt_pages[i]); 347 + } 348 + kfree(ppgtt->pt_pages); 349 + 350 + return ret; 351 + } 352 + 353 + static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) 354 + { 355 + struct drm_i915_private *dev_priv = dev->dev_private; 356 + struct i915_hw_ppgtt *ppgtt; 357 + int ret; 358 + 359 + ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); 360 + if (!ppgtt) 361 + return -ENOMEM; 362 + 363 + ppgtt->dev = dev; 364 + 365 + ret = gen6_ppgtt_init(ppgtt); 366 + if (ret) 367 + kfree(ppgtt); 368 + else 369 + dev_priv->mm.aliasing_ppgtt = ppgtt; 370 + 371 + return ret; 372 + } 373 + 374 + void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev) 375 + { 376 + struct drm_i915_private *dev_priv = dev->dev_private; 377 + struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 378 + 379 + if (!ppgtt) 380 + return; 381 + 382 + ppgtt->cleanup(ppgtt); 383 + } 384 + 156 385 void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, 157 386 struct drm_i915_gem_object *obj, 158 387 enum i915_cache_level cache_level) 159 388 { 160 - i915_ppgtt_insert_sg_entries(ppgtt, 161 - obj->pages, 162 - obj->gtt_space->start >> PAGE_SHIFT, 163 - cache_level); 389 + ppgtt->insert_entries(ppgtt, obj->pages, 390 + obj->gtt_space->start >> PAGE_SHIFT, 391 + cache_level); 164 392 } 165 393 166 394 void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, 167 395 struct drm_i915_gem_object *obj) 168 396 { 169 - i915_ppgtt_clear_range(ppgtt, 170 - obj->gtt_space->start >> PAGE_SHIFT, 171 - obj->base.size >> PAGE_SHIFT); 397 + ppgtt->clear_range(ppgtt, 398 + obj->gtt_space->start >> PAGE_SHIFT, 399 + obj->base.size >> PAGE_SHIFT); 172 400 } 173 401 174 402 void i915_gem_init_ppgtt(struct drm_device *dev) ··· 308 290 return; 309 291 310 292 311 - pd_addr = (gtt_pte_t __iomem*)dev_priv->mm.gsm + 
ppgtt->pd_offset/sizeof(gtt_pte_t); 293 + pd_addr = (gtt_pte_t __iomem*)dev_priv->gtt.gsm + ppgtt->pd_offset/sizeof(gtt_pte_t);
312 294 for (i = 0; i < ppgtt->num_pd_entries; i++) {
313 295 dma_addr_t pt_addr;
314 296
315 - if (dev_priv->mm.gtt->needs_dmar)
316 - pt_addr = ppgtt->pt_dma_addr[i];
317 - else
318 - pt_addr = page_to_phys(ppgtt->pt_pages[i]);
319 - 297 + pt_addr = ppgtt->pt_dma_addr[i];
320 298 pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
321 299 pd_entry |= GEN6_PDE_VALID;
322 300
··· 352 338 }
353 339 }
354 340
341 + extern int intel_iommu_gfx_mapped;
342 + /* Certain Gen5 chipsets require idling the GPU before
343 + * unmapping anything from the GTT when VT-d is enabled.
344 + */
345 + static inline bool needs_idle_maps(struct drm_device *dev)
346 + {
347 + #ifdef CONFIG_INTEL_IOMMU
348 + /* Query intel_iommu to see if we need the workaround. Presumably that
349 + * was loaded first.
350 + */
351 + if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
352 + return true;
353 + #endif
354 + return false;
355 + }
356 +
355 357 static bool do_idling(struct drm_i915_private *dev_priv)
356 358 {
357 359 bool ret = dev_priv->mm.interruptible;
358 360
359 - if (unlikely(dev_priv->mm.gtt->do_idle_maps)) { 361 + if (unlikely(dev_priv->gtt.do_idle_maps)) {
360 362 dev_priv->mm.interruptible = false;
361 363 if (i915_gpu_idle(dev_priv->dev)) {
362 364 DRM_ERROR("Couldn't idle GPU\n");
··· 386 356
387 357 static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
388 358 {
389 - if (unlikely(dev_priv->mm.gtt->do_idle_maps)) 359 + if (unlikely(dev_priv->gtt.do_idle_maps))
390 360 dev_priv->mm.interruptible = interruptible;
391 - }
392 -
393 -
394 - static void i915_ggtt_clear_range(struct drm_device *dev,
395 - unsigned first_entry,
396 - unsigned num_entries)
397 - {
398 - struct drm_i915_private *dev_priv = dev->dev_private;
399 - gtt_pte_t scratch_pte;
400 - gtt_pte_t __iomem *gtt_base = (gtt_pte_t __iomem *) dev_priv->mm.gsm + first_entry;
401 - const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
402 - int i;
403 -
404 - if (INTEL_INFO(dev)->gen < 6) {
405 - intel_gtt_clear_range(first_entry, num_entries);
406 - return;
407 - }
408 -
409 - if (WARN(num_entries > max_entries,
410 - "First entry = %d; Num entries = %d (max=%d)\n",
411 - first_entry, num_entries, max_entries))
412 - num_entries = max_entries;
413 -
414 - scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
415 - for (i = 0; i < num_entries; i++)
416 - iowrite32(scratch_pte, &gtt_base[i]);
417 - readl(gtt_base);
418 361 }
419 362
420 363 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
··· 396 393 struct drm_i915_gem_object *obj;
397 394
398 395 /* First fill our portion of the GTT with scratch pages */
399 - i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE,
400 - (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
396 + dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
397 + dev_priv->gtt.total / PAGE_SIZE);
401 398
402 399 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
403 400 i915_gem_clflush_object(obj);
··· 426 423 * within the global GTT as well as accessible by the GPU through the GMADR
427 424 * mapped BAR (dev_priv->mm.gtt->gtt).
428 425 */ 429 - static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj, 430 - enum i915_cache_level level) 426 + static void gen6_ggtt_insert_entries(struct drm_device *dev, 427 + struct sg_table *st, 428 + unsigned int first_entry, 429 + enum i915_cache_level level) 431 430 { 432 - struct drm_device *dev = obj->base.dev; 433 431 struct drm_i915_private *dev_priv = dev->dev_private; 434 - struct sg_table *st = obj->pages; 435 432 struct scatterlist *sg = st->sgl; 436 - const int first_entry = obj->gtt_space->start >> PAGE_SHIFT; 437 - const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry; 438 433 gtt_pte_t __iomem *gtt_entries = 439 - (gtt_pte_t __iomem *)dev_priv->mm.gsm + first_entry; 434 + (gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; 440 435 int unused, i = 0; 441 436 unsigned int len, m = 0; 442 437 dma_addr_t addr; ··· 443 442 len = sg_dma_len(sg) >> PAGE_SHIFT; 444 443 for (m = 0; m < len; m++) { 445 444 addr = sg_dma_address(sg) + (m << PAGE_SHIFT); 446 - iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]); 445 + iowrite32(gen6_pte_encode(dev, addr, level), 446 + &gtt_entries[i]); 447 447 i++; 448 448 } 449 449 } 450 - 451 - BUG_ON(i > max_entries); 452 - BUG_ON(i != obj->base.size / PAGE_SIZE); 453 450 454 451 /* XXX: This serves as a posting read to make sure that the PTE has 455 452 * actually been updated. There is some concern that even though ··· 456 457 * hardware should work, we must keep this posting read for paranoia. 457 458 */ 458 459 if (i != 0) 459 - WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level)); 460 + WARN_ON(readl(&gtt_entries[i-1]) 461 + != gen6_pte_encode(dev, addr, level)); 460 462 461 463 /* This next bit makes the above posting read even more important. We 462 464 * want to flush the TLBs only after we're certain all the PTE updates ··· 467 467 POSTING_READ(GFX_FLSH_CNTL_GEN6); 468 468 } 469 469 470 + static void gen6_ggtt_clear_range(struct drm_device *dev, 471 + unsigned int first_entry, 472 + unsigned int num_entries) 473 + { 474 + struct drm_i915_private *dev_priv = dev->dev_private; 475 + gtt_pte_t scratch_pte; 476 + gtt_pte_t __iomem *gtt_base = (gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; 477 + const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; 478 + int i; 479 + 480 + if (WARN(num_entries > max_entries, 481 + "First entry = %d; Num entries = %d (max=%d)\n", 482 + first_entry, num_entries, max_entries)) 483 + num_entries = max_entries; 484 + 485 + scratch_pte = gen6_pte_encode(dev, dev_priv->gtt.scratch_page_dma, 486 + I915_CACHE_LLC); 487 + for (i = 0; i < num_entries; i++) 488 + iowrite32(scratch_pte, &gtt_base[i]); 489 + readl(gtt_base); 490 + } 491 + 492 + 493 + static void i915_ggtt_insert_entries(struct drm_device *dev, 494 + struct sg_table *st, 495 + unsigned int pg_start, 496 + enum i915_cache_level cache_level) 497 + { 498 + unsigned int flags = (cache_level == I915_CACHE_NONE) ? 
499 + AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; 500 + 501 + intel_gtt_insert_sg_entries(st, pg_start, flags); 502 + 503 + } 504 + 505 + static void i915_ggtt_clear_range(struct drm_device *dev, 506 + unsigned int first_entry, 507 + unsigned int num_entries) 508 + { 509 + intel_gtt_clear_range(first_entry, num_entries); 510 + } 511 + 512 + 470 513 void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, 471 514 enum i915_cache_level cache_level) 472 515 { 473 516 struct drm_device *dev = obj->base.dev; 474 - if (INTEL_INFO(dev)->gen < 6) { 475 - unsigned int flags = (cache_level == I915_CACHE_NONE) ? 476 - AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; 477 - intel_gtt_insert_sg_entries(obj->pages, 478 - obj->gtt_space->start >> PAGE_SHIFT, 479 - flags); 480 - } else { 481 - gen6_ggtt_bind_object(obj, cache_level); 482 - } 517 + struct drm_i915_private *dev_priv = dev->dev_private; 518 + 519 + dev_priv->gtt.gtt_insert_entries(dev, obj->pages, 520 + obj->gtt_space->start >> PAGE_SHIFT, 521 + cache_level); 483 522 484 523 obj->has_global_gtt_mapping = 1; 485 524 } 486 525 487 526 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) 488 527 { 489 - i915_ggtt_clear_range(obj->base.dev, 490 - obj->gtt_space->start >> PAGE_SHIFT, 491 - obj->base.size >> PAGE_SHIFT); 528 + struct drm_device *dev = obj->base.dev; 529 + struct drm_i915_private *dev_priv = dev->dev_private; 530 + 531 + dev_priv->gtt.gtt_clear_range(obj->base.dev, 532 + obj->gtt_space->start >> PAGE_SHIFT, 533 + obj->base.size >> PAGE_SHIFT); 492 534 493 535 obj->has_global_gtt_mapping = 0; 494 536 } ··· 567 525 *end -= 4096; 568 526 } 569 527 } 570 - 571 528 void i915_gem_setup_global_gtt(struct drm_device *dev, 572 529 unsigned long start, 573 530 unsigned long mappable_end, 574 531 unsigned long end) 575 532 { 533 + /* Let GEM Manage all of the aperture. 534 + * 535 + * However, leave one page at the end still bound to the scratch page. 536 + * There are a number of places where the hardware apparently prefetches 537 + * past the end of the object, and we've seen multiple hangs with the 538 + * GPU head pointer stuck in a batchbuffer bound at the last page of the 539 + * aperture. One page should be enough to keep any prefetching inside 540 + * of the aperture. 541 + */ 576 542 drm_i915_private_t *dev_priv = dev->dev_private; 577 543 struct drm_mm_node *entry; 578 544 struct drm_i915_gem_object *obj; 579 545 unsigned long hole_start, hole_end; 546 + 547 + BUG_ON(mappable_end > end); 580 548 581 549 /* Subtract the guard page ... 
*/ 582 550 drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE); ··· 606 554 obj->has_global_gtt_mapping = 1; 607 555 } 608 556 609 - dev_priv->mm.gtt_start = start; 610 - dev_priv->mm.gtt_mappable_end = mappable_end; 611 - dev_priv->mm.gtt_end = end; 612 - dev_priv->mm.gtt_total = end - start; 613 - dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start; 557 + dev_priv->gtt.start = start; 558 + dev_priv->gtt.total = end - start; 614 559 615 560 /* Clear any non-preallocated blocks */ 616 561 drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space, 617 562 hole_start, hole_end) { 618 563 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", 619 564 hole_start, hole_end); 620 - i915_ggtt_clear_range(dev, 621 - hole_start / PAGE_SIZE, 622 - (hole_end-hole_start) / PAGE_SIZE); 565 + dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE, 566 + (hole_end-hole_start) / PAGE_SIZE); 623 567 } 624 568 625 569 /* And finally clear the reserved guard page */ 626 - i915_ggtt_clear_range(dev, end / PAGE_SIZE - 1, 1); 570 + dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1); 627 571 } 628 572 629 573 static bool ··· 641 593 { 642 594 struct drm_i915_private *dev_priv = dev->dev_private; 643 595 unsigned long gtt_size, mappable_size; 644 - int ret; 645 596 646 - gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT; 647 - mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; 597 + gtt_size = dev_priv->gtt.total; 598 + mappable_size = dev_priv->gtt.mappable_end; 648 599 649 600 if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) { 601 + int ret; 650 602 /* PPGTT pdes are stolen from global gtt ptes, so shrink the 651 603 * aperture accordingly when using aliasing ppgtt. */ 652 604 gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE; ··· 654 606 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); 655 607 656 608 ret = i915_gem_init_aliasing_ppgtt(dev); 657 - if (ret) { 658 - mutex_unlock(&dev->struct_mutex); 609 + if (!ret) 659 610 return; 660 - } 661 - } else { 662 - /* Let GEM Manage all of the aperture. 663 - * 664 - * However, leave one page at the end still bound to the scratch 665 - * page. There are a number of places where the hardware 666 - * apparently prefetches past the end of the object, and we've 667 - * seen multiple hangs with the GPU head pointer stuck in a 668 - * batchbuffer bound at the last page of the aperture. One page 669 - * should be enough to keep any prefetching inside of the 670 - * aperture. 
671 - */ 672 - i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); 611 + 612 + DRM_ERROR("Aliased PPGTT setup failed %d\n", ret); 613 + drm_mm_takedown(&dev_priv->mm.gtt_space); 614 + gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE; 673 615 } 616 + i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); 674 617 } 675 618 676 619 static int setup_scratch_page(struct drm_device *dev) ··· 684 645 #else 685 646 dma_addr = page_to_phys(page); 686 647 #endif 687 - dev_priv->mm.gtt->scratch_page = page; 688 - dev_priv->mm.gtt->scratch_page_dma = dma_addr; 648 + dev_priv->gtt.scratch_page = page; 649 + dev_priv->gtt.scratch_page_dma = dma_addr; 689 650 690 651 return 0; 691 652 } ··· 693 654 static void teardown_scratch_page(struct drm_device *dev) 694 655 { 695 656 struct drm_i915_private *dev_priv = dev->dev_private; 696 - set_pages_wb(dev_priv->mm.gtt->scratch_page, 1); 697 - pci_unmap_page(dev->pdev, dev_priv->mm.gtt->scratch_page_dma, 657 + set_pages_wb(dev_priv->gtt.scratch_page, 1); 658 + pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma, 698 659 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 699 - put_page(dev_priv->mm.gtt->scratch_page); 700 - __free_page(dev_priv->mm.gtt->scratch_page); 660 + put_page(dev_priv->gtt.scratch_page); 661 + __free_page(dev_priv->gtt.scratch_page); 701 662 } 702 663 703 664 static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) ··· 707 668 return snb_gmch_ctl << 20; 708 669 } 709 670 710 - static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl) 671 + static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl) 711 672 { 712 673 snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT; 713 674 snb_gmch_ctl &= SNB_GMCH_GMS_MASK; 714 675 return snb_gmch_ctl << 25; /* 32 MB units */ 715 676 } 716 677 717 - static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl) 678 + static inline size_t gen7_get_stolen_size(u16 snb_gmch_ctl) 718 679 { 719 680 static const int stolen_decoder[] = { 720 681 0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352}; ··· 723 684 return stolen_decoder[snb_gmch_ctl] << 20; 724 685 } 725 686 726 - int i915_gem_gtt_init(struct drm_device *dev) 687 + static int gen6_gmch_probe(struct drm_device *dev, 688 + size_t *gtt_total, 689 + size_t *stolen) 727 690 { 728 691 struct drm_i915_private *dev_priv = dev->dev_private; 729 692 phys_addr_t gtt_bus_addr; 693 + unsigned int gtt_size; 730 694 u16 snb_gmch_ctl; 731 695 int ret; 732 696 733 - /* On modern platforms we need not worry ourself with the legacy 734 - * hostbridge query stuff. Skip it entirely 697 + /* 64/512MB is the current min/max we actually know of, but this is just 698 + * a coarse sanity check. 
735 699 */ 736 - if (INTEL_INFO(dev)->gen < 6) { 737 - ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL); 738 - if (!ret) { 739 - DRM_ERROR("failed to set up gmch\n"); 740 - return -EIO; 741 - } 742 - 743 - dev_priv->mm.gtt = intel_gtt_get(); 744 - if (!dev_priv->mm.gtt) { 745 - DRM_ERROR("Failed to initialize GTT\n"); 746 - intel_gmch_remove(); 747 - return -ENODEV; 748 - } 749 - return 0; 700 + if ((dev_priv->gtt.mappable_end < (64<<20) || 701 + (dev_priv->gtt.mappable_end > (512<<20)))) { 702 + DRM_ERROR("Unknown GMADR size (%lx)\n", 703 + dev_priv->gtt.mappable_end); 704 + return -ENXIO; 750 705 } 751 - 752 - dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL); 753 - if (!dev_priv->mm.gtt) 754 - return -ENOMEM; 755 706 756 707 if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40))) 757 708 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40)); 709 + pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); 710 + gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl); 758 711 759 - #ifdef CONFIG_INTEL_IOMMU 760 - dev_priv->mm.gtt->needs_dmar = 1; 761 - #endif 712 + if (IS_GEN7(dev)) 713 + *stolen = gen7_get_stolen_size(snb_gmch_ctl); 714 + else 715 + *stolen = gen6_get_stolen_size(snb_gmch_ctl); 716 + 717 + *gtt_total = (gtt_size / sizeof(gtt_pte_t)) << PAGE_SHIFT; 762 718 763 719 /* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */ 764 720 gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20); 765 - dev_priv->mm.gtt->gma_bus_addr = pci_resource_start(dev->pdev, 2); 766 - 767 - /* i9xx_setup */ 768 - pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); 769 - dev_priv->mm.gtt->gtt_total_entries = 770 - gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t); 771 - if (INTEL_INFO(dev)->gen < 7) 772 - dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl); 773 - else 774 - dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl); 775 - 776 - dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT; 777 - /* 64/512MB is the current min/max we actually know of, but this is just a 778 - * coarse sanity check. 779 - */ 780 - if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 || 781 - dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) { 782 - DRM_ERROR("Unknown GMADR entries (%d)\n", 783 - dev_priv->mm.gtt->gtt_mappable_entries); 784 - ret = -ENXIO; 785 - goto err_out; 721 + dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size); 722 + if (!dev_priv->gtt.gsm) { 723 + DRM_ERROR("Failed to map the gtt page table\n"); 724 + return -ENOMEM; 786 725 } 787 726 788 727 ret = setup_scratch_page(dev); 789 - if (ret) { 728 + if (ret) 790 729 DRM_ERROR("Scratch setup failed\n"); 791 - goto err_out; 792 - } 793 730 794 - dev_priv->mm.gsm = ioremap_wc(gtt_bus_addr, 795 - dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t)); 796 - if (!dev_priv->mm.gsm) { 797 - DRM_ERROR("Failed to map the gtt page table\n"); 798 - teardown_scratch_page(dev); 799 - ret = -ENOMEM; 800 - goto err_out; 801 - } 731 + dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range; 732 + dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries; 802 733 803 - /* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. 
*/ 804 - DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8); 805 - DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8); 806 - DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20); 807 - 808 - return 0; 809 - 810 - err_out: 811 - kfree(dev_priv->mm.gtt); 812 - if (INTEL_INFO(dev)->gen < 6) 813 - intel_gmch_remove(); 814 734 return ret; 815 735 } 816 736 817 - void i915_gem_gtt_fini(struct drm_device *dev) 737 + static void gen6_gmch_remove(struct drm_device *dev) 818 738 { 819 739 struct drm_i915_private *dev_priv = dev->dev_private; 820 - iounmap(dev_priv->mm.gsm); 821 - teardown_scratch_page(dev); 822 - if (INTEL_INFO(dev)->gen < 6) 823 - intel_gmch_remove(); 824 - kfree(dev_priv->mm.gtt); 740 + iounmap(dev_priv->gtt.gsm); 741 + teardown_scratch_page(dev_priv->dev); 742 + } 743 + 744 + static int i915_gmch_probe(struct drm_device *dev, 745 + size_t *gtt_total, 746 + size_t *stolen) 747 + { 748 + struct drm_i915_private *dev_priv = dev->dev_private; 749 + int ret; 750 + 751 + ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL); 752 + if (!ret) { 753 + DRM_ERROR("failed to set up gmch\n"); 754 + return -EIO; 755 + } 756 + 757 + intel_gtt_get(gtt_total, stolen); 758 + 759 + dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev); 760 + dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range; 761 + dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries; 762 + 763 + return 0; 764 + } 765 + 766 + static void i915_gmch_remove(struct drm_device *dev) 767 + { 768 + intel_gmch_remove(); 769 + } 770 + 771 + int i915_gem_gtt_init(struct drm_device *dev) 772 + { 773 + struct drm_i915_private *dev_priv = dev->dev_private; 774 + struct i915_gtt *gtt = &dev_priv->gtt; 775 + unsigned long gtt_size; 776 + int ret; 777 + 778 + gtt->mappable_base = pci_resource_start(dev->pdev, 2); 779 + gtt->mappable_end = pci_resource_len(dev->pdev, 2); 780 + 781 + if (INTEL_INFO(dev)->gen <= 5) { 782 + dev_priv->gtt.gtt_probe = i915_gmch_probe; 783 + dev_priv->gtt.gtt_remove = i915_gmch_remove; 784 + } else { 785 + dev_priv->gtt.gtt_probe = gen6_gmch_probe; 786 + dev_priv->gtt.gtt_remove = gen6_gmch_remove; 787 + } 788 + 789 + ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total, 790 + &dev_priv->gtt.stolen_size); 791 + if (ret) 792 + return ret; 793 + 794 + gtt_size = (dev_priv->gtt.total >> PAGE_SHIFT) * sizeof(gtt_pte_t); 795 + 796 + /* GMADR is the PCI mmio aperture into the global GTT. */ 797 + DRM_INFO("Memory usable by graphics device = %zdM\n", 798 + dev_priv->gtt.total >> 20); 799 + DRM_DEBUG_DRIVER("GMADR size = %ldM\n", 800 + dev_priv->gtt.mappable_end >> 20); 801 + DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", 802 + dev_priv->gtt.stolen_size >> 20); 803 + 804 + return 0; 825 805 }
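After the rework above, i915_gem_gtt_init() reduces to: read the GMADR BAR, pick a probe/remove pair by generation, call probe, print the sizes; everything chipset-specific hides behind the two function pointers. A compilable miniature of that dispatch (the sizes and the unmodelled legacy branch are placeholders for illustration):

#include <stddef.h>
#include <stdio.h>

struct gtt_sketch {
	size_t total, stolen;
	int  (*probe)(struct gtt_sketch *gtt);
	void (*remove)(struct gtt_sketch *gtt);
};

static int gen6_probe_sketch(struct gtt_sketch *gtt)
{
	gtt->total  = (size_t)512 << 20;   /* placeholder sizes */
	gtt->stolen = (size_t)32 << 20;
	return 0;
}

static void gen6_remove_sketch(struct gtt_sketch *gtt)
{
	(void)gtt;                         /* would iounmap the PTE mapping */
}

static int gtt_init_sketch(struct gtt_sketch *gtt, int gen)
{
	if (gen <= 5)
		return -1;                 /* intel-gtt backed path, not modelled */

	gtt->probe  = gen6_probe_sketch;
	gtt->remove = gen6_remove_sketch;

	if (gtt->probe(gtt))
		return -1;

	printf("Memory usable by graphics device = %zuM\n", gtt->total >> 20);
	return 0;
}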
+4 -4
drivers/gpu/drm/i915/i915_gem_stolen.c
··· 187 187 if (dev_priv->mm.stolen_base == 0) 188 188 return 0; 189 189 190 - DRM_DEBUG_KMS("found %d bytes of stolen memory at %08lx\n", 191 - dev_priv->mm.gtt->stolen_size, dev_priv->mm.stolen_base); 190 + DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n", 191 + dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base); 192 192 193 193 /* Basic memrange allocator for stolen space */ 194 - drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->mm.gtt->stolen_size); 194 + drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size); 195 195 196 196 return 0; 197 197 } ··· 205 205 struct scatterlist *sg; 206 206 207 207 DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size); 208 - BUG_ON(offset > dev_priv->mm.gtt->stolen_size - size); 208 + BUG_ON(offset > dev_priv->gtt.stolen_size - size); 209 209 210 210 /* We hide that we have no struct page backing our stolen object 211 211 * by wrapping the contiguous physical allocation with a fake
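One easy-to-miss detail above: stolen_size is now a size_t in the consolidated struct, hence the switch to the z length modifier in the format strings. The same rule in plain C:

#include <stdio.h>

int main(void)
{
	size_t stolen = (size_t)32 << 20;

	/* %zu (or %zd for the signed ssize_t) is the portable conversion
	 * for size_t; plain %d would be wrong on LP64 targets. */
	printf("found %zu bytes of stolen memory\n", stolen);
	return 0;
}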
+5 -16
drivers/gpu/drm/i915/i915_gem_tiling.c
··· 272 272 return false; 273 273 } 274 274 275 - /* 276 - * Previous chips need to be aligned to the size of the smallest 277 - * fence register that can contain the object. 278 - */ 279 - if (INTEL_INFO(obj->base.dev)->gen == 3) 280 - size = 1024*1024; 281 - else 282 - size = 512*1024; 283 - 284 - while (size < obj->base.size) 285 - size <<= 1; 286 - 275 + size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode); 287 276 if (obj->gtt_space->size != size) 288 277 return false; 289 278 ··· 357 368 358 369 obj->map_and_fenceable = 359 370 obj->gtt_space == NULL || 360 - (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end && 371 + (obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end && 361 372 i915_gem_object_fence_ok(obj, args->tiling_mode)); 362 373 363 374 /* Rebind if we need a change of alignment */ 364 375 if (!obj->map_and_fenceable) { 365 376 u32 unfenced_alignment = 366 - i915_gem_get_unfenced_gtt_alignment(dev, 367 - obj->base.size, 368 - args->tiling_mode); 377 + i915_gem_get_gtt_alignment(dev, obj->base.size, 378 + args->tiling_mode, 379 + false); 369 380 if (obj->gtt_offset & (unfenced_alignment - 1)) 370 381 ret = i915_gem_object_unbind(obj); 371 382 }
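The power-of-two loop deleted from i915_gem_object_fence_ok() above does not disappear; it is centralised in i915_gem_get_gtt_size(), which both callers now share. The removed logic, restated as a standalone helper with the generation made an explicit parameter for illustration:

#include <stdint.h>

/* Pre-965 fences must cover a power-of-two region no smaller than the
 * minimum fence granule: 1MiB on gen3, 512KiB on older parts. */
static uint64_t fence_size_sketch(int gen, uint64_t obj_size)
{
	uint64_t size = (gen == 3) ? 1024 * 1024 : 512 * 1024;

	while (size < obj_size)
		size <<= 1;

	return size;
}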
+88 -42
drivers/gpu/drm/i915/i915_irq.c
··· 356 356 357 357 wake_up_all(&ring->irq_queue); 358 358 if (i915_enable_hangcheck) { 359 - dev_priv->hangcheck_count = 0; 360 - mod_timer(&dev_priv->hangcheck_timer, 359 + dev_priv->gpu_error.hangcheck_count = 0; 360 + mod_timer(&dev_priv->gpu_error.hangcheck_timer, 361 361 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 362 362 } 363 363 } ··· 862 862 */ 863 863 static void i915_error_work_func(struct work_struct *work) 864 864 { 865 - drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 866 - error_work); 865 + struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, 866 + work); 867 + drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t, 868 + gpu_error); 867 869 struct drm_device *dev = dev_priv->dev; 870 + struct intel_ring_buffer *ring; 868 871 char *error_event[] = { "ERROR=1", NULL }; 869 872 char *reset_event[] = { "RESET=1", NULL }; 870 873 char *reset_done_event[] = { "ERROR=0", NULL }; 874 + int i, ret; 871 875 872 876 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); 873 877 874 - if (atomic_read(&dev_priv->mm.wedged)) { 878 + /* 879 + * Note that there's only one work item which does gpu resets, so we 880 + * need not worry about concurrent gpu resets potentially incrementing 881 + * error->reset_counter twice. We only need to take care of another 882 + * racing irq/hangcheck declaring the gpu dead for a second time. A 883 + * quick check for that is good enough: schedule_work ensures the 884 + * correct ordering between hang detection and this work item, and since 885 + * the reset in-progress bit is only ever set by code outside of this 886 + * work we don't need to worry about any other races. 887 + */ 888 + if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { 875 889 DRM_DEBUG_DRIVER("resetting chip\n"); 876 - kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); 877 - if (!i915_reset(dev)) { 878 - atomic_set(&dev_priv->mm.wedged, 0); 879 - kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); 890 + kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, 891 + reset_event); 892 + 893 + ret = i915_reset(dev); 894 + 895 + if (ret == 0) { 896 + /* 897 + * After all the gem state is reset, increment the reset 898 + * counter and wake up everyone waiting for the reset to 899 + * complete. 900 + * 901 + * Since unlock operations are a one-sided barrier only, 902 + * we need to insert a barrier here to order any seqno 903 + * updates before 904 + * the counter increment. 905 + */ 906 + smp_mb__before_atomic_inc(); 907 + atomic_inc(&dev_priv->gpu_error.reset_counter); 908 + 909 + kobject_uevent_env(&dev->primary->kdev.kobj, 910 + KOBJ_CHANGE, reset_done_event); 911 + } else { 912 + atomic_set(&error->reset_counter, I915_WEDGED); 880 913 } 881 - complete_all(&dev_priv->error_completion); 914 + 915 + for_each_ring(ring, dev_priv, i) 916 + wake_up_all(&ring->irq_queue); 917 + 918 + wake_up_all(&dev_priv->gpu_error.reset_queue); 882 919 } 883 920 } 884 921 ··· 976 939 goto unwind; 977 940 978 941 local_irq_save(flags); 979 - if (reloc_offset < dev_priv->mm.gtt_mappable_end && 942 + if (reloc_offset < dev_priv->gtt.mappable_end && 980 943 src->has_global_gtt_mapping) { 981 944 void __iomem *s; 982 945 ··· 985 948 * captures what the GPU read. 
986 949 */ 987 950 988 - s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, 951 + s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, 989 952 reloc_offset); 990 953 memcpy_fromio(d, s, PAGE_SIZE); 991 954 io_mapping_unmap_atomic(s); ··· 1292 1255 unsigned long flags; 1293 1256 int i, pipe; 1294 1257 1295 - spin_lock_irqsave(&dev_priv->error_lock, flags); 1296 - error = dev_priv->first_error; 1297 - spin_unlock_irqrestore(&dev_priv->error_lock, flags); 1258 + spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); 1259 + error = dev_priv->gpu_error.first_error; 1260 + spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); 1298 1261 if (error) 1299 1262 return; 1300 1263 ··· 1305 1268 return; 1306 1269 } 1307 1270 1308 - DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n", 1271 + DRM_INFO("capturing error event; look for more information in" 1272 + "/sys/kernel/debug/dri/%d/i915_error_state\n", 1309 1273 dev->primary->index); 1310 1274 1311 1275 kref_init(&error->ref); ··· 1379 1341 error->overlay = intel_overlay_capture_error_state(dev); 1380 1342 error->display = intel_display_capture_error_state(dev); 1381 1343 1382 - spin_lock_irqsave(&dev_priv->error_lock, flags); 1383 - if (dev_priv->first_error == NULL) { 1384 - dev_priv->first_error = error; 1344 + spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); 1345 + if (dev_priv->gpu_error.first_error == NULL) { 1346 + dev_priv->gpu_error.first_error = error; 1385 1347 error = NULL; 1386 1348 } 1387 - spin_unlock_irqrestore(&dev_priv->error_lock, flags); 1349 + spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); 1388 1350 1389 1351 if (error) 1390 1352 i915_error_state_free(&error->ref); ··· 1396 1358 struct drm_i915_error_state *error; 1397 1359 unsigned long flags; 1398 1360 1399 - spin_lock_irqsave(&dev_priv->error_lock, flags); 1400 - error = dev_priv->first_error; 1401 - dev_priv->first_error = NULL; 1402 - spin_unlock_irqrestore(&dev_priv->error_lock, flags); 1361 + spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); 1362 + error = dev_priv->gpu_error.first_error; 1363 + dev_priv->gpu_error.first_error = NULL; 1364 + spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); 1403 1365 1404 1366 if (error) 1405 1367 kref_put(&error->ref, i915_error_state_free); ··· 1520 1482 i915_report_and_clear_eir(dev); 1521 1483 1522 1484 if (wedged) { 1523 - INIT_COMPLETION(dev_priv->error_completion); 1524 - atomic_set(&dev_priv->mm.wedged, 1); 1485 + atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, 1486 + &dev_priv->gpu_error.reset_counter); 1525 1487 1526 1488 /* 1527 - * Wakeup waiting processes so they don't hang 1489 + * Wakeup waiting processes so that the reset work item 1490 + * doesn't deadlock trying to grab various locks. 1528 1491 */ 1529 1492 for_each_ring(ring, dev_priv, i) 1530 1493 wake_up_all(&ring->irq_queue); 1531 1494 } 1532 1495 1533 - queue_work(dev_priv->wq, &dev_priv->error_work); 1496 + queue_work(dev_priv->wq, &dev_priv->gpu_error.work); 1534 1497 } 1535 1498 1536 1499 static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) ··· 1762 1723 { 1763 1724 drm_i915_private_t *dev_priv = dev->dev_private; 1764 1725 1765 - if (dev_priv->hangcheck_count++ > 1) { 1726 + if (dev_priv->gpu_error.hangcheck_count++ > 1) { 1766 1727 bool hung = true; 1767 1728 1768 1729 DRM_ERROR("Hangcheck timer elapsed... 
GPU hung\n"); ··· 1821 1782 goto repeat; 1822 1783 } 1823 1784 1824 - dev_priv->hangcheck_count = 0; 1785 + dev_priv->gpu_error.hangcheck_count = 0; 1825 1786 return; 1826 1787 } 1827 1788 1828 1789 i915_get_extra_instdone(dev, instdone); 1829 - if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 && 1830 - memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) { 1790 + if (memcmp(dev_priv->gpu_error.last_acthd, acthd, 1791 + sizeof(acthd)) == 0 && 1792 + memcmp(dev_priv->gpu_error.prev_instdone, instdone, 1793 + sizeof(instdone)) == 0) { 1831 1794 if (i915_hangcheck_hung(dev)) 1832 1795 return; 1833 1796 } else { 1834 - dev_priv->hangcheck_count = 0; 1797 + dev_priv->gpu_error.hangcheck_count = 0; 1835 1798 1836 - memcpy(dev_priv->last_acthd, acthd, sizeof(acthd)); 1837 - memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone)); 1799 + memcpy(dev_priv->gpu_error.last_acthd, acthd, 1800 + sizeof(acthd)); 1801 + memcpy(dev_priv->gpu_error.prev_instdone, instdone, 1802 + sizeof(instdone)); 1838 1803 } 1839 1804 1840 1805 repeat: 1841 1806 /* Reset timer case chip hangs without another request being added */ 1842 - mod_timer(&dev_priv->hangcheck_timer, 1807 + mod_timer(&dev_priv->gpu_error.hangcheck_timer, 1843 1808 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 1844 1809 } 1845 1810 ··· 1935 1892 DE_AUX_CHANNEL_A; 1936 1893 u32 render_irqs; 1937 1894 u32 hotplug_mask; 1895 + u32 pch_irq_mask; 1938 1896 1939 1897 dev_priv->irq_mask = ~display_mask; 1940 1898 ··· 1979 1935 SDE_AUX_MASK); 1980 1936 } 1981 1937 1982 - dev_priv->pch_irq_mask = ~hotplug_mask; 1938 + pch_irq_mask = ~hotplug_mask; 1983 1939 1984 1940 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 1985 - I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); 1941 + I915_WRITE(SDEIMR, pch_irq_mask); 1986 1942 I915_WRITE(SDEIER, hotplug_mask); 1987 1943 POSTING_READ(SDEIER); 1988 1944 ··· 2010 1966 DE_AUX_CHANNEL_A_IVB; 2011 1967 u32 render_irqs; 2012 1968 u32 hotplug_mask; 1969 + u32 pch_irq_mask; 2013 1970 2014 1971 dev_priv->irq_mask = ~display_mask; 2015 1972 ··· 2040 1995 SDE_PORTD_HOTPLUG_CPT | 2041 1996 SDE_GMBUS_CPT | 2042 1997 SDE_AUX_MASK_CPT); 2043 - dev_priv->pch_irq_mask = ~hotplug_mask; 1998 + pch_irq_mask = ~hotplug_mask; 2044 1999 2045 2000 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2046 - I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); 2001 + I915_WRITE(SDEIMR, pch_irq_mask); 2047 2002 I915_WRITE(SDEIER, hotplug_mask); 2048 2003 POSTING_READ(SDEIER); 2049 2004 ··· 2812 2767 struct drm_i915_private *dev_priv = dev->dev_private; 2813 2768 2814 2769 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 2815 - INIT_WORK(&dev_priv->error_work, i915_error_work_func); 2770 + INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func); 2816 2771 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 2817 2772 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 2818 2773 2819 - setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, 2774 + setup_timer(&dev_priv->gpu_error.hangcheck_timer, 2775 + i915_hangcheck_elapsed, 2820 2776 (unsigned long) dev); 2821 2777 2822 2778 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+149 -135
drivers/gpu/drm/i915/i915_reg.h
··· 141 141 #define VGA_MSR_MEM_EN (1<<1) 142 142 #define VGA_MSR_CGA_MODE (1<<0) 143 143 144 - #define VGA_SR_INDEX 0x3c4 144 + /* 145 + * SR01 is the only VGA register touched on non-UMS setups. 146 + * VLV doesn't do UMS, so the sequencer index/data registers 147 + * are the only VGA registers which need to include 148 + * display_mmio_offset. 149 + */ 150 + #define VGA_SR_INDEX (dev_priv->info->display_mmio_offset + 0x3c4) 145 151 #define SR01 1 146 - #define VGA_SR_DATA 0x3c5 152 + #define VGA_SR_DATA (dev_priv->info->display_mmio_offset + 0x3c5) 147 153 148 154 #define VGA_AR_INDEX 0x3c0 149 155 #define VGA_AR_VID_EN (1<<5) ··· 342 336 * 0x801c/3c: core clock bits 343 337 * 0x8048/68: low pass filter coefficients 344 338 * 0x8100: fast clock controls 339 + * 340 + * DPIO is VLV only. 345 341 */ 346 - #define DPIO_PKT 0x2100 342 + #define DPIO_PKT (VLV_DISPLAY_BASE + 0x2100) 347 343 #define DPIO_RID (0<<24) 348 344 #define DPIO_OP_WRITE (1<<16) 349 345 #define DPIO_OP_READ (0<<16) 350 346 #define DPIO_PORTID (0x12<<8) 351 347 #define DPIO_BYTE (0xf<<4) 352 348 #define DPIO_BUSY (1<<0) /* status only */ 353 - #define DPIO_DATA 0x2104 354 - #define DPIO_REG 0x2108 355 - #define DPIO_CTL 0x2110 349 + #define DPIO_DATA (VLV_DISPLAY_BASE + 0x2104) 350 + #define DPIO_REG (VLV_DISPLAY_BASE + 0x2108) 351 + #define DPIO_CTL (VLV_DISPLAY_BASE + 0x2110) 356 352 #define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */ 357 353 #define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */ 358 354 #define DPIO_SFR_BYPASS (1<<1) ··· 562 554 #define IIR 0x020a4 563 555 #define IMR 0x020a8 564 556 #define ISR 0x020ac 565 - #define VLV_GUNIT_CLOCK_GATE 0x182060 557 + #define VLV_GUNIT_CLOCK_GATE (VLV_DISPLAY_BASE + 0x2060) 566 558 #define GCFG_DIS (1<<8) 567 - #define VLV_IIR_RW 0x182084 568 - #define VLV_IER 0x1820a0 569 - #define VLV_IIR 0x1820a4 570 - #define VLV_IMR 0x1820a8 571 - #define VLV_ISR 0x1820ac 559 + #define VLV_IIR_RW (VLV_DISPLAY_BASE + 0x2084) 560 + #define VLV_IER (VLV_DISPLAY_BASE + 0x20a0) 561 + #define VLV_IIR (VLV_DISPLAY_BASE + 0x20a4) 562 + #define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8) 563 + #define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac) 572 564 #define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) 573 565 #define I915_DISPLAY_PORT_INTERRUPT (1<<17) 574 566 #define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15) ··· 741 733 #define GEN7_FF_TS_SCHED_HS0 (0x3<<16) 742 734 #define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16) 743 735 #define GEN7_FF_TS_SCHED_HW (0x0<<16) /* Default */ 736 + #define GEN7_FF_VS_REF_CNT_FFME (1 << 15) 744 737 #define GEN7_FF_VS_SCHED_HS1 (0x5<<12) 745 738 #define GEN7_FF_VS_SCHED_HS0 (0x3<<12) 746 739 #define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1<<12) /* Default */ ··· 928 919 #define VGA1_PD_P1_DIV_2 (1 << 13) 929 920 #define VGA1_PD_P1_SHIFT 8 930 921 #define VGA1_PD_P1_MASK (0x1f << 8) 931 - #define _DPLL_A 0x06014 932 - #define _DPLL_B 0x06018 922 + #define _DPLL_A (dev_priv->info->display_mmio_offset + 0x6014) 923 + #define _DPLL_B (dev_priv->info->display_mmio_offset + 0x6018) 933 924 #define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B) 934 925 #define DPLL_VCO_ENABLE (1 << 31) 935 926 #define DPLL_DVO_HIGH_SPEED (1 << 30) ··· 988 979 #define SDVO_MULTIPLIER_MASK 0x000000ff 989 980 #define SDVO_MULTIPLIER_SHIFT_HIRES 4 990 981 #define SDVO_MULTIPLIER_SHIFT_VGA 0 991 - #define _DPLL_A_MD 0x0601c /* 965+ only */ 982 + #define _DPLL_A_MD (dev_priv->info->display_mmio_offset + 0x601c) /* 965+ only */ 992 983 /* 993 984 * UDI pixel divider, controlling how many pixels are stuffed 
into a packet. 994 985 * ··· 1025 1016 */ 1026 1017 #define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f 1027 1018 #define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 1028 - #define _DPLL_B_MD 0x06020 /* 965+ only */ 1019 + #define _DPLL_B_MD (dev_priv->info->display_mmio_offset + 0x6020) /* 965+ only */ 1029 1020 #define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD) 1030 1021 1031 1022 #define _FPA0 0x06040 ··· 1168 1159 #define RAMCLK_GATE_D 0x6210 /* CRL only */ 1169 1160 #define DEUC 0x6214 /* CRL only */ 1170 1161 1171 - #define FW_BLC_SELF_VLV 0x6500 1162 + #define FW_BLC_SELF_VLV (VLV_DISPLAY_BASE + 0x6500) 1172 1163 #define FW_CSPWRDWNEN (1<<15) 1173 1164 1174 1165 /* 1175 1166 * Palette regs 1176 1167 */ 1177 1168 1178 - #define _PALETTE_A 0x0a000 1179 - #define _PALETTE_B 0x0a800 1169 + #define _PALETTE_A (dev_priv->info->display_mmio_offset + 0xa000) 1170 + #define _PALETTE_B (dev_priv->info->display_mmio_offset + 0xa800) 1180 1171 #define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B) 1181 1172 1182 1173 /* MCH MMIO space */ ··· 1541 1532 */ 1542 1533 1543 1534 /* Pipe A timing regs */ 1544 - #define _HTOTAL_A 0x60000 1545 - #define _HBLANK_A 0x60004 1546 - #define _HSYNC_A 0x60008 1547 - #define _VTOTAL_A 0x6000c 1548 - #define _VBLANK_A 0x60010 1549 - #define _VSYNC_A 0x60014 1550 - #define _PIPEASRC 0x6001c 1551 - #define _BCLRPAT_A 0x60020 1552 - #define _VSYNCSHIFT_A 0x60028 1535 + #define _HTOTAL_A (dev_priv->info->display_mmio_offset + 0x60000) 1536 + #define _HBLANK_A (dev_priv->info->display_mmio_offset + 0x60004) 1537 + #define _HSYNC_A (dev_priv->info->display_mmio_offset + 0x60008) 1538 + #define _VTOTAL_A (dev_priv->info->display_mmio_offset + 0x6000c) 1539 + #define _VBLANK_A (dev_priv->info->display_mmio_offset + 0x60010) 1540 + #define _VSYNC_A (dev_priv->info->display_mmio_offset + 0x60014) 1541 + #define _PIPEASRC (dev_priv->info->display_mmio_offset + 0x6001c) 1542 + #define _BCLRPAT_A (dev_priv->info->display_mmio_offset + 0x60020) 1543 + #define _VSYNCSHIFT_A (dev_priv->info->display_mmio_offset + 0x60028) 1553 1544 1554 1545 /* Pipe B timing regs */ 1555 - #define _HTOTAL_B 0x61000 1556 - #define _HBLANK_B 0x61004 1557 - #define _HSYNC_B 0x61008 1558 - #define _VTOTAL_B 0x6100c 1559 - #define _VBLANK_B 0x61010 1560 - #define _VSYNC_B 0x61014 1561 - #define _PIPEBSRC 0x6101c 1562 - #define _BCLRPAT_B 0x61020 1563 - #define _VSYNCSHIFT_B 0x61028 1546 + #define _HTOTAL_B (dev_priv->info->display_mmio_offset + 0x61000) 1547 + #define _HBLANK_B (dev_priv->info->display_mmio_offset + 0x61004) 1548 + #define _HSYNC_B (dev_priv->info->display_mmio_offset + 0x61008) 1549 + #define _VTOTAL_B (dev_priv->info->display_mmio_offset + 0x6100c) 1550 + #define _VBLANK_B (dev_priv->info->display_mmio_offset + 0x61010) 1551 + #define _VSYNC_B (dev_priv->info->display_mmio_offset + 0x61014) 1552 + #define _PIPEBSRC (dev_priv->info->display_mmio_offset + 0x6101c) 1553 + #define _BCLRPAT_B (dev_priv->info->display_mmio_offset + 0x61020) 1554 + #define _VSYNCSHIFT_B (dev_priv->info->display_mmio_offset + 0x61028) 1564 1555 1565 1556 1566 1557 #define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B) ··· 1621 1612 1622 1613 1623 1614 /* Hotplug control (945+ only) */ 1624 - #define PORT_HOTPLUG_EN 0x61110 1615 + #define PORT_HOTPLUG_EN (dev_priv->info->display_mmio_offset + 0x61110) 1625 1616 #define HDMIB_HOTPLUG_INT_EN (1 << 29) 1626 1617 #define DPB_HOTPLUG_INT_EN (1 << 29) 1627 1618 #define HDMIC_HOTPLUG_INT_EN (1 << 28) ··· 1648 1639 #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV 
(0 << 2) 1649 1640 #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) 1650 1641 1651 - #define PORT_HOTPLUG_STAT 0x61114 1642 + #define PORT_HOTPLUG_STAT (dev_priv->info->display_mmio_offset + 0x61114) 1652 1643 /* HDMI/DP bits are gen4+ */ 1653 1644 #define DPB_HOTPLUG_LIVE_STATUS (1 << 29) 1654 1645 #define DPC_HOTPLUG_LIVE_STATUS (1 << 28) ··· 1867 1858 #define PP_DIVISOR 0x61210 1868 1859 1869 1860 /* Panel fitting */ 1870 - #define PFIT_CONTROL 0x61230 1861 + #define PFIT_CONTROL (dev_priv->info->display_mmio_offset + 0x61230) 1871 1862 #define PFIT_ENABLE (1 << 31) 1872 1863 #define PFIT_PIPE_MASK (3 << 29) 1873 1864 #define PFIT_PIPE_SHIFT 29 ··· 1885 1876 #define PFIT_SCALING_PROGRAMMED (1 << 26) 1886 1877 #define PFIT_SCALING_PILLAR (2 << 26) 1887 1878 #define PFIT_SCALING_LETTER (3 << 26) 1888 - #define PFIT_PGM_RATIOS 0x61234 1879 + #define PFIT_PGM_RATIOS (dev_priv->info->display_mmio_offset + 0x61234) 1889 1880 /* Pre-965 */ 1890 1881 #define PFIT_VERT_SCALE_SHIFT 20 1891 1882 #define PFIT_VERT_SCALE_MASK 0xfff00000 ··· 1897 1888 #define PFIT_HORIZ_SCALE_SHIFT_965 0 1898 1889 #define PFIT_HORIZ_SCALE_MASK_965 0x00001fff 1899 1890 1900 - #define PFIT_AUTO_RATIOS 0x61238 1891 + #define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238) 1901 1892 1902 1893 /* Backlight control */ 1903 1894 #define BLC_PWM_CTL2 0x61250 /* 965+ only */ ··· 2627 2618 /* Display & cursor control */ 2628 2619 2629 2620 /* Pipe A */ 2630 - #define _PIPEADSL 0x70000 2621 + #define _PIPEADSL (dev_priv->info->display_mmio_offset + 0x70000) 2631 2622 #define DSL_LINEMASK_GEN2 0x00000fff 2632 2623 #define DSL_LINEMASK_GEN3 0x00001fff 2633 - #define _PIPEACONF 0x70008 2624 + #define _PIPEACONF (dev_priv->info->display_mmio_offset + 0x70008) 2634 2625 #define PIPECONF_ENABLE (1<<31) 2635 2626 #define PIPECONF_DISABLE 0 2636 2627 #define PIPECONF_DOUBLE_WIDE (1<<30) ··· 2659 2650 #define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */ 2660 2651 #define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */ 2661 2652 #define PIPECONF_CXSR_DOWNCLOCK (1<<16) 2653 + #define PIPECONF_COLOR_RANGE_SELECT (1 << 13) 2662 2654 #define PIPECONF_BPC_MASK (0x7 << 5) 2663 2655 #define PIPECONF_8BPC (0<<5) 2664 2656 #define PIPECONF_10BPC (1<<5) ··· 2671 2661 #define PIPECONF_DITHER_TYPE_ST1 (1<<2) 2672 2662 #define PIPECONF_DITHER_TYPE_ST2 (2<<2) 2673 2663 #define PIPECONF_DITHER_TYPE_TEMP (3<<2) 2674 - #define _PIPEASTAT 0x70024 2664 + #define _PIPEASTAT (dev_priv->info->display_mmio_offset + 0x70024) 2675 2665 #define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) 2676 2666 #define SPRITE1_FLIPDONE_INT_EN_VLV (1UL<<30) 2677 2667 #define PIPE_CRC_ERROR_ENABLE (1UL<<29) ··· 2682 2672 #define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25) 2683 2673 #define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24) 2684 2674 #define PIPE_DPST_EVENT_ENABLE (1UL<<23) 2685 - #define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<26) 2675 + #define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<22) 2686 2676 #define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22) 2687 2677 #define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) 2688 2678 #define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) ··· 2692 2682 #define PIPEA_HBLANK_INT_EN_VLV (1UL<<16) 2693 2683 #define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16) 2694 2684 #define SPRITE1_FLIPDONE_INT_STATUS_VLV (1UL<<15) 2695 - #define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<15) 2685 + #define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<14) 2696 2686 #define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) 2697 2687 #define 
PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12) 2698 2688 #define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11) ··· 2716 2706 #define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL) 2717 2707 #define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT) 2718 2708 2719 - #define VLV_DPFLIPSTAT 0x70028 2709 + #define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028) 2720 2710 #define PIPEB_LINE_COMPARE_INT_EN (1<<29) 2721 2711 #define PIPEB_HLINE_INT_EN (1<<28) 2722 2712 #define PIPEB_VBLANK_INT_EN (1<<27) ··· 2730 2720 #define SPRITEA_FLIPDONE_INT_EN (1<<17) 2731 2721 #define PLANEA_FLIPDONE_INT_EN (1<<16) 2732 2722 2733 - #define DPINVGTT 0x7002c /* VLV only */ 2723 + #define DPINVGTT (VLV_DISPLAY_BASE + 0x7002c) /* VLV only */ 2734 2724 #define CURSORB_INVALID_GTT_INT_EN (1<<23) 2735 2725 #define CURSORA_INVALID_GTT_INT_EN (1<<22) 2736 2726 #define SPRITED_INVALID_GTT_INT_EN (1<<21) ··· 2758 2748 #define DSPARB_BEND_SHIFT 9 /* on 855 */ 2759 2749 #define DSPARB_AEND_SHIFT 0 2760 2750 2761 - #define DSPFW1 0x70034 2751 + #define DSPFW1 (dev_priv->info->display_mmio_offset + 0x70034) 2762 2752 #define DSPFW_SR_SHIFT 23 2763 2753 #define DSPFW_SR_MASK (0x1ff<<23) 2764 2754 #define DSPFW_CURSORB_SHIFT 16 ··· 2766 2756 #define DSPFW_PLANEB_SHIFT 8 2767 2757 #define DSPFW_PLANEB_MASK (0x7f<<8) 2768 2758 #define DSPFW_PLANEA_MASK (0x7f) 2769 - #define DSPFW2 0x70038 2759 + #define DSPFW2 (dev_priv->info->display_mmio_offset + 0x70038) 2770 2760 #define DSPFW_CURSORA_MASK 0x00003f00 2771 2761 #define DSPFW_CURSORA_SHIFT 8 2772 2762 #define DSPFW_PLANEC_MASK (0x7f) 2773 - #define DSPFW3 0x7003c 2763 + #define DSPFW3 (dev_priv->info->display_mmio_offset + 0x7003c) 2774 2764 #define DSPFW_HPLL_SR_EN (1<<31) 2775 2765 #define DSPFW_CURSOR_SR_SHIFT 24 2776 2766 #define PINEVIEW_SELF_REFRESH_EN (1<<30) ··· 2782 2772 /* drain latency register values*/ 2783 2773 #define DRAIN_LATENCY_PRECISION_32 32 2784 2774 #define DRAIN_LATENCY_PRECISION_16 16 2785 - #define VLV_DDL1 0x70050 2775 + #define VLV_DDL1 (VLV_DISPLAY_BASE + 0x70050) 2786 2776 #define DDL_CURSORA_PRECISION_32 (1<<31) 2787 2777 #define DDL_CURSORA_PRECISION_16 (0<<31) 2788 2778 #define DDL_CURSORA_SHIFT 24 2789 2779 #define DDL_PLANEA_PRECISION_32 (1<<7) 2790 2780 #define DDL_PLANEA_PRECISION_16 (0<<7) 2791 - #define VLV_DDL2 0x70054 2781 + #define VLV_DDL2 (VLV_DISPLAY_BASE + 0x70054) 2792 2782 #define DDL_CURSORB_PRECISION_32 (1<<31) 2793 2783 #define DDL_CURSORB_PRECISION_16 (0<<31) 2794 2784 #define DDL_CURSORB_SHIFT 24 ··· 2932 2922 * } while (high1 != high2); 2933 2923 * frame = (high1 << 8) | low1; 2934 2924 */ 2935 - #define _PIPEAFRAMEHIGH 0x70040 2925 + #define _PIPEAFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x70040) 2936 2926 #define PIPE_FRAME_HIGH_MASK 0x0000ffff 2937 2927 #define PIPE_FRAME_HIGH_SHIFT 0 2938 - #define _PIPEAFRAMEPIXEL 0x70044 2928 + #define _PIPEAFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x70044) 2939 2929 #define PIPE_FRAME_LOW_MASK 0xff000000 2940 2930 #define PIPE_FRAME_LOW_SHIFT 24 2941 2931 #define PIPE_PIXEL_MASK 0x00ffffff ··· 2946 2936 #define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45) 2947 2937 2948 2938 /* Cursor A & B regs */ 2949 - #define _CURACNTR 0x70080 2939 + #define _CURACNTR (dev_priv->info->display_mmio_offset + 0x70080) 2950 2940 /* Old style CUR*CNTR flags (desktop 8xx) */ 2951 2941 #define CURSOR_ENABLE 0x80000000 2952 2942 #define CURSOR_GAMMA_ENABLE 0x40000000 ··· 2967 2957 #define MCURSOR_PIPE_A 0x00 2968 2958 #define MCURSOR_PIPE_B (1 
<< 28) 2969 2959 #define MCURSOR_GAMMA_ENABLE (1 << 26) 2970 - #define _CURABASE 0x70084 2971 - #define _CURAPOS 0x70088 2960 + #define _CURABASE (dev_priv->info->display_mmio_offset + 0x70084) 2961 + #define _CURAPOS (dev_priv->info->display_mmio_offset + 0x70088) 2972 2962 #define CURSOR_POS_MASK 0x007FF 2973 2963 #define CURSOR_POS_SIGN 0x8000 2974 2964 #define CURSOR_X_SHIFT 0 2975 2965 #define CURSOR_Y_SHIFT 16 2976 2966 #define CURSIZE 0x700a0 2977 - #define _CURBCNTR 0x700c0 2978 - #define _CURBBASE 0x700c4 2979 - #define _CURBPOS 0x700c8 2967 + #define _CURBCNTR (dev_priv->info->display_mmio_offset + 0x700c0) 2968 + #define _CURBBASE (dev_priv->info->display_mmio_offset + 0x700c4) 2969 + #define _CURBPOS (dev_priv->info->display_mmio_offset + 0x700c8) 2980 2970 2981 2971 #define _CURBCNTR_IVB 0x71080 2982 2972 #define _CURBBASE_IVB 0x71084 ··· 2991 2981 #define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB) 2992 2982 2993 2983 /* Display A control */ 2994 - #define _DSPACNTR 0x70180 2984 + #define _DSPACNTR (dev_priv->info->display_mmio_offset + 0x70180) 2995 2985 #define DISPLAY_PLANE_ENABLE (1<<31) 2996 2986 #define DISPLAY_PLANE_DISABLE 0 2997 2987 #define DISPPLANE_GAMMA_ENABLE (1<<30) ··· 3024 3014 #define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) 3025 3015 #define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */ 3026 3016 #define DISPPLANE_TILED (1<<10) 3027 - #define _DSPAADDR 0x70184 3028 - #define _DSPASTRIDE 0x70188 3029 - #define _DSPAPOS 0x7018C /* reserved */ 3030 - #define _DSPASIZE 0x70190 3031 - #define _DSPASURF 0x7019C /* 965+ only */ 3032 - #define _DSPATILEOFF 0x701A4 /* 965+ only */ 3033 - #define _DSPAOFFSET 0x701A4 /* HSW */ 3034 - #define _DSPASURFLIVE 0x701AC 3017 + #define _DSPAADDR (dev_priv->info->display_mmio_offset + 0x70184) 3018 + #define _DSPASTRIDE (dev_priv->info->display_mmio_offset + 0x70188) 3019 + #define _DSPAPOS (dev_priv->info->display_mmio_offset + 0x7018C) /* reserved */ 3020 + #define _DSPASIZE (dev_priv->info->display_mmio_offset + 0x70190) 3021 + #define _DSPASURF (dev_priv->info->display_mmio_offset + 0x7019C) /* 965+ only */ 3022 + #define _DSPATILEOFF (dev_priv->info->display_mmio_offset + 0x701A4) /* 965+ only */ 3023 + #define _DSPAOFFSET (dev_priv->info->display_mmio_offset + 0x701A4) /* HSW */ 3024 + #define _DSPASURFLIVE (dev_priv->info->display_mmio_offset + 0x701AC) 3035 3025 3036 3026 #define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR) 3037 3027 #define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR) ··· 3052 3042 (I915_WRITE((reg), (gfx_addr) | I915_LO_DISPBASE(I915_READ(reg)))) 3053 3043 3054 3044 /* VBIOS flags */ 3055 - #define SWF00 0x71410 3056 - #define SWF01 0x71414 3057 - #define SWF02 0x71418 3058 - #define SWF03 0x7141c 3059 - #define SWF04 0x71420 3060 - #define SWF05 0x71424 3061 - #define SWF06 0x71428 3062 - #define SWF10 0x70410 3063 - #define SWF11 0x70414 3064 - #define SWF14 0x71420 3065 - #define SWF30 0x72414 3066 - #define SWF31 0x72418 3067 - #define SWF32 0x7241c 3045 + #define SWF00 (dev_priv->info->display_mmio_offset + 0x71410) 3046 + #define SWF01 (dev_priv->info->display_mmio_offset + 0x71414) 3047 + #define SWF02 (dev_priv->info->display_mmio_offset + 0x71418) 3048 + #define SWF03 (dev_priv->info->display_mmio_offset + 0x7141c) 3049 + #define SWF04 (dev_priv->info->display_mmio_offset + 0x71420) 3050 + #define SWF05 (dev_priv->info->display_mmio_offset + 0x71424) 3051 + #define SWF06 (dev_priv->info->display_mmio_offset + 0x71428) 3052 + #define SWF10 
(dev_priv->info->display_mmio_offset + 0x70410) 3053 + #define SWF11 (dev_priv->info->display_mmio_offset + 0x70414) 3054 + #define SWF14 (dev_priv->info->display_mmio_offset + 0x71420) 3055 + #define SWF30 (dev_priv->info->display_mmio_offset + 0x72414) 3056 + #define SWF31 (dev_priv->info->display_mmio_offset + 0x72418) 3057 + #define SWF32 (dev_priv->info->display_mmio_offset + 0x7241c) 3068 3058 3069 3059 /* Pipe B */ 3070 - #define _PIPEBDSL 0x71000 3071 - #define _PIPEBCONF 0x71008 3072 - #define _PIPEBSTAT 0x71024 3073 - #define _PIPEBFRAMEHIGH 0x71040 3074 - #define _PIPEBFRAMEPIXEL 0x71044 3060 + #define _PIPEBDSL (dev_priv->info->display_mmio_offset + 0x71000) 3061 + #define _PIPEBCONF (dev_priv->info->display_mmio_offset + 0x71008) 3062 + #define _PIPEBSTAT (dev_priv->info->display_mmio_offset + 0x71024) 3063 + #define _PIPEBFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x71040) 3064 + #define _PIPEBFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x71044) 3075 3065 #define _PIPEB_FRMCOUNT_GM45 0x71040 3076 3066 #define _PIPEB_FLIPCOUNT_GM45 0x71044 3077 3067 3078 3068 3079 3069 /* Display B control */ 3080 - #define _DSPBCNTR 0x71180 3070 + #define _DSPBCNTR (dev_priv->info->display_mmio_offset + 0x71180) 3081 3071 #define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15) 3082 3072 #define DISPPLANE_ALPHA_TRANS_DISABLE 0 3083 3073 #define DISPPLANE_SPRITE_ABOVE_DISPLAY 0 3084 3074 #define DISPPLANE_SPRITE_ABOVE_OVERLAY (1) 3085 - #define _DSPBADDR 0x71184 3086 - #define _DSPBSTRIDE 0x71188 3087 - #define _DSPBPOS 0x7118C 3088 - #define _DSPBSIZE 0x71190 3089 - #define _DSPBSURF 0x7119C 3090 - #define _DSPBTILEOFF 0x711A4 3091 - #define _DSPBOFFSET 0x711A4 3092 - #define _DSPBSURFLIVE 0x711AC 3075 + #define _DSPBADDR (dev_priv->info->display_mmio_offset + 0x71184) 3076 + #define _DSPBSTRIDE (dev_priv->info->display_mmio_offset + 0x71188) 3077 + #define _DSPBPOS (dev_priv->info->display_mmio_offset + 0x7118C) 3078 + #define _DSPBSIZE (dev_priv->info->display_mmio_offset + 0x71190) 3079 + #define _DSPBSURF (dev_priv->info->display_mmio_offset + 0x7119C) 3080 + #define _DSPBTILEOFF (dev_priv->info->display_mmio_offset + 0x711A4) 3081 + #define _DSPBOFFSET (dev_priv->info->display_mmio_offset + 0x711A4) 3082 + #define _DSPBSURFLIVE (dev_priv->info->display_mmio_offset + 0x711AC) 3093 3083 3094 3084 /* Sprite A control */ 3095 3085 #define _DVSACNTR 0x72180 ··· 3238 3228 # define VGA_2X_MODE (1 << 30) 3239 3229 # define VGA_PIPE_B_SELECT (1 << 29) 3240 3230 3231 + #define VLV_VGACNTRL (VLV_DISPLAY_BASE + 0x71400) 3232 + 3241 3233 /* Ironlake */ 3242 3234 3243 3235 #define CPU_VGACNTRL 0x41000 ··· 3280 3268 #define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff 3281 3269 3282 3270 3283 - #define _PIPEA_DATA_M1 0x60030 3271 + #define _PIPEA_DATA_M1 (dev_priv->info->display_mmio_offset + 0x60030) 3284 3272 #define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */ 3285 3273 #define TU_SIZE_MASK 0x7e000000 3286 3274 #define PIPE_DATA_M1_OFFSET 0 3287 - #define _PIPEA_DATA_N1 0x60034 3275 + #define _PIPEA_DATA_N1 (dev_priv->info->display_mmio_offset + 0x60034) 3288 3276 #define PIPE_DATA_N1_OFFSET 0 3289 3277 3290 - #define _PIPEA_DATA_M2 0x60038 3278 + #define _PIPEA_DATA_M2 (dev_priv->info->display_mmio_offset + 0x60038) 3291 3279 #define PIPE_DATA_M2_OFFSET 0 3292 - #define _PIPEA_DATA_N2 0x6003c 3280 + #define _PIPEA_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6003c) 3293 3281 #define PIPE_DATA_N2_OFFSET 0 3294 3282 3295 - #define _PIPEA_LINK_M1 0x60040 3283 + #define _PIPEA_LINK_M1 
(dev_priv->info->display_mmio_offset + 0x60040) 3296 3284 #define PIPE_LINK_M1_OFFSET 0 3297 - #define _PIPEA_LINK_N1 0x60044 3285 + #define _PIPEA_LINK_N1 (dev_priv->info->display_mmio_offset + 0x60044) 3298 3286 #define PIPE_LINK_N1_OFFSET 0 3299 3287 3300 - #define _PIPEA_LINK_M2 0x60048 3288 + #define _PIPEA_LINK_M2 (dev_priv->info->display_mmio_offset + 0x60048) 3301 3289 #define PIPE_LINK_M2_OFFSET 0 3302 - #define _PIPEA_LINK_N2 0x6004c 3290 + #define _PIPEA_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6004c) 3303 3291 #define PIPE_LINK_N2_OFFSET 0 3304 3292 3305 3293 /* PIPEB timing regs are same start from 0x61000 */ 3306 3294 3307 - #define _PIPEB_DATA_M1 0x61030 3308 - #define _PIPEB_DATA_N1 0x61034 3295 + #define _PIPEB_DATA_M1 (dev_priv->info->display_mmio_offset + 0x61030) 3296 + #define _PIPEB_DATA_N1 (dev_priv->info->display_mmio_offset + 0x61034) 3309 3297 3310 - #define _PIPEB_DATA_M2 0x61038 3311 - #define _PIPEB_DATA_N2 0x6103c 3298 + #define _PIPEB_DATA_M2 (dev_priv->info->display_mmio_offset + 0x61038) 3299 + #define _PIPEB_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6103c) 3312 3300 3313 - #define _PIPEB_LINK_M1 0x61040 3314 - #define _PIPEB_LINK_N1 0x61044 3301 + #define _PIPEB_LINK_M1 (dev_priv->info->display_mmio_offset + 0x61040) 3302 + #define _PIPEB_LINK_N1 (dev_priv->info->display_mmio_offset + 0x61044) 3315 3303 3316 - #define _PIPEB_LINK_M2 0x61048 3317 - #define _PIPEB_LINK_N2 0x6104c 3304 + #define _PIPEB_LINK_M2 (dev_priv->info->display_mmio_offset + 0x61048) 3305 + #define _PIPEB_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6104c) 3318 3306 3319 3307 #define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1) 3320 3308 #define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1) ··· 3711 3699 #define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B) 3712 3700 #define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B) 3713 3701 3714 - #define VLV_VIDEO_DIP_CTL_A 0x60200 3715 - #define VLV_VIDEO_DIP_DATA_A 0x60208 3716 - #define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210 3702 + #define VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200) 3703 + #define VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208) 3704 + #define VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210) 3717 3705 3718 - #define VLV_VIDEO_DIP_CTL_B 0x61170 3719 - #define VLV_VIDEO_DIP_DATA_B 0x61174 3720 - #define VLV_VIDEO_DIP_GDCP_PAYLOAD_B 0x61178 3706 + #define VLV_VIDEO_DIP_CTL_B (VLV_DISPLAY_BASE + 0x61170) 3707 + #define VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174) 3708 + #define VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178) 3721 3709 3722 3710 #define VLV_TVIDEO_DIP_CTL(pipe) \ 3723 3711 _PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B) ··· 4007 3995 #define LVDS_DETECTED (1 << 1) 4008 3996 4009 3997 /* vlv has 2 sets of panel control regs. 
*/ 4010 - #define PIPEA_PP_STATUS 0x61200 4011 - #define PIPEA_PP_CONTROL 0x61204 4012 - #define PIPEA_PP_ON_DELAYS 0x61208 4013 - #define PIPEA_PP_OFF_DELAYS 0x6120c 4014 - #define PIPEA_PP_DIVISOR 0x61210 3998 + #define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200) 3999 + #define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204) 4000 + #define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208) 4001 + #define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c) 4002 + #define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210) 4015 4003 4016 - #define PIPEB_PP_STATUS 0x61300 4017 - #define PIPEB_PP_CONTROL 0x61304 4018 - #define PIPEB_PP_ON_DELAYS 0x61308 4019 - #define PIPEB_PP_OFF_DELAYS 0x6130c 4020 - #define PIPEB_PP_DIVISOR 0x61310 4004 + #define PIPEB_PP_STATUS (VLV_DISPLAY_BASE + 0x61300) 4005 + #define PIPEB_PP_CONTROL (VLV_DISPLAY_BASE + 0x61304) 4006 + #define PIPEB_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61308) 4007 + #define PIPEB_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6130c) 4008 + #define PIPEB_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61310) 4021 4009 4022 4010 #define PCH_PP_STATUS 0xc7200 4023 4011 #define PCH_PP_CONTROL 0xc7204 ··· 4198 4186 #define GEN6_RP_INTERRUPT_LIMITS 0xA014 4199 4187 #define GEN6_RPSTAT1 0xA01C 4200 4188 #define GEN6_CAGF_SHIFT 8 4189 + #define HSW_CAGF_SHIFT 7 4201 4190 #define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT) 4191 + #define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT) 4202 4192 #define GEN6_RP_CONTROL 0xA024 4203 4193 #define GEN6_RP_MEDIA_TURBO (1<<11) 4204 4194 #define GEN6_RP_MEDIA_MODE_MASK (3<<9) ··· 4311 4297 #define GEN7_ROW_CHICKEN2_GT2 0xf4f4 4312 4298 #define DOP_CLOCK_GATING_DISABLE (1<<0) 4313 4299 4314 - #define G4X_AUD_VID_DID 0x62020 4300 + #define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020) 4315 4301 #define INTEL_AUDIO_DEVCL 0x808629FB 4316 4302 #define INTEL_AUDIO_DEVBLC 0x80862801 4317 4303 #define INTEL_AUDIO_DEVCTG 0x80862802 ··· 4427 4413 #define AUDIO_CP_READY_C (1<<9) 4428 4414 4429 4415 /* HSW Power Wells */ 4430 - #define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */ 4431 - #define HSW_PWR_WELL_CTL2 0x45404 /* Driver */ 4432 - #define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */ 4433 - #define HSW_PWR_WELL_CTL4 0x4540C /* Debug */ 4416 + #define HSW_PWR_WELL_BIOS 0x45400 /* CTL1 */ 4417 + #define HSW_PWR_WELL_DRIVER 0x45404 /* CTL2 */ 4418 + #define HSW_PWR_WELL_KVMR 0x45408 /* CTL3 */ 4419 + #define HSW_PWR_WELL_DEBUG 0x4540C /* CTL4 */ 4434 4420 #define HSW_PWR_WELL_ENABLE (1<<31) 4435 4421 #define HSW_PWR_WELL_STATE (1<<30) 4436 4422 #define HSW_PWR_WELL_CTL5 0x45410
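Every display register above now carries its platform offset statically: the old VLV literals make the base derivable from this very hunk (0x182084 becomes VLV_DISPLAY_BASE + 0x2084, so VLV_DISPLAY_BASE is 0x180000), and dev_priv->info->display_mmio_offset is simply 0 on platforms that don't move the display block. The payoff is that register accessors need no translation step at runtime. A sketch under those assumptions — the function is illustrative, not a helper from the patch:

/* Illustrative only: with dev_priv in scope, DPLL(pipe) already
 * expands to display_mmio_offset + 0x6014/0x6018, so on VLV this
 * reads 0x186014/0x186018 and elsewhere plain 0x6014/0x6018. */
static u32 read_dpll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	return I915_READ(DPLL(pipe));
}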
+29 -511
drivers/gpu/drm/i915/i915_suspend.c
··· 29 29 #include "intel_drv.h" 30 30 #include "i915_reg.h" 31 31 32 - static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) 33 - { 34 - struct drm_i915_private *dev_priv = dev->dev_private; 35 - u32 dpll_reg; 36 - 37 - /* On IVB, 3rd pipe shares PLL with another one */ 38 - if (pipe > 1) 39 - return false; 40 - 41 - if (HAS_PCH_SPLIT(dev)) 42 - dpll_reg = _PCH_DPLL(pipe); 43 - else 44 - dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B; 45 - 46 - return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE); 47 - } 48 - 49 - static void i915_save_palette(struct drm_device *dev, enum pipe pipe) 50 - { 51 - struct drm_i915_private *dev_priv = dev->dev_private; 52 - unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B); 53 - u32 *array; 54 - int i; 55 - 56 - if (!i915_pipe_enabled(dev, pipe)) 57 - return; 58 - 59 - if (HAS_PCH_SPLIT(dev)) 60 - reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B; 61 - 62 - if (pipe == PIPE_A) 63 - array = dev_priv->regfile.save_palette_a; 64 - else 65 - array = dev_priv->regfile.save_palette_b; 66 - 67 - for (i = 0; i < 256; i++) 68 - array[i] = I915_READ(reg + (i << 2)); 69 - } 70 - 71 - static void i915_restore_palette(struct drm_device *dev, enum pipe pipe) 72 - { 73 - struct drm_i915_private *dev_priv = dev->dev_private; 74 - unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B); 75 - u32 *array; 76 - int i; 77 - 78 - if (!i915_pipe_enabled(dev, pipe)) 79 - return; 80 - 81 - if (HAS_PCH_SPLIT(dev)) 82 - reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B; 83 - 84 - if (pipe == PIPE_A) 85 - array = dev_priv->regfile.save_palette_a; 86 - else 87 - array = dev_priv->regfile.save_palette_b; 88 - 89 - for (i = 0; i < 256; i++) 90 - I915_WRITE(reg + (i << 2), array[i]); 91 - } 92 - 93 32 static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg) 94 33 { 95 34 struct drm_i915_private *dev_priv = dev->dev_private; ··· 68 129 struct drm_i915_private *dev_priv = dev->dev_private; 69 130 int i; 70 131 u16 cr_index, cr_data, st01; 132 + 133 + /* VGA state */ 134 + dev_priv->regfile.saveVGA0 = I915_READ(VGA0); 135 + dev_priv->regfile.saveVGA1 = I915_READ(VGA1); 136 + dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD); 137 + dev_priv->regfile.saveVGACNTRL = I915_READ(i915_vgacntrl_reg(dev)); 71 138 72 139 /* VGA color palette registers */ 73 140 dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK); ··· 133 188 int i; 134 189 u16 cr_index, cr_data, st01; 135 190 191 + /* VGA state */ 192 + I915_WRITE(i915_vgacntrl_reg(dev), dev_priv->regfile.saveVGACNTRL); 193 + 194 + I915_WRITE(VGA0, dev_priv->regfile.saveVGA0); 195 + I915_WRITE(VGA1, dev_priv->regfile.saveVGA1); 196 + I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD); 197 + POSTING_READ(VGA_PD); 198 + udelay(150); 199 + 136 200 /* MSR bits */ 137 201 I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR); 138 202 if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) { ··· 189 235 I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK); 190 236 } 191 237 192 - static void i915_save_modeset_reg(struct drm_device *dev) 193 - { 194 - struct drm_i915_private *dev_priv = dev->dev_private; 195 - int i; 196 - 197 - if (drm_core_check_feature(dev, DRIVER_MODESET)) 198 - return; 199 - 200 - /* Cursor state */ 201 - dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR); 202 - dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS); 203 - dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE); 204 - dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR); 205 - 
dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS); 206 - dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE); 207 - if (IS_GEN2(dev)) 208 - dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE); 209 - 210 - if (HAS_PCH_SPLIT(dev)) { 211 - dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); 212 - dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); 213 - } 214 - 215 - /* Pipe & plane A info */ 216 - dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF); 217 - dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC); 218 - if (HAS_PCH_SPLIT(dev)) { 219 - dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0); 220 - dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1); 221 - dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A); 222 - } else { 223 - dev_priv->regfile.saveFPA0 = I915_READ(_FPA0); 224 - dev_priv->regfile.saveFPA1 = I915_READ(_FPA1); 225 - dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A); 226 - } 227 - if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) 228 - dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD); 229 - dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A); 230 - dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A); 231 - dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A); 232 - dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A); 233 - dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A); 234 - dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A); 235 - if (!HAS_PCH_SPLIT(dev)) 236 - dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A); 237 - 238 - if (HAS_PCH_SPLIT(dev)) { 239 - dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1); 240 - dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1); 241 - dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1); 242 - dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1); 243 - 244 - dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL); 245 - dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL); 246 - 247 - dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1); 248 - dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ); 249 - dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS); 250 - 251 - dev_priv->regfile.saveTRANSACONF = I915_READ(_TRANSACONF); 252 - dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A); 253 - dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A); 254 - dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A); 255 - dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A); 256 - dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A); 257 - dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A); 258 - } 259 - 260 - dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR); 261 - dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE); 262 - dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE); 263 - dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS); 264 - dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR); 265 - if (INTEL_INFO(dev)->gen >= 4) { 266 - dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF); 267 - dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF); 268 - } 269 - i915_save_palette(dev, PIPE_A); 270 - dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT); 271 - 272 - /* Pipe & plane B info */ 273 - dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF); 274 - dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC); 275 - if (HAS_PCH_SPLIT(dev)) { 276 - dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0); 277 - 
dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1); 278 - dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B); 279 - } else { 280 - dev_priv->regfile.saveFPB0 = I915_READ(_FPB0); 281 - dev_priv->regfile.saveFPB1 = I915_READ(_FPB1); 282 - dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B); 283 - } 284 - if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) 285 - dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD); 286 - dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B); 287 - dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B); 288 - dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B); 289 - dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B); 290 - dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B); 291 - dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B); 292 - if (!HAS_PCH_SPLIT(dev)) 293 - dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B); 294 - 295 - if (HAS_PCH_SPLIT(dev)) { 296 - dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1); 297 - dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1); 298 - dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1); 299 - dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1); 300 - 301 - dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL); 302 - dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL); 303 - 304 - dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1); 305 - dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ); 306 - dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS); 307 - 308 - dev_priv->regfile.saveTRANSBCONF = I915_READ(_TRANSBCONF); 309 - dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B); 310 - dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B); 311 - dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B); 312 - dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B); 313 - dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B); 314 - dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B); 315 - } 316 - 317 - dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR); 318 - dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE); 319 - dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE); 320 - dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS); 321 - dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR); 322 - if (INTEL_INFO(dev)->gen >= 4) { 323 - dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF); 324 - dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF); 325 - } 326 - i915_save_palette(dev, PIPE_B); 327 - dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT); 328 - 329 - /* Fences */ 330 - switch (INTEL_INFO(dev)->gen) { 331 - case 7: 332 - case 6: 333 - for (i = 0; i < 16; i++) 334 - dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); 335 - break; 336 - case 5: 337 - case 4: 338 - for (i = 0; i < 16; i++) 339 - dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); 340 - break; 341 - case 3: 342 - if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 343 - for (i = 0; i < 8; i++) 344 - dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); 345 - case 2: 346 - for (i = 0; i < 8; i++) 347 - dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); 348 - break; 349 - } 350 - 351 - /* CRT state */ 352 - if (HAS_PCH_SPLIT(dev)) 353 - dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA); 354 - else 355 - dev_priv->regfile.saveADPA = I915_READ(ADPA); 356 - 357 - return; 358 - } 359 - 360 - 
static void i915_restore_modeset_reg(struct drm_device *dev) 361 - { 362 - struct drm_i915_private *dev_priv = dev->dev_private; 363 - int dpll_a_reg, fpa0_reg, fpa1_reg; 364 - int dpll_b_reg, fpb0_reg, fpb1_reg; 365 - int i; 366 - 367 - if (drm_core_check_feature(dev, DRIVER_MODESET)) 368 - return; 369 - 370 - /* Fences */ 371 - switch (INTEL_INFO(dev)->gen) { 372 - case 7: 373 - case 6: 374 - for (i = 0; i < 16; i++) 375 - I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]); 376 - break; 377 - case 5: 378 - case 4: 379 - for (i = 0; i < 16; i++) 380 - I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]); 381 - break; 382 - case 3: 383 - case 2: 384 - if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 385 - for (i = 0; i < 8; i++) 386 - I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]); 387 - for (i = 0; i < 8; i++) 388 - I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]); 389 - break; 390 - } 391 - 392 - 393 - if (HAS_PCH_SPLIT(dev)) { 394 - dpll_a_reg = _PCH_DPLL_A; 395 - dpll_b_reg = _PCH_DPLL_B; 396 - fpa0_reg = _PCH_FPA0; 397 - fpb0_reg = _PCH_FPB0; 398 - fpa1_reg = _PCH_FPA1; 399 - fpb1_reg = _PCH_FPB1; 400 - } else { 401 - dpll_a_reg = _DPLL_A; 402 - dpll_b_reg = _DPLL_B; 403 - fpa0_reg = _FPA0; 404 - fpb0_reg = _FPB0; 405 - fpa1_reg = _FPA1; 406 - fpb1_reg = _FPB1; 407 - } 408 - 409 - if (HAS_PCH_SPLIT(dev)) { 410 - I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL); 411 - I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL); 412 - } 413 - 414 - /* Pipe & plane A info */ 415 - /* Prime the clock */ 416 - if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) { 417 - I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A & 418 - ~DPLL_VCO_ENABLE); 419 - POSTING_READ(dpll_a_reg); 420 - udelay(150); 421 - } 422 - I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0); 423 - I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1); 424 - /* Actually enable it */ 425 - I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A); 426 - POSTING_READ(dpll_a_reg); 427 - udelay(150); 428 - if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { 429 - I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD); 430 - POSTING_READ(_DPLL_A_MD); 431 - } 432 - udelay(150); 433 - 434 - /* Restore mode */ 435 - I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A); 436 - I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A); 437 - I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A); 438 - I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A); 439 - I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A); 440 - I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A); 441 - if (!HAS_PCH_SPLIT(dev)) 442 - I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A); 443 - 444 - if (HAS_PCH_SPLIT(dev)) { 445 - I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1); 446 - I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1); 447 - I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1); 448 - I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1); 449 - 450 - I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL); 451 - I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL); 452 - 453 - I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1); 454 - I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ); 455 - I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS); 456 - 457 - I915_WRITE(_TRANSACONF, dev_priv->regfile.saveTRANSACONF); 458 - I915_WRITE(_TRANS_HTOTAL_A, 
dev_priv->regfile.saveTRANS_HTOTAL_A); 459 - I915_WRITE(_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A); 460 - I915_WRITE(_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A); 461 - I915_WRITE(_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A); 462 - I915_WRITE(_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A); 463 - I915_WRITE(_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A); 464 - } 465 - 466 - /* Restore plane info */ 467 - I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE); 468 - I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS); 469 - I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC); 470 - I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR); 471 - I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE); 472 - if (INTEL_INFO(dev)->gen >= 4) { 473 - I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF); 474 - I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF); 475 - } 476 - 477 - I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF); 478 - 479 - i915_restore_palette(dev, PIPE_A); 480 - /* Enable the plane */ 481 - I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR); 482 - I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR)); 483 - 484 - /* Pipe & plane B info */ 485 - if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) { 486 - I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B & 487 - ~DPLL_VCO_ENABLE); 488 - POSTING_READ(dpll_b_reg); 489 - udelay(150); 490 - } 491 - I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0); 492 - I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1); 493 - /* Actually enable it */ 494 - I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B); 495 - POSTING_READ(dpll_b_reg); 496 - udelay(150); 497 - if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { 498 - I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD); 499 - POSTING_READ(_DPLL_B_MD); 500 - } 501 - udelay(150); 502 - 503 - /* Restore mode */ 504 - I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B); 505 - I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B); 506 - I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B); 507 - I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B); 508 - I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B); 509 - I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B); 510 - if (!HAS_PCH_SPLIT(dev)) 511 - I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B); 512 - 513 - if (HAS_PCH_SPLIT(dev)) { 514 - I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1); 515 - I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1); 516 - I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1); 517 - I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1); 518 - 519 - I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL); 520 - I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL); 521 - 522 - I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1); 523 - I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ); 524 - I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS); 525 - 526 - I915_WRITE(_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF); 527 - I915_WRITE(_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B); 528 - I915_WRITE(_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B); 529 - I915_WRITE(_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B); 530 - I915_WRITE(_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B); 531 - I915_WRITE(_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B); 532 - I915_WRITE(_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B); 533 - } 534 - 535 - /* Restore 
plane info */ 536 - I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE); 537 - I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS); 538 - I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC); 539 - I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR); 540 - I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE); 541 - if (INTEL_INFO(dev)->gen >= 4) { 542 - I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF); 543 - I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF); 544 - } 545 - 546 - I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF); 547 - 548 - i915_restore_palette(dev, PIPE_B); 549 - /* Enable the plane */ 550 - I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR); 551 - I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR)); 552 - 553 - /* Cursor state */ 554 - I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS); 555 - I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR); 556 - I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE); 557 - I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS); 558 - I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR); 559 - I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE); 560 - if (IS_GEN2(dev)) 561 - I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE); 562 - 563 - /* CRT state */ 564 - if (HAS_PCH_SPLIT(dev)) 565 - I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA); 566 - else 567 - I915_WRITE(ADPA, dev_priv->regfile.saveADPA); 568 - 569 - return; 570 - } 571 - 572 238 static void i915_save_display(struct drm_device *dev) 573 239 { 574 240 struct drm_i915_private *dev_priv = dev->dev_private; 575 241 576 242 /* Display arbitration control */ 577 - dev_priv->regfile.saveDSPARB = I915_READ(DSPARB); 243 + if (INTEL_INFO(dev)->gen <= 4) 244 + dev_priv->regfile.saveDSPARB = I915_READ(DSPARB); 578 245 579 246 /* This is only meaningful in non-KMS mode */ 580 247 /* Don't regfile.save them in KMS mode */ 581 - i915_save_modeset_reg(dev); 248 + if (!drm_core_check_feature(dev, DRIVER_MODESET)) 249 + i915_save_display_reg(dev); 582 250 583 251 /* LVDS state */ 584 252 if (HAS_PCH_SPLIT(dev)) { ··· 234 658 dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR); 235 659 } 236 660 237 - if (!drm_core_check_feature(dev, DRIVER_MODESET)) { 238 - /* Display Port state */ 239 - if (SUPPORTS_INTEGRATED_DP(dev)) { 240 - dev_priv->regfile.saveDP_B = I915_READ(DP_B); 241 - dev_priv->regfile.saveDP_C = I915_READ(DP_C); 242 - dev_priv->regfile.saveDP_D = I915_READ(DP_D); 243 - dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M); 244 - dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M); 245 - dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N); 246 - dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N); 247 - dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M); 248 - dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M); 249 - dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N); 250 - dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N); 251 - } 252 - /* FIXME: regfile.save TV & SDVO state */ 253 - } 254 - 255 661 /* Only regfile.save FBC state on the platform that supports FBC */ 256 662 if (I915_HAS_FBC(dev)) { 257 663 if (HAS_PCH_SPLIT(dev)) { ··· 248 690 } 249 691 } 250 692 251 - /* VGA state */ 252 - dev_priv->regfile.saveVGA0 = I915_READ(VGA0); 253 - dev_priv->regfile.saveVGA1 = I915_READ(VGA1); 254 - dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD); 255 - if (HAS_PCH_SPLIT(dev)) 256 - 
dev_priv->regfile.saveVGACNTRL = I915_READ(CPU_VGACNTRL); 257 - else 258 - dev_priv->regfile.saveVGACNTRL = I915_READ(VGACNTRL); 259 - 260 - i915_save_vga(dev); 693 + if (!drm_core_check_feature(dev, DRIVER_MODESET)) 694 + i915_save_vga(dev); 261 695 } 262 696 263 697 static void i915_restore_display(struct drm_device *dev) ··· 257 707 struct drm_i915_private *dev_priv = dev->dev_private; 258 708 259 709 /* Display arbitration */ 260 - I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB); 710 + if (INTEL_INFO(dev)->gen <= 4) 711 + I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB); 261 712 262 - if (!drm_core_check_feature(dev, DRIVER_MODESET)) { 263 - /* Display port ratios (must be done before clock is set) */ 264 - if (SUPPORTS_INTEGRATED_DP(dev)) { 265 - I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->regfile.savePIPEA_GMCH_DATA_M); 266 - I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->regfile.savePIPEB_GMCH_DATA_M); 267 - I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->regfile.savePIPEA_GMCH_DATA_N); 268 - I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->regfile.savePIPEB_GMCH_DATA_N); 269 - I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->regfile.savePIPEA_DP_LINK_M); 270 - I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->regfile.savePIPEB_DP_LINK_M); 271 - I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->regfile.savePIPEA_DP_LINK_N); 272 - I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->regfile.savePIPEB_DP_LINK_N); 273 - } 274 - } 275 - 276 - /* This is only meaningful in non-KMS mode */ 277 - /* Don't restore them in KMS mode */ 278 - i915_restore_modeset_reg(dev); 713 + if (!drm_core_check_feature(dev, DRIVER_MODESET)) 714 + i915_restore_display_reg(dev); 279 715 280 716 /* LVDS state */ 281 717 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) ··· 299 763 I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL); 300 764 } 301 765 302 - if (!drm_core_check_feature(dev, DRIVER_MODESET)) { 303 - /* Display Port state */ 304 - if (SUPPORTS_INTEGRATED_DP(dev)) { 305 - I915_WRITE(DP_B, dev_priv->regfile.saveDP_B); 306 - I915_WRITE(DP_C, dev_priv->regfile.saveDP_C); 307 - I915_WRITE(DP_D, dev_priv->regfile.saveDP_D); 308 - } 309 - /* FIXME: restore TV & SDVO state */ 310 - } 311 - 312 766 /* only restore FBC info on the platform that supports FBC*/ 313 767 intel_disable_fbc(dev); 314 768 if (I915_HAS_FBC(dev)) { ··· 313 787 I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL); 314 788 } 315 789 } 316 - /* VGA state */ 317 - if (HAS_PCH_SPLIT(dev)) 318 - I915_WRITE(CPU_VGACNTRL, dev_priv->regfile.saveVGACNTRL); 790 + 791 + if (!drm_core_check_feature(dev, DRIVER_MODESET)) 792 + i915_restore_vga(dev); 319 793 else 320 - I915_WRITE(VGACNTRL, dev_priv->regfile.saveVGACNTRL); 321 - 322 - I915_WRITE(VGA0, dev_priv->regfile.saveVGA0); 323 - I915_WRITE(VGA1, dev_priv->regfile.saveVGA1); 324 - I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD); 325 - POSTING_READ(VGA_PD); 326 - udelay(150); 327 - 328 - i915_restore_vga(dev); 794 + i915_redisable_vga(dev); 329 795 } 330 796 331 797 int i915_save_state(struct drm_device *dev)
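After this refactor the save/restore paths keep only the state that matters under KMS and punt everything UMS-only to i915_save_display_reg()/i915_restore_display_reg() in the new i915_ums.c below. The VGA control register is now picked through i915_vgacntrl_reg(); given the HAS_PCH_SPLIT() ? CPU_VGACNTRL : VGACNTRL choice the deleted lines used, plus the new VLV_VGACNTRL definition in i915_reg.h, that helper presumably reduces to a three-way platform switch — a sketch, with the Valley View branch being the inferred part:

/* Sketch: one place to pick the right VGACNTRL instance, instead of
 * open-coding the platform check at every save/restore site. */
static inline u32 i915_vgacntrl_reg(struct drm_device *dev)
{
	if (IS_VALLEYVIEW(dev))
		return VLV_VGACNTRL;
	else if (HAS_PCH_SPLIT(dev))
		return CPU_VGACNTRL;
	else
		return VGACNTRL;
}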
+503
drivers/gpu/drm/i915/i915_ums.c
··· 1 + /* 2 + * 3 + * Copyright 2008 (c) Intel Corporation 4 + * Jesse Barnes <jbarnes@virtuousgeek.org> 5 + * Copyright 2013 (c) Intel Corporation 6 + * Daniel Vetter <daniel.vetter@ffwll.ch> 7 + * 8 + * Permission is hereby granted, free of charge, to any person obtaining a 9 + * copy of this software and associated documentation files (the 10 + * "Software"), to deal in the Software without restriction, including 11 + * without limitation the rights to use, copy, modify, merge, publish, 12 + * distribute, sub license, and/or sell copies of the Software, and to 13 + * permit persons to whom the Software is furnished to do so, subject to 14 + * the following conditions: 15 + * 16 + * The above copyright notice and this permission notice (including the 17 + * next paragraph) shall be included in all copies or substantial portions 18 + * of the Software. 19 + * 20 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 21 + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 22 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 23 + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 24 + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 25 + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 26 + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 27 + */ 28 + 29 + #include <drm/drmP.h> 30 + #include <drm/i915_drm.h> 31 + #include "intel_drv.h" 32 + #include "i915_reg.h" 33 + 34 + static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) 35 + { 36 + struct drm_i915_private *dev_priv = dev->dev_private; 37 + u32 dpll_reg; 38 + 39 + /* On IVB, 3rd pipe shares PLL with another one */ 40 + if (pipe > 1) 41 + return false; 42 + 43 + if (HAS_PCH_SPLIT(dev)) 44 + dpll_reg = _PCH_DPLL(pipe); 45 + else 46 + dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B; 47 + 48 + return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE); 49 + } 50 + 51 + static void i915_save_palette(struct drm_device *dev, enum pipe pipe) 52 + { 53 + struct drm_i915_private *dev_priv = dev->dev_private; 54 + unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B); 55 + u32 *array; 56 + int i; 57 + 58 + if (!i915_pipe_enabled(dev, pipe)) 59 + return; 60 + 61 + if (HAS_PCH_SPLIT(dev)) 62 + reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B; 63 + 64 + if (pipe == PIPE_A) 65 + array = dev_priv->regfile.save_palette_a; 66 + else 67 + array = dev_priv->regfile.save_palette_b; 68 + 69 + for (i = 0; i < 256; i++) 70 + array[i] = I915_READ(reg + (i << 2)); 71 + } 72 + 73 + static void i915_restore_palette(struct drm_device *dev, enum pipe pipe) 74 + { 75 + struct drm_i915_private *dev_priv = dev->dev_private; 76 + unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B); 77 + u32 *array; 78 + int i; 79 + 80 + if (!i915_pipe_enabled(dev, pipe)) 81 + return; 82 + 83 + if (HAS_PCH_SPLIT(dev)) 84 + reg = (pipe == PIPE_A) ? 
_LGC_PALETTE_A : _LGC_PALETTE_B; 85 + 86 + if (pipe == PIPE_A) 87 + array = dev_priv->regfile.save_palette_a; 88 + else 89 + array = dev_priv->regfile.save_palette_b; 90 + 91 + for (i = 0; i < 256; i++) 92 + I915_WRITE(reg + (i << 2), array[i]); 93 + } 94 + 95 + void i915_save_display_reg(struct drm_device *dev) 96 + { 97 + struct drm_i915_private *dev_priv = dev->dev_private; 98 + int i; 99 + 100 + /* Cursor state */ 101 + dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR); 102 + dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS); 103 + dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE); 104 + dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR); 105 + dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS); 106 + dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE); 107 + if (IS_GEN2(dev)) 108 + dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE); 109 + 110 + if (HAS_PCH_SPLIT(dev)) { 111 + dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); 112 + dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); 113 + } 114 + 115 + /* Pipe & plane A info */ 116 + dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF); 117 + dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC); 118 + if (HAS_PCH_SPLIT(dev)) { 119 + dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0); 120 + dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1); 121 + dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A); 122 + } else { 123 + dev_priv->regfile.saveFPA0 = I915_READ(_FPA0); 124 + dev_priv->regfile.saveFPA1 = I915_READ(_FPA1); 125 + dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A); 126 + } 127 + if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) 128 + dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD); 129 + dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A); 130 + dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A); 131 + dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A); 132 + dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A); 133 + dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A); 134 + dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A); 135 + if (!HAS_PCH_SPLIT(dev)) 136 + dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A); 137 + 138 + if (HAS_PCH_SPLIT(dev)) { 139 + dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1); 140 + dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1); 141 + dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1); 142 + dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1); 143 + 144 + dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL); 145 + dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL); 146 + 147 + dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1); 148 + dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ); 149 + dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS); 150 + 151 + dev_priv->regfile.saveTRANSACONF = I915_READ(_TRANSACONF); 152 + dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A); 153 + dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A); 154 + dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A); 155 + dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A); 156 + dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A); 157 + dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A); 158 + } 159 + 160 + dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR); 161 + dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE); 162 + dev_priv->regfile.saveDSPASIZE = 
I915_READ(_DSPASIZE); 163 + dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS); 164 + dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR); 165 + if (INTEL_INFO(dev)->gen >= 4) { 166 + dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF); 167 + dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF); 168 + } 169 + i915_save_palette(dev, PIPE_A); 170 + dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT); 171 + 172 + /* Pipe & plane B info */ 173 + dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF); 174 + dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC); 175 + if (HAS_PCH_SPLIT(dev)) { 176 + dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0); 177 + dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1); 178 + dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B); 179 + } else { 180 + dev_priv->regfile.saveFPB0 = I915_READ(_FPB0); 181 + dev_priv->regfile.saveFPB1 = I915_READ(_FPB1); 182 + dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B); 183 + } 184 + if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) 185 + dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD); 186 + dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B); 187 + dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B); 188 + dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B); 189 + dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B); 190 + dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B); 191 + dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B); 192 + if (!HAS_PCH_SPLIT(dev)) 193 + dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B); 194 + 195 + if (HAS_PCH_SPLIT(dev)) { 196 + dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1); 197 + dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1); 198 + dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1); 199 + dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1); 200 + 201 + dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL); 202 + dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL); 203 + 204 + dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1); 205 + dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ); 206 + dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS); 207 + 208 + dev_priv->regfile.saveTRANSBCONF = I915_READ(_TRANSBCONF); 209 + dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B); 210 + dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B); 211 + dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B); 212 + dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B); 213 + dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B); 214 + dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B); 215 + } 216 + 217 + dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR); 218 + dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE); 219 + dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE); 220 + dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS); 221 + dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR); 222 + if (INTEL_INFO(dev)->gen >= 4) { 223 + dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF); 224 + dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF); 225 + } 226 + i915_save_palette(dev, PIPE_B); 227 + dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT); 228 + 229 + /* Fences */ 230 + switch (INTEL_INFO(dev)->gen) { 231 + case 7: 232 + case 6: 233 + for (i = 0; i < 16; i++) 234 + dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); 235 + break; 236 + case 5: 
237 + case 4: 238 + for (i = 0; i < 16; i++) 239 + dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); 240 + break; 241 + case 3: 242 + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 243 + for (i = 0; i < 8; i++) 244 + dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); 245 + case 2: 246 + for (i = 0; i < 8; i++) 247 + dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); 248 + break; 249 + } 250 + 251 + /* CRT state */ 252 + if (HAS_PCH_SPLIT(dev)) 253 + dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA); 254 + else 255 + dev_priv->regfile.saveADPA = I915_READ(ADPA); 256 + 257 + /* Display Port state */ 258 + if (SUPPORTS_INTEGRATED_DP(dev)) { 259 + dev_priv->regfile.saveDP_B = I915_READ(DP_B); 260 + dev_priv->regfile.saveDP_C = I915_READ(DP_C); 261 + dev_priv->regfile.saveDP_D = I915_READ(DP_D); 262 + dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M); 263 + dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M); 264 + dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N); 265 + dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N); 266 + dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M); 267 + dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M); 268 + dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N); 269 + dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N); 270 + } 271 + /* FIXME: regfile.save TV & SDVO state */ 272 + 273 + return; 274 + } 275 + 276 + void i915_restore_display_reg(struct drm_device *dev) 277 + { 278 + struct drm_i915_private *dev_priv = dev->dev_private; 279 + int dpll_a_reg, fpa0_reg, fpa1_reg; 280 + int dpll_b_reg, fpb0_reg, fpb1_reg; 281 + int i; 282 + 283 + /* Display port ratios (must be done before clock is set) */ 284 + if (SUPPORTS_INTEGRATED_DP(dev)) { 285 + I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->regfile.savePIPEA_GMCH_DATA_M); 286 + I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->regfile.savePIPEB_GMCH_DATA_M); 287 + I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->regfile.savePIPEA_GMCH_DATA_N); 288 + I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->regfile.savePIPEB_GMCH_DATA_N); 289 + I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->regfile.savePIPEA_DP_LINK_M); 290 + I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->regfile.savePIPEB_DP_LINK_M); 291 + I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->regfile.savePIPEA_DP_LINK_N); 292 + I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->regfile.savePIPEB_DP_LINK_N); 293 + } 294 + 295 + /* Fences */ 296 + switch (INTEL_INFO(dev)->gen) { 297 + case 7: 298 + case 6: 299 + for (i = 0; i < 16; i++) 300 + I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]); 301 + break; 302 + case 5: 303 + case 4: 304 + for (i = 0; i < 16; i++) 305 + I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]); 306 + break; 307 + case 3: 308 + case 2: 309 + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 310 + for (i = 0; i < 8; i++) 311 + I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]); 312 + for (i = 0; i < 8; i++) 313 + I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]); 314 + break; 315 + } 316 + 317 + 318 + if (HAS_PCH_SPLIT(dev)) { 319 + dpll_a_reg = _PCH_DPLL_A; 320 + dpll_b_reg = _PCH_DPLL_B; 321 + fpa0_reg = _PCH_FPA0; 322 + fpb0_reg = _PCH_FPB0; 323 + fpa1_reg = _PCH_FPA1; 324 + fpb1_reg = _PCH_FPB1; 325 + } else { 326 + dpll_a_reg = _DPLL_A; 327 + dpll_b_reg = _DPLL_B; 328 + fpa0_reg = _FPA0; 329 
+ fpb0_reg = _FPB0; 330 + fpa1_reg = _FPA1; 331 + fpb1_reg = _FPB1; 332 + } 333 + 334 + if (HAS_PCH_SPLIT(dev)) { 335 + I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL); 336 + I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL); 337 + } 338 + 339 + /* Pipe & plane A info */ 340 + /* Prime the clock */ 341 + if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) { 342 + I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A & 343 + ~DPLL_VCO_ENABLE); 344 + POSTING_READ(dpll_a_reg); 345 + udelay(150); 346 + } 347 + I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0); 348 + I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1); 349 + /* Actually enable it */ 350 + I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A); 351 + POSTING_READ(dpll_a_reg); 352 + udelay(150); 353 + if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { 354 + I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD); 355 + POSTING_READ(_DPLL_A_MD); 356 + } 357 + udelay(150); 358 + 359 + /* Restore mode */ 360 + I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A); 361 + I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A); 362 + I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A); 363 + I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A); 364 + I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A); 365 + I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A); 366 + if (!HAS_PCH_SPLIT(dev)) 367 + I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A); 368 + 369 + if (HAS_PCH_SPLIT(dev)) { 370 + I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1); 371 + I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1); 372 + I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1); 373 + I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1); 374 + 375 + I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL); 376 + I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL); 377 + 378 + I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1); 379 + I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ); 380 + I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS); 381 + 382 + I915_WRITE(_TRANSACONF, dev_priv->regfile.saveTRANSACONF); 383 + I915_WRITE(_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A); 384 + I915_WRITE(_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A); 385 + I915_WRITE(_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A); 386 + I915_WRITE(_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A); 387 + I915_WRITE(_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A); 388 + I915_WRITE(_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A); 389 + } 390 + 391 + /* Restore plane info */ 392 + I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE); 393 + I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS); 394 + I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC); 395 + I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR); 396 + I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE); 397 + if (INTEL_INFO(dev)->gen >= 4) { 398 + I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF); 399 + I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF); 400 + } 401 + 402 + I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF); 403 + 404 + i915_restore_palette(dev, PIPE_A); 405 + /* Enable the plane */ 406 + I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR); 407 + I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR)); 408 + 409 + /* Pipe & plane B info */ 410 + if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) { 411 + I915_WRITE(dpll_b_reg, 
dev_priv->regfile.saveDPLL_B & 412 + ~DPLL_VCO_ENABLE); 413 + POSTING_READ(dpll_b_reg); 414 + udelay(150); 415 + } 416 + I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0); 417 + I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1); 418 + /* Actually enable it */ 419 + I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B); 420 + POSTING_READ(dpll_b_reg); 421 + udelay(150); 422 + if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { 423 + I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD); 424 + POSTING_READ(_DPLL_B_MD); 425 + } 426 + udelay(150); 427 + 428 + /* Restore mode */ 429 + I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B); 430 + I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B); 431 + I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B); 432 + I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B); 433 + I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B); 434 + I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B); 435 + if (!HAS_PCH_SPLIT(dev)) 436 + I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B); 437 + 438 + if (HAS_PCH_SPLIT(dev)) { 439 + I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1); 440 + I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1); 441 + I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1); 442 + I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1); 443 + 444 + I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL); 445 + I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL); 446 + 447 + I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1); 448 + I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ); 449 + I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS); 450 + 451 + I915_WRITE(_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF); 452 + I915_WRITE(_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B); 453 + I915_WRITE(_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B); 454 + I915_WRITE(_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B); 455 + I915_WRITE(_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B); 456 + I915_WRITE(_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B); 457 + I915_WRITE(_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B); 458 + } 459 + 460 + /* Restore plane info */ 461 + I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE); 462 + I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS); 463 + I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC); 464 + I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR); 465 + I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE); 466 + if (INTEL_INFO(dev)->gen >= 4) { 467 + I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF); 468 + I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF); 469 + } 470 + 471 + I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF); 472 + 473 + i915_restore_palette(dev, PIPE_B); 474 + /* Enable the plane */ 475 + I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR); 476 + I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR)); 477 + 478 + /* Cursor state */ 479 + I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS); 480 + I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR); 481 + I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE); 482 + I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS); 483 + I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR); 484 + I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE); 485 + if (IS_GEN2(dev)) 486 + I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE); 487 + 488 + /* CRT state */ 489 + if (HAS_PCH_SPLIT(dev)) 490 + I915_WRITE(PCH_ADPA, 
dev_priv->regfile.saveADPA); 491 + else 492 + I915_WRITE(ADPA, dev_priv->regfile.saveADPA); 493 + 494 + /* Display Port state */ 495 + if (SUPPORTS_INTEGRATED_DP(dev)) { 496 + I915_WRITE(DP_B, dev_priv->regfile.saveDP_B); 497 + I915_WRITE(DP_C, dev_priv->regfile.saveDP_C); 498 + I915_WRITE(DP_D, dev_priv->regfile.saveDP_D); 499 + } 500 + /* FIXME: restore TV & SDVO state */ 501 + 502 + return; 503 + }
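Both pipe restores in i915_restore_display_reg() follow the same DPLL bring-up dance. A condensed sketch of that sequence (the register variables stand for whichever per-pipe DPLL/FP registers were selected just above; nothing here is new API):

    /* Prime the PLL: program it with the VCO still off so the divider
     * writes below land on a quiesced PLL. */
    if (saved_dpll & DPLL_VCO_ENABLE) {
            I915_WRITE(dpll_reg, saved_dpll & ~DPLL_VCO_ENABLE);
            POSTING_READ(dpll_reg);
            udelay(150);                    /* let the VCO wind down */
    }
    I915_WRITE(fp0_reg, saved_fp0);         /* dividers */
    I915_WRITE(fp1_reg, saved_fp1);
    I915_WRITE(dpll_reg, saved_dpll);       /* now enable for real */
    POSTING_READ(dpll_reg);
    udelay(150);                            /* VCO lock time */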
+15 -14
drivers/gpu/drm/i915/intel_crt.c
··· 267 267 268 268 crt->force_hotplug_required = 0; 269 269 270 - save_adpa = adpa = I915_READ(PCH_ADPA); 270 + save_adpa = adpa = I915_READ(crt->adpa_reg); 271 271 DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa); 272 272 273 273 adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER; 274 274 if (turn_off_dac) 275 275 adpa &= ~ADPA_DAC_ENABLE; 276 276 277 - I915_WRITE(PCH_ADPA, adpa); 277 + I915_WRITE(crt->adpa_reg, adpa); 278 278 279 - if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, 279 + if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, 280 280 1000)) 281 281 DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); 282 282 283 283 if (turn_off_dac) { 284 - I915_WRITE(PCH_ADPA, save_adpa); 285 - POSTING_READ(PCH_ADPA); 284 + I915_WRITE(crt->adpa_reg, save_adpa); 285 + POSTING_READ(crt->adpa_reg); 286 286 } 287 287 } 288 288 289 289 /* Check the status to see if both blue and green are on now */ 290 - adpa = I915_READ(PCH_ADPA); 290 + adpa = I915_READ(crt->adpa_reg); 291 291 if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0) 292 292 ret = true; 293 293 else ··· 300 300 static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) 301 301 { 302 302 struct drm_device *dev = connector->dev; 303 + struct intel_crt *crt = intel_attached_crt(connector); 303 304 struct drm_i915_private *dev_priv = dev->dev_private; 304 305 u32 adpa; 305 306 bool ret; 306 307 u32 save_adpa; 307 308 308 - save_adpa = adpa = I915_READ(ADPA); 309 + save_adpa = adpa = I915_READ(crt->adpa_reg); 309 310 DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa); 310 311 311 312 adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER; 312 313 313 - I915_WRITE(ADPA, adpa); 314 + I915_WRITE(crt->adpa_reg, adpa); 314 315 315 - if (wait_for((I915_READ(ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, 316 + if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, 316 317 1000)) { 317 318 DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); 318 - I915_WRITE(ADPA, save_adpa); 319 + I915_WRITE(crt->adpa_reg, save_adpa); 319 320 } 320 321 321 322 /* Check the status to see if both blue and green are on now */ 322 - adpa = I915_READ(ADPA); 323 + adpa = I915_READ(crt->adpa_reg); 323 324 if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0) 324 325 ret = true; 325 326 else ··· 666 665 if (HAS_PCH_SPLIT(dev)) { 667 666 u32 adpa; 668 667 669 - adpa = I915_READ(PCH_ADPA); 668 + adpa = I915_READ(crt->adpa_reg); 670 669 adpa &= ~ADPA_CRT_HOTPLUG_MASK; 671 670 adpa |= ADPA_HOTPLUG_BITS; 672 - I915_WRITE(PCH_ADPA, adpa); 673 - POSTING_READ(PCH_ADPA); 671 + I915_WRITE(crt->adpa_reg, adpa); 672 + POSTING_READ(crt->adpa_reg); 674 673 675 674 DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa); 676 675 crt->force_hotplug_required = 1;
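Every hotplug path above now goes through crt->adpa_reg instead of hard-coding PCH_ADPA or ADPA, which is what lets the PCH and Valleyview detect functions share code. One plausible one-time setup (a sketch; the actual initialization lives outside this hunk, and platform cases beyond these two are omitted):

    /* Pick the ADPA register once at encoder init; every later
     * read/write in the hotplug code is then platform-agnostic. */
    if (HAS_PCH_SPLIT(dev))
            crt->adpa_reg = PCH_ADPA;
    else
            crt->adpa_reg = ADPA;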
+28 -1
drivers/gpu/drm/i915/intel_ddi.c
··· 677 677 DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n", 678 678 port_name(port), pipe_name(pipe)); 679 679 680 + intel_crtc->eld_vld = false; 680 681 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { 681 682 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 682 683 ··· 988 987 if (cpu_transcoder == TRANSCODER_EDP) { 989 988 switch (pipe) { 990 989 case PIPE_A: 991 - temp |= TRANS_DDI_EDP_INPUT_A_ONOFF; 990 + /* Can only use the always-on power well for eDP when 991 + * not using the panel fitter, and when not using motion 992 + * blur mitigation (which we don't support). */ 993 + if (dev_priv->pch_pf_size) 994 + temp |= TRANS_DDI_EDP_INPUT_A_ONOFF; 995 + else 996 + temp |= TRANS_DDI_EDP_INPUT_A_ON; 992 997 break; 993 998 case PIPE_B: 994 999 temp |= TRANS_DDI_EDP_INPUT_B_ONOFF; ··· 1294 1287 static void intel_enable_ddi(struct intel_encoder *intel_encoder) 1295 1288 { 1296 1289 struct drm_encoder *encoder = &intel_encoder->base; 1290 + struct drm_crtc *crtc = encoder->crtc; 1291 + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1292 + int pipe = intel_crtc->pipe; 1297 1293 struct drm_device *dev = encoder->dev; 1298 1294 struct drm_i915_private *dev_priv = dev->dev_private; 1299 1295 enum port port = intel_ddi_get_encoder_port(intel_encoder); 1300 1296 int type = intel_encoder->type; 1297 + uint32_t tmp; 1301 1298 1302 1299 if (type == INTEL_OUTPUT_HDMI) { 1303 1300 /* In HDMI/DVI mode, the port width, and swing/emphasis values ··· 1314 1303 1315 1304 ironlake_edp_backlight_on(intel_dp); 1316 1305 } 1306 + 1307 + if (intel_crtc->eld_vld) { 1308 + tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); 1309 + tmp |= ((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4)); 1310 + I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp); 1311 + } 1317 1312 } 1318 1313 1319 1314 static void intel_disable_ddi(struct intel_encoder *intel_encoder) 1320 1315 { 1321 1316 struct drm_encoder *encoder = &intel_encoder->base; 1317 + struct drm_crtc *crtc = encoder->crtc; 1318 + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1319 + int pipe = intel_crtc->pipe; 1322 1320 int type = intel_encoder->type; 1321 + struct drm_device *dev = encoder->dev; 1322 + struct drm_i915_private *dev_priv = dev->dev_private; 1323 + uint32_t tmp; 1323 1324 1324 1325 if (type == INTEL_OUTPUT_EDP) { 1325 1326 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1326 1327 1327 1328 ironlake_edp_backlight_off(intel_dp); 1328 1329 } 1330 + 1331 + tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); 1332 + tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4)); 1333 + I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp); 1329 1334 } 1330 1335 1331 1336 int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
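The enable/disable paths above rely on HSW_AUD_PIN_ELD_CP_VLD packing one 4-bit field per pipe, so the pipe-A bit names can be reused for pipes B and C by shifting. Sketch of the layout implied by the pipe * 4 shift:

    /* Per-pipe audio bits in HSW_AUD_PIN_ELD_CP_VLD: pipe A in bits
     * 3:0, pipe B in 7:4, pipe C in 11:8 (assumed from the shift
     * used above). */
    static uint32_t hsw_eld_bits(enum pipe pipe)
    {
            return (AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4);
    }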
+99 -61
drivers/gpu/drm/i915/intel_display.c
··· 1214 1214 if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) 1215 1215 state = true; 1216 1216 1217 - reg = PIPECONF(cpu_transcoder); 1218 - val = I915_READ(reg); 1219 - cur_state = !!(val & PIPECONF_ENABLE); 1217 + if (IS_HASWELL(dev_priv->dev) && cpu_transcoder != TRANSCODER_EDP && 1218 + !(I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_ENABLE)) { 1219 + cur_state = false; 1220 + } else { 1221 + reg = PIPECONF(cpu_transcoder); 1222 + val = I915_READ(reg); 1223 + cur_state = !!(val & PIPECONF_ENABLE); 1224 + } 1225 + 1220 1226 WARN(cur_state != state, 1221 1227 "pipe %c assertion failure (expected %s, current %s)\n", 1222 1228 pipe_name(pipe), state_string(state), state_string(cur_state)); ··· 2226 2220 bool was_interruptible = dev_priv->mm.interruptible; 2227 2221 int ret; 2228 2222 2223 + WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue)); 2224 + 2229 2225 wait_event(dev_priv->pending_flip_queue, 2230 - atomic_read(&dev_priv->mm.wedged) || 2226 + i915_reset_in_progress(&dev_priv->gpu_error) || 2231 2227 atomic_read(&obj->pending_flip) == 0); 2232 2228 2233 2229 /* Big Hammer, we also need to ensure that any pending ··· 2877 2869 unsigned long flags; 2878 2870 bool pending; 2879 2871 2880 - if (atomic_read(&dev_priv->mm.wedged)) 2872 + if (i915_reset_in_progress(&dev_priv->gpu_error)) 2881 2873 return false; 2882 2874 2883 2875 spin_lock_irqsave(&dev->event_lock, flags); ··· 2894 2886 2895 2887 if (crtc->fb == NULL) 2896 2888 return; 2889 + 2890 + WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue)); 2897 2891 2898 2892 wait_event(dev_priv->pending_flip_queue, 2899 2893 !intel_crtc_has_pending_flip(crtc)); ··· 3727 3717 struct drm_device *dev = crtc->dev; 3728 3718 struct drm_connector *connector; 3729 3719 struct drm_i915_private *dev_priv = dev->dev_private; 3720 + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3730 3721 3731 3722 /* crtc should still be enabled when we disable it. */ 3732 3723 WARN_ON(!crtc->enabled); 3733 3724 3725 + intel_crtc->eld_vld = false; 3734 3726 dev_priv->display.crtc_disable(crtc); 3735 3727 intel_crtc_update_sarea(crtc, false); 3736 3728 dev_priv->display.off(crtc); ··· 4879 4867 if (!has_vga) 4880 4868 return; 4881 4869 4870 + mutex_lock(&dev_priv->dpio_lock); 4871 + 4882 4872 /* XXX: Rip out SDV support once Haswell ships for real. */ 4883 4873 if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00) 4884 4874 is_sdv = true; ··· 5023 5009 tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK); 5024 5010 tmp |= SBI_DBUFF0_ENABLE; 5025 5011 intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK); 5012 + 5013 + mutex_unlock(&dev_priv->dpio_lock); 5026 5014 } 5027 5015 5028 5016 /* ··· 5107 5091 val |= PIPECONF_INTERLACED_ILK; 5108 5092 else 5109 5093 val |= PIPECONF_PROGRESSIVE; 5094 + 5095 + if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE) 5096 + val |= PIPECONF_COLOR_RANGE_SELECT; 5097 + else 5098 + val &= ~PIPECONF_COLOR_RANGE_SELECT; 5110 5099 5111 5100 I915_WRITE(PIPECONF(pipe), val); 5112 5101 POSTING_READ(PIPECONF(pipe)); ··· 5607 5586 return fdi_config_ok ? 
ret : -EINVAL; 5608 5587 } 5609 5588 5589 + static void haswell_modeset_global_resources(struct drm_device *dev) 5590 + { 5591 + struct drm_i915_private *dev_priv = dev->dev_private; 5592 + bool enable = false; 5593 + struct intel_crtc *crtc; 5594 + struct intel_encoder *encoder; 5595 + 5596 + list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { 5597 + if (crtc->pipe != PIPE_A && crtc->base.enabled) 5598 + enable = true; 5599 + /* XXX: Should check for edp transcoder here, but thanks to init 5600 + * sequence that's not yet available. Just in case desktop eDP 5601 + * on PORT D is possible on haswell, too. */ 5602 + } 5603 + 5604 + list_for_each_entry(encoder, &dev->mode_config.encoder_list, 5605 + base.head) { 5606 + if (encoder->type != INTEL_OUTPUT_EDP && 5607 + encoder->connectors_active) 5608 + enable = true; 5609 + } 5610 + 5611 + /* Even the eDP panel fitter is outside the always-on well. */ 5612 + if (dev_priv->pch_pf_size) 5613 + enable = true; 5614 + 5615 + intel_set_power_well(dev, enable); 5616 + } 5617 + 5610 5618 static int haswell_crtc_mode_set(struct drm_crtc *crtc, 5611 5619 struct drm_display_mode *mode, 5612 5620 struct drm_display_mode *adjusted_mode, ··· 5667 5617 5668 5618 num_connectors++; 5669 5619 } 5670 - 5671 - if (is_cpu_edp) 5672 - intel_crtc->cpu_transcoder = TRANSCODER_EDP; 5673 - else 5674 - intel_crtc->cpu_transcoder = pipe; 5675 5620 5676 5621 /* We are not sure yet this won't happen. */ 5677 5622 WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n", ··· 5731 5686 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5732 5687 int pipe = intel_crtc->pipe; 5733 5688 int ret; 5689 + 5690 + if (IS_HASWELL(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) 5691 + intel_crtc->cpu_transcoder = TRANSCODER_EDP; 5692 + else 5693 + intel_crtc->cpu_transcoder = pipe; 5734 5694 5735 5695 drm_vblank_pre_modeset(dev, pipe); 5736 5696 ··· 5833 5783 struct drm_i915_private *dev_priv = connector->dev->dev_private; 5834 5784 uint8_t *eld = connector->eld; 5835 5785 struct drm_device *dev = crtc->dev; 5786 + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5836 5787 uint32_t eldv; 5837 5788 uint32_t i; 5838 5789 int len; ··· 5875 5824 DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe)); 5876 5825 5877 5826 eldv = AUDIO_ELD_VALID_A << (pipe * 4); 5827 + intel_crtc->eld_vld = true; 5878 5828 5879 5829 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { 5880 5830 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); ··· 6769 6717 6770 6718 void intel_mark_idle(struct drm_device *dev) 6771 6719 { 6720 + struct drm_crtc *crtc; 6721 + 6722 + if (!i915_powersave) 6723 + return; 6724 + 6725 + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 6726 + if (!crtc->fb) 6727 + continue; 6728 + 6729 + intel_decrease_pllclock(crtc); 6730 + } 6772 6731 } 6773 6732 6774 6733 void intel_mark_fb_busy(struct drm_i915_gem_object *obj) ··· 6796 6733 6797 6734 if (to_intel_framebuffer(crtc->fb)->obj == obj) 6798 6735 intel_increase_pllclock(crtc); 6799 - } 6800 - } 6801 - 6802 - void intel_mark_fb_idle(struct drm_i915_gem_object *obj) 6803 - { 6804 - struct drm_device *dev = obj->base.dev; 6805 - struct drm_crtc *crtc; 6806 - 6807 - if (!i915_powersave) 6808 - return; 6809 - 6810 - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 6811 - if (!crtc->fb) 6812 - continue; 6813 - 6814 - if (to_intel_framebuffer(crtc->fb)->obj == obj) 6815 - intel_decrease_pllclock(crtc); 6816 6736 } 6817 6737 } 6818 6738 ··· 6879 6833 6880 6834 obj = work->old_fb_obj; 6881 
6835 6882 - wake_up(&dev_priv->pending_flip_queue); 6836 + wake_up_all(&dev_priv->pending_flip_queue); 6883 6837 6884 6838 queue_work(dev_priv->wq, &work->work); 6885 6839 ··· 8265 8219 if (I915_READ(PCH_DP_D) & DP_DETECTED) 8266 8220 intel_dp_init(dev, PCH_DP_D, PORT_D); 8267 8221 } else if (IS_VALLEYVIEW(dev)) { 8268 - int found; 8269 - 8270 8222 /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */ 8271 - if (I915_READ(DP_C) & DP_DETECTED) 8272 - intel_dp_init(dev, DP_C, PORT_C); 8223 + if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED) 8224 + intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C); 8273 8225 8274 - if (I915_READ(SDVOB) & PORT_DETECTED) { 8275 - /* SDVOB multiplex with HDMIB */ 8276 - found = intel_sdvo_init(dev, SDVOB, true); 8277 - if (!found) 8278 - intel_hdmi_init(dev, SDVOB, PORT_B); 8279 - if (!found && (I915_READ(DP_B) & DP_DETECTED)) 8280 - intel_dp_init(dev, DP_B, PORT_B); 8226 + if (I915_READ(VLV_DISPLAY_BASE + SDVOB) & PORT_DETECTED) { 8227 + intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOB, PORT_B); 8228 + if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED) 8229 + intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B); 8281 8230 } 8282 8231 8283 - if (I915_READ(SDVOC) & PORT_DETECTED) 8284 - intel_hdmi_init(dev, SDVOC, PORT_C); 8232 + if (I915_READ(VLV_DISPLAY_BASE + SDVOC) & PORT_DETECTED) 8233 + intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOC, PORT_C); 8285 8234 8286 8235 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { 8287 8236 bool found = false; ··· 8536 8495 } else if (IS_HASWELL(dev)) { 8537 8496 dev_priv->display.fdi_link_train = hsw_fdi_link_train; 8538 8497 dev_priv->display.write_eld = haswell_write_eld; 8498 + dev_priv->display.modeset_global_resources = 8499 + haswell_modeset_global_resources; 8539 8500 } 8540 8501 } else if (IS_G4X(dev)) { 8541 8502 dev_priv->display.write_eld = g4x_write_eld; ··· 8660 8617 8661 8618 /* Acer Aspire 5734Z must invert backlight brightness */ 8662 8619 { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness }, 8620 + 8621 + /* Acer/eMachines G725 */ 8622 + { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness }, 8623 + 8624 + /* Acer/eMachines e725 */ 8625 + { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness }, 8626 + 8627 + /* Acer/Packard Bell NCL20 */ 8628 + { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness }, 8663 8629 }; 8664 8630 8665 8631 static void intel_init_quirks(struct drm_device *dev) ··· 8697 8645 { 8698 8646 struct drm_i915_private *dev_priv = dev->dev_private; 8699 8647 u8 sr1; 8700 - u32 vga_reg; 8701 - 8702 - if (HAS_PCH_SPLIT(dev)) 8703 - vga_reg = CPU_VGACNTRL; 8704 - else 8705 - vga_reg = VGACNTRL; 8648 + u32 vga_reg = i915_vgacntrl_reg(dev); 8706 8649 8707 8650 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); 8708 8651 outb(SR01, VGA_SR_INDEX); ··· 8712 8665 8713 8666 void intel_modeset_init_hw(struct drm_device *dev) 8714 8667 { 8715 - /* We attempt to init the necessary power wells early in the initialization 8716 - * time, so the subsystems that expect power to be enabled can work. 8717 - */ 8718 - intel_init_power_wells(dev); 8668 + intel_init_power_well(dev); 8719 8669 8720 8670 intel_prepare_ddi(dev); 8721 8671 ··· 8754 8710 dev->mode_config.max_width = 8192; 8755 8711 dev->mode_config.max_height = 8192; 8756 8712 } 8757 - dev->mode_config.fb_base = dev_priv->mm.gtt_base_addr; 8713 + dev->mode_config.fb_base = dev_priv->gtt.mappable_base; 8758 8714 8759 8715 DRM_DEBUG_KMS("%d display pipe%s available.\n", 8760 8716 dev_priv->num_pipe, dev_priv->num_pipe > 1 ? 
"s" : ""); ··· 8956 8912 * the crtc fixup. */ 8957 8913 } 8958 8914 8959 - static void i915_redisable_vga(struct drm_device *dev) 8915 + void i915_redisable_vga(struct drm_device *dev) 8960 8916 { 8961 8917 struct drm_i915_private *dev_priv = dev->dev_private; 8962 - u32 vga_reg; 8963 - 8964 - if (HAS_PCH_SPLIT(dev)) 8965 - vga_reg = CPU_VGACNTRL; 8966 - else 8967 - vga_reg = VGACNTRL; 8918 + u32 vga_reg = i915_vgacntrl_reg(dev); 8968 8919 8969 8920 if (I915_READ(vga_reg) != VGA_DISP_DISABLE) { 8970 8921 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); 8971 - I915_WRITE(vga_reg, VGA_DISP_DISABLE); 8972 - POSTING_READ(vga_reg); 8922 + i915_disable_vga(dev); 8973 8923 } 8974 8924 } 8975 8925
+49 -15
drivers/gpu/drm/i915/intel_dp.c
··· 763 763 return false; 764 764 765 765 bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24; 766 + 767 + if (intel_dp->color_range_auto) { 768 + /* 769 + * See: 770 + * CEA-861-E - 5.1 Default Encoding Parameters 771 + * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry 772 + */ 773 + if (bpp != 18 && drm_mode_cea_vic(adjusted_mode) > 1) 774 + intel_dp->color_range = DP_COLOR_RANGE_16_235; 775 + else 776 + intel_dp->color_range = 0; 777 + } 778 + 779 + if (intel_dp->color_range) 780 + adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE; 781 + 766 782 mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp); 767 783 768 784 for (clock = 0; clock <= max_clock; clock++) { ··· 983 967 else 984 968 intel_dp->DP |= DP_PLL_FREQ_270MHZ; 985 969 } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) { 986 - intel_dp->DP |= intel_dp->color_range; 970 + if (!HAS_PCH_SPLIT(dev)) 971 + intel_dp->DP |= intel_dp->color_range; 987 972 988 973 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 989 974 intel_dp->DP |= DP_SYNC_HS_HIGH; ··· 1787 1770 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; 1788 1771 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 1789 1772 case DP_TRAINING_PATTERN_DISABLE: 1790 - temp |= DP_TP_CTL_LINK_TRAIN_IDLE; 1791 - I915_WRITE(DP_TP_CTL(port), temp); 1792 1773 1793 - if (wait_for((I915_READ(DP_TP_STATUS(port)) & 1794 - DP_TP_STATUS_IDLE_DONE), 1)) 1795 - DRM_ERROR("Timed out waiting for DP idle patterns\n"); 1774 + if (port != PORT_A) { 1775 + temp |= DP_TP_CTL_LINK_TRAIN_IDLE; 1776 + I915_WRITE(DP_TP_CTL(port), temp); 1796 1777 1797 - temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; 1778 + if (wait_for((I915_READ(DP_TP_STATUS(port)) & 1779 + DP_TP_STATUS_IDLE_DONE), 1)) 1780 + DRM_ERROR("Timed out waiting for DP idle patterns\n"); 1781 + 1782 + temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; 1783 + } 1784 + 1798 1785 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; 1799 1786 1800 1787 break; ··· 2297 2276 { 2298 2277 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2299 2278 struct drm_i915_private *dev_priv = dev->dev_private; 2279 + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2300 2280 uint32_t bit; 2301 2281 2302 - switch (intel_dp->output_reg) { 2303 - case DP_B: 2282 + switch (intel_dig_port->port) { 2283 + case PORT_B: 2304 2284 bit = DPB_HOTPLUG_LIVE_STATUS; 2305 2285 break; 2306 - case DP_C: 2286 + case PORT_C: 2307 2287 bit = DPC_HOTPLUG_LIVE_STATUS; 2308 2288 break; 2309 - case DP_D: 2289 + case PORT_D: 2310 2290 bit = DPD_HOTPLUG_LIVE_STATUS; 2311 2291 break; 2312 2292 default: ··· 2481 2459 } 2482 2460 2483 2461 if (property == dev_priv->broadcast_rgb_property) { 2484 - if (val == !!intel_dp->color_range) 2485 - return 0; 2486 - 2487 - intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0; 2462 + switch (val) { 2463 + case INTEL_BROADCAST_RGB_AUTO: 2464 + intel_dp->color_range_auto = true; 2465 + break; 2466 + case INTEL_BROADCAST_RGB_FULL: 2467 + intel_dp->color_range_auto = false; 2468 + intel_dp->color_range = 0; 2469 + break; 2470 + case INTEL_BROADCAST_RGB_LIMITED: 2471 + intel_dp->color_range_auto = false; 2472 + intel_dp->color_range = DP_COLOR_RANGE_16_235; 2473 + break; 2474 + default: 2475 + return -EINVAL; 2476 + } 2488 2477 goto done; 2489 2478 } 2490 2479 ··· 2636 2603 2637 2604 intel_attach_force_audio_property(connector); 2638 2605 intel_attach_broadcast_rgb_property(connector); 2606 + intel_dp->color_range_auto = true; 2639 2607 2640 2608 if (is_edp(intel_dp)) { 2641 2609 drm_mode_create_scaling_mode_property(connector->dev);
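The "Automatic" policy added above follows CEA-861-E 5.1 (and DP 1.2a 5.1.1.1): CEA modes other than 640x480 (VIC 1) default to limited 16:235 RGB, while non-CEA modes and the forced 18 bpp (6 bpc) case stay full range. The rule, distilled into a sketch:

    static uint32_t dp_auto_color_range(int bpp, u8 vic)
    {
            /* vic = drm_mode_cea_vic(adjusted_mode); 0 means non-CEA. */
            if (bpp != 18 && vic > 1)
                    return DP_COLOR_RANGE_16_235;
            return 0;       /* full range */
    }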
+15 -3
drivers/gpu/drm/i915/intel_drv.h
··· 109 109 * timings in the mode to prevent the crtc fixup from overwriting them. 110 110 * Currently only lvds needs that. */ 111 111 #define INTEL_MODE_CRTC_TIMINGS_SET (0x20) 112 + /* 113 + * Set when limited 16-235 (as opposed to full 0-255) RGB color range is 114 + * to be used. 115 + */ 116 + #define INTEL_MODE_LIMITED_COLOR_RANGE (0x40) 112 117 113 118 static inline void 114 119 intel_mode_set_pixel_multiplier(struct drm_display_mode *mode, ··· 211 206 * some outputs connected to this crtc. 212 207 */ 213 208 bool active; 209 + bool eld_vld; 214 210 bool primary_disabled; /* is the crtc obscured by a plane? */ 215 211 bool lowfreq_avail; 216 212 struct intel_overlay *overlay; ··· 290 284 #define DIP_LEN_AVI 13 291 285 #define DIP_AVI_PR_1 0 292 286 #define DIP_AVI_PR_2 1 287 + #define DIP_AVI_RGB_QUANT_RANGE_DEFAULT (0 << 2) 288 + #define DIP_AVI_RGB_QUANT_RANGE_LIMITED (1 << 2) 289 + #define DIP_AVI_RGB_QUANT_RANGE_FULL (2 << 2) 293 290 294 291 #define DIP_TYPE_SPD 0x83 295 292 #define DIP_VERSION_SPD 0x1 ··· 347 338 u32 sdvox_reg; 348 339 int ddc_bus; 349 340 uint32_t color_range; 341 + bool color_range_auto; 350 342 bool has_hdmi_sink; 351 343 bool has_audio; 352 344 enum hdmi_force_audio force_audio; 345 + bool rgb_quant_range_selectable; 353 346 void (*write_infoframe)(struct drm_encoder *encoder, 354 347 struct dip_infoframe *frame); 355 348 void (*set_infoframes)(struct drm_encoder *encoder, ··· 368 357 bool has_audio; 369 358 enum hdmi_force_audio force_audio; 370 359 uint32_t color_range; 360 + bool color_range_auto; 371 361 uint8_t link_bw; 372 362 uint8_t lane_count; 373 363 uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; ··· 452 440 extern void intel_dvo_init(struct drm_device *dev); 453 441 extern void intel_tv_init(struct drm_device *dev); 454 442 extern void intel_mark_busy(struct drm_device *dev); 455 - extern void intel_mark_idle(struct drm_device *dev); 456 443 extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj); 457 - extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj); 444 + extern void intel_mark_idle(struct drm_device *dev); 458 445 extern bool intel_lvds_init(struct drm_device *dev); 459 446 extern bool intel_is_dual_link_lvds(struct drm_device *dev); 460 447 extern void intel_dp_init(struct drm_device *dev, int output_reg, ··· 666 655 extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv); 667 656 extern void intel_gpu_ips_teardown(void); 668 657 669 - extern void intel_init_power_wells(struct drm_device *dev); 658 + extern void intel_init_power_well(struct drm_device *dev); 659 + extern void intel_set_power_well(struct drm_device *dev, bool enable); 670 660 extern void intel_enable_gt_powersave(struct drm_device *dev); 671 661 extern void intel_disable_gt_powersave(struct drm_device *dev); 672 662 extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
+4 -4
drivers/gpu/drm/i915/intel_fb.c
··· 135 135 goto out_unpin; 136 136 } 137 137 info->apertures->ranges[0].base = dev->mode_config.fb_base; 138 - info->apertures->ranges[0].size = 139 - dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; 138 + info->apertures->ranges[0].size = dev_priv->gtt.mappable_end; 140 139 141 140 info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset; 142 141 info->fix.smem_len = size; 143 142 144 143 info->screen_base = 145 - ioremap_wc(dev_priv->mm.gtt_base_addr + obj->gtt_offset, 144 + ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset, 146 145 size); 147 146 if (!info->screen_base) { 148 147 ret = -ENOSPC; ··· 305 306 306 307 /* Be sure to shut off any planes that may be active */ 307 308 list_for_each_entry(plane, &config->plane_list, head) 308 - plane->funcs->disable_plane(plane); 309 + if (plane->enabled) 310 + plane->funcs->disable_plane(plane); 309 311 310 312 drm_modeset_unlock_all(dev); 311 313 }
+56 -16
drivers/gpu/drm/i915/intel_hdmi.c
··· 331 331 static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, 332 332 struct drm_display_mode *adjusted_mode) 333 333 { 334 + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 334 335 struct dip_infoframe avi_if = { 335 336 .type = DIP_TYPE_AVI, 336 337 .ver = DIP_VERSION_AVI, ··· 340 339 341 340 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 342 341 avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2; 342 + 343 + if (intel_hdmi->rgb_quant_range_selectable) { 344 + if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE) 345 + avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED; 346 + else 347 + avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL; 348 + } 343 349 344 350 avi_if.body.avi.VIC = drm_mode_cea_vic(adjusted_mode); 345 351 ··· 372 364 struct drm_display_mode *adjusted_mode) 373 365 { 374 366 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 375 - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 367 + struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 368 + struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; 376 369 u32 reg = VIDEO_DIP_CTL; 377 370 u32 val = I915_READ(reg); 378 371 u32 port; ··· 400 391 return; 401 392 } 402 393 403 - switch (intel_hdmi->sdvox_reg) { 404 - case SDVOB: 394 + switch (intel_dig_port->port) { 395 + case PORT_B: 405 396 port = VIDEO_DIP_PORT_B; 406 397 break; 407 - case SDVOC: 398 + case PORT_C: 408 399 port = VIDEO_DIP_PORT_C; 409 400 break; 410 401 default: ··· 437 428 { 438 429 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 439 430 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 440 - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 431 + struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 432 + struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; 441 433 u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe); 442 434 u32 val = I915_READ(reg); 443 435 u32 port; ··· 457 447 return; 458 448 } 459 449 460 - switch (intel_hdmi->sdvox_reg) { 461 - case HDMIB: 450 + switch (intel_dig_port->port) { 451 + case PORT_B: 462 452 port = VIDEO_DIP_PORT_B; 463 453 break; 464 - case HDMIC: 454 + case PORT_C: 465 455 port = VIDEO_DIP_PORT_C; 466 456 break; 467 - case HDMID: 457 + case PORT_D: 468 458 port = VIDEO_DIP_PORT_D; 469 459 break; 470 460 default: ··· 776 766 const struct drm_display_mode *mode, 777 767 struct drm_display_mode *adjusted_mode) 778 768 { 769 + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 770 + 771 + if (intel_hdmi->color_range_auto) { 772 + /* See CEA-861-E - 5.1 Default Encoding Parameters */ 773 + if (intel_hdmi->has_hdmi_sink && 774 + drm_mode_cea_vic(adjusted_mode) > 1) 775 + intel_hdmi->color_range = SDVO_COLOR_RANGE_16_235; 776 + else 777 + intel_hdmi->color_range = 0; 778 + } 779 + 780 + if (intel_hdmi->color_range) 781 + adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE; 782 + 779 783 return true; 780 784 } 781 785 ··· 797 773 { 798 774 struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi); 799 775 struct drm_i915_private *dev_priv = dev->dev_private; 776 + struct intel_digital_port *intel_dig_port = hdmi_to_dig_port(intel_hdmi); 800 777 uint32_t bit; 801 778 802 - switch (intel_hdmi->sdvox_reg) { 803 - case SDVOB: 779 + switch (intel_dig_port->port) { 780 + case PORT_B: 804 781 bit = HDMIB_HOTPLUG_LIVE_STATUS; 805 782 break; 806 - case SDVOC: 783 + case PORT_C: 807 784 bit = HDMIC_HOTPLUG_LIVE_STATUS; 808 785 break; 809 786 default: ··· 836 811 837 812 
intel_hdmi->has_hdmi_sink = false; 838 813 intel_hdmi->has_audio = false; 814 + intel_hdmi->rgb_quant_range_selectable = false; 839 815 edid = drm_get_edid(connector, 840 816 intel_gmbus_get_adapter(dev_priv, 841 817 intel_hdmi->ddc_bus)); ··· 848 822 intel_hdmi->has_hdmi_sink = 849 823 drm_detect_hdmi_monitor(edid); 850 824 intel_hdmi->has_audio = drm_detect_monitor_audio(edid); 825 + intel_hdmi->rgb_quant_range_selectable = 826 + drm_rgb_quant_range_selectable(edid); 851 827 } 852 828 kfree(edid); 853 829 } ··· 935 907 } 936 908 937 909 if (property == dev_priv->broadcast_rgb_property) { 938 - if (val == !!intel_hdmi->color_range) 939 - return 0; 940 - 941 - intel_hdmi->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0; 910 + switch (val) { 911 + case INTEL_BROADCAST_RGB_AUTO: 912 + intel_hdmi->color_range_auto = true; 913 + break; 914 + case INTEL_BROADCAST_RGB_FULL: 915 + intel_hdmi->color_range_auto = false; 916 + intel_hdmi->color_range = 0; 917 + break; 918 + case INTEL_BROADCAST_RGB_LIMITED: 919 + intel_hdmi->color_range_auto = false; 920 + intel_hdmi->color_range = SDVO_COLOR_RANGE_16_235; 921 + break; 922 + default: 923 + return -EINVAL; 924 + } 942 925 goto done; 943 926 } 944 927 ··· 998 959 { 999 960 intel_attach_force_audio_property(connector); 1000 961 intel_attach_broadcast_rgb_property(connector); 962 + intel_hdmi->color_range_auto = true; 1001 963 } 1002 964 1003 965 void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
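The quantization-range signalling above targets the Q field of AVI InfoFrame data byte 3, bits 3:2, which is why the DIP_AVI_RGB_QUANT_RANGE_* values added in intel_drv.h are (n << 2). Explicitly signalling Q is only legal when the sink sets the QS bit in its Video Capability block, which drm_rgb_quant_range_selectable() reports from the EDID. Sketch ('limited' stands in for the INTEL_MODE_LIMITED_COLOR_RANGE test):

    if (intel_hdmi->rgb_quant_range_selectable)
            avi_if.body.avi.ITC_EC_Q_SC |= limited ?
                    DIP_AVI_RGB_QUANT_RANGE_LIMITED :
                    DIP_AVI_RGB_QUANT_RANGE_FULL;
    /* Otherwise leave Q = 0: the sink applies the default range
     * for the mode, per CEA-861. */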
+2
drivers/gpu/drm/i915/intel_i2c.c
··· 515 515 516 516 if (HAS_PCH_SPLIT(dev)) 517 517 dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA; 518 + else if (IS_VALLEYVIEW(dev)) 519 + dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE; 518 520 else 519 521 dev_priv->gpio_mmio_base = 0; 520 522
+3 -2
drivers/gpu/drm/i915/intel_modes.c
··· 100 100 } 101 101 102 102 static const struct drm_prop_enum_list broadcast_rgb_names[] = { 103 - { 0, "Full" }, 104 - { 1, "Limited 16:235" }, 103 + { INTEL_BROADCAST_RGB_AUTO, "Automatic" }, 104 + { INTEL_BROADCAST_RGB_FULL, "Full" }, 105 + { INTEL_BROADCAST_RGB_LIMITED, "Limited 16:235" }, 105 106 }; 106 107 107 108 void
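Because "Automatic" becomes a new first entry, the numeric values of the existing entries shift; userspace should resolve the enum by name rather than hard-coding values. A hypothetical libdrm sketch (the DRM fd and connector are assumed obtained elsewhere):

    #include <string.h>
    #include <xf86drmMode.h>

    /* Set the "Broadcast RGB" connector property by enum name,
     * e.g. name = "Automatic", "Full", or "Limited 16:235". */
    static int set_broadcast_rgb(int fd, drmModeConnector *conn,
                                 const char *name)
    {
            int i, j, ret = -1;

            for (i = 0; i < conn->count_props; i++) {
                    drmModePropertyRes *prop =
                            drmModeGetProperty(fd, conn->props[i]);
                    if (!prop)
                            continue;
                    if (strcmp(prop->name, "Broadcast RGB") == 0) {
                            for (j = 0; j < prop->count_enums; j++)
                                    if (strcmp(prop->enums[j].name, name) == 0)
                                            ret = drmModeConnectorSetProperty(fd,
                                                    conn->connector_id,
                                                    prop->prop_id,
                                                    prop->enums[j].value);
                    }
                    drmModeFreeProperty(prop);
            }
            return ret;
    }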
+1 -1
drivers/gpu/drm/i915/intel_opregion.c
··· 347 347 int i = 0; 348 348 349 349 handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); 350 - if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) 350 + if (!handle || acpi_bus_get_device(handle, &acpi_dev)) 351 351 return; 352 352 353 353 if (acpi_is_video_device(acpi_dev))
+2 -2
drivers/gpu/drm/i915/intel_overlay.c
··· 195 195 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 196 196 regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr; 197 197 else 198 - regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping, 198 + regs = io_mapping_map_wc(dev_priv->gtt.mappable, 199 199 overlay->reg_bo->gtt_offset); 200 200 201 201 return regs; ··· 1434 1434 regs = (struct overlay_registers __iomem *) 1435 1435 overlay->reg_bo->phys_obj->handle->vaddr; 1436 1436 else 1437 - regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, 1437 + regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, 1438 1438 overlay->reg_bo->gtt_offset); 1439 1439 1440 1440 return regs;
+44 -18
drivers/gpu/drm/i915/intel_pm.c
··· 3687 3687 reg |= GEN7_FF_VS_SCHED_HW; 3688 3688 reg |= GEN7_FF_DS_SCHED_HW; 3689 3689 3690 + /* WaVSRefCountFullforceMissDisable */ 3691 + if (IS_HASWELL(dev_priv->dev)) 3692 + reg &= ~GEN7_FF_VS_REF_CNT_FFME; 3693 + 3690 3694 I915_WRITE(GEN7_FF_THREAD_MODE, reg); 3691 3695 } 3692 3696 ··· 4054 4050 dev_priv->display.init_clock_gating(dev); 4055 4051 } 4056 4052 4057 - /* Starting with Haswell, we have different power wells for 4058 - * different parts of the GPU. This attempts to enable them all. 4059 - */ 4060 - void intel_init_power_wells(struct drm_device *dev) 4053 + void intel_set_power_well(struct drm_device *dev, bool enable) 4061 4054 { 4062 4055 struct drm_i915_private *dev_priv = dev->dev_private; 4063 - unsigned long power_wells[] = { 4064 - HSW_PWR_WELL_CTL1, 4065 - HSW_PWR_WELL_CTL2, 4066 - HSW_PWR_WELL_CTL4 4067 - }; 4068 - int i; 4056 + bool is_enabled, enable_requested; 4057 + uint32_t tmp; 4069 4058 4070 4059 if (!IS_HASWELL(dev)) 4071 4060 return; 4072 4061 4073 - mutex_lock(&dev->struct_mutex); 4062 + tmp = I915_READ(HSW_PWR_WELL_DRIVER); 4063 + is_enabled = tmp & HSW_PWR_WELL_STATE; 4064 + enable_requested = tmp & HSW_PWR_WELL_ENABLE; 4074 4065 4075 - for (i = 0; i < ARRAY_SIZE(power_wells); i++) { 4076 - int well = I915_READ(power_wells[i]); 4066 + if (enable) { 4067 + if (!enable_requested) 4068 + I915_WRITE(HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_ENABLE); 4077 4069 4078 - if ((well & HSW_PWR_WELL_STATE) == 0) { 4079 - I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE); 4080 - if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20)) 4081 - DRM_ERROR("Error enabling power well %lx\n", power_wells[i]); 4070 + if (!is_enabled) { 4071 + DRM_DEBUG_KMS("Enabling power well\n"); 4072 + if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) & 4073 + HSW_PWR_WELL_STATE), 20)) 4074 + DRM_ERROR("Timeout enabling power well\n"); 4075 + } 4076 + } else { 4077 + if (enable_requested) { 4078 + I915_WRITE(HSW_PWR_WELL_DRIVER, 0); 4079 + DRM_DEBUG_KMS("Requesting to disable the power well\n"); 4082 4080 } 4083 4081 } 4082 + } 4084 4083 4085 - mutex_unlock(&dev->struct_mutex); 4084 + /* 4085 + * Starting with Haswell, we have a "Power Down Well" that can be turned off 4086 + * when not needed anymore. We have 4 registers that can request the power well 4087 + * to be enabled, and it will only be disabled if none of the registers is 4088 + * requesting it to be enabled. 4089 + */ 4090 + void intel_init_power_well(struct drm_device *dev) 4091 + { 4092 + struct drm_i915_private *dev_priv = dev->dev_private; 4093 + 4094 + if (!IS_HASWELL(dev)) 4095 + return; 4096 + 4097 + /* For now, we need the power well to be always enabled. */ 4098 + intel_set_power_well(dev, true); 4099 + 4100 + /* We're taking over the BIOS, so clear any requests made by it since 4101 + * the driver is in charge now. */ 4102 + if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE) 4103 + I915_WRITE(HSW_PWR_WELL_BIOS, 0); 4086 4104 } 4087 4105 4088 4106 /* Set up chip specific power management-related functions */
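The comment in intel_init_power_well() describes four request registers, but only HSW_PWR_WELL_BIOS and HSW_PWR_WELL_DRIVER appear in this hunk; the other two names below (KVMR and debug) are assumed to follow the same scheme. A sketch of the "well stays up while anyone asks" rule:

    /* The hardware keeps the well powered while any requester's
     * ENABLE bit is set and powers it down only once all four clear.
     * HSW_PWR_WELL_KVMR / HSW_PWR_WELL_DEBUG are assumed names. */
    static bool hsw_power_well_requested(struct drm_i915_private *dev_priv)
    {
            return (I915_READ(HSW_PWR_WELL_BIOS)   & HSW_PWR_WELL_ENABLE) ||
                   (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_ENABLE) ||
                   (I915_READ(HSW_PWR_WELL_KVMR)   & HSW_PWR_WELL_ENABLE) ||
                   (I915_READ(HSW_PWR_WELL_DEBUG)  & HSW_PWR_WELL_ENABLE);
    }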
+6 -6
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 1203 1203 goto err_unpin; 1204 1204 1205 1205 ring->virtual_start = 1206 - ioremap_wc(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset, 1206 + ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset, 1207 1207 ring->size); 1208 1208 if (ring->virtual_start == NULL) { 1209 1209 DRM_ERROR("Failed to map ringbuffer.\n"); ··· 1222 1222 ring->effective_size = ring->size; 1223 1223 if (IS_I830(ring->dev) || IS_845G(ring->dev)) 1224 1224 ring->effective_size -= 128; 1225 - 1226 - intel_ring_init_seqno(ring, dev_priv->last_seqno); 1227 1225 1228 1226 return 0; 1229 1227 ··· 1369 1371 1370 1372 msleep(1); 1371 1373 1372 - ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible); 1374 + ret = i915_gem_check_wedge(&dev_priv->gpu_error, 1375 + dev_priv->mm.interruptible); 1373 1376 if (ret) 1374 1377 return ret; 1375 1378 } while (!time_after(jiffies, end)); ··· 1459 1460 drm_i915_private_t *dev_priv = ring->dev->dev_private; 1460 1461 int ret; 1461 1462 1462 - ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible); 1463 + ret = i915_gem_check_wedge(&dev_priv->gpu_error, 1464 + dev_priv->mm.interruptible); 1463 1465 if (ret) 1464 1466 return ret; 1465 1467 ··· 1491 1491 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1492 1492 1493 1493 ring->tail &= ring->size - 1; 1494 - if (dev_priv->stop_rings & intel_ring_flag(ring)) 1494 + if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring)) 1495 1495 return; 1496 1496 ring->write_tail(ring, ring->tail); 1497 1497 }
+49 -10
drivers/gpu/drm/i915/intel_sdvo.c
··· 103 103 * It is only valid when using TMDS encoding and 8 bit per color mode. 104 104 */ 105 105 uint32_t color_range; 106 + bool color_range_auto; 106 107 107 108 /** 108 109 * This is set if we're going to treat the device as TV-out. ··· 126 125 bool is_hdmi; 127 126 bool has_hdmi_monitor; 128 127 bool has_hdmi_audio; 128 + bool rgb_quant_range_selectable; 129 129 130 130 /** 131 131 * This is set if we detect output of sdvo device as LVDS and ··· 948 946 &tx_rate, 1); 949 947 } 950 948 951 - static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo) 949 + static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo, 950 + const struct drm_display_mode *adjusted_mode) 952 951 { 953 952 struct dip_infoframe avi_if = { 954 953 .type = DIP_TYPE_AVI, ··· 957 954 .len = DIP_LEN_AVI, 958 955 }; 959 956 uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)]; 957 + 958 + if (intel_sdvo->rgb_quant_range_selectable) { 959 + if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE) 960 + avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED; 961 + else 962 + avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL; 963 + } 960 964 961 965 intel_dip_infoframe_csum(&avi_if); 962 966 ··· 1074 1064 multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode); 1075 1065 intel_mode_set_pixel_multiplier(adjusted_mode, multiplier); 1076 1066 1067 + if (intel_sdvo->color_range_auto) { 1068 + /* See CEA-861-E - 5.1 Default Encoding Parameters */ 1069 + if (intel_sdvo->has_hdmi_monitor && 1070 + drm_mode_cea_vic(adjusted_mode) > 1) 1071 + intel_sdvo->color_range = SDVO_COLOR_RANGE_16_235; 1072 + else 1073 + intel_sdvo->color_range = 0; 1074 + } 1075 + 1076 + if (intel_sdvo->color_range) 1077 + adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE; 1078 + 1077 1079 return true; 1078 1080 } 1079 1081 ··· 1143 1121 intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI); 1144 1122 intel_sdvo_set_colorimetry(intel_sdvo, 1145 1123 SDVO_COLORIMETRY_RGB256); 1146 - intel_sdvo_set_avi_infoframe(intel_sdvo); 1124 + intel_sdvo_set_avi_infoframe(intel_sdvo, adjusted_mode); 1147 1125 } else 1148 1126 intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI); 1149 1127 ··· 1175 1153 /* The real mode polarity is set by the SDVO commands, using 1176 1154 * struct intel_sdvo_dtd. */ 1177 1155 sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH; 1178 - if (intel_sdvo->is_hdmi) 1156 + if (!HAS_PCH_SPLIT(dev) && intel_sdvo->is_hdmi) 1179 1157 sdvox |= intel_sdvo->color_range; 1180 1158 if (INTEL_INFO(dev)->gen < 5) 1181 1159 sdvox |= SDVO_BORDER_ENABLE; ··· 1535 1513 if (intel_sdvo->is_hdmi) { 1536 1514 intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid); 1537 1515 intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid); 1516 + intel_sdvo->rgb_quant_range_selectable = 1517 + drm_rgb_quant_range_selectable(edid); 1538 1518 } 1539 1519 } else 1540 1520 status = connector_status_disconnected; ··· 1588 1564 1589 1565 intel_sdvo->has_hdmi_monitor = false; 1590 1566 intel_sdvo->has_hdmi_audio = false; 1567 + intel_sdvo->rgb_quant_range_selectable = false; 1591 1568 1592 1569 if ((intel_sdvo_connector->output_flag & response) == 0) 1593 1570 ret = connector_status_disconnected; ··· 1922 1897 } 1923 1898 1924 1899 if (property == dev_priv->broadcast_rgb_property) { 1925 - if (val == !!intel_sdvo->color_range) 1926 - return 0; 1927 - 1928 - intel_sdvo->color_range = val ? 
SDVO_COLOR_RANGE_16_235 : 0; 1900 + switch (val) { 1901 + case INTEL_BROADCAST_RGB_AUTO: 1902 + intel_sdvo->color_range_auto = true; 1903 + break; 1904 + case INTEL_BROADCAST_RGB_FULL: 1905 + intel_sdvo->color_range_auto = false; 1906 + intel_sdvo->color_range = 0; 1907 + break; 1908 + case INTEL_BROADCAST_RGB_LIMITED: 1909 + intel_sdvo->color_range_auto = false; 1910 + intel_sdvo->color_range = SDVO_COLOR_RANGE_16_235; 1911 + break; 1912 + default: 1913 + return -EINVAL; 1914 + } 1929 1915 goto done; 1930 1916 } 1931 1917 ··· 2233 2197 } 2234 2198 2235 2199 static void 2236 - intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector) 2200 + intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo, 2201 + struct intel_sdvo_connector *connector) 2237 2202 { 2238 2203 struct drm_device *dev = connector->base.base.dev; 2239 2204 2240 2205 intel_attach_force_audio_property(&connector->base.base); 2241 - if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) 2206 + if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) { 2242 2207 intel_attach_broadcast_rgb_property(&connector->base.base); 2208 + intel_sdvo->color_range_auto = true; 2209 + } 2243 2210 } 2244 2211 2245 2212 static bool ··· 2290 2251 2291 2252 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); 2292 2253 if (intel_sdvo->is_hdmi) 2293 - intel_sdvo_add_hdmi_properties(intel_sdvo_connector); 2254 + intel_sdvo_add_hdmi_properties(intel_sdvo, intel_sdvo_connector); 2294 2255 2295 2256 return true; 2296 2257 }
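The property handler above turns the old two-state broadcast RGB toggle into a tri-state: "Automatic" defers to CEA-861-E (limited range for CEA modes with VIC > 1, full range otherwise), while the other two values force the choice. From userspace the knob is the existing "Broadcast RGB" connector property; below is a hedged libdrm sketch that matches the enum entry by name, since the numeric values are a kernel implementation detail (the names i915 exposes are "Automatic", "Full" and "Limited 16:235"):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Look up the "Broadcast RGB" property on a connector and set it to
 * the enum entry whose name matches 'mode'.  Returns 0 on success,
 * -1 if the property or enum name was not found. */
static int set_broadcast_rgb(int fd, uint32_t connector_id, const char *mode)
{
	drmModeConnectorPtr conn = drmModeGetConnector(fd, connector_id);
	int i, j, ret = -1;

	if (!conn)
		return -1;

	for (i = 0; i < conn->count_props && ret == -1; i++) {
		drmModePropertyPtr prop = drmModeGetProperty(fd, conn->props[i]);

		if (!prop)
			continue;
		if (strcmp(prop->name, "Broadcast RGB") == 0) {
			for (j = 0; j < prop->count_enums; j++) {
				if (strcmp(prop->enums[j].name, mode))
					continue;
				ret = drmModeConnectorSetProperty(fd,
						connector_id, prop->prop_id,
						prop->enums[j].value);
				break;
			}
		}
		drmModeFreeProperty(prop);
	}
	drmModeFreeConnector(conn);
	return ret;
}

For example, set_broadcast_rgb(fd, connector_id, "Automatic") hands the range decision back to the kernel logic added in this hunk.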
+1
include/drm/drm_crtc.h
··· 1063 1063 extern u8 drm_match_cea_mode(struct drm_display_mode *to_match); 1064 1064 extern bool drm_detect_hdmi_monitor(struct edid *edid); 1065 1065 extern bool drm_detect_monitor_audio(struct edid *edid); 1066 + extern bool drm_rgb_quant_range_selectable(struct edid *edid); 1066 1067 extern int drm_mode_page_flip_ioctl(struct drm_device *dev, 1067 1068 void *data, struct drm_file *file_priv); 1068 1069 extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
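The new EDID helper reports whether the sink declares, via the QS bit of its CEA-861 Video Capability Data Block, that the source may select the RGB quantization range explicitly; that is the precondition for sending the AVI infoframe Q bits used in the SDVO hunk above. A standalone sketch of that check, written against the CEA-861-E block layout rather than copied from the drm_edid.c internals; the names and the 128-byte-extension argument are illustrative:

#include <stdbool.h>
#include <stdint.h>

#define CEA_EXT_TAG			0x02
#define USE_EXTENDED_TAG		0x07
#define EXT_VIDEO_CAPABILITY_BLOCK	0x00
#define VCDB_QS				(1 << 6)

/* 'cea' points at one 128-byte CEA-861 EDID extension block. */
static bool rgb_quant_range_selectable(const uint8_t *cea)
{
	uint8_t dtd_start = cea[2];	/* data blocks end where DTDs begin */
	int i = 4;			/* data block collection starts here */

	if (cea[0] != CEA_EXT_TAG || dtd_start < 4)
		return false;

	while (i < dtd_start) {
		uint8_t tag = cea[i] >> 5;	/* top 3 bits: block tag */
		uint8_t len = cea[i] & 0x1f;	/* low 5 bits: payload length */

		if (tag == USE_EXTENDED_TAG && len >= 2 &&
		    cea[i + 1] == EXT_VIDEO_CAPABILITY_BLOCK)
			return cea[i + 2] & VCDB_QS;

		i += len + 1;
	}
	return false;
}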
+1 -18
include/drm/intel-gtt.h
··· 3 3 #ifndef _DRM_INTEL_GTT_H 4 4 #define _DRM_INTEL_GTT_H 5 5 6 - struct intel_gtt { 7 - /* Size of memory reserved for graphics by the BIOS */ 8 - unsigned int stolen_size; 9 - /* Total number of gtt entries. */ 10 - unsigned int gtt_total_entries; 11 - /* Part of the gtt that is mappable by the cpu, for those chips where 12 - * this is not the full gtt. */ 13 - unsigned int gtt_mappable_entries; 14 - /* Whether i915 needs to use the dmar apis or not. */ 15 - unsigned int needs_dmar : 1; 16 - /* Whether we idle the gpu before mapping/unmapping */ 17 - unsigned int do_idle_maps : 1; 18 - /* Share the scratch page dma with ppgtts. */ 19 - dma_addr_t scratch_page_dma; 20 - struct page *scratch_page; 21 - /* needed for ioremap in drm/i915 */ 22 - phys_addr_t gma_bus_addr; 23 - } *intel_gtt_get(void); 6 + void intel_gtt_get(size_t *gtt_total, size_t *stolen_size); 24 7 25 8 int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, 26 9 struct agp_bridge_data *bridge);
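With the shared struct intel_gtt deleted, the fields it used to export (scratch page, gma_bus_addr, needs_dmar and friends) stay internal to intel-gtt.c, and i915 pulls out only the two numbers it still needs through the new out-parameter call. A trivial caller sketch; the helper name is hypothetical, and the assumption here is that both values come back in bytes, matching how the i915 gtt setup consumes them:

#include <drm/drmP.h>
#include <drm/intel-gtt.h>

/* Hypothetical probe-time helper inside i915. */
static void i915_dump_gtt_sizes(void)
{
	size_t gtt_total, stolen_size;

	intel_gtt_get(&gtt_total, &stolen_size);
	DRM_DEBUG_DRIVER("GTT %zuK total, %zuK stolen by BIOS\n",
			 gtt_total >> 10, stolen_size >> 10);
}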
+20
include/uapi/drm/i915_drm.h
··· 308 308 #define I915_PARAM_RSVD_FOR_FUTURE_USE 22 309 309 #define I915_PARAM_HAS_SECURE_BATCHES 23 310 310 #define I915_PARAM_HAS_PINNED_BATCHES 24 311 + #define I915_PARAM_HAS_EXEC_NO_RELOC 25 312 + #define I915_PARAM_HAS_EXEC_HANDLE_LUT 26 311 313 312 314 typedef struct drm_i915_getparam { 313 315 int param; ··· 630 628 __u64 offset; 631 629 632 630 #define EXEC_OBJECT_NEEDS_FENCE (1<<0) 631 + #define EXEC_OBJECT_NEEDS_GTT (1<<1) 632 + #define EXEC_OBJECT_WRITE (1<<2) 633 + #define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_WRITE<<1) 633 634 __u64 flags; 635 + 634 636 __u64 rsvd1; 635 637 __u64 rsvd2; 636 638 }; ··· 692 686 * userspace assumes the responsibility for ensuring the same. 693 687 */ 694 688 #define I915_EXEC_IS_PINNED (1<<10) 689 + 690 + /** Provide a hint to the kernel that the command stream and auxiliary 691 + * state buffers already hold the correct presumed addresses and so the 692 + * relocation process may be skipped if no buffers need to be moved in 693 + * preparation for the execbuffer. 694 + */ 695 + #define I915_EXEC_NO_RELOC (1<<11) 696 + 697 + /** Use the reloc.handle as an index into the exec object array rather 698 + * than as the per-file handle. 699 + */ 700 + #define I915_EXEC_HANDLE_LUT (1<<12) 701 + 702 + #define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_HANDLE_LUT<<1) 695 703 696 704 #define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) 697 705 #define i915_execbuffer2_set_context_id(eb2, context) \
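Both new execbuf flags are opt-in fast paths: I915_EXEC_NO_RELOC promises that the offsets in the exec object list are still the kernel's presumed addresses, so relocation processing can be skipped when nothing has moved, and I915_EXEC_HANDLE_LUT makes reloc target handles indices into the buffer list instead of per-file handles, trading a hash lookup for an array index. A hedged userspace sketch gated on the matching getparams; the helper names and the single self-contained batch are illustrative, not libdrm API:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int i915_getparam(int fd, int param)
{
	drm_i915_getparam_t gp;
	int value = 0;

	memset(&gp, 0, sizeof(gp));
	gp.param = param;
	gp.value = &value;
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 0;
	return value;
}

/* Submit a single self-contained batch (no relocation entries).
 * presumed_offset should be the offset the kernel reported for this
 * bo on its last execbuf; with NO_RELOC set, a still-valid offset
 * lets the kernel skip relocation processing entirely. */
static int exec_batch(int fd, uint32_t handle, uint32_t batch_len,
		      uint64_t presumed_offset)
{
	struct drm_i915_gem_exec_object2 obj;
	struct drm_i915_gem_execbuffer2 execbuf;

	memset(&obj, 0, sizeof(obj));
	obj.handle = handle;
	obj.offset = presumed_offset;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)&obj;
	execbuf.buffer_count = 1;
	execbuf.batch_len = batch_len;
	execbuf.flags = I915_EXEC_RENDER;
	if (i915_getparam(fd, I915_PARAM_HAS_EXEC_NO_RELOC))
		execbuf.flags |= I915_EXEC_NO_RELOC;
	if (i915_getparam(fd, I915_PARAM_HAS_EXEC_HANDLE_LUT))
		execbuf.flags |= I915_EXEC_HANDLE_LUT;

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}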