Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-intel-next-2013-03-23' of git://people.freedesktop.org/~danvet/drm-intel into drm-next

Daniel writes:
Highlights:
- Imre's for_each_sg_pages rework (now also with the stolen mem backed
case fixed with a hack) plus the drm prime sg list coalescing patch from
Rahul Sharma. I have some follow-up cleanups pending, already acked by
Andrew Morton.
- Some prep-work for the crazy no-pch/display-less platform by Ben.
- Some vlv patches, by far not all (Jesse et al).
- Clean up the HDMI/SDVO #define confusion (Paulo)
- gen2-4 vblank fixes from Ville.
- Unclaimed register warning fixes for hsw (Paulo). More still to come ...
- Complete pageflips which have been stuck in a gpu hang, should prevent
stuck gl compositors (Ville).
- pm patches for vt-switchless resume (Jesse). Note that the i915 enabling
is not (yet) included, that took a bit longer to settle. PM patches are
acked by Rafael Wysocki.
- Minor fixlets all over from various people.

* tag 'drm-intel-next-2013-03-23' of git://people.freedesktop.org/~danvet/drm-intel: (79 commits)
drm/i915: Implement WaSwitchSolVfFArbitrationPriority
drm/i915: Set the VIC in AVI infoframe for SDVO
drm/i915: Kill a strange comment about DPMS functions
drm/i915: Correct sandybridge overclocking
drm/i915: Introduce GEN7_FEATURES for device info
drm/i915: Move num_pipes to intel info
drm/i915: fixup pd vs pt confusion in gen6 ppgtt code
style nit: Align function parameter continuation properly.
drm/i915: VLV doesn't have HDMI on port C
drm/i915: DSPFW and BLC regs are in the display offset range
drm/i915: set conservative clock gating values on VLV v2
drm/i915: fix WaDisablePSDDualDispatchEnable on VLV v2
drm/i915: add more VLV IDs
drm/i915: use VLV DIP routines on VLV v2
drm/i915: add media well to VLV force wake routines v2
drm/i915: don't use plane pipe select on VLV
drm: modify pages_to_sg prime helper to create optimized SG table
drm/i915: use for_each_sg_page for setting up the gtt ptes
drm/i915: create compact dma scatter lists for gem objects
drm/i915: handle walking compact dma scatter lists
...

+1032 -916
+3 -4
drivers/gpu/drm/drm_cache.c
··· 105 105 { 106 106 #if defined(CONFIG_X86) 107 107 if (cpu_has_clflush) { 108 - struct scatterlist *sg; 109 - int i; 108 + struct sg_page_iter sg_iter; 110 109 111 110 mb(); 112 - for_each_sg(st->sgl, sg, st->nents, i) 113 - drm_clflush_page(sg_page(sg)); 111 + for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) 112 + drm_clflush_page(sg_iter.page); 114 113 mb(); 115 114 116 115 return;
+2 -6
drivers/gpu/drm/drm_prime.c
··· 401 401 struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages) 402 402 { 403 403 struct sg_table *sg = NULL; 404 - struct scatterlist *iter; 405 - int i; 406 404 int ret; 407 405 408 406 sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL); 409 407 if (!sg) 410 408 goto out; 411 409 412 - ret = sg_alloc_table(sg, nr_pages, GFP_KERNEL); 410 + ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0, 411 + nr_pages << PAGE_SHIFT, GFP_KERNEL); 413 412 if (ret) 414 413 goto out; 415 - 416 - for_each_sg(sg->sgl, iter, nr_pages, i) 417 - sg_set_page(iter, pages[i], PAGE_SIZE, 0); 418 414 419 415 return sg; 420 416 out:
+120 -301
drivers/gpu/drm/i915/i915_debugfs.c
··· 772 772 } 773 773 } 774 774 } 775 + 776 + obj = error->ring[i].ctx; 777 + if (obj) { 778 + seq_printf(m, "%s --- HW Context = 0x%08x\n", 779 + dev_priv->ring[i].name, 780 + obj->gtt_offset); 781 + offset = 0; 782 + for (elt = 0; elt < PAGE_SIZE/16; elt += 4) { 783 + seq_printf(m, "[%04x] %08x %08x %08x %08x\n", 784 + offset, 785 + obj->pages[0][elt], 786 + obj->pages[0][elt+1], 787 + obj->pages[0][elt+2], 788 + obj->pages[0][elt+3]); 789 + offset += 16; 790 + } 791 + } 775 792 } 776 793 777 794 if (error->overlay) ··· 866 849 .release = i915_error_state_release, 867 850 }; 868 851 869 - static ssize_t 870 - i915_next_seqno_read(struct file *filp, 871 - char __user *ubuf, 872 - size_t max, 873 - loff_t *ppos) 852 + static int 853 + i915_next_seqno_get(void *data, u64 *val) 874 854 { 875 - struct drm_device *dev = filp->private_data; 855 + struct drm_device *dev = data; 876 856 drm_i915_private_t *dev_priv = dev->dev_private; 877 - char buf[80]; 878 - int len; 879 857 int ret; 880 858 881 859 ret = mutex_lock_interruptible(&dev->struct_mutex); 882 860 if (ret) 883 861 return ret; 884 862 885 - len = snprintf(buf, sizeof(buf), 886 - "next_seqno : 0x%x\n", 887 - dev_priv->next_seqno); 888 - 863 + *val = dev_priv->next_seqno; 889 864 mutex_unlock(&dev->struct_mutex); 890 865 891 - if (len > sizeof(buf)) 892 - len = sizeof(buf); 893 - 894 - return simple_read_from_buffer(ubuf, max, ppos, buf, len); 866 + return 0; 895 867 } 896 868 897 - static ssize_t 898 - i915_next_seqno_write(struct file *filp, 899 - const char __user *ubuf, 900 - size_t cnt, 901 - loff_t *ppos) 869 + static int 870 + i915_next_seqno_set(void *data, u64 val) 902 871 { 903 - struct drm_device *dev = filp->private_data; 904 - char buf[20]; 905 - u32 val = 1; 872 + struct drm_device *dev = data; 906 873 int ret; 907 - 908 - if (cnt > 0) { 909 - if (cnt > sizeof(buf) - 1) 910 - return -EINVAL; 911 - 912 - if (copy_from_user(buf, ubuf, cnt)) 913 - return -EFAULT; 914 - buf[cnt] = 0; 915 - 916 - ret = 
kstrtouint(buf, 0, &val); 917 - if (ret < 0) 918 - return ret; 919 - } 920 874 921 875 ret = mutex_lock_interruptible(&dev->struct_mutex); 922 876 if (ret) 923 877 return ret; 924 878 925 879 ret = i915_gem_set_seqno(dev, val); 926 - 927 880 mutex_unlock(&dev->struct_mutex); 928 881 929 - return ret ?: cnt; 882 + return ret; 930 883 } 931 884 932 - static const struct file_operations i915_next_seqno_fops = { 933 - .owner = THIS_MODULE, 934 - .open = simple_open, 935 - .read = i915_next_seqno_read, 936 - .write = i915_next_seqno_write, 937 - .llseek = default_llseek, 938 - }; 885 + DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops, 886 + i915_next_seqno_get, i915_next_seqno_set, 887 + "next_seqno : 0x%llx\n"); 939 888 940 889 static int i915_rstdby_delays(struct seq_file *m, void *unused) 941 890 { ··· 1663 1680 return 0; 1664 1681 } 1665 1682 1666 - static ssize_t 1667 - i915_wedged_read(struct file *filp, 1668 - char __user *ubuf, 1669 - size_t max, 1670 - loff_t *ppos) 1683 + static int 1684 + i915_wedged_get(void *data, u64 *val) 1671 1685 { 1672 - struct drm_device *dev = filp->private_data; 1686 + struct drm_device *dev = data; 1673 1687 drm_i915_private_t *dev_priv = dev->dev_private; 1674 - char buf[80]; 1675 - int len; 1676 1688 1677 - len = snprintf(buf, sizeof(buf), 1678 - "wedged : %d\n", 1679 - atomic_read(&dev_priv->gpu_error.reset_counter)); 1689 + *val = atomic_read(&dev_priv->gpu_error.reset_counter); 1680 1690 1681 - if (len > sizeof(buf)) 1682 - len = sizeof(buf); 1683 - 1684 - return simple_read_from_buffer(ubuf, max, ppos, buf, len); 1691 + return 0; 1685 1692 } 1686 1693 1687 - static ssize_t 1688 - i915_wedged_write(struct file *filp, 1689 - const char __user *ubuf, 1690 - size_t cnt, 1691 - loff_t *ppos) 1694 + static int 1695 + i915_wedged_set(void *data, u64 val) 1692 1696 { 1693 - struct drm_device *dev = filp->private_data; 1694 - char buf[20]; 1695 - int val = 1; 1697 + struct drm_device *dev = data; 1696 1698 1697 - if (cnt > 0) { 1698 - if 
(cnt > sizeof(buf) - 1) 1699 - return -EINVAL; 1700 - 1701 - if (copy_from_user(buf, ubuf, cnt)) 1702 - return -EFAULT; 1703 - buf[cnt] = 0; 1704 - 1705 - val = simple_strtoul(buf, NULL, 0); 1706 - } 1707 - 1708 - DRM_INFO("Manually setting wedged to %d\n", val); 1699 + DRM_INFO("Manually setting wedged to %llu\n", val); 1709 1700 i915_handle_error(dev, val); 1710 1701 1711 - return cnt; 1702 + return 0; 1712 1703 } 1713 1704 1714 - static const struct file_operations i915_wedged_fops = { 1715 - .owner = THIS_MODULE, 1716 - .open = simple_open, 1717 - .read = i915_wedged_read, 1718 - .write = i915_wedged_write, 1719 - .llseek = default_llseek, 1720 - }; 1705 + DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops, 1706 + i915_wedged_get, i915_wedged_set, 1707 + "wedged : %llu\n"); 1721 1708 1722 - static ssize_t 1723 - i915_ring_stop_read(struct file *filp, 1724 - char __user *ubuf, 1725 - size_t max, 1726 - loff_t *ppos) 1709 + static int 1710 + i915_ring_stop_get(void *data, u64 *val) 1727 1711 { 1728 - struct drm_device *dev = filp->private_data; 1712 + struct drm_device *dev = data; 1729 1713 drm_i915_private_t *dev_priv = dev->dev_private; 1730 - char buf[20]; 1731 - int len; 1732 1714 1733 - len = snprintf(buf, sizeof(buf), 1734 - "0x%08x\n", dev_priv->gpu_error.stop_rings); 1715 + *val = dev_priv->gpu_error.stop_rings; 1735 1716 1736 - if (len > sizeof(buf)) 1737 - len = sizeof(buf); 1738 - 1739 - return simple_read_from_buffer(ubuf, max, ppos, buf, len); 1717 + return 0; 1740 1718 } 1741 1719 1742 - static ssize_t 1743 - i915_ring_stop_write(struct file *filp, 1744 - const char __user *ubuf, 1745 - size_t cnt, 1746 - loff_t *ppos) 1720 + static int 1721 + i915_ring_stop_set(void *data, u64 val) 1747 1722 { 1748 - struct drm_device *dev = filp->private_data; 1723 + struct drm_device *dev = data; 1749 1724 struct drm_i915_private *dev_priv = dev->dev_private; 1750 - char buf[20]; 1751 - int val = 0, ret; 1725 + int ret; 1752 1726 1753 - if (cnt > 0) { 1754 - if (cnt > 
sizeof(buf) - 1) 1755 - return -EINVAL; 1756 - 1757 - if (copy_from_user(buf, ubuf, cnt)) 1758 - return -EFAULT; 1759 - buf[cnt] = 0; 1760 - 1761 - val = simple_strtoul(buf, NULL, 0); 1762 - } 1763 - 1764 - DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val); 1727 + DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val); 1765 1728 1766 1729 ret = mutex_lock_interruptible(&dev->struct_mutex); 1767 1730 if (ret) ··· 1716 1787 dev_priv->gpu_error.stop_rings = val; 1717 1788 mutex_unlock(&dev->struct_mutex); 1718 1789 1719 - return cnt; 1790 + return 0; 1720 1791 } 1721 1792 1722 - static const struct file_operations i915_ring_stop_fops = { 1723 - .owner = THIS_MODULE, 1724 - .open = simple_open, 1725 - .read = i915_ring_stop_read, 1726 - .write = i915_ring_stop_write, 1727 - .llseek = default_llseek, 1728 - }; 1793 + DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops, 1794 + i915_ring_stop_get, i915_ring_stop_set, 1795 + "0x%08llx\n"); 1729 1796 1730 1797 #define DROP_UNBOUND 0x1 1731 1798 #define DROP_BOUND 0x2 ··· 1731 1806 DROP_BOUND | \ 1732 1807 DROP_RETIRE | \ 1733 1808 DROP_ACTIVE) 1734 - static ssize_t 1735 - i915_drop_caches_read(struct file *filp, 1736 - char __user *ubuf, 1737 - size_t max, 1738 - loff_t *ppos) 1809 + static int 1810 + i915_drop_caches_get(void *data, u64 *val) 1739 1811 { 1740 - char buf[20]; 1741 - int len; 1812 + *val = DROP_ALL; 1742 1813 1743 - len = snprintf(buf, sizeof(buf), "0x%08x\n", DROP_ALL); 1744 - if (len > sizeof(buf)) 1745 - len = sizeof(buf); 1746 - 1747 - return simple_read_from_buffer(ubuf, max, ppos, buf, len); 1814 + return 0; 1748 1815 } 1749 1816 1750 - static ssize_t 1751 - i915_drop_caches_write(struct file *filp, 1752 - const char __user *ubuf, 1753 - size_t cnt, 1754 - loff_t *ppos) 1817 + static int 1818 + i915_drop_caches_set(void *data, u64 val) 1755 1819 { 1756 - struct drm_device *dev = filp->private_data; 1820 + struct drm_device *dev = data; 1757 1821 struct drm_i915_private *dev_priv = dev->dev_private; 1758 1822 struct 
drm_i915_gem_object *obj, *next; 1759 - char buf[20]; 1760 - int val = 0, ret; 1823 + int ret; 1761 1824 1762 - if (cnt > 0) { 1763 - if (cnt > sizeof(buf) - 1) 1764 - return -EINVAL; 1765 - 1766 - if (copy_from_user(buf, ubuf, cnt)) 1767 - return -EFAULT; 1768 - buf[cnt] = 0; 1769 - 1770 - val = simple_strtoul(buf, NULL, 0); 1771 - } 1772 - 1773 - DRM_DEBUG_DRIVER("Dropping caches: 0x%08x\n", val); 1825 + DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val); 1774 1826 1775 1827 /* No need to check and wait for gpu resets, only libdrm auto-restarts 1776 1828 * on ioctls on -EAGAIN. */ ··· 1785 1883 unlock: 1786 1884 mutex_unlock(&dev->struct_mutex); 1787 1885 1788 - return ret ?: cnt; 1886 + return ret; 1789 1887 } 1790 1888 1791 - static const struct file_operations i915_drop_caches_fops = { 1792 - .owner = THIS_MODULE, 1793 - .open = simple_open, 1794 - .read = i915_drop_caches_read, 1795 - .write = i915_drop_caches_write, 1796 - .llseek = default_llseek, 1797 - }; 1889 + DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops, 1890 + i915_drop_caches_get, i915_drop_caches_set, 1891 + "0x%08llx\n"); 1798 1892 1799 - static ssize_t 1800 - i915_max_freq_read(struct file *filp, 1801 - char __user *ubuf, 1802 - size_t max, 1803 - loff_t *ppos) 1893 + static int 1894 + i915_max_freq_get(void *data, u64 *val) 1804 1895 { 1805 - struct drm_device *dev = filp->private_data; 1896 + struct drm_device *dev = data; 1806 1897 drm_i915_private_t *dev_priv = dev->dev_private; 1807 - char buf[80]; 1808 - int len, ret; 1898 + int ret; 1809 1899 1810 1900 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 1811 1901 return -ENODEV; ··· 1806 1912 if (ret) 1807 1913 return ret; 1808 1914 1809 - len = snprintf(buf, sizeof(buf), 1810 - "max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER); 1915 + *val = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER; 1811 1916 mutex_unlock(&dev_priv->rps.hw_lock); 1812 1917 1813 - if (len > sizeof(buf)) 1814 - len = sizeof(buf); 1815 - 1816 - return 
simple_read_from_buffer(ubuf, max, ppos, buf, len); 1918 + return 0; 1817 1919 } 1818 1920 1819 - static ssize_t 1820 - i915_max_freq_write(struct file *filp, 1821 - const char __user *ubuf, 1822 - size_t cnt, 1823 - loff_t *ppos) 1921 + static int 1922 + i915_max_freq_set(void *data, u64 val) 1824 1923 { 1825 - struct drm_device *dev = filp->private_data; 1924 + struct drm_device *dev = data; 1826 1925 struct drm_i915_private *dev_priv = dev->dev_private; 1827 - char buf[20]; 1828 - int val = 1, ret; 1926 + int ret; 1829 1927 1830 1928 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 1831 1929 return -ENODEV; 1832 1930 1833 - if (cnt > 0) { 1834 - if (cnt > sizeof(buf) - 1) 1835 - return -EINVAL; 1836 - 1837 - if (copy_from_user(buf, ubuf, cnt)) 1838 - return -EFAULT; 1839 - buf[cnt] = 0; 1840 - 1841 - val = simple_strtoul(buf, NULL, 0); 1842 - } 1843 - 1844 - DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val); 1931 + DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val); 1845 1932 1846 1933 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1847 1934 if (ret) ··· 1831 1956 /* 1832 1957 * Turbo will still be enabled, but won't go above the set value. 
1833 1958 */ 1834 - dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER; 1835 - 1836 - gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER); 1959 + do_div(val, GT_FREQUENCY_MULTIPLIER); 1960 + dev_priv->rps.max_delay = val; 1961 + gen6_set_rps(dev, val); 1837 1962 mutex_unlock(&dev_priv->rps.hw_lock); 1838 1963 1839 - return cnt; 1964 + return 0; 1840 1965 } 1841 1966 1842 - static const struct file_operations i915_max_freq_fops = { 1843 - .owner = THIS_MODULE, 1844 - .open = simple_open, 1845 - .read = i915_max_freq_read, 1846 - .write = i915_max_freq_write, 1847 - .llseek = default_llseek, 1848 - }; 1967 + DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops, 1968 + i915_max_freq_get, i915_max_freq_set, 1969 + "max freq: %llu\n"); 1849 1970 1850 - static ssize_t 1851 - i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max, 1852 - loff_t *ppos) 1971 + static int 1972 + i915_min_freq_get(void *data, u64 *val) 1853 1973 { 1854 - struct drm_device *dev = filp->private_data; 1974 + struct drm_device *dev = data; 1855 1975 drm_i915_private_t *dev_priv = dev->dev_private; 1856 - char buf[80]; 1857 - int len, ret; 1976 + int ret; 1858 1977 1859 1978 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 1860 1979 return -ENODEV; ··· 1857 1988 if (ret) 1858 1989 return ret; 1859 1990 1860 - len = snprintf(buf, sizeof(buf), 1861 - "min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER); 1991 + *val = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER; 1862 1992 mutex_unlock(&dev_priv->rps.hw_lock); 1863 1993 1864 - if (len > sizeof(buf)) 1865 - len = sizeof(buf); 1866 - 1867 - return simple_read_from_buffer(ubuf, max, ppos, buf, len); 1994 + return 0; 1868 1995 } 1869 1996 1870 - static ssize_t 1871 - i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt, 1872 - loff_t *ppos) 1997 + static int 1998 + i915_min_freq_set(void *data, u64 val) 1873 1999 { 1874 - struct drm_device *dev = filp->private_data; 2000 + struct drm_device *dev = data; 1875 2001 
struct drm_i915_private *dev_priv = dev->dev_private; 1876 - char buf[20]; 1877 - int val = 1, ret; 2002 + int ret; 1878 2003 1879 2004 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 1880 2005 return -ENODEV; 1881 2006 1882 - if (cnt > 0) { 1883 - if (cnt > sizeof(buf) - 1) 1884 - return -EINVAL; 1885 - 1886 - if (copy_from_user(buf, ubuf, cnt)) 1887 - return -EFAULT; 1888 - buf[cnt] = 0; 1889 - 1890 - val = simple_strtoul(buf, NULL, 0); 1891 - } 1892 - 1893 - DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val); 2007 + DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val); 1894 2008 1895 2009 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1896 2010 if (ret) ··· 1882 2030 /* 1883 2031 * Turbo will still be enabled, but won't go below the set value. 1884 2032 */ 1885 - dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER; 1886 - 1887 - gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER); 2033 + do_div(val, GT_FREQUENCY_MULTIPLIER); 2034 + dev_priv->rps.min_delay = val; 2035 + gen6_set_rps(dev, val); 1888 2036 mutex_unlock(&dev_priv->rps.hw_lock); 1889 2037 1890 - return cnt; 2038 + return 0; 1891 2039 } 1892 2040 1893 - static const struct file_operations i915_min_freq_fops = { 1894 - .owner = THIS_MODULE, 1895 - .open = simple_open, 1896 - .read = i915_min_freq_read, 1897 - .write = i915_min_freq_write, 1898 - .llseek = default_llseek, 1899 - }; 2041 + DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops, 2042 + i915_min_freq_get, i915_min_freq_set, 2043 + "min freq: %llu\n"); 1900 2044 1901 - static ssize_t 1902 - i915_cache_sharing_read(struct file *filp, 1903 - char __user *ubuf, 1904 - size_t max, 1905 - loff_t *ppos) 2045 + static int 2046 + i915_cache_sharing_get(void *data, u64 *val) 1906 2047 { 1907 - struct drm_device *dev = filp->private_data; 2048 + struct drm_device *dev = data; 1908 2049 drm_i915_private_t *dev_priv = dev->dev_private; 1909 - char buf[80]; 1910 2050 u32 snpcr; 1911 - int len, ret; 2051 + int ret; 1912 2052 1913 2053 if 
(!(IS_GEN6(dev) || IS_GEN7(dev))) 1914 2054 return -ENODEV; ··· 1912 2068 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 1913 2069 mutex_unlock(&dev_priv->dev->struct_mutex); 1914 2070 1915 - len = snprintf(buf, sizeof(buf), 1916 - "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >> 1917 - GEN6_MBC_SNPCR_SHIFT); 2071 + *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; 1918 2072 1919 - if (len > sizeof(buf)) 1920 - len = sizeof(buf); 1921 - 1922 - return simple_read_from_buffer(ubuf, max, ppos, buf, len); 2073 + return 0; 1923 2074 } 1924 2075 1925 - static ssize_t 1926 - i915_cache_sharing_write(struct file *filp, 1927 - const char __user *ubuf, 1928 - size_t cnt, 1929 - loff_t *ppos) 2076 + static int 2077 + i915_cache_sharing_set(void *data, u64 val) 1930 2078 { 1931 - struct drm_device *dev = filp->private_data; 2079 + struct drm_device *dev = data; 1932 2080 struct drm_i915_private *dev_priv = dev->dev_private; 1933 - char buf[20]; 1934 2081 u32 snpcr; 1935 - int val = 1; 1936 2082 1937 2083 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 1938 2084 return -ENODEV; 1939 2085 1940 - if (cnt > 0) { 1941 - if (cnt > sizeof(buf) - 1) 1942 - return -EINVAL; 1943 - 1944 - if (copy_from_user(buf, ubuf, cnt)) 1945 - return -EFAULT; 1946 - buf[cnt] = 0; 1947 - 1948 - val = simple_strtoul(buf, NULL, 0); 1949 - } 1950 - 1951 - if (val < 0 || val > 3) 2086 + if (val > 3) 1952 2087 return -EINVAL; 1953 2088 1954 - DRM_DEBUG_DRIVER("Manually setting uncore sharing to %d\n", val); 2089 + DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val); 1955 2090 1956 2091 /* Update the cache sharing policy here as well */ 1957 2092 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); ··· 1938 2115 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT); 1939 2116 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 1940 2117 1941 - return cnt; 2118 + return 0; 1942 2119 } 1943 2120 1944 - static const struct file_operations i915_cache_sharing_fops = { 1945 - .owner = THIS_MODULE, 1946 - .open = simple_open, 1947 - .read = 
i915_cache_sharing_read, 1948 - .write = i915_cache_sharing_write, 1949 - .llseek = default_llseek, 1950 - }; 2121 + DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops, 2122 + i915_cache_sharing_get, i915_cache_sharing_set, 2123 + "%llu\n"); 1951 2124 1952 2125 /* As the drm_debugfs_init() routines are called before dev->dev_private is 1953 2126 * allocated we need to hook into the minor for release. */
+19 -8
drivers/gpu/drm/i915/i915_dma.c
··· 1453 1453 } 1454 1454 1455 1455 /** 1456 + * intel_early_sanitize_regs - clean up BIOS state 1457 + * @dev: DRM device 1458 + * 1459 + * This function must be called before we do any I915_READ or I915_WRITE. Its 1460 + * purpose is to clean up any state left by the BIOS that may affect us when 1461 + * reading and/or writing registers. 1462 + */ 1463 + static void intel_early_sanitize_regs(struct drm_device *dev) 1464 + { 1465 + struct drm_i915_private *dev_priv = dev->dev_private; 1466 + 1467 + if (IS_HASWELL(dev)) 1468 + I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); 1469 + } 1470 + 1471 + /** 1456 1472 * i915_driver_load - setup chip and create an initial config 1457 1473 * @dev: DRM device 1458 1474 * @flags: startup flags ··· 1558 1542 goto put_gmch; 1559 1543 } 1560 1544 1545 + intel_early_sanitize_regs(dev); 1546 + 1561 1547 aperture_size = dev_priv->gtt.mappable_end; 1562 1548 1563 1549 dev_priv->gtt.mappable = ··· 1630 1612 mutex_init(&dev_priv->rps.hw_lock); 1631 1613 mutex_init(&dev_priv->modeset_restore_lock); 1632 1614 1633 - if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) 1634 - dev_priv->num_pipe = 3; 1635 - else if (IS_MOBILE(dev) || !IS_GEN2(dev)) 1636 - dev_priv->num_pipe = 2; 1637 - else 1638 - dev_priv->num_pipe = 1; 1639 - 1640 - ret = drm_vblank_init(dev, dev_priv->num_pipe); 1615 + ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes); 1641 1616 if (ret) 1642 1617 goto out_gem_unload; 1643 1618
+68 -64
drivers/gpu/drm/i915/i915_drv.c
··· 121 121 unsigned int i915_preliminary_hw_support __read_mostly = 0; 122 122 module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600); 123 123 MODULE_PARM_DESC(preliminary_hw_support, 124 - "Enable preliminary hardware support. " 125 - "Enable Haswell and ValleyView Support. " 126 - "(default: false)"); 124 + "Enable preliminary hardware support. (default: false)"); 127 125 128 126 int i915_disable_power_well __read_mostly = 0; 129 127 module_param_named(disable_power_well, i915_disable_power_well, int, 0600); ··· 141 143 .driver_data = (unsigned long) info } 142 144 143 145 static const struct intel_device_info intel_i830_info = { 144 - .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, 146 + .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2, 145 147 .has_overlay = 1, .overlay_needs_physical = 1, 146 148 }; 147 149 148 150 static const struct intel_device_info intel_845g_info = { 149 - .gen = 2, 151 + .gen = 2, .num_pipes = 1, 150 152 .has_overlay = 1, .overlay_needs_physical = 1, 151 153 }; 152 154 153 155 static const struct intel_device_info intel_i85x_info = { 154 - .gen = 2, .is_i85x = 1, .is_mobile = 1, 156 + .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2, 155 157 .cursor_needs_physical = 1, 156 158 .has_overlay = 1, .overlay_needs_physical = 1, 157 159 }; 158 160 159 161 static const struct intel_device_info intel_i865g_info = { 160 - .gen = 2, 162 + .gen = 2, .num_pipes = 1, 161 163 .has_overlay = 1, .overlay_needs_physical = 1, 162 164 }; 163 165 164 166 static const struct intel_device_info intel_i915g_info = { 165 - .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, 167 + .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2, 166 168 .has_overlay = 1, .overlay_needs_physical = 1, 167 169 }; 168 170 static const struct intel_device_info intel_i915gm_info = { 169 - .gen = 3, .is_mobile = 1, 171 + .gen = 3, .is_mobile = 1, .num_pipes = 2, 170 172 .cursor_needs_physical = 1, 171 173 
.has_overlay = 1, .overlay_needs_physical = 1, 172 174 .supports_tv = 1, 173 175 }; 174 176 static const struct intel_device_info intel_i945g_info = { 175 - .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, 177 + .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2, 176 178 .has_overlay = 1, .overlay_needs_physical = 1, 177 179 }; 178 180 static const struct intel_device_info intel_i945gm_info = { 179 - .gen = 3, .is_i945gm = 1, .is_mobile = 1, 181 + .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2, 180 182 .has_hotplug = 1, .cursor_needs_physical = 1, 181 183 .has_overlay = 1, .overlay_needs_physical = 1, 182 184 .supports_tv = 1, 183 185 }; 184 186 185 187 static const struct intel_device_info intel_i965g_info = { 186 - .gen = 4, .is_broadwater = 1, 188 + .gen = 4, .is_broadwater = 1, .num_pipes = 2, 187 189 .has_hotplug = 1, 188 190 .has_overlay = 1, 189 191 }; 190 192 191 193 static const struct intel_device_info intel_i965gm_info = { 192 - .gen = 4, .is_crestline = 1, 194 + .gen = 4, .is_crestline = 1, .num_pipes = 2, 193 195 .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1, 194 196 .has_overlay = 1, 195 197 .supports_tv = 1, 196 198 }; 197 199 198 200 static const struct intel_device_info intel_g33_info = { 199 - .gen = 3, .is_g33 = 1, 201 + .gen = 3, .is_g33 = 1, .num_pipes = 2, 200 202 .need_gfx_hws = 1, .has_hotplug = 1, 201 203 .has_overlay = 1, 202 204 }; 203 205 204 206 static const struct intel_device_info intel_g45_info = { 205 - .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, 207 + .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2, 206 208 .has_pipe_cxsr = 1, .has_hotplug = 1, 207 209 .has_bsd_ring = 1, 208 210 }; 209 211 210 212 static const struct intel_device_info intel_gm45_info = { 211 - .gen = 4, .is_g4x = 1, 213 + .gen = 4, .is_g4x = 1, .num_pipes = 2, 212 214 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, 213 215 .has_pipe_cxsr = 1, .has_hotplug = 1, 214 216 .supports_tv = 1, ··· 216 218 }; 217 219 218 220 
static const struct intel_device_info intel_pineview_info = { 219 - .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, 221 + .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2, 220 222 .need_gfx_hws = 1, .has_hotplug = 1, 221 223 .has_overlay = 1, 222 224 }; 223 225 224 226 static const struct intel_device_info intel_ironlake_d_info = { 225 - .gen = 5, 227 + .gen = 5, .num_pipes = 2, 226 228 .need_gfx_hws = 1, .has_hotplug = 1, 227 229 .has_bsd_ring = 1, 228 230 }; 229 231 230 232 static const struct intel_device_info intel_ironlake_m_info = { 231 - .gen = 5, .is_mobile = 1, 233 + .gen = 5, .is_mobile = 1, .num_pipes = 2, 232 234 .need_gfx_hws = 1, .has_hotplug = 1, 233 235 .has_fbc = 1, 234 236 .has_bsd_ring = 1, 235 237 }; 236 238 237 239 static const struct intel_device_info intel_sandybridge_d_info = { 238 - .gen = 6, 240 + .gen = 6, .num_pipes = 2, 239 241 .need_gfx_hws = 1, .has_hotplug = 1, 240 242 .has_bsd_ring = 1, 241 243 .has_blt_ring = 1, ··· 244 246 }; 245 247 246 248 static const struct intel_device_info intel_sandybridge_m_info = { 247 - .gen = 6, .is_mobile = 1, 249 + .gen = 6, .is_mobile = 1, .num_pipes = 2, 248 250 .need_gfx_hws = 1, .has_hotplug = 1, 249 251 .has_fbc = 1, 250 252 .has_bsd_ring = 1, ··· 253 255 .has_force_wake = 1, 254 256 }; 255 257 258 + #define GEN7_FEATURES \ 259 + .gen = 7, .num_pipes = 3, \ 260 + .need_gfx_hws = 1, .has_hotplug = 1, \ 261 + .has_bsd_ring = 1, \ 262 + .has_blt_ring = 1, \ 263 + .has_llc = 1, \ 264 + .has_force_wake = 1 265 + 256 266 static const struct intel_device_info intel_ivybridge_d_info = { 257 - .is_ivybridge = 1, .gen = 7, 258 - .need_gfx_hws = 1, .has_hotplug = 1, 259 - .has_bsd_ring = 1, 260 - .has_blt_ring = 1, 261 - .has_llc = 1, 262 - .has_force_wake = 1, 267 + GEN7_FEATURES, 268 + .is_ivybridge = 1, 263 269 }; 264 270 265 271 static const struct intel_device_info intel_ivybridge_m_info = { 266 - .is_ivybridge = 1, .gen = 7, .is_mobile = 1, 267 - .need_gfx_hws = 1, 
.has_hotplug = 1, 268 - .has_fbc = 0, /* FBC is not enabled on Ivybridge mobile yet */ 269 - .has_bsd_ring = 1, 270 - .has_blt_ring = 1, 271 - .has_llc = 1, 272 - .has_force_wake = 1, 272 + GEN7_FEATURES, 273 + .is_ivybridge = 1, 274 + .is_mobile = 1, 273 275 }; 274 276 275 277 static const struct intel_device_info intel_valleyview_m_info = { 276 - .gen = 7, .is_mobile = 1, 277 - .need_gfx_hws = 1, .has_hotplug = 1, 278 - .has_fbc = 0, 279 - .has_bsd_ring = 1, 280 - .has_blt_ring = 1, 278 + GEN7_FEATURES, 279 + .is_mobile = 1, 280 + .num_pipes = 2, 281 281 .is_valleyview = 1, 282 282 .display_mmio_offset = VLV_DISPLAY_BASE, 283 283 }; 284 284 285 285 static const struct intel_device_info intel_valleyview_d_info = { 286 - .gen = 7, 287 - .need_gfx_hws = 1, .has_hotplug = 1, 288 - .has_fbc = 0, 289 - .has_bsd_ring = 1, 290 - .has_blt_ring = 1, 286 + GEN7_FEATURES, 287 + .num_pipes = 2, 291 288 .is_valleyview = 1, 292 289 .display_mmio_offset = VLV_DISPLAY_BASE, 293 290 }; 294 291 295 292 static const struct intel_device_info intel_haswell_d_info = { 296 - .is_haswell = 1, .gen = 7, 297 - .need_gfx_hws = 1, .has_hotplug = 1, 298 - .has_bsd_ring = 1, 299 - .has_blt_ring = 1, 300 - .has_llc = 1, 301 - .has_force_wake = 1, 293 + GEN7_FEATURES, 294 + .is_haswell = 1, 302 295 }; 303 296 304 297 static const struct intel_device_info intel_haswell_m_info = { 305 - .is_haswell = 1, .gen = 7, .is_mobile = 1, 306 - .need_gfx_hws = 1, .has_hotplug = 1, 307 - .has_bsd_ring = 1, 308 - .has_blt_ring = 1, 309 - .has_llc = 1, 310 - .has_force_wake = 1, 298 + GEN7_FEATURES, 299 + .is_haswell = 1, 300 + .is_mobile = 1, 311 301 }; 312 302 313 303 static const struct pci_device_id pciidlist[] = { /* aka */ ··· 380 394 INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */ 381 395 INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */ 382 396 INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info), 397 + INTEL_VGA_DEVICE(0x0f31, &intel_valleyview_m_info), 398 + 
INTEL_VGA_DEVICE(0x0f32, &intel_valleyview_m_info), 399 + INTEL_VGA_DEVICE(0x0f33, &intel_valleyview_m_info), 383 400 INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info), 384 401 INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info), 385 402 {0, 0, 0} ··· 1136 1147 I915_WRITE_NOTRACE(MI_MODE, 0); 1137 1148 } 1138 1149 1150 + static void 1151 + hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg) 1152 + { 1153 + if (IS_HASWELL(dev_priv->dev) && 1154 + (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { 1155 + DRM_ERROR("Unknown unclaimed register before writing to %x\n", 1156 + reg); 1157 + I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); 1158 + } 1159 + } 1160 + 1161 + static void 1162 + hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg) 1163 + { 1164 + if (IS_HASWELL(dev_priv->dev) && 1165 + (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { 1166 + DRM_ERROR("Unclaimed write to %x\n", reg); 1167 + I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); 1168 + } 1169 + } 1170 + 1139 1171 #define __i915_read(x, y) \ 1140 1172 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ 1141 1173 u##x val = 0; \ ··· 1193 1183 } \ 1194 1184 if (IS_GEN5(dev_priv->dev)) \ 1195 1185 ilk_dummy_write(dev_priv); \ 1196 - if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \ 1197 - DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \ 1198 - I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \ 1199 - } \ 1186 + hsw_unclaimed_reg_clear(dev_priv, reg); \ 1200 1187 write##y(val, dev_priv->regs + reg); \ 1201 1188 if (unlikely(__fifo_ret)) { \ 1202 1189 gen6_gt_check_fifodbg(dev_priv); \ 1203 1190 } \ 1204 - if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \ 1205 - DRM_ERROR("Unclaimed write to %x\n", reg); \ 1206 - writel(ERR_INT_MMIO_UNCLAIMED, dev_priv->regs + GEN7_ERR_INT); \ 1207 - } \ 1191 + hsw_unclaimed_reg_check(dev_priv, 
reg); \ 1208 1192 } 1209 1193 __i915_write(8, b) 1210 1194 __i915_write(16, w)
+14 -14
drivers/gpu/drm/i915/i915_drv.h
··· 93 93 I915_GEM_DOMAIN_INSTRUCTION | \ 94 94 I915_GEM_DOMAIN_VERTEX) 95 95 96 - #define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++) 96 + #define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++) 97 97 98 98 #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ 99 99 list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \ ··· 243 243 int page_count; 244 244 u32 gtt_offset; 245 245 u32 *pages[0]; 246 - } *ringbuffer, *batchbuffer; 246 + } *ringbuffer, *batchbuffer, *ctx; 247 247 struct drm_i915_error_request { 248 248 long jiffies; 249 249 u32 seqno; ··· 341 341 342 342 struct intel_device_info { 343 343 u32 display_mmio_offset; 344 + u8 num_pipes:3; 344 345 u8 gen; 345 346 u8 is_mobile:1; 346 347 u8 is_i85x:1; ··· 906 905 struct mutex dpio_lock; 907 906 908 907 /** Cached value of IMR to avoid reads in updating the bitfield */ 909 - u32 pipestat[2]; 910 908 u32 irq_mask; 911 909 u32 gt_irq_mask; 912 910 ··· 913 913 struct work_struct hotplug_work; 914 914 bool enable_hotplug_processing; 915 915 916 - int num_pipe; 917 916 int num_pch_pll; 918 917 919 918 unsigned long cfb_size; ··· 1339 1340 #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) 1340 1341 1341 1342 #define HAS_DDI(dev) (IS_HASWELL(dev)) 1343 + #define HAS_POWER_WELL(dev) (IS_HASWELL(dev)) 1342 1344 1343 1345 #define INTEL_PCH_DEVICE_ID_MASK 0xff00 1344 1346 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 ··· 1529 1529 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj); 1530 1530 static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) 1531 1531 { 1532 - struct scatterlist *sg = obj->pages->sgl; 1533 - int nents = obj->pages->nents; 1534 - while (nents > SG_MAX_SINGLE_ALLOC) { 1535 - if (n < SG_MAX_SINGLE_ALLOC - 1) 1536 - break; 1532 + struct sg_page_iter sg_iter; 1537 1533 1538 - sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1); 1539 - n -= SG_MAX_SINGLE_ALLOC 
- 1; 1540 - nents -= SG_MAX_SINGLE_ALLOC - 1; 1541 - } 1542 - return sg_page(sg+n); 1534 + for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n) 1535 + return sg_iter.page; 1536 + 1537 + return NULL; 1543 1538 } 1544 1539 static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) 1545 1540 { ··· 1894 1899 return VLV_VGACNTRL; 1895 1900 else 1896 1901 return VGACNTRL; 1902 + } 1903 + 1904 + static inline void __user *to_user_ptr(u64 address) 1905 + { 1906 + return (void __user *)(uintptr_t)address; 1897 1907 } 1898 1908 1899 1909 #endif
+46 -32
drivers/gpu/drm/i915/i915_gem.c
··· 411 411 int obj_do_bit17_swizzling, page_do_bit17_swizzling; 412 412 int prefaulted = 0; 413 413 int needs_clflush = 0; 414 - struct scatterlist *sg; 415 - int i; 414 + struct sg_page_iter sg_iter; 416 415 417 - user_data = (char __user *) (uintptr_t) args->data_ptr; 416 + user_data = to_user_ptr(args->data_ptr); 418 417 remain = args->size; 419 418 420 419 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); ··· 440 441 441 442 offset = args->offset; 442 443 443 - for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) { 444 - struct page *page; 445 - 446 - if (i < offset >> PAGE_SHIFT) 447 - continue; 444 + for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 445 + offset >> PAGE_SHIFT) { 446 + struct page *page = sg_iter.page; 448 447 449 448 if (remain <= 0) 450 449 break; ··· 457 460 if ((shmem_page_offset + page_length) > PAGE_SIZE) 458 461 page_length = PAGE_SIZE - shmem_page_offset; 459 462 460 - page = sg_page(sg); 461 463 page_do_bit17_swizzling = obj_do_bit17_swizzling && 462 464 (page_to_phys(page) & (1 << 17)) != 0; 463 465 ··· 518 522 return 0; 519 523 520 524 if (!access_ok(VERIFY_WRITE, 521 - (char __user *)(uintptr_t)args->data_ptr, 525 + to_user_ptr(args->data_ptr), 522 526 args->size)) 523 527 return -EFAULT; 524 528 ··· 609 613 if (ret) 610 614 goto out_unpin; 611 615 612 - user_data = (char __user *) (uintptr_t) args->data_ptr; 616 + user_data = to_user_ptr(args->data_ptr); 613 617 remain = args->size; 614 618 615 619 offset = obj->gtt_offset + args->offset; ··· 728 732 int hit_slowpath = 0; 729 733 int needs_clflush_after = 0; 730 734 int needs_clflush_before = 0; 731 - int i; 732 - struct scatterlist *sg; 735 + struct sg_page_iter sg_iter; 733 736 734 - user_data = (char __user *) (uintptr_t) args->data_ptr; 737 + user_data = to_user_ptr(args->data_ptr); 735 738 remain = args->size; 736 739 737 740 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); ··· 763 768 offset = args->offset; 764 769 
obj->dirty = 1; 765 770 766 - for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) { 767 - struct page *page; 771 + for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 772 + offset >> PAGE_SHIFT) { 773 + struct page *page = sg_iter.page; 768 774 int partial_cacheline_write; 769 - 770 - if (i < offset >> PAGE_SHIFT) 771 - continue; 772 775 773 776 if (remain <= 0) 774 777 break; ··· 789 796 ((shmem_page_offset | page_length) 790 797 & (boot_cpu_data.x86_clflush_size - 1)); 791 798 792 - page = sg_page(sg); 793 799 page_do_bit17_swizzling = obj_do_bit17_swizzling && 794 800 (page_to_phys(page) & (1 << 17)) != 0; 795 801 ··· 859 867 return 0; 860 868 861 869 if (!access_ok(VERIFY_READ, 862 - (char __user *)(uintptr_t)args->data_ptr, 870 + to_user_ptr(args->data_ptr), 863 871 args->size)) 864 872 return -EFAULT; 865 873 866 - ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr, 874 + ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr), 867 875 args->size); 868 876 if (ret) 869 877 return -EFAULT; ··· 1625 1633 static void 1626 1634 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) 1627 1635 { 1628 - int page_count = obj->base.size / PAGE_SIZE; 1629 - struct scatterlist *sg; 1630 - int ret, i; 1636 + struct sg_page_iter sg_iter; 1637 + int ret; 1631 1638 1632 1639 BUG_ON(obj->madv == __I915_MADV_PURGED); 1633 1640 ··· 1646 1655 if (obj->madv == I915_MADV_DONTNEED) 1647 1656 obj->dirty = 0; 1648 1657 1649 - for_each_sg(obj->pages->sgl, sg, page_count, i) { 1650 - struct page *page = sg_page(sg); 1658 + for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { 1659 + struct page *page = sg_iter.page; 1651 1660 1652 1661 if (obj->dirty) 1653 1662 set_page_dirty(page); ··· 1748 1757 struct address_space *mapping; 1749 1758 struct sg_table *st; 1750 1759 struct scatterlist *sg; 1760 + struct sg_page_iter sg_iter; 1751 1761 struct page *page; 1762 + unsigned long last_pfn = 0; /* suppress gcc warning */ 
1752 1763 gfp_t gfp; 1753 1764 1754 1765 /* Assert that the object is not currently in any GPU domain. As it ··· 1780 1787 gfp = mapping_gfp_mask(mapping); 1781 1788 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD; 1782 1789 gfp &= ~(__GFP_IO | __GFP_WAIT); 1783 - for_each_sg(st->sgl, sg, page_count, i) { 1790 + sg = st->sgl; 1791 + st->nents = 0; 1792 + for (i = 0; i < page_count; i++) { 1784 1793 page = shmem_read_mapping_page_gfp(mapping, i, gfp); 1785 1794 if (IS_ERR(page)) { 1786 1795 i915_gem_purge(dev_priv, page_count); ··· 1805 1810 gfp &= ~(__GFP_IO | __GFP_WAIT); 1806 1811 } 1807 1812 1808 - sg_set_page(sg, page, PAGE_SIZE, 0); 1813 + if (!i || page_to_pfn(page) != last_pfn + 1) { 1814 + if (i) 1815 + sg = sg_next(sg); 1816 + st->nents++; 1817 + sg_set_page(sg, page, PAGE_SIZE, 0); 1818 + } else { 1819 + sg->length += PAGE_SIZE; 1820 + } 1821 + last_pfn = page_to_pfn(page); 1809 1822 } 1810 1823 1824 + sg_mark_end(sg); 1811 1825 obj->pages = st; 1812 1826 1813 1827 if (i915_gem_object_needs_bit17_swizzle(obj)) ··· 1825 1821 return 0; 1826 1822 1827 1823 err_pages: 1828 - for_each_sg(st->sgl, sg, i, page_count) 1829 - page_cache_release(sg_page(sg)); 1824 + sg_mark_end(sg); 1825 + for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) 1826 + page_cache_release(sg_iter.page); 1830 1827 sg_free_table(st); 1831 1828 kfree(st); 1832 1829 return PTR_ERR(page); ··· 4015 4010 int ret; 4016 4011 4017 4012 mutex_lock(&dev->struct_mutex); 4013 + 4014 + if (IS_VALLEYVIEW(dev)) { 4015 + /* VLVA0 (potential hack), BIOS isn't actually waking us */ 4016 + I915_WRITE(VLV_GTLC_WAKE_CTRL, 1); 4017 + if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10)) 4018 + DRM_DEBUG_DRIVER("allow wake ack timed out\n"); 4019 + } 4020 + 4018 4021 i915_gem_init_global_gtt(dev); 4022 + 4019 4023 ret = i915_gem_init_hw(dev); 4020 4024 mutex_unlock(&dev->struct_mutex); 4021 4025 if (ret) { ··· 4341 4327 struct drm_file *file_priv) 4342 4328 { 4343 4329 void *vaddr = 
obj->phys_obj->handle->vaddr + args->offset; 4344 - char __user *user_data = (char __user *) (uintptr_t) args->data_ptr; 4330 + char __user *user_data = to_user_ptr(args->data_ptr); 4345 4331 4346 4332 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { 4347 4333 unsigned long unwritten;
+7 -6
drivers/gpu/drm/i915/i915_gem_dmabuf.c
··· 62 62 src = obj->pages->sgl; 63 63 dst = st->sgl; 64 64 for (i = 0; i < obj->pages->nents; i++) { 65 - sg_set_page(dst, sg_page(src), PAGE_SIZE, 0); 65 + sg_set_page(dst, sg_page(src), src->length, 0); 66 66 dst = sg_next(dst); 67 67 src = sg_next(src); 68 68 } ··· 105 105 { 106 106 struct drm_i915_gem_object *obj = dma_buf->priv; 107 107 struct drm_device *dev = obj->base.dev; 108 - struct scatterlist *sg; 108 + struct sg_page_iter sg_iter; 109 109 struct page **pages; 110 110 int ret, i; 111 111 ··· 124 124 125 125 ret = -ENOMEM; 126 126 127 - pages = drm_malloc_ab(obj->pages->nents, sizeof(struct page *)); 127 + pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages)); 128 128 if (pages == NULL) 129 129 goto error; 130 130 131 - for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) 132 - pages[i] = sg_page(sg); 131 + i = 0; 132 + for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0); 133 + pages[i++] = sg_iter.page; 133 134 134 - obj->dma_buf_vmapping = vmap(pages, obj->pages->nents, 0, PAGE_KERNEL); 135 + obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL); 135 136 drm_free_large(pages); 136 137 137 138 if (!obj->dma_buf_vmapping)
+18 -18
drivers/gpu/drm/i915/i915_gem_execbuffer.c
··· 305 305 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; 306 306 int remain, ret; 307 307 308 - user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr; 308 + user_relocs = to_user_ptr(entry->relocs_ptr); 309 309 310 310 remain = entry->relocation_count; 311 311 while (remain) { ··· 359 359 } 360 360 361 361 static int 362 - i915_gem_execbuffer_relocate(struct drm_device *dev, 363 - struct eb_objects *eb) 362 + i915_gem_execbuffer_relocate(struct eb_objects *eb) 364 363 { 365 364 struct drm_i915_gem_object *obj; 366 365 int ret = 0; ··· 474 475 475 476 static int 476 477 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, 477 - struct drm_file *file, 478 478 struct list_head *objects, 479 479 bool *need_relocs) 480 480 { ··· 616 618 u64 invalid_offset = (u64)-1; 617 619 int j; 618 620 619 - user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr; 621 + user_relocs = to_user_ptr(exec[i].relocs_ptr); 620 622 621 623 if (copy_from_user(reloc+total, user_relocs, 622 624 exec[i].relocation_count * sizeof(*reloc))) { ··· 661 663 goto err; 662 664 663 665 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; 664 - ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs); 666 + ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs); 665 667 if (ret) 666 668 goto err; 667 669 ··· 734 736 int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry); 735 737 736 738 for (i = 0; i < count; i++) { 737 - char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; 739 + char __user *ptr = to_user_ptr(exec[i].relocs_ptr); 738 740 int length; /* limited by fault_in_pages_readable() */ 739 741 740 742 if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS) ··· 750 752 751 753 length = exec[i].relocation_count * 752 754 sizeof(struct drm_i915_gem_relocation_entry); 753 - /* we may also need to update the presumed offsets */ 755 + /* 756 + * We must check that the entire relocation array is safe 757 + * to read, but 
since we may need to update the presumed 758 + * offsets during execution, check for full write access. 759 + */ 754 760 if (!access_ok(VERIFY_WRITE, ptr, length)) 755 761 return -EFAULT; 756 762 ··· 951 949 } 952 950 953 951 if (copy_from_user(cliprects, 954 - (struct drm_clip_rect __user *)(uintptr_t) 955 - args->cliprects_ptr, 956 - sizeof(*cliprects)*args->num_cliprects)) { 952 + to_user_ptr(args->cliprects_ptr), 953 + sizeof(*cliprects)*args->num_cliprects)) { 957 954 ret = -EFAULT; 958 955 goto pre_mutex_err; 959 956 } ··· 987 986 988 987 /* Move the objects en-masse into the GTT, evicting if necessary. */ 989 988 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; 990 - ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs); 989 + ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs); 991 990 if (ret) 992 991 goto err; 993 992 994 993 /* The objects are in their final locations, apply the relocations. */ 995 994 if (need_relocs) 996 - ret = i915_gem_execbuffer_relocate(dev, eb); 995 + ret = i915_gem_execbuffer_relocate(eb); 997 996 if (ret) { 998 997 if (ret == -EFAULT) { 999 998 ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring, ··· 1116 1115 return -ENOMEM; 1117 1116 } 1118 1117 ret = copy_from_user(exec_list, 1119 - (void __user *)(uintptr_t)args->buffers_ptr, 1118 + to_user_ptr(args->buffers_ptr), 1120 1119 sizeof(*exec_list) * args->buffer_count); 1121 1120 if (ret != 0) { 1122 1121 DRM_DEBUG("copy %d exec entries failed %d\n", ··· 1155 1154 for (i = 0; i < args->buffer_count; i++) 1156 1155 exec_list[i].offset = exec2_list[i].offset; 1157 1156 /* ... 
and back out to userspace */ 1158 - ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr, 1157 + ret = copy_to_user(to_user_ptr(args->buffers_ptr), 1159 1158 exec_list, 1160 1159 sizeof(*exec_list) * args->buffer_count); 1161 1160 if (ret) { ··· 1196 1195 return -ENOMEM; 1197 1196 } 1198 1197 ret = copy_from_user(exec2_list, 1199 - (struct drm_i915_relocation_entry __user *) 1200 - (uintptr_t) args->buffers_ptr, 1198 + to_user_ptr(args->buffers_ptr), 1201 1199 sizeof(*exec2_list) * args->buffer_count); 1202 1200 if (ret != 0) { 1203 1201 DRM_DEBUG("copy %d exec entries failed %d\n", ··· 1208 1208 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); 1209 1209 if (!ret) { 1210 1210 /* Copy the new buffer offsets back to the user's exec list. */ 1211 - ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr, 1211 + ret = copy_to_user(to_user_ptr(args->buffers_ptr), 1212 1212 exec2_list, 1213 1213 sizeof(*exec2_list) * args->buffer_count); 1214 1214 if (ret) {
+27 -46
drivers/gpu/drm/i915/i915_gem_gtt.c
··· 83 83 { 84 84 gtt_pte_t *pt_vaddr; 85 85 gtt_pte_t scratch_pte; 86 - unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES; 86 + unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; 87 87 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; 88 88 unsigned last_pte, i; 89 89 ··· 96 96 if (last_pte > I915_PPGTT_PT_ENTRIES) 97 97 last_pte = I915_PPGTT_PT_ENTRIES; 98 98 99 - pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]); 99 + pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]); 100 100 101 101 for (i = first_pte; i < last_pte; i++) 102 102 pt_vaddr[i] = scratch_pte; ··· 105 105 106 106 num_entries -= last_pte - first_pte; 107 107 first_pte = 0; 108 - act_pd++; 108 + act_pt++; 109 109 } 110 110 } 111 111 ··· 115 115 enum i915_cache_level cache_level) 116 116 { 117 117 gtt_pte_t *pt_vaddr; 118 - unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES; 119 - unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; 120 - unsigned i, j, m, segment_len; 121 - dma_addr_t page_addr; 122 - struct scatterlist *sg; 118 + unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; 119 + unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES; 120 + struct sg_page_iter sg_iter; 123 121 124 - /* init sg walking */ 125 - sg = pages->sgl; 126 - i = 0; 127 - segment_len = sg_dma_len(sg) >> PAGE_SHIFT; 128 - m = 0; 122 + pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]); 123 + for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) { 124 + dma_addr_t page_addr; 129 125 130 - while (i < pages->nents) { 131 - pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]); 126 + page_addr = sg_dma_address(sg_iter.sg) + 127 + (sg_iter.sg_pgoffset << PAGE_SHIFT); 128 + pt_vaddr[act_pte] = gen6_pte_encode(ppgtt->dev, page_addr, 129 + cache_level); 130 + if (++act_pte == I915_PPGTT_PT_ENTRIES) { 131 + kunmap_atomic(pt_vaddr); 132 + act_pt++; 133 + pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]); 134 + act_pte = 0; 132 135 133 - for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) { 134 - page_addr = 
sg_dma_address(sg) + (m << PAGE_SHIFT); 135 - pt_vaddr[j] = gen6_pte_encode(ppgtt->dev, page_addr, 136 - cache_level); 137 - 138 - /* grab the next page */ 139 - if (++m == segment_len) { 140 - if (++i == pages->nents) 141 - break; 142 - 143 - sg = sg_next(sg); 144 - segment_len = sg_dma_len(sg) >> PAGE_SHIFT; 145 - m = 0; 146 - } 147 136 } 148 - 149 - kunmap_atomic(pt_vaddr); 150 - 151 - first_pte = 0; 152 - act_pd++; 153 137 } 138 + kunmap_atomic(pt_vaddr); 154 139 } 155 140 156 141 static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt) ··· 417 432 enum i915_cache_level level) 418 433 { 419 434 struct drm_i915_private *dev_priv = dev->dev_private; 420 - struct scatterlist *sg = st->sgl; 421 435 gtt_pte_t __iomem *gtt_entries = 422 436 (gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; 423 - int unused, i = 0; 424 - unsigned int len, m = 0; 437 + int i = 0; 438 + struct sg_page_iter sg_iter; 425 439 dma_addr_t addr; 426 440 427 - for_each_sg(st->sgl, sg, st->nents, unused) { 428 - len = sg_dma_len(sg) >> PAGE_SHIFT; 429 - for (m = 0; m < len; m++) { 430 - addr = sg_dma_address(sg) + (m << PAGE_SHIFT); 431 - iowrite32(gen6_pte_encode(dev, addr, level), 432 - &gtt_entries[i]); 433 - i++; 434 - } 441 + for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { 442 + addr = sg_dma_address(sg_iter.sg) + 443 + (sg_iter.sg_pgoffset << PAGE_SHIFT); 444 + iowrite32(gen6_pte_encode(dev, addr, level), &gtt_entries[i]); 445 + i++; 435 446 } 436 447 437 448 /* XXX: This serves as a posting read to make sure that the PTE has ··· 733 752 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); 734 753 gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl); 735 754 736 - if (IS_GEN7(dev)) 755 + if (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) 737 756 *stolen = gen7_get_stolen_size(snb_gmch_ctl); 738 757 else 739 758 *stolen = gen6_get_stolen_size(snb_gmch_ctl);
+2 -2
drivers/gpu/drm/i915/i915_gem_stolen.c
··· 222 222 } 223 223 224 224 sg = st->sgl; 225 - sg->offset = offset; 226 - sg->length = size; 225 + /* we set the dummy page here only to make for_each_sg_page work */ 226 + sg_set_page(sg, dev_priv->gtt.scratch_page, size, offset); 227 227 228 228 sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset; 229 229 sg_dma_len(sg) = size;
+10 -8
drivers/gpu/drm/i915/i915_gem_tiling.c
··· 473 473 void 474 474 i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) 475 475 { 476 - struct scatterlist *sg; 477 - int page_count = obj->base.size >> PAGE_SHIFT; 476 + struct sg_page_iter sg_iter; 478 477 int i; 479 478 480 479 if (obj->bit_17 == NULL) 481 480 return; 482 481 483 - for_each_sg(obj->pages->sgl, sg, page_count, i) { 484 - struct page *page = sg_page(sg); 482 + i = 0; 483 + for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { 484 + struct page *page = sg_iter.page; 485 485 char new_bit_17 = page_to_phys(page) >> 17; 486 486 if ((new_bit_17 & 0x1) != 487 487 (test_bit(i, obj->bit_17) != 0)) { 488 488 i915_gem_swizzle_page(page); 489 489 set_page_dirty(page); 490 490 } 491 + i++; 491 492 } 492 493 } 493 494 494 495 void 495 496 i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) 496 497 { 497 - struct scatterlist *sg; 498 + struct sg_page_iter sg_iter; 498 499 int page_count = obj->base.size >> PAGE_SHIFT; 499 500 int i; 500 501 ··· 509 508 } 510 509 } 511 510 512 - for_each_sg(obj->pages->sgl, sg, page_count, i) { 513 - struct page *page = sg_page(sg); 514 - if (page_to_phys(page) & (1 << 17)) 511 + i = 0; 512 + for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { 513 + if (page_to_phys(sg_iter.page) & (1 << 17)) 515 514 __set_bit(i, obj->bit_17); 516 515 else 517 516 __clear_bit(i, obj->bit_17); 517 + i++; 518 518 } 519 519 }
+175 -74
drivers/gpu/drm/i915/i915_irq.c
··· 60 60 void 61 61 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) 62 62 { 63 - if ((dev_priv->pipestat[pipe] & mask) != mask) { 64 - u32 reg = PIPESTAT(pipe); 63 + u32 reg = PIPESTAT(pipe); 64 + u32 pipestat = I915_READ(reg) & 0x7fff0000; 65 65 66 - dev_priv->pipestat[pipe] |= mask; 67 - /* Enable the interrupt, clear any pending status */ 68 - I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16)); 69 - POSTING_READ(reg); 70 - } 66 + if ((pipestat & mask) == mask) 67 + return; 68 + 69 + /* Enable the interrupt, clear any pending status */ 70 + pipestat |= mask | (mask >> 16); 71 + I915_WRITE(reg, pipestat); 72 + POSTING_READ(reg); 71 73 } 72 74 73 75 void 74 76 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) 75 77 { 76 - if ((dev_priv->pipestat[pipe] & mask) != 0) { 77 - u32 reg = PIPESTAT(pipe); 78 + u32 reg = PIPESTAT(pipe); 79 + u32 pipestat = I915_READ(reg) & 0x7fff0000; 78 80 79 - dev_priv->pipestat[pipe] &= ~mask; 80 - I915_WRITE(reg, dev_priv->pipestat[pipe]); 81 - POSTING_READ(reg); 82 - } 81 + if ((pipestat & mask) == 0) 82 + return; 83 + 84 + pipestat &= ~mask; 85 + I915_WRITE(reg, pipestat); 86 + POSTING_READ(reg); 83 87 } 84 88 85 89 /** ··· 254 250 struct timeval *vblank_time, 255 251 unsigned flags) 256 252 { 257 - struct drm_i915_private *dev_priv = dev->dev_private; 258 253 struct drm_crtc *crtc; 259 254 260 - if (pipe < 0 || pipe >= dev_priv->num_pipe) { 255 + if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) { 261 256 DRM_ERROR("Invalid crtc %d\n", pipe); 262 257 return -EINVAL; 263 258 } ··· 940 937 for_each_ring(ring, dev_priv, i) 941 938 wake_up_all(&ring->irq_queue); 942 939 940 + intel_display_handle_reset(dev); 941 + 943 942 wake_up_all(&dev_priv->gpu_error.reset_queue); 944 943 } 945 944 } ··· 977 972 978 973 #ifdef CONFIG_DEBUG_FS 979 974 static struct drm_i915_error_object * 980 - i915_error_object_create(struct drm_i915_private *dev_priv, 981 - struct drm_i915_gem_object *src) 975 + 
i915_error_object_create_sized(struct drm_i915_private *dev_priv, 976 + struct drm_i915_gem_object *src, 977 + const int num_pages) 982 978 { 983 979 struct drm_i915_error_object *dst; 984 - int i, count; 980 + int i; 985 981 u32 reloc_offset; 986 982 987 983 if (src == NULL || src->pages == NULL) 988 984 return NULL; 989 985 990 - count = src->base.size / PAGE_SIZE; 991 - 992 - dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC); 986 + dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC); 993 987 if (dst == NULL) 994 988 return NULL; 995 989 996 990 reloc_offset = src->gtt_offset; 997 - for (i = 0; i < count; i++) { 991 + for (i = 0; i < num_pages; i++) { 998 992 unsigned long flags; 999 993 void *d; 1000 994 ··· 1043 1039 1044 1040 reloc_offset += PAGE_SIZE; 1045 1041 } 1046 - dst->page_count = count; 1042 + dst->page_count = num_pages; 1047 1043 dst->gtt_offset = src->gtt_offset; 1048 1044 1049 1045 return dst; ··· 1054 1050 kfree(dst); 1055 1051 return NULL; 1056 1052 } 1053 + #define i915_error_object_create(dev_priv, src) \ 1054 + i915_error_object_create_sized((dev_priv), (src), \ 1055 + (src)->base.size>>PAGE_SHIFT) 1057 1056 1058 1057 static void 1059 1058 i915_error_object_free(struct drm_i915_error_object *obj) ··· 1263 1256 error->cpu_ring_tail[ring->id] = ring->tail; 1264 1257 } 1265 1258 1259 + 1260 + static void i915_gem_record_active_context(struct intel_ring_buffer *ring, 1261 + struct drm_i915_error_state *error, 1262 + struct drm_i915_error_ring *ering) 1263 + { 1264 + struct drm_i915_private *dev_priv = ring->dev->dev_private; 1265 + struct drm_i915_gem_object *obj; 1266 + 1267 + /* Currently render ring is the only HW context user */ 1268 + if (ring->id != RCS || !error->ccid) 1269 + return; 1270 + 1271 + list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { 1272 + if ((error->ccid & PAGE_MASK) == obj->gtt_offset) { 1273 + ering->ctx = i915_error_object_create_sized(dev_priv, 1274 + obj, 1); 1275 + } 1276 + } 
1277 + } 1278 + 1266 1279 static void i915_gem_record_rings(struct drm_device *dev, 1267 1280 struct drm_i915_error_state *error) 1268 1281 { ··· 1299 1272 1300 1273 error->ring[i].ringbuffer = 1301 1274 i915_error_object_create(dev_priv, ring->obj); 1275 + 1276 + 1277 + i915_gem_record_active_context(ring, error, &error->ring[i]); 1302 1278 1303 1279 count = 0; 1304 1280 list_for_each_entry(request, &ring->request_list, list) ··· 1358 1328 return; 1359 1329 } 1360 1330 1361 - DRM_INFO("capturing error event; look for more information in" 1331 + DRM_INFO("capturing error event; look for more information in " 1362 1332 "/sys/kernel/debug/dri/%d/i915_error_state\n", 1363 1333 dev->primary->index); 1364 1334 1365 1335 kref_init(&error->ref); 1366 1336 error->eir = I915_READ(EIR); 1367 1337 error->pgtbl_er = I915_READ(PGTBL_ER); 1368 - error->ccid = I915_READ(CCID); 1338 + if (HAS_HW_CONTEXTS(dev)) 1339 + error->ccid = I915_READ(CCID); 1369 1340 1370 1341 if (HAS_PCH_SPLIT(dev)) 1371 1342 error->ier = I915_READ(DEIER) | I915_READ(GTIER); ··· 1598 1567 queue_work(dev_priv->wq, &dev_priv->gpu_error.work); 1599 1568 } 1600 1569 1601 - static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) 1570 + static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) 1602 1571 { 1603 1572 drm_i915_private_t *dev_priv = dev->dev_private; 1604 1573 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; ··· 1808 1777 return false; 1809 1778 } 1810 1779 1780 + static bool semaphore_passed(struct intel_ring_buffer *ring) 1781 + { 1782 + struct drm_i915_private *dev_priv = ring->dev->dev_private; 1783 + u32 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR; 1784 + struct intel_ring_buffer *signaller; 1785 + u32 cmd, ipehr, acthd_min; 1786 + 1787 + ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 1788 + if ((ipehr & ~(0x3 << 16)) != 1789 + (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER)) 1790 + return false; 1791 + 1792 
+ /* ACTHD is likely pointing to the dword after the actual command, 1793 + * so scan backwards until we find the MBOX. 1794 + */ 1795 + acthd_min = max((int)acthd - 3 * 4, 0); 1796 + do { 1797 + cmd = ioread32(ring->virtual_start + acthd); 1798 + if (cmd == ipehr) 1799 + break; 1800 + 1801 + acthd -= 4; 1802 + if (acthd < acthd_min) 1803 + return false; 1804 + } while (1); 1805 + 1806 + signaller = &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3]; 1807 + return i915_seqno_passed(signaller->get_seqno(signaller, false), 1808 + ioread32(ring->virtual_start+acthd+4)+1); 1809 + } 1810 + 1811 1811 static bool kick_ring(struct intel_ring_buffer *ring) 1812 1812 { 1813 1813 struct drm_device *dev = ring->dev; ··· 1846 1784 u32 tmp = I915_READ_CTL(ring); 1847 1785 if (tmp & RING_WAIT) { 1848 1786 DRM_ERROR("Kicking stuck wait on %s\n", 1787 + ring->name); 1788 + I915_WRITE_CTL(ring, tmp); 1789 + return true; 1790 + } 1791 + 1792 + if (INTEL_INFO(dev)->gen >= 6 && 1793 + tmp & RING_WAIT_SEMAPHORE && 1794 + semaphore_passed(ring)) { 1795 + DRM_ERROR("Kicking stuck semaphore on %s\n", 1849 1796 ring->name); 1850 1797 I915_WRITE_CTL(ring, tmp); 1851 1798 return true; ··· 2160 2089 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 2161 2090 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2162 2091 2163 - dev_priv->pipestat[0] = 0; 2164 - dev_priv->pipestat[1] = 0; 2165 - 2166 2092 /* Hack for broken MSIs on VLV */ 2167 2093 pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000); 2168 2094 pci_read_config_word(dev->pdev, 0x98, &msid); ··· 2289 2221 { 2290 2222 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2291 2223 2292 - dev_priv->pipestat[0] = 0; 2293 - dev_priv->pipestat[1] = 0; 2294 - 2295 2224 I915_WRITE16(EMR, 2296 2225 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 2297 2226 ··· 2309 2244 POSTING_READ16(IER); 2310 2245 2311 2246 return 0; 2247 + } 2248 + 2249 + /* 2250 + * Returns true when a page flip has completed. 
2251 + */ 2252 + static bool i8xx_handle_vblank(struct drm_device *dev, 2253 + int pipe, u16 iir) 2254 + { 2255 + drm_i915_private_t *dev_priv = dev->dev_private; 2256 + u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe); 2257 + 2258 + if (!drm_handle_vblank(dev, pipe)) 2259 + return false; 2260 + 2261 + if ((iir & flip_pending) == 0) 2262 + return false; 2263 + 2264 + intel_prepare_page_flip(dev, pipe); 2265 + 2266 + /* We detect FlipDone by looking for the change in PendingFlip from '1' 2267 + * to '0' on the following vblank, i.e. IIR has the Pendingflip 2268 + * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 2269 + * the flip is completed (no longer pending). Since this doesn't raise 2270 + * an interrupt per se, we watch for the change at vblank. 2271 + */ 2272 + if (I915_READ16(ISR) & flip_pending) 2273 + return false; 2274 + 2275 + intel_finish_page_flip(dev, pipe); 2276 + 2277 + return true; 2312 2278 } 2313 2279 2314 2280 static irqreturn_t i8xx_irq_handler(int irq, void *arg) ··· 2397 2301 notify_ring(dev, &dev_priv->ring[RCS]); 2398 2302 2399 2303 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS && 2400 - drm_handle_vblank(dev, 0)) { 2401 - if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { 2402 - intel_prepare_page_flip(dev, 0); 2403 - intel_finish_page_flip(dev, 0); 2404 - flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT; 2405 - } 2406 - } 2304 + i8xx_handle_vblank(dev, 0, iir)) 2305 + flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0); 2407 2306 2408 2307 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS && 2409 - drm_handle_vblank(dev, 1)) { 2410 - if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) { 2411 - intel_prepare_page_flip(dev, 1); 2412 - intel_finish_page_flip(dev, 1); 2413 - flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 2414 - } 2415 - } 2308 + i8xx_handle_vblank(dev, 1, iir)) 2309 + flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1); 2416 2310 2417 2311 iir = new_iir; 2418 2312 } ··· 2449 2363 { 2450 
2364 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2451 2365 u32 enable_mask; 2452 - 2453 - dev_priv->pipestat[0] = 0; 2454 - dev_priv->pipestat[1] = 0; 2455 2366 2456 2367 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 2457 2368 ··· 2516 2433 } 2517 2434 } 2518 2435 2436 + /* 2437 + * Returns true when a page flip has completed. 2438 + */ 2439 + static bool i915_handle_vblank(struct drm_device *dev, 2440 + int plane, int pipe, u32 iir) 2441 + { 2442 + drm_i915_private_t *dev_priv = dev->dev_private; 2443 + u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 2444 + 2445 + if (!drm_handle_vblank(dev, pipe)) 2446 + return false; 2447 + 2448 + if ((iir & flip_pending) == 0) 2449 + return false; 2450 + 2451 + intel_prepare_page_flip(dev, plane); 2452 + 2453 + /* We detect FlipDone by looking for the change in PendingFlip from '1' 2454 + * to '0' on the following vblank, i.e. IIR has the Pendingflip 2455 + * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 2456 + * the flip is completed (no longer pending). Since this doesn't raise 2457 + * an interrupt per se, we watch for the change at vblank. 
2458 + */ 2459 + if (I915_READ(ISR) & flip_pending) 2460 + return false; 2461 + 2462 + intel_finish_page_flip(dev, pipe); 2463 + 2464 + return true; 2465 + } 2466 + 2519 2467 static irqreturn_t i915_irq_handler(int irq, void *arg) 2520 2468 { 2521 2469 struct drm_device *dev = (struct drm_device *) arg; ··· 2556 2442 u32 flip_mask = 2557 2443 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2558 2444 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 2559 - u32 flip[2] = { 2560 - I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT, 2561 - I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT 2562 - }; 2563 2445 int pipe, ret = IRQ_NONE; 2564 2446 2565 2447 atomic_inc(&dev_priv->irq_received); ··· 2617 2507 int plane = pipe; 2618 2508 if (IS_MOBILE(dev)) 2619 2509 plane = !plane; 2510 + 2620 2511 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 2621 - drm_handle_vblank(dev, pipe)) { 2622 - if (iir & flip[plane]) { 2623 - intel_prepare_page_flip(dev, plane); 2624 - intel_finish_page_flip(dev, pipe); 2625 - flip_mask &= ~flip[plane]; 2626 - } 2627 - } 2512 + i915_handle_vblank(dev, plane, pipe, iir)) 2513 + flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 2628 2514 2629 2515 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 2630 2516 blc_event = true; ··· 2709 2603 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2710 2604 2711 2605 enable_mask = ~dev_priv->irq_mask; 2606 + enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2607 + I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 2712 2608 enable_mask |= I915_USER_INTERRUPT; 2713 2609 2714 2610 if (IS_G4X(dev)) 2715 2611 enable_mask |= I915_BSD_USER_INTERRUPT; 2716 2612 2717 - dev_priv->pipestat[0] = 0; 2718 - dev_priv->pipestat[1] = 0; 2719 2613 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 2720 2614 2721 2615 /* ··· 2795 2689 unsigned long irqflags; 2796 2690 int irq_received; 2797 2691 int ret = IRQ_NONE, pipe; 2692 + u32 flip_mask = 2693 + I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2694 + 
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 2798 2695 2799 2696 atomic_inc(&dev_priv->irq_received); 2800 2697 ··· 2806 2697 for (;;) { 2807 2698 bool blc_event = false; 2808 2699 2809 - irq_received = iir != 0; 2700 + irq_received = (iir & ~flip_mask) != 0; 2810 2701 2811 2702 /* Can't rely on pipestat interrupt bit in iir as it might 2812 2703 * have been cleared after the pipestat interrupt was received. ··· 2853 2744 I915_READ(PORT_HOTPLUG_STAT); 2854 2745 } 2855 2746 2856 - I915_WRITE(IIR, iir); 2747 + I915_WRITE(IIR, iir & ~flip_mask); 2857 2748 new_iir = I915_READ(IIR); /* Flush posted writes */ 2858 2749 2859 2750 if (iir & I915_USER_INTERRUPT) ··· 2861 2752 if (iir & I915_BSD_USER_INTERRUPT) 2862 2753 notify_ring(dev, &dev_priv->ring[VCS]); 2863 2754 2864 - if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) 2865 - intel_prepare_page_flip(dev, 0); 2866 - 2867 - if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) 2868 - intel_prepare_page_flip(dev, 1); 2869 - 2870 2755 for_each_pipe(pipe) { 2871 2756 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 2872 - drm_handle_vblank(dev, pipe)) { 2873 - i915_pageflip_stall_check(dev, pipe); 2874 - intel_finish_page_flip(dev, pipe); 2875 - } 2757 + i915_handle_vblank(dev, pipe, pipe, iir)) 2758 + flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 2876 2759 2877 2760 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 2878 2761 blc_event = true;
+74 -68
drivers/gpu/drm/i915/i915_reg.h
··· 121 121 122 122 #define GAM_ECOCHK 0x4090 123 123 #define ECOCHK_SNB_BIT (1<<10) 124 + #define HSW_ECOCHK_ARB_PRIO_SOL (1<<6) 124 125 #define ECOCHK_PPGTT_CACHE64B (0x3<<3) 125 126 #define ECOCHK_PPGTT_CACHE4B (0x0<<3) 126 127 ··· 523 522 #define GEN7_ERR_INT 0x44040 524 523 #define ERR_INT_MMIO_UNCLAIMED (1<<13) 525 524 525 + #define FPGA_DBG 0x42300 526 + #define FPGA_DBG_RM_NOCLAIM (1<<31) 527 + 526 528 #define DERRMR 0x44050 527 529 528 530 /* GM45+ chicken bits -- debug workaround bits that may be required ··· 595 591 #define I915_USER_INTERRUPT (1<<1) 596 592 #define I915_ASLE_INTERRUPT (1<<0) 597 593 #define I915_BSD_USER_INTERRUPT (1<<25) 594 + #define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */ 598 595 #define EIR 0x020b0 599 596 #define EMR 0x020b4 600 597 #define ESR 0x020b8 ··· 1681 1676 #define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7) 1682 1677 #define SDVOB_HOTPLUG_INT_STATUS_I915 (1 << 6) 1683 1678 1684 - /* SDVO port control */ 1685 - #define SDVOB 0x61140 1686 - #define SDVOC 0x61160 1687 - #define SDVO_ENABLE (1 << 31) 1688 - #define SDVO_PIPE_B_SELECT (1 << 30) 1689 - #define SDVO_STALL_SELECT (1 << 29) 1690 - #define SDVO_INTERRUPT_ENABLE (1 << 26) 1679 + /* SDVO and HDMI port control. 1680 + * The same register may be used for SDVO or HDMI */ 1681 + #define GEN3_SDVOB 0x61140 1682 + #define GEN3_SDVOC 0x61160 1683 + #define GEN4_HDMIB GEN3_SDVOB 1684 + #define GEN4_HDMIC GEN3_SDVOC 1685 + #define PCH_SDVOB 0xe1140 1686 + #define PCH_HDMIB PCH_SDVOB 1687 + #define PCH_HDMIC 0xe1150 1688 + #define PCH_HDMID 0xe1160 1689 + 1690 + /* Gen 3 SDVO bits: */ 1691 + #define SDVO_ENABLE (1 << 31) 1692 + #define SDVO_PIPE_SEL(pipe) ((pipe) << 30) 1693 + #define SDVO_PIPE_SEL_MASK (1 << 30) 1694 + #define SDVO_PIPE_B_SELECT (1 << 30) 1695 + #define SDVO_STALL_SELECT (1 << 29) 1696 + #define SDVO_INTERRUPT_ENABLE (1 << 26) 1691 1697 /** 1692 1698 * 915G/GM SDVO pixel multiplier. 
1693 - * 1694 1699 * Programmed value is multiplier - 1, up to 5x. 1695 - * 1696 1700 * \sa DPLL_MD_UDI_MULTIPLIER_MASK 1697 1701 */ 1698 - #define SDVO_PORT_MULTIPLY_MASK (7 << 23) 1702 + #define SDVO_PORT_MULTIPLY_MASK (7 << 23) 1699 1703 #define SDVO_PORT_MULTIPLY_SHIFT 23 1700 - #define SDVO_PHASE_SELECT_MASK (15 << 19) 1701 - #define SDVO_PHASE_SELECT_DEFAULT (6 << 19) 1702 - #define SDVO_CLOCK_OUTPUT_INVERT (1 << 18) 1703 - #define SDVOC_GANG_MODE (1 << 16) 1704 - #define SDVO_ENCODING_SDVO (0x0 << 10) 1705 - #define SDVO_ENCODING_HDMI (0x2 << 10) 1706 - /** Requird for HDMI operation */ 1707 - #define SDVO_NULL_PACKETS_DURING_VSYNC (1 << 9) 1708 - #define SDVO_COLOR_RANGE_16_235 (1 << 8) 1709 - #define SDVO_BORDER_ENABLE (1 << 7) 1710 - #define SDVO_AUDIO_ENABLE (1 << 6) 1711 - /** New with 965, default is to be set */ 1712 - #define SDVO_VSYNC_ACTIVE_HIGH (1 << 4) 1713 - /** New with 965, default is to be set */ 1714 - #define SDVO_HSYNC_ACTIVE_HIGH (1 << 3) 1715 - #define SDVOB_PCIE_CONCURRENCY (1 << 3) 1716 - #define SDVO_DETECTED (1 << 2) 1704 + #define SDVO_PHASE_SELECT_MASK (15 << 19) 1705 + #define SDVO_PHASE_SELECT_DEFAULT (6 << 19) 1706 + #define SDVO_CLOCK_OUTPUT_INVERT (1 << 18) 1707 + #define SDVOC_GANG_MODE (1 << 16) /* Port C only */ 1708 + #define SDVO_BORDER_ENABLE (1 << 7) /* SDVO only */ 1709 + #define SDVOB_PCIE_CONCURRENCY (1 << 3) /* Port B only */ 1710 + #define SDVO_DETECTED (1 << 2) 1717 1711 /* Bits to be preserved when writing */ 1718 - #define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | (1 << 26)) 1719 - #define SDVOC_PRESERVE_MASK ((1 << 17) | (1 << 26)) 1712 + #define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | \ 1713 + SDVO_INTERRUPT_ENABLE) 1714 + #define SDVOC_PRESERVE_MASK ((1 << 17) | SDVO_INTERRUPT_ENABLE) 1715 + 1716 + /* Gen 4 SDVO/HDMI bits: */ 1717 + #define SDVO_COLOR_FORMAT_8bpc (0 << 26) 1718 + #define SDVO_ENCODING_SDVO (0 << 10) 1719 + #define SDVO_ENCODING_HDMI (2 << 10) 1720 + #define 
HDMI_MODE_SELECT_HDMI (1 << 9) /* HDMI only */ 1721 + #define HDMI_MODE_SELECT_DVI (0 << 9) /* HDMI only */ 1722 + #define HDMI_COLOR_RANGE_16_235 (1 << 8) /* HDMI only */ 1723 + #define SDVO_AUDIO_ENABLE (1 << 6) 1724 + /* VSYNC/HSYNC bits new with 965, default is to be set */ 1725 + #define SDVO_VSYNC_ACTIVE_HIGH (1 << 4) 1726 + #define SDVO_HSYNC_ACTIVE_HIGH (1 << 3) 1727 + 1728 + /* Gen 5 (IBX) SDVO/HDMI bits: */ 1729 + #define HDMI_COLOR_FORMAT_12bpc (3 << 26) /* HDMI only */ 1730 + #define SDVOB_HOTPLUG_ENABLE (1 << 23) /* SDVO only */ 1731 + 1732 + /* Gen 6 (CPT) SDVO/HDMI bits: */ 1733 + #define SDVO_PIPE_SEL_CPT(pipe) ((pipe) << 29) 1734 + #define SDVO_PIPE_SEL_MASK_CPT (3 << 29) 1735 + 1720 1736 1721 1737 /* DVO port control */ 1722 1738 #define DVOA 0x61120 ··· 1924 1898 #define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238) 1925 1899 1926 1900 /* Backlight control */ 1927 - #define BLC_PWM_CTL2 0x61250 /* 965+ only */ 1901 + #define BLC_PWM_CTL2 (dev_priv->info->display_mmio_offset + 0x61250) /* 965+ only */ 1928 1902 #define BLM_PWM_ENABLE (1 << 31) 1929 1903 #define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */ 1930 1904 #define BLM_PIPE_SELECT (1 << 29) ··· 1943 1917 #define BLM_PHASE_IN_COUNT_MASK (0xff << 8) 1944 1918 #define BLM_PHASE_IN_INCR_SHIFT (0) 1945 1919 #define BLM_PHASE_IN_INCR_MASK (0xff << 0) 1946 - #define BLC_PWM_CTL 0x61254 1920 + #define BLC_PWM_CTL (dev_priv->info->display_mmio_offset + 0x61254) 1947 1921 /* 1948 1922 * This is the most significant 15 bits of the number of backlight cycles in a 1949 1923 * complete cycle of the modulated backlight control. ··· 1965 1939 #define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe) 1966 1940 #define BLM_POLARITY_PNV (1 << 0) /* pnv only */ 1967 1941 1968 - #define BLC_HIST_CTL 0x61260 1942 + #define BLC_HIST_CTL (dev_priv->info->display_mmio_offset + 0x61260) 1969 1943 1970 1944 /* New registers for PCH-split platforms. 
Safe where new bits show up, the 1971 1945 * register layout machtes with gen4 BLC_PWM_CTL[12]. */ ··· 2802 2776 #define DSPFW_HPLL_CURSOR_SHIFT 16 2803 2777 #define DSPFW_HPLL_CURSOR_MASK (0x3f<<16) 2804 2778 #define DSPFW_HPLL_SR_MASK (0x1ff) 2779 + #define DSPFW4 (dev_priv->info->display_mmio_offset + 0x70070) 2780 + #define DSPFW7 (dev_priv->info->display_mmio_offset + 0x7007c) 2805 2781 2806 2782 /* drain latency register values*/ 2807 2783 #define DRAIN_LATENCY_PRECISION_32 32 ··· 3782 3754 #define HSW_VIDEO_DIP_VSC_ECC_B 0x61344 3783 3755 #define HSW_VIDEO_DIP_GCP_B 0x61210 3784 3756 3785 - #define HSW_TVIDEO_DIP_CTL(pipe) \ 3786 - _PIPE(pipe, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B) 3787 - #define HSW_TVIDEO_DIP_AVI_DATA(pipe) \ 3788 - _PIPE(pipe, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B) 3789 - #define HSW_TVIDEO_DIP_SPD_DATA(pipe) \ 3790 - _PIPE(pipe, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B) 3791 - #define HSW_TVIDEO_DIP_GCP(pipe) \ 3792 - _PIPE(pipe, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B) 3757 + #define HSW_TVIDEO_DIP_CTL(trans) \ 3758 + _TRANSCODER(trans, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B) 3759 + #define HSW_TVIDEO_DIP_AVI_DATA(trans) \ 3760 + _TRANSCODER(trans, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B) 3761 + #define HSW_TVIDEO_DIP_SPD_DATA(trans) \ 3762 + _TRANSCODER(trans, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B) 3763 + #define HSW_TVIDEO_DIP_GCP(trans) \ 3764 + _TRANSCODER(trans, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B) 3765 + #define HSW_TVIDEO_DIP_VSC_DATA(trans) \ 3766 + _TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B) 3793 3767 3794 3768 #define _TRANS_HTOTAL_B 0xe1000 3795 3769 #define _TRANS_HBLANK_B 0xe1004 ··· 4006 3976 #define FDI_PLL_CTL_1 0xfe000 4007 3977 #define FDI_PLL_CTL_2 0xfe004 4008 3978 4009 - /* or SDVOB */ 4010 - #define HDMIB 0xe1140 4011 - #define PORT_ENABLE (1 << 31) 4012 - #define TRANSCODER(pipe) ((pipe) << 30) 4013 - #define TRANSCODER_CPT(pipe) 
((pipe) << 29) 4014 - #define TRANSCODER_MASK (1 << 30) 4015 - #define TRANSCODER_MASK_CPT (3 << 29) 4016 - #define COLOR_FORMAT_8bpc (0) 4017 - #define COLOR_FORMAT_12bpc (3 << 26) 4018 - #define SDVOB_HOTPLUG_ENABLE (1 << 23) 4019 - #define SDVO_ENCODING (0) 4020 - #define TMDS_ENCODING (2 << 10) 4021 - #define NULL_PACKET_VSYNC_ENABLE (1 << 9) 4022 - /* CPT */ 4023 - #define HDMI_MODE_SELECT (1 << 9) 4024 - #define DVI_MODE_SELECT (0) 4025 - #define SDVOB_BORDER_ENABLE (1 << 7) 4026 - #define AUDIO_ENABLE (1 << 6) 4027 - #define VSYNC_ACTIVE_HIGH (1 << 4) 4028 - #define HSYNC_ACTIVE_HIGH (1 << 3) 4029 - #define PORT_DETECTED (1 << 2) 4030 - 4031 - /* PCH SDVOB multiplex with HDMIB */ 4032 - #define PCH_SDVOB HDMIB 4033 - 4034 - #define HDMIC 0xe1150 4035 - #define HDMID 0xe1160 4036 - 4037 3979 #define PCH_LVDS 0xe1180 4038 3980 #define LVDS_DETECTED (1 << 1) 4039 3981 ··· 4151 4149 #define FORCEWAKE 0xA18C 4152 4150 #define FORCEWAKE_VLV 0x1300b0 4153 4151 #define FORCEWAKE_ACK_VLV 0x1300b4 4152 + #define FORCEWAKE_MEDIA_VLV 0x1300b8 4153 + #define FORCEWAKE_ACK_MEDIA_VLV 0x1300bc 4154 4154 #define FORCEWAKE_ACK_HSW 0x130044 4155 4155 #define FORCEWAKE_ACK 0x130090 4156 + #define VLV_GTLC_WAKE_CTRL 0x130090 4157 + #define VLV_GTLC_PW_STATUS 0x130094 4156 4158 #define FORCEWAKE_MT 0xa188 /* multi-threaded */ 4157 4159 #define FORCEWAKE_KERNEL 0x1 4158 4160 #define FORCEWAKE_USER 0x2
+10 -5
drivers/gpu/drm/i915/i915_suspend.c
··· 209 209 dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2); 210 210 dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL); 211 211 dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2); 212 - dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS); 212 + if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) 213 + dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS); 213 214 } else { 214 215 dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL); 215 216 dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); ··· 256 255 static void i915_restore_display(struct drm_device *dev) 257 256 { 258 257 struct drm_i915_private *dev_priv = dev->dev_private; 258 + u32 mask = 0xffffffff; 259 259 260 260 /* Display arbitration */ 261 261 if (INTEL_INFO(dev)->gen <= 4) ··· 269 267 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) 270 268 I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2); 271 269 272 - if (HAS_PCH_SPLIT(dev)) { 273 - I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS); 274 - } else if (IS_MOBILE(dev) && !IS_I830(dev)) 275 - I915_WRITE(LVDS, dev_priv->regfile.saveLVDS); 270 + if (drm_core_check_feature(dev, DRIVER_MODESET)) 271 + mask = ~LVDS_PORT_EN; 272 + 273 + if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) 274 + I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS & mask); 275 + else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev)) 276 + I915_WRITE(LVDS, dev_priv->regfile.saveLVDS & mask); 276 277 277 278 if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) 278 279 I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
+8 -8
drivers/gpu/drm/i915/i915_sysfs.c
··· 49 49 show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf) 50 50 { 51 51 struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); 52 - return snprintf(buf, PAGE_SIZE, "%x", intel_enable_rc6(dminor->dev)); 52 + return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev)); 53 53 } 54 54 55 55 static ssize_t ··· 57 57 { 58 58 struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); 59 59 u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6); 60 - return snprintf(buf, PAGE_SIZE, "%u", rc6_residency); 60 + return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency); 61 61 } 62 62 63 63 static ssize_t ··· 65 65 { 66 66 struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); 67 67 u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p); 68 - return snprintf(buf, PAGE_SIZE, "%u", rc6p_residency); 68 + return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency); 69 69 } 70 70 71 71 static ssize_t ··· 73 73 { 74 74 struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); 75 75 u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp); 76 - return snprintf(buf, PAGE_SIZE, "%u", rc6pp_residency); 76 + return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency); 77 77 } 78 78 79 79 static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL); ··· 215 215 ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER; 216 216 mutex_unlock(&dev_priv->rps.hw_lock); 217 217 218 - return snprintf(buf, PAGE_SIZE, "%d", ret); 218 + return snprintf(buf, PAGE_SIZE, "%d\n", ret); 219 219 } 220 220 221 221 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) ··· 229 229 ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER; 230 230 mutex_unlock(&dev_priv->rps.hw_lock); 231 231 232 - return snprintf(buf, PAGE_SIZE, "%d", ret); 232 + return snprintf(buf, PAGE_SIZE, "%d\n", ret); 233 233 } 234 234 235 235 static ssize_t 
gt_max_freq_mhz_store(struct device *kdev, ··· 280 280 ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER; 281 281 mutex_unlock(&dev_priv->rps.hw_lock); 282 282 283 - return snprintf(buf, PAGE_SIZE, "%d", ret); 283 + return snprintf(buf, PAGE_SIZE, "%d\n", ret); 284 284 } 285 285 286 286 static ssize_t gt_min_freq_mhz_store(struct device *kdev, ··· 355 355 } else { 356 356 BUG(); 357 357 } 358 - return snprintf(buf, PAGE_SIZE, "%d", val); 358 + return snprintf(buf, PAGE_SIZE, "%d\n", val); 359 359 } 360 360 361 361 static const struct attribute *gen6_attrs[] = {
+5 -7
drivers/gpu/drm/i915/intel_ddi.c
··· 1341 1341 struct drm_i915_private *dev_priv = dev->dev_private; 1342 1342 uint32_t tmp; 1343 1343 1344 + tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); 1345 + tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4)); 1346 + I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp); 1347 + 1344 1348 if (type == INTEL_OUTPUT_EDP) { 1345 1349 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1346 1350 1347 1351 ironlake_edp_backlight_off(intel_dp); 1348 1352 } 1349 - 1350 - tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); 1351 - tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4)); 1352 - I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp); 1353 1353 } 1354 1354 1355 1355 int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv) ··· 1537 1537 intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) & 1538 1538 DDI_BUF_PORT_REVERSAL; 1539 1539 if (hdmi_connector) 1540 - intel_dig_port->hdmi.sdvox_reg = DDI_BUF_CTL(port); 1541 - else 1542 - intel_dig_port->hdmi.sdvox_reg = 0; 1540 + intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port); 1543 1541 intel_dig_port->dp.output_reg = DDI_BUF_CTL(port); 1544 1542 1545 1543 intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
+116 -78
drivers/gpu/drm/i915/intel_display.c
··· 71 71 struct intel_limit { 72 72 intel_range_t dot, vco, n, m, m1, m2, p, p1; 73 73 intel_p2_t p2; 74 - bool (* find_pll)(const intel_limit_t *, struct drm_crtc *, 75 - int, int, intel_clock_t *, intel_clock_t *); 74 + /** 75 + * find_pll() - Find the best values for the PLL 76 + * @limit: limits for the PLL 77 + * @crtc: current CRTC 78 + * @target: target frequency in kHz 79 + * @refclk: reference clock frequency in kHz 80 + * @match_clock: if provided, @best_clock P divider must 81 + * match the P divider from @match_clock 82 + * used for LVDS downclocking 83 + * @best_clock: best PLL values found 84 + * 85 + * Returns true on success, false on failure. 86 + */ 87 + bool (*find_pll)(const intel_limit_t *limit, 88 + struct drm_crtc *crtc, 89 + int target, int refclk, 90 + intel_clock_t *match_clock, 91 + intel_clock_t *best_clock); 76 92 }; 77 93 78 94 /* FDI */ ··· 487 471 488 472 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 489 473 if (intel_is_dual_link_lvds(dev)) { 490 - /* LVDS dual channel */ 491 474 if (refclk == 100000) 492 475 limit = &intel_limits_ironlake_dual_lvds_100m; 493 476 else ··· 513 498 514 499 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 515 500 if (intel_is_dual_link_lvds(dev)) 516 - /* LVDS with dual channel */ 517 501 limit = &intel_limits_g4x_dual_channel_lvds; 518 502 else 519 - /* LVDS with dual channel */ 520 503 limit = &intel_limits_g4x_single_channel_lvds; 521 504 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) || 522 505 intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) { ··· 1267 1254 int cur_pipe; 1268 1255 1269 1256 /* Planes are fixed to pipes on ILK+ */ 1270 - if (HAS_PCH_SPLIT(dev_priv->dev)) { 1257 + if (HAS_PCH_SPLIT(dev_priv->dev) || IS_VALLEYVIEW(dev_priv->dev)) { 1271 1258 reg = DSPCNTR(pipe); 1272 1259 val = I915_READ(reg); 1273 1260 WARN((val & DISPLAY_PLANE_ENABLE), ··· 1340 1327 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv, 1341 1328 enum pipe pipe, u32 val) 1342 1329 { 1343 
- if ((val & PORT_ENABLE) == 0) 1330 + if ((val & SDVO_ENABLE) == 0) 1344 1331 return false; 1345 1332 1346 1333 if (HAS_PCH_CPT(dev_priv->dev)) { 1347 - if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) 1334 + if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe)) 1348 1335 return false; 1349 1336 } else { 1350 - if ((val & TRANSCODER_MASK) != TRANSCODER(pipe)) 1337 + if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe)) 1351 1338 return false; 1352 1339 } 1353 1340 return true; ··· 1405 1392 "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", 1406 1393 reg, pipe_name(pipe)); 1407 1394 1408 - WARN(HAS_PCH_IBX(dev_priv->dev) && (val & PORT_ENABLE) == 0 1395 + WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0 1409 1396 && (val & SDVO_PIPE_B_SELECT), 1410 1397 "IBX PCH hdmi port still using transcoder B\n"); 1411 1398 } ··· 1432 1419 "PCH LVDS enabled on transcoder %c, should be disabled\n", 1433 1420 pipe_name(pipe)); 1434 1421 1435 - assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB); 1436 - assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC); 1437 - assert_pch_hdmi_disabled(dev_priv, pipe, HDMID); 1422 + assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB); 1423 + assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC); 1424 + assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID); 1438 1425 } 1439 1426 1440 1427 /** ··· 2242 2229 return dev_priv->display.update_plane(crtc, fb, x, y); 2243 2230 } 2244 2231 2232 + void intel_display_handle_reset(struct drm_device *dev) 2233 + { 2234 + struct drm_i915_private *dev_priv = dev->dev_private; 2235 + struct drm_crtc *crtc; 2236 + 2237 + /* 2238 + * Flips in the rings have been nuked by the reset, 2239 + * so complete all pending flips so that user space 2240 + * will get its events and not get stuck. 2241 + * 2242 + * Also update the base address of all primary 2243 + * planes to the the last fb to make sure we're 2244 + * showing the correct fb after a reset. 
2245 + * 2246 + * Need to make two loops over the crtcs so that we 2247 + * don't try to grab a crtc mutex before the 2248 + * pending_flip_queue really got woken up. 2249 + */ 2250 + 2251 + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 2252 + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2253 + enum plane plane = intel_crtc->plane; 2254 + 2255 + intel_prepare_page_flip(dev, plane); 2256 + intel_finish_page_flip_plane(dev, plane); 2257 + } 2258 + 2259 + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 2260 + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2261 + 2262 + mutex_lock(&crtc->mutex); 2263 + if (intel_crtc->active) 2264 + dev_priv->display.update_plane(crtc, crtc->fb, 2265 + crtc->x, crtc->y); 2266 + mutex_unlock(&crtc->mutex); 2267 + } 2268 + } 2269 + 2245 2270 static int 2246 2271 intel_finish_fb(struct drm_framebuffer *old_fb) 2247 2272 { ··· 2346 2295 return 0; 2347 2296 } 2348 2297 2349 - if(intel_crtc->plane > dev_priv->num_pipe) { 2298 + if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) { 2350 2299 DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n", 2351 2300 intel_crtc->plane, 2352 - dev_priv->num_pipe); 2301 + INTEL_INFO(dev)->num_pipes); 2353 2302 return -EINVAL; 2354 2303 } 2355 2304 ··· 2362 2311 DRM_ERROR("pin & fence failed\n"); 2363 2312 return ret; 2364 2313 } 2365 - 2366 - if (crtc->fb) 2367 - intel_finish_fb(crtc->fb); 2368 2314 2369 2315 ret = dev_priv->display.update_plane(crtc, fb, x, y); 2370 2316 if (ret) { ··· 4560 4512 dpll |= PLL_P2_DIVIDE_BY_4; 4561 4513 } 4562 4514 4563 - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT)) 4564 - /* XXX: just matching BIOS for now */ 4565 - /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ 4566 - dpll |= 3; 4567 - else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 4515 + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 4568 4516 intel_panel_use_ssc(dev_priv) && num_connectors < 2) 4569 4517 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 4570 4518 else 
··· 4743 4699 /* Set up the display plane register */ 4744 4700 dspcntr = DISPPLANE_GAMMA_ENABLE; 4745 4701 4746 - if (pipe == 0) 4747 - dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; 4748 - else 4749 - dspcntr |= DISPPLANE_SEL_PIPE_B; 4702 + if (!IS_VALLEYVIEW(dev)) { 4703 + if (pipe == 0) 4704 + dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; 4705 + else 4706 + dspcntr |= DISPPLANE_SEL_PIPE_B; 4707 + } 4750 4708 4751 4709 if (pipe == 0 && INTEL_INFO(dev)->gen < 4) { 4752 4710 /* Enable pixel doubling when the dot clock is > 90% of the (display) ··· 5390 5344 return false; 5391 5345 } 5392 5346 5393 - if (dev_priv->num_pipe == 2) 5347 + if (INTEL_INFO(dev)->num_pipes == 2) 5394 5348 return true; 5395 5349 5396 5350 switch (intel_crtc->pipe) { ··· 6482 6436 intel_crtc_load_lut(crtc); 6483 6437 } 6484 6438 6485 - /** 6486 - * Get a pipe with a simple mode set on it for doing load-based monitor 6487 - * detection. 6488 - * 6489 - * It will be up to the load-detect code to adjust the pipe as appropriate for 6490 - * its requirements. The pipe will be connected to no other encoders. 6491 - * 6492 - * Currently this code will only succeed if there is a pipe with no encoders 6493 - * configured for it. In the future, it could choose to temporarily disable 6494 - * some outputs to free up a pipe for its use. 6495 - * 6496 - * \return crtc, or NULL if no pipes are available. 
6497 - */ 6498 - 6499 6439 /* VESA 640x480x72Hz mode to set on the pipe */ 6500 6440 static struct drm_display_mode load_detect_mode = { 6501 6441 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664, ··· 6986 6954 drm_i915_private_t *dev_priv = dev->dev_private; 6987 6955 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6988 6956 struct intel_unpin_work *work; 6989 - struct drm_i915_gem_object *obj; 6990 6957 unsigned long flags; 6991 6958 6992 6959 /* Ignore early vblank irqs */ ··· 7014 6983 drm_vblank_put(dev, intel_crtc->pipe); 7015 6984 7016 6985 spin_unlock_irqrestore(&dev->event_lock, flags); 7017 - 7018 - obj = work->old_fb_obj; 7019 6986 7020 6987 wake_up_all(&dev_priv->pending_flip_queue); 7021 6988 ··· 8174 8145 goto fail; 8175 8146 } 8176 8147 } else if (config->fb_changed) { 8148 + intel_crtc_wait_for_pending_flips(set->crtc); 8149 + 8177 8150 ret = intel_pipe_set_base(set->crtc, 8178 8151 set->x, set->y, set->fb); 8179 8152 } ··· 8374 8343 if (has_edp_a(dev)) 8375 8344 intel_dp_init(dev, DP_A, PORT_A); 8376 8345 8377 - if (I915_READ(HDMIB) & PORT_DETECTED) { 8346 + if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) { 8378 8347 /* PCH SDVOB multiplex with HDMIB */ 8379 8348 found = intel_sdvo_init(dev, PCH_SDVOB, true); 8380 8349 if (!found) 8381 - intel_hdmi_init(dev, HDMIB, PORT_B); 8350 + intel_hdmi_init(dev, PCH_HDMIB, PORT_B); 8382 8351 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) 8383 8352 intel_dp_init(dev, PCH_DP_B, PORT_B); 8384 8353 } 8385 8354 8386 - if (I915_READ(HDMIC) & PORT_DETECTED) 8387 - intel_hdmi_init(dev, HDMIC, PORT_C); 8355 + if (I915_READ(PCH_HDMIC) & SDVO_DETECTED) 8356 + intel_hdmi_init(dev, PCH_HDMIC, PORT_C); 8388 8357 8389 - if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED) 8390 - intel_hdmi_init(dev, HDMID, PORT_D); 8358 + if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED) 8359 + intel_hdmi_init(dev, PCH_HDMID, PORT_D); 8391 8360 8392 8361 if (I915_READ(PCH_DP_C) & DP_DETECTED) 8393 8362 intel_dp_init(dev, 
PCH_DP_C, PORT_C); ··· 8399 8368 if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED) 8400 8369 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C); 8401 8370 8402 - if (I915_READ(VLV_DISPLAY_BASE + SDVOB) & PORT_DETECTED) { 8403 - intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOB, PORT_B); 8371 + if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) { 8372 + intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB, 8373 + PORT_B); 8404 8374 if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED) 8405 8375 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B); 8406 8376 } 8407 - 8408 - if (I915_READ(VLV_DISPLAY_BASE + SDVOC) & PORT_DETECTED) 8409 - intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOC, PORT_C); 8410 - 8411 8377 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { 8412 8378 bool found = false; 8413 8379 8414 - if (I915_READ(SDVOB) & SDVO_DETECTED) { 8380 + if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { 8415 8381 DRM_DEBUG_KMS("probing SDVOB\n"); 8416 - found = intel_sdvo_init(dev, SDVOB, true); 8382 + found = intel_sdvo_init(dev, GEN3_SDVOB, true); 8417 8383 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { 8418 8384 DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); 8419 - intel_hdmi_init(dev, SDVOB, PORT_B); 8385 + intel_hdmi_init(dev, GEN4_HDMIB, PORT_B); 8420 8386 } 8421 8387 8422 8388 if (!found && SUPPORTS_INTEGRATED_DP(dev)) { ··· 8424 8396 8425 8397 /* Before G4X SDVOC doesn't have its own detect register */ 8426 8398 8427 - if (I915_READ(SDVOB) & SDVO_DETECTED) { 8399 + if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { 8428 8400 DRM_DEBUG_KMS("probing SDVOC\n"); 8429 - found = intel_sdvo_init(dev, SDVOC, false); 8401 + found = intel_sdvo_init(dev, GEN3_SDVOC, false); 8430 8402 } 8431 8403 8432 - if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { 8404 + if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) { 8433 8405 8434 8406 if (SUPPORTS_INTEGRATED_HDMI(dev)) { 8435 8407 DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); 8436 - intel_hdmi_init(dev, SDVOC, PORT_C); 8408 + 
intel_hdmi_init(dev, GEN4_HDMIC, PORT_C); 8437 8409 } 8438 8410 if (SUPPORTS_INTEGRATED_DP(dev)) { 8439 8411 DRM_DEBUG_KMS("probing DP_C\n"); ··· 8600 8572 { 8601 8573 struct drm_i915_private *dev_priv = dev->dev_private; 8602 8574 8603 - /* We always want a DPMS function */ 8604 8575 if (HAS_DDI(dev)) { 8605 8576 dev_priv->display.crtc_mode_set = haswell_crtc_mode_set; 8606 8577 dev_priv->display.crtc_enable = haswell_crtc_enable; ··· 8886 8859 dev->mode_config.fb_base = dev_priv->gtt.mappable_base; 8887 8860 8888 8861 DRM_DEBUG_KMS("%d display pipe%s available.\n", 8889 - dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : ""); 8862 + INTEL_INFO(dev)->num_pipes, 8863 + INTEL_INFO(dev)->num_pipes > 1 ? "s" : ""); 8890 8864 8891 - for (i = 0; i < dev_priv->num_pipe; i++) { 8865 + for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) { 8892 8866 intel_crtc_init(dev, i); 8893 8867 ret = intel_plane_init(dev, i); 8894 8868 if (ret) ··· 8946 8918 static bool 8947 8919 intel_check_plane_mapping(struct intel_crtc *crtc) 8948 8920 { 8949 - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 8921 + struct drm_device *dev = crtc->base.dev; 8922 + struct drm_i915_private *dev_priv = dev->dev_private; 8950 8923 u32 reg, val; 8951 8924 8952 - if (dev_priv->num_pipe == 1) 8925 + if (INTEL_INFO(dev)->num_pipes == 1) 8953 8926 return true; 8954 8927 8955 8928 reg = DSPCNTR(!crtc->plane); ··· 9352 9323 for_each_pipe(i) { 9353 9324 cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i); 9354 9325 9355 - error->cursor[i].control = I915_READ(CURCNTR(i)); 9356 - error->cursor[i].position = I915_READ(CURPOS(i)); 9357 - error->cursor[i].base = I915_READ(CURBASE(i)); 9326 + if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) { 9327 + error->cursor[i].control = I915_READ(CURCNTR(i)); 9328 + error->cursor[i].position = I915_READ(CURPOS(i)); 9329 + error->cursor[i].base = I915_READ(CURBASE(i)); 9330 + } else { 9331 + error->cursor[i].control = I915_READ(CURCNTR_IVB(i)); 9332 + 
error->cursor[i].position = I915_READ(CURPOS_IVB(i)); 9333 + error->cursor[i].base = I915_READ(CURBASE_IVB(i)); 9334 + } 9358 9335 9359 9336 error->plane[i].control = I915_READ(DSPCNTR(i)); 9360 9337 error->plane[i].stride = I915_READ(DSPSTRIDE(i)); 9361 - error->plane[i].size = I915_READ(DSPSIZE(i)); 9338 + if (INTEL_INFO(dev)->gen <= 3) 9339 + error->plane[i].size = I915_READ(DSPSIZE(i)); 9362 9340 error->plane[i].pos = I915_READ(DSPPOS(i)); 9363 - error->plane[i].addr = I915_READ(DSPADDR(i)); 9341 + if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) 9342 + error->plane[i].addr = I915_READ(DSPADDR(i)); 9364 9343 if (INTEL_INFO(dev)->gen >= 4) { 9365 9344 error->plane[i].surface = I915_READ(DSPSURF(i)); 9366 9345 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); ··· 9392 9355 struct drm_device *dev, 9393 9356 struct intel_display_error_state *error) 9394 9357 { 9395 - drm_i915_private_t *dev_priv = dev->dev_private; 9396 9358 int i; 9397 9359 9398 - seq_printf(m, "Num Pipes: %d\n", dev_priv->num_pipe); 9360 + seq_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes); 9399 9361 for_each_pipe(i) { 9400 9362 seq_printf(m, "Pipe [%d]:\n", i); 9401 9363 seq_printf(m, " CONF: %08x\n", error->pipe[i].conf); ··· 9409 9373 seq_printf(m, "Plane [%d]:\n", i); 9410 9374 seq_printf(m, " CNTR: %08x\n", error->plane[i].control); 9411 9375 seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride); 9412 - seq_printf(m, " SIZE: %08x\n", error->plane[i].size); 9376 + if (INTEL_INFO(dev)->gen <= 3) 9377 + seq_printf(m, " SIZE: %08x\n", error->plane[i].size); 9413 9378 seq_printf(m, " POS: %08x\n", error->plane[i].pos); 9414 - seq_printf(m, " ADDR: %08x\n", error->plane[i].addr); 9379 + if (!IS_HASWELL(dev)) 9380 + seq_printf(m, " ADDR: %08x\n", error->plane[i].addr); 9415 9381 if (INTEL_INFO(dev)->gen >= 4) { 9416 9382 seq_printf(m, " SURF: %08x\n", error->plane[i].surface); 9417 9383 seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
+37 -51
drivers/gpu/drm/i915/intel_dp.c
··· 328 328 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 329 329 struct drm_device *dev = intel_dig_port->base.base.dev; 330 330 struct drm_i915_private *dev_priv = dev->dev_private; 331 - uint32_t ch_ctl = intel_dp->output_reg + 0x10; 331 + uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg; 332 332 uint32_t status; 333 333 bool done; 334 - 335 - if (IS_HASWELL(dev)) { 336 - switch (intel_dig_port->port) { 337 - case PORT_A: 338 - ch_ctl = DPA_AUX_CH_CTL; 339 - break; 340 - case PORT_B: 341 - ch_ctl = PCH_DPB_AUX_CH_CTL; 342 - break; 343 - case PORT_C: 344 - ch_ctl = PCH_DPC_AUX_CH_CTL; 345 - break; 346 - case PORT_D: 347 - ch_ctl = PCH_DPD_AUX_CH_CTL; 348 - break; 349 - default: 350 - BUG(); 351 - } 352 - } 353 334 354 335 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0) 355 336 if (has_aux_irq) ··· 351 370 uint8_t *send, int send_bytes, 352 371 uint8_t *recv, int recv_size) 353 372 { 354 - uint32_t output_reg = intel_dp->output_reg; 355 373 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 356 374 struct drm_device *dev = intel_dig_port->base.base.dev; 357 375 struct drm_i915_private *dev_priv = dev->dev_private; 358 - uint32_t ch_ctl = output_reg + 0x10; 376 + uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg; 359 377 uint32_t ch_data = ch_ctl + 4; 360 378 int i, ret, recv_bytes; 361 379 uint32_t status; ··· 367 387 * deep sleep states. 
368 388 */ 369 389 pm_qos_update_request(&dev_priv->pm_qos, 0); 370 - 371 - if (IS_HASWELL(dev)) { 372 - switch (intel_dig_port->port) { 373 - case PORT_A: 374 - ch_ctl = DPA_AUX_CH_CTL; 375 - ch_data = DPA_AUX_CH_DATA1; 376 - break; 377 - case PORT_B: 378 - ch_ctl = PCH_DPB_AUX_CH_CTL; 379 - ch_data = PCH_DPB_AUX_CH_DATA1; 380 - break; 381 - case PORT_C: 382 - ch_ctl = PCH_DPC_AUX_CH_CTL; 383 - ch_data = PCH_DPC_AUX_CH_DATA1; 384 - break; 385 - case PORT_D: 386 - ch_ctl = PCH_DPD_AUX_CH_CTL; 387 - ch_data = PCH_DPD_AUX_CH_DATA1; 388 - break; 389 - default: 390 - BUG(); 391 - } 392 - } 393 390 394 391 intel_dp_check_edp(intel_dp); 395 392 /* The clock divider is based off the hrawclk, ··· 810 853 intel_link_compute_m_n(intel_crtc->bpp, lane_count, 811 854 target_clock, adjusted_mode->clock, &m_n); 812 855 813 - if (IS_HASWELL(dev)) { 856 + if (HAS_DDI(dev)) { 814 857 I915_WRITE(PIPE_DATA_M1(cpu_transcoder), 815 858 TU_SIZE(m_n.tu) | m_n.gmch_m); 816 859 I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n); ··· 977 1020 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 978 1021 } 979 1022 980 - if (is_cpu_edp(intel_dp)) 1023 + if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) 981 1024 ironlake_set_pll_edp(crtc, adjusted_mode->clock); 982 1025 } 983 1026 ··· 1341 1384 if (!(tmp & DP_PORT_EN)) 1342 1385 return false; 1343 1386 1344 - if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) { 1387 + if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) { 1345 1388 *pipe = PORT_TO_PIPE_CPT(tmp); 1346 1389 } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) { 1347 1390 *pipe = PORT_TO_PIPE(tmp); ··· 1505 1548 { 1506 1549 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1507 1550 1508 - if (IS_HASWELL(dev)) { 1551 + if (HAS_DDI(dev)) { 1509 1552 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 1510 1553 case DP_TRAIN_VOLTAGE_SWING_400: 1511 1554 return DP_TRAIN_PRE_EMPHASIS_9_5; ··· 1713 1756 uint32_t signal_levels, mask; 1714 1757 uint8_t train_set = intel_dp->train_set[0]; 
1715 1758 1716 - if (IS_HASWELL(dev)) { 1759 + if (HAS_DDI(dev)) { 1717 1760 signal_levels = intel_hsw_signal_levels(train_set); 1718 1761 mask = DDI_BUF_EMP_MASK; 1719 1762 } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { ··· 1744 1787 int ret; 1745 1788 uint32_t temp; 1746 1789 1747 - if (IS_HASWELL(dev)) { 1790 + if (HAS_DDI(dev)) { 1748 1791 temp = I915_READ(DP_TP_CTL(port)); 1749 1792 1750 1793 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) ··· 2267 2310 struct drm_i915_private *dev_priv = dev->dev_private; 2268 2311 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2269 2312 uint32_t bit; 2313 + 2314 + /* Can't disconnect eDP, but you can close the lid... */ 2315 + if (is_edp(intel_dp)) { 2316 + enum drm_connector_status status; 2317 + 2318 + status = intel_panel_detect(dev); 2319 + if (status == connector_status_unknown) 2320 + status = connector_status_connected; 2321 + return status; 2322 + } 2270 2323 2271 2324 switch (intel_dig_port->port) { 2272 2325 case PORT_B: ··· 2811 2844 else 2812 2845 intel_connector->get_hw_state = intel_connector_get_hw_state; 2813 2846 2847 + intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10; 2848 + if (HAS_DDI(dev)) { 2849 + switch (intel_dig_port->port) { 2850 + case PORT_A: 2851 + intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL; 2852 + break; 2853 + case PORT_B: 2854 + intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL; 2855 + break; 2856 + case PORT_C: 2857 + intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL; 2858 + break; 2859 + case PORT_D: 2860 + intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL; 2861 + break; 2862 + default: 2863 + BUG(); 2864 + } 2865 + } 2814 2866 2815 2867 /* Set up the DDC bus. */ 2816 2868 switch (port) {
+5 -2
drivers/gpu/drm/i915/intel_drv.h
··· 347 347 } __attribute__((packed)); 348 348 349 349 struct intel_hdmi { 350 - u32 sdvox_reg; 350 + u32 hdmi_reg; 351 351 int ddc_bus; 352 352 uint32_t color_range; 353 353 bool color_range_auto; ··· 366 366 367 367 struct intel_dp { 368 368 uint32_t output_reg; 369 + uint32_t aux_ch_ctl_reg; 369 370 uint32_t DP; 370 371 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; 371 372 bool has_audio; ··· 444 443 445 444 extern void intel_crt_init(struct drm_device *dev); 446 445 extern void intel_hdmi_init(struct drm_device *dev, 447 - int sdvox_reg, enum port port); 446 + int hdmi_reg, enum port port); 448 447 extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, 449 448 struct intel_connector *intel_connector); 450 449 extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); ··· 695 694 extern bool 696 695 intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); 697 696 extern void intel_ddi_fdi_disable(struct drm_crtc *crtc); 697 + 698 + extern void intel_display_handle_reset(struct drm_device *dev); 698 699 699 700 #endif /* __INTEL_DRV_H__ */
+1 -3
drivers/gpu/drm/i915/intel_fb.c
··· 150 150 } 151 151 info->screen_size = size; 152 152 153 - // memset(info->screen_base, 0, size); 154 - 155 153 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); 156 154 drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height); 157 155 ··· 225 227 ifbdev->helper.funcs = &intel_fb_helper_funcs; 226 228 227 229 ret = drm_fb_helper_init(dev, &ifbdev->helper, 228 - dev_priv->num_pipe, 230 + INTEL_INFO(dev)->num_pipes, 229 231 INTELFB_CONN_LIMIT); 230 232 if (ret) { 231 233 kfree(ifbdev);
+55 -59
drivers/gpu/drm/i915/intel_hdmi.c
··· 50 50 51 51 enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE; 52 52 53 - WARN(I915_READ(intel_hdmi->sdvox_reg) & enabled_bits, 53 + WARN(I915_READ(intel_hdmi->hdmi_reg) & enabled_bits, 54 54 "HDMI port enabled, expecting disabled\n"); 55 55 } 56 56 ··· 120 120 } 121 121 } 122 122 123 - static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame, enum pipe pipe) 123 + static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame, 124 + enum transcoder cpu_transcoder) 124 125 { 125 126 switch (frame->type) { 126 127 case DIP_TYPE_AVI: 127 - return HSW_TVIDEO_DIP_AVI_DATA(pipe); 128 + return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder); 128 129 case DIP_TYPE_SPD: 129 - return HSW_TVIDEO_DIP_SPD_DATA(pipe); 130 + return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder); 130 131 default: 131 132 DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); 132 133 return 0; ··· 294 293 struct drm_device *dev = encoder->dev; 295 294 struct drm_i915_private *dev_priv = dev->dev_private; 296 295 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 297 - u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe); 298 - u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->pipe); 296 + u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->cpu_transcoder); 297 + u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->cpu_transcoder); 299 298 unsigned int i, len = DIP_HEADER_SIZE + frame->len; 300 299 u32 val = I915_READ(ctl_reg); 301 300 ··· 569 568 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 570 569 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 571 570 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 572 - u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe); 571 + u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->cpu_transcoder); 573 572 u32 val = I915_READ(reg); 574 573 575 574 assert_hdmi_port_disabled(intel_hdmi); ··· 598 597 struct drm_i915_private *dev_priv = dev->dev_private; 599 598 struct intel_crtc *intel_crtc = 
to_intel_crtc(encoder->crtc); 600 599 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 601 - u32 sdvox; 600 + u32 hdmi_val; 602 601 603 - sdvox = SDVO_ENCODING_HDMI; 602 + hdmi_val = SDVO_ENCODING_HDMI; 604 603 if (!HAS_PCH_SPLIT(dev)) 605 - sdvox |= intel_hdmi->color_range; 604 + hdmi_val |= intel_hdmi->color_range; 606 605 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 607 - sdvox |= SDVO_VSYNC_ACTIVE_HIGH; 606 + hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH; 608 607 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 609 - sdvox |= SDVO_HSYNC_ACTIVE_HIGH; 608 + hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH; 610 609 611 610 if (intel_crtc->bpp > 24) 612 - sdvox |= COLOR_FORMAT_12bpc; 611 + hdmi_val |= HDMI_COLOR_FORMAT_12bpc; 613 612 else 614 - sdvox |= COLOR_FORMAT_8bpc; 613 + hdmi_val |= SDVO_COLOR_FORMAT_8bpc; 615 614 616 615 /* Required on CPT */ 617 616 if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev)) 618 - sdvox |= HDMI_MODE_SELECT; 617 + hdmi_val |= HDMI_MODE_SELECT_HDMI; 619 618 620 619 if (intel_hdmi->has_audio) { 621 620 DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n", 622 621 pipe_name(intel_crtc->pipe)); 623 - sdvox |= SDVO_AUDIO_ENABLE; 624 - sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC; 622 + hdmi_val |= SDVO_AUDIO_ENABLE; 623 + hdmi_val |= HDMI_MODE_SELECT_HDMI; 625 624 intel_write_eld(encoder, adjusted_mode); 626 625 } 627 626 628 627 if (HAS_PCH_CPT(dev)) 629 - sdvox |= PORT_TRANS_SEL_CPT(intel_crtc->pipe); 630 - else if (intel_crtc->pipe == PIPE_B) 631 - sdvox |= SDVO_PIPE_B_SELECT; 628 + hdmi_val |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe); 629 + else 630 + hdmi_val |= SDVO_PIPE_SEL(intel_crtc->pipe); 632 631 633 - I915_WRITE(intel_hdmi->sdvox_reg, sdvox); 634 - POSTING_READ(intel_hdmi->sdvox_reg); 632 + I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val); 633 + POSTING_READ(intel_hdmi->hdmi_reg); 635 634 636 635 intel_hdmi->set_infoframes(encoder, adjusted_mode); 637 636 } ··· 644 643 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 645 644 u32 tmp; 
646 645 647 - tmp = I915_READ(intel_hdmi->sdvox_reg); 646 + tmp = I915_READ(intel_hdmi->hdmi_reg); 648 647 649 648 if (!(tmp & SDVO_ENABLE)) 650 649 return false; ··· 661 660 { 662 661 struct drm_device *dev = encoder->base.dev; 663 662 struct drm_i915_private *dev_priv = dev->dev_private; 663 + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 664 664 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 665 665 u32 temp; 666 666 u32 enable_bits = SDVO_ENABLE; ··· 669 667 if (intel_hdmi->has_audio) 670 668 enable_bits |= SDVO_AUDIO_ENABLE; 671 669 672 - temp = I915_READ(intel_hdmi->sdvox_reg); 670 + temp = I915_READ(intel_hdmi->hdmi_reg); 673 671 674 672 /* HW workaround for IBX, we need to move the port to transcoder A 675 - * before disabling it. */ 676 - if (HAS_PCH_IBX(dev)) { 677 - struct drm_crtc *crtc = encoder->base.crtc; 678 - int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1; 679 - 680 - /* Restore the transcoder select bit. */ 681 - if (pipe == PIPE_B) 682 - enable_bits |= SDVO_PIPE_B_SELECT; 683 - } 673 + * before disabling it, so restore the transcoder select bit here. */ 674 + if (HAS_PCH_IBX(dev)) 675 + enable_bits |= SDVO_PIPE_SEL(intel_crtc->pipe); 684 676 685 677 /* HW workaround, need to toggle enable bit off and on for 12bpc, but 686 678 * we do this anyway which shows more stable in testing. 687 679 */ 688 680 if (HAS_PCH_SPLIT(dev)) { 689 - I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE); 690 - POSTING_READ(intel_hdmi->sdvox_reg); 681 + I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE); 682 + POSTING_READ(intel_hdmi->hdmi_reg); 691 683 } 692 684 693 685 temp |= enable_bits; 694 686 695 - I915_WRITE(intel_hdmi->sdvox_reg, temp); 696 - POSTING_READ(intel_hdmi->sdvox_reg); 687 + I915_WRITE(intel_hdmi->hdmi_reg, temp); 688 + POSTING_READ(intel_hdmi->hdmi_reg); 697 689 698 690 /* HW workaround, need to write this twice for issue that may result 699 691 * in first write getting masked. 
700 692 */ 701 693 if (HAS_PCH_SPLIT(dev)) { 702 - I915_WRITE(intel_hdmi->sdvox_reg, temp); 703 - POSTING_READ(intel_hdmi->sdvox_reg); 694 + I915_WRITE(intel_hdmi->hdmi_reg, temp); 695 + POSTING_READ(intel_hdmi->hdmi_reg); 704 696 } 705 697 } 706 698 ··· 706 710 u32 temp; 707 711 u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE; 708 712 709 - temp = I915_READ(intel_hdmi->sdvox_reg); 713 + temp = I915_READ(intel_hdmi->hdmi_reg); 710 714 711 715 /* HW workaround for IBX, we need to move the port to transcoder A 712 716 * before disabling it. */ ··· 716 720 717 721 if (temp & SDVO_PIPE_B_SELECT) { 718 722 temp &= ~SDVO_PIPE_B_SELECT; 719 - I915_WRITE(intel_hdmi->sdvox_reg, temp); 720 - POSTING_READ(intel_hdmi->sdvox_reg); 723 + I915_WRITE(intel_hdmi->hdmi_reg, temp); 724 + POSTING_READ(intel_hdmi->hdmi_reg); 721 725 722 726 /* Again we need to write this twice. */ 723 - I915_WRITE(intel_hdmi->sdvox_reg, temp); 724 - POSTING_READ(intel_hdmi->sdvox_reg); 727 + I915_WRITE(intel_hdmi->hdmi_reg, temp); 728 + POSTING_READ(intel_hdmi->hdmi_reg); 725 729 726 730 /* Transcoder selection bits only update 727 731 * effectively on vblank. */ ··· 736 740 * we do this anyway which shows more stable in testing. 737 741 */ 738 742 if (HAS_PCH_SPLIT(dev)) { 739 - I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE); 740 - POSTING_READ(intel_hdmi->sdvox_reg); 743 + I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE); 744 + POSTING_READ(intel_hdmi->hdmi_reg); 741 745 } 742 746 743 747 temp &= ~enable_bits; 744 748 745 - I915_WRITE(intel_hdmi->sdvox_reg, temp); 746 - POSTING_READ(intel_hdmi->sdvox_reg); 749 + I915_WRITE(intel_hdmi->hdmi_reg, temp); 750 + POSTING_READ(intel_hdmi->hdmi_reg); 747 751 748 752 /* HW workaround, need to write this twice for issue that may result 749 753 * in first write getting masked. 
750 754 */ 751 755 if (HAS_PCH_SPLIT(dev)) { 752 - I915_WRITE(intel_hdmi->sdvox_reg, temp); 753 - POSTING_READ(intel_hdmi->sdvox_reg); 756 + I915_WRITE(intel_hdmi->hdmi_reg, temp); 757 + POSTING_READ(intel_hdmi->hdmi_reg); 754 758 } 755 759 } 756 760 ··· 778 782 /* See CEA-861-E - 5.1 Default Encoding Parameters */ 779 783 if (intel_hdmi->has_hdmi_sink && 780 784 drm_match_cea_mode(adjusted_mode) > 1) 781 - intel_hdmi->color_range = SDVO_COLOR_RANGE_16_235; 785 + intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235; 782 786 else 783 787 intel_hdmi->color_range = 0; 784 788 } ··· 912 916 break; 913 917 case INTEL_BROADCAST_RGB_LIMITED: 914 918 intel_hdmi->color_range_auto = false; 915 - intel_hdmi->color_range = SDVO_COLOR_RANGE_16_235; 919 + intel_hdmi->color_range = HDMI_COLOR_RANGE_16_235; 916 920 break; 917 921 default: 918 922 return -EINVAL; ··· 1004 1008 BUG(); 1005 1009 } 1006 1010 1007 - if (!HAS_PCH_SPLIT(dev)) { 1008 - intel_hdmi->write_infoframe = g4x_write_infoframe; 1009 - intel_hdmi->set_infoframes = g4x_set_infoframes; 1010 - } else if (IS_VALLEYVIEW(dev)) { 1011 + if (IS_VALLEYVIEW(dev)) { 1011 1012 intel_hdmi->write_infoframe = vlv_write_infoframe; 1012 1013 intel_hdmi->set_infoframes = vlv_set_infoframes; 1013 - } else if (IS_HASWELL(dev)) { 1014 + } else if (!HAS_PCH_SPLIT(dev)) { 1015 + intel_hdmi->write_infoframe = g4x_write_infoframe; 1016 + intel_hdmi->set_infoframes = g4x_set_infoframes; 1017 + } else if (HAS_DDI(dev)) { 1014 1018 intel_hdmi->write_infoframe = hsw_write_infoframe; 1015 1019 intel_hdmi->set_infoframes = hsw_set_infoframes; 1016 1020 } else if (HAS_PCH_IBX(dev)) { ··· 1041 1045 } 1042 1046 } 1043 1047 1044 - void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port) 1048 + void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port) 1045 1049 { 1046 1050 struct intel_digital_port *intel_dig_port; 1047 1051 struct intel_encoder *intel_encoder; ··· 1074 1078 intel_encoder->cloneable = false; 1075 1079 
1076 1080 intel_dig_port->port = port; 1077 - intel_dig_port->hdmi.sdvox_reg = sdvox_reg; 1081 + intel_dig_port->hdmi.hdmi_reg = hdmi_reg; 1078 1082 intel_dig_port->dp.output_reg = 0; 1079 1083 1080 1084 intel_hdmi_init_connector(intel_dig_port, intel_connector);
+5 -2
drivers/gpu/drm/i915/intel_lvds.c
··· 1019 1019 { 1020 1020 /* With the introduction of the PCH we gained a dedicated 1021 1021 * LVDS presence pin, use it. */ 1022 - if (HAS_PCH_SPLIT(dev)) 1022 + if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) 1023 1023 return true; 1024 1024 1025 1025 /* Otherwise LVDS was only attached to mobile products, 1026 1026 * except for the inglorious 830gm */ 1027 - return IS_MOBILE(dev) && !IS_I830(dev); 1027 + if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev)) 1028 + return true; 1029 + 1030 + return false; 1028 1031 } 1029 1032 1030 1033 /**
+1 -1
drivers/gpu/drm/i915/intel_panel.c
··· 335 335 if (tmp & BLM_PWM_ENABLE) 336 336 goto set_level; 337 337 338 - if (dev_priv->num_pipe == 3) 338 + if (INTEL_INFO(dev)->num_pipes == 3) 339 339 tmp &= ~BLM_PIPE_SELECT_IVB; 340 340 else 341 341 tmp &= ~BLM_PIPE_SELECT;
+41 -24
drivers/gpu/drm/i915/intel_pm.c
··· 2631 2631 if (!ret) { 2632 2632 pcu_mbox = 0; 2633 2633 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox); 2634 - if (ret && pcu_mbox & (1<<31)) { /* OC supported */ 2634 + if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */ 2635 + DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max from %dMHz to %dMHz\n", 2636 + (dev_priv->rps.max_delay & 0xff) * 50, 2637 + (pcu_mbox & 0xff) * 50); 2635 2638 dev_priv->rps.max_delay = pcu_mbox & 0xff; 2636 - DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50); 2637 2639 } 2638 2640 } else { 2639 2641 DRM_DEBUG_DRIVER("Failed to set the min frequency\n"); ··· 2823 2821 ret = intel_ring_idle(ring); 2824 2822 dev_priv->mm.interruptible = was_interruptible; 2825 2823 if (ret) { 2826 - DRM_ERROR("failed to enable ironlake power power savings\n"); 2824 + DRM_ERROR("failed to enable ironlake power savings\n"); 2827 2825 ironlake_teardown_rc6(dev); 2828 2826 return; 2829 2827 } ··· 3770 3768 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | 3771 3769 GEN6_MBCTL_ENABLE_BOOT_FETCH); 3772 3770 3771 + /* WaSwitchSolVfFArbitrationPriority */ 3772 + I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); 3773 + 3773 3774 /* XXX: This is a workaround for early silicon revisions and should be 3774 3775 * removed later. 3775 3776 */ ··· 3904 3899 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | 3905 3900 CHICKEN3_DGMG_DONE_FIX_DISABLE); 3906 3901 3902 + /* WaDisablePSDDualDispatchEnable */ 3907 3903 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, 3908 - _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); 3904 + _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP | 3905 + GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); 3909 3906 3910 3907 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ 3911 3908 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, ··· 3992 3985 * Disable clock gating on th GCFG unit to prevent a delay 3993 3986 * in the reporting of vblank events. 
3994 3987 */ 3995 - I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS); 3988 + I915_WRITE(VLV_GUNIT_CLOCK_GATE, 0xffffffff); 3989 + 3990 + /* Conservative clock gating settings for now */ 3991 + I915_WRITE(0x9400, 0xffffffff); 3992 + I915_WRITE(0x9404, 0xffffffff); 3993 + I915_WRITE(0x9408, 0xffffffff); 3994 + I915_WRITE(0x940c, 0xffffffff); 3995 + I915_WRITE(0x9410, 0xffffffff); 3996 + I915_WRITE(0x9414, 0xffffffff); 3997 + I915_WRITE(0x9418, 0xffffffff); 3996 3998 } 3997 3999 3998 4000 static void g4x_init_clock_gating(struct drm_device *dev) ··· 4092 4076 bool is_enabled, enable_requested; 4093 4077 uint32_t tmp; 4094 4078 4095 - if (!IS_HASWELL(dev)) 4079 + if (!HAS_POWER_WELL(dev)) 4096 4080 return; 4097 4081 4098 4082 if (!i915_disable_power_well && !enable) ··· 4130 4114 { 4131 4115 struct drm_i915_private *dev_priv = dev->dev_private; 4132 4116 4133 - if (!IS_HASWELL(dev)) 4117 + if (!HAS_POWER_WELL(dev)) 4134 4118 return; 4135 4119 4136 4120 /* For now, we need the power well to be always enabled. 
*/ ··· 4290 4274 4291 4275 static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) 4292 4276 { 4293 - u32 forcewake_ack; 4294 - 4295 - if (IS_HASWELL(dev_priv->dev)) 4296 - forcewake_ack = FORCEWAKE_ACK_HSW; 4297 - else 4298 - forcewake_ack = FORCEWAKE_ACK; 4299 - 4300 - if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 4277 + if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0, 4301 4278 FORCEWAKE_ACK_TIMEOUT_MS)) 4302 4279 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); 4303 4280 4304 - I915_WRITE_NOTRACE(FORCEWAKE, FORCEWAKE_KERNEL); 4281 + I915_WRITE_NOTRACE(FORCEWAKE, 1); 4305 4282 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ 4306 4283 4307 - if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), 4284 + if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1), 4308 4285 FORCEWAKE_ACK_TIMEOUT_MS)) 4309 4286 DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); 4310 4287 ··· 4320 4311 else 4321 4312 forcewake_ack = FORCEWAKE_MT_ACK; 4322 4313 4323 - if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 4314 + if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL) == 0, 4324 4315 FORCEWAKE_ACK_TIMEOUT_MS)) 4325 4316 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); 4326 4317 ··· 4328 4319 /* something from same cacheline, but !FORCEWAKE_MT */ 4329 4320 POSTING_READ(ECOBUS); 4330 4321 4331 - if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), 4322 + if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL), 4332 4323 FORCEWAKE_ACK_TIMEOUT_MS)) 4333 4324 DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); 4334 4325 ··· 4418 4409 4419 4410 static void vlv_force_wake_get(struct drm_i915_private *dev_priv) 4420 4411 { 4421 - if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0, 4412 + if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0, 
4422 4413 FORCEWAKE_ACK_TIMEOUT_MS)) 4423 4414 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); 4424 4415 4425 4416 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); 4417 + I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV, 4418 + _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); 4426 4419 4427 - if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), 4420 + if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL), 4428 4421 FORCEWAKE_ACK_TIMEOUT_MS)) 4429 - DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); 4422 + DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n"); 4423 + 4424 + if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_MEDIA_VLV) & 4425 + FORCEWAKE_KERNEL), 4426 + FORCEWAKE_ACK_TIMEOUT_MS)) 4427 + DRM_ERROR("Timed out waiting for media to ack forcewake request.\n"); 4430 4428 4431 4429 __gen6_gt_wait_for_thread_c0(dev_priv); 4432 4430 } ··· 4441 4425 static void vlv_force_wake_put(struct drm_i915_private *dev_priv) 4442 4426 { 4443 4427 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); 4444 - /* something from same cacheline, but !FORCEWAKE_VLV */ 4445 - POSTING_READ(FORCEWAKE_ACK_VLV); 4428 + I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV, 4429 + _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); 4430 + /* The below doubles as a POSTING_READ */ 4446 4431 gen6_gt_check_fifodbg(dev_priv); 4447 4432 } 4448 4433
+25 -25
drivers/gpu/drm/i915/intel_sdvo.c
··· 246 246 return; 247 247 } 248 248 249 - if (intel_sdvo->sdvo_reg == SDVOB) { 250 - cval = I915_READ(SDVOC); 251 - } else { 252 - bval = I915_READ(SDVOB); 253 - } 249 + if (intel_sdvo->sdvo_reg == GEN3_SDVOB) 250 + cval = I915_READ(GEN3_SDVOC); 251 + else 252 + bval = I915_READ(GEN3_SDVOB); 253 + 254 254 /* 255 255 * Write the registers twice for luck. Sometimes, 256 256 * writing them only once doesn't appear to 'stick'. ··· 258 258 */ 259 259 for (i = 0; i < 2; i++) 260 260 { 261 - I915_WRITE(SDVOB, bval); 262 - I915_READ(SDVOB); 263 - I915_WRITE(SDVOC, cval); 264 - I915_READ(SDVOC); 261 + I915_WRITE(GEN3_SDVOB, bval); 262 + I915_READ(GEN3_SDVOB); 263 + I915_WRITE(GEN3_SDVOC, cval); 264 + I915_READ(GEN3_SDVOC); 265 265 } 266 266 } 267 267 ··· 451 451 int i, ret = true; 452 452 453 453 /* Would be simpler to allocate both in one go ? */ 454 - buf = (u8 *)kzalloc(args_len * 2 + 2, GFP_KERNEL); 454 + buf = kzalloc(args_len * 2 + 2, GFP_KERNEL); 455 455 if (!buf) 456 456 return false; 457 457 ··· 965 965 avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL; 966 966 } 967 967 968 + avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode); 969 + 968 970 intel_dip_infoframe_csum(&avi_if); 969 971 970 972 /* sdvo spec says that the ecc is handled by the hw, and it looks like ··· 1078 1076 1079 1077 if (intel_sdvo->color_range_auto) { 1080 1078 /* See CEA-861-E - 5.1 Default Encoding Parameters */ 1079 + /* FIXME: This bit is only valid when using TMDS encoding and 8 1080 + * bit per color mode. 
*/ 1081 1081 if (intel_sdvo->has_hdmi_monitor && 1082 1082 drm_match_cea_mode(adjusted_mode) > 1) 1083 - intel_sdvo->color_range = SDVO_COLOR_RANGE_16_235; 1083 + intel_sdvo->color_range = HDMI_COLOR_RANGE_16_235; 1084 1084 else 1085 1085 intel_sdvo->color_range = 0; 1086 1086 } ··· 1186 1182 } else { 1187 1183 sdvox = I915_READ(intel_sdvo->sdvo_reg); 1188 1184 switch (intel_sdvo->sdvo_reg) { 1189 - case SDVOB: 1185 + case GEN3_SDVOB: 1190 1186 sdvox &= SDVOB_PRESERVE_MASK; 1191 1187 break; 1192 - case SDVOC: 1188 + case GEN3_SDVOC: 1193 1189 sdvox &= SDVOC_PRESERVE_MASK; 1194 1190 break; 1195 1191 } ··· 1197 1193 } 1198 1194 1199 1195 if (INTEL_PCH_TYPE(dev) >= PCH_CPT) 1200 - sdvox |= TRANSCODER_CPT(intel_crtc->pipe); 1196 + sdvox |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe); 1201 1197 else 1202 - sdvox |= TRANSCODER(intel_crtc->pipe); 1198 + sdvox |= SDVO_PIPE_SEL(intel_crtc->pipe); 1203 1199 1204 1200 if (intel_sdvo->has_hdmi_audio) 1205 1201 sdvox |= SDVO_AUDIO_ENABLE; ··· 1309 1305 temp = I915_READ(intel_sdvo->sdvo_reg); 1310 1306 if ((temp & SDVO_ENABLE) == 0) { 1311 1307 /* HW workaround for IBX, we need to move the port 1312 - * to transcoder A before disabling it. */ 1313 - if (HAS_PCH_IBX(dev)) { 1314 - struct drm_crtc *crtc = encoder->base.crtc; 1315 - int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1; 1316 - 1317 - /* Restore the transcoder select bit. */ 1318 - if (pipe == PIPE_B) 1319 - temp |= SDVO_PIPE_B_SELECT; 1320 - } 1308 + * to transcoder A before disabling it, so restore it here. */ 1309 + if (HAS_PCH_IBX(dev)) 1310 + temp |= SDVO_PIPE_SEL(intel_crtc->pipe); 1321 1311 1322 1312 intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE); 1323 1313 } ··· 1930 1932 break; 1931 1933 case INTEL_BROADCAST_RGB_LIMITED: 1932 1934 intel_sdvo->color_range_auto = false; 1933 - intel_sdvo->color_range = SDVO_COLOR_RANGE_16_235; 1935 + /* FIXME: this bit is only valid when using TMDS 1936 + * encoding and 8 bit per color mode. 
*/ 1937 + intel_sdvo->color_range = HDMI_COLOR_RANGE_16_235; 1934 1938 break; 1935 1939 default: 1936 1940 return -EINVAL;
+7
drivers/video/fbmem.c
··· 1645 1645 if (!fb_info->modelist.prev || !fb_info->modelist.next) 1646 1646 INIT_LIST_HEAD(&fb_info->modelist); 1647 1647 1648 + if (fb_info->skip_vt_switch) 1649 + pm_vt_switch_required(fb_info->dev, false); 1650 + else 1651 + pm_vt_switch_required(fb_info->dev, true); 1652 + 1648 1653 fb_var_to_videomode(&mode, &fb_info->var); 1649 1654 fb_add_videomode(&mode, &fb_info->modelist); 1650 1655 registered_fb[i] = fb_info; ··· 1683 1678 1684 1679 if (ret) 1685 1680 return -EINVAL; 1681 + 1682 + pm_vt_switch_unregister(fb_info->dev); 1686 1683 1687 1684 unlink_framebuffer(fb_info); 1688 1685 if (fb_info->pixmap.addr &&
+2
include/linux/fb.h
··· 501 501 resource_size_t size; 502 502 } ranges[0]; 503 503 } *apertures; 504 + 505 + bool skip_vt_switch; /* no VT switch on suspend/resume required */ 504 506 }; 505 507 506 508 static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
+13
include/linux/pm.h
··· 34 34 extern void (*pm_power_off)(void); 35 35 extern void (*pm_power_off_prepare)(void); 36 36 37 + struct device; /* we have a circular dep with device.h */ 38 + #ifdef CONFIG_VT_CONSOLE_SLEEP 39 + extern void pm_vt_switch_required(struct device *dev, bool required); 40 + extern void pm_vt_switch_unregister(struct device *dev); 41 + #else 42 + static inline void pm_vt_switch_required(struct device *dev, bool required) 43 + { 44 + } 45 + static inline void pm_vt_switch_unregister(struct device *dev) 46 + { 47 + } 48 + #endif /* CONFIG_VT_CONSOLE_SLEEP */ 49 + 37 50 /* 38 51 * Device power management 39 52 */
+116
kernel/power/console.c
··· 4 4 * Originally from swsusp. 5 5 */ 6 6 7 + #include <linux/console.h> 7 8 #include <linux/vt_kern.h> 8 9 #include <linux/kbd_kern.h> 9 10 #include <linux/vt.h> ··· 15 14 16 15 static int orig_fgconsole, orig_kmsg; 17 16 17 + static DEFINE_MUTEX(vt_switch_mutex); 18 + 19 + struct pm_vt_switch { 20 + struct list_head head; 21 + struct device *dev; 22 + bool required; 23 + }; 24 + 25 + static LIST_HEAD(pm_vt_switch_list); 26 + 27 + 28 + /** 29 + * pm_vt_switch_required - indicate VT switch at suspend requirements 30 + * @dev: device 31 + * @required: if true, caller needs VT switch at suspend/resume time 32 + * 33 + * The different console drivers may or may not require VT switches across 34 + * suspend/resume, depending on how they handle restoring video state and 35 + * what may be running. 36 + * 37 + * Drivers can indicate support for switchless suspend/resume, which can 38 + * save time and flicker, by using this routine and passing 'false' as 39 + * the argument. If any loaded driver needs VT switching, or the 40 + * no_console_suspend argument has been passed on the command line, VT 41 + * switches will occur. 42 + */ 43 + void pm_vt_switch_required(struct device *dev, bool required) 44 + { 45 + struct pm_vt_switch *entry, *tmp; 46 + 47 + mutex_lock(&vt_switch_mutex); 48 + list_for_each_entry(tmp, &pm_vt_switch_list, head) { 49 + if (tmp->dev == dev) { 50 + /* already registered, update requirement */ 51 + tmp->required = required; 52 + goto out; 53 + } 54 + } 55 + 56 + entry = kmalloc(sizeof(*entry), GFP_KERNEL); 57 + if (!entry) 58 + goto out; 59 + 60 + entry->required = required; 61 + entry->dev = dev; 62 + 63 + list_add(&entry->head, &pm_vt_switch_list); 64 + out: 65 + mutex_unlock(&vt_switch_mutex); 66 + } 67 + EXPORT_SYMBOL(pm_vt_switch_required); 68 + 69 + /** 70 + * pm_vt_switch_unregister - stop tracking a device's VT switching needs 71 + * @dev: device 72 + * 73 + * Remove @dev from the vt switch list. 
74 + */ 75 + void pm_vt_switch_unregister(struct device *dev) 76 + { 77 + struct pm_vt_switch *tmp; 78 + 79 + mutex_lock(&vt_switch_mutex); 80 + list_for_each_entry(tmp, &pm_vt_switch_list, head) { 81 + if (tmp->dev == dev) { 82 + list_del(&tmp->head); 83 + break; 84 + } 85 + } 86 + mutex_unlock(&vt_switch_mutex); 87 + } 88 + EXPORT_SYMBOL(pm_vt_switch_unregister); 89 + 90 + /* 91 + * There are three cases when a VT switch on suspend/resume are required: 92 + * 1) no driver has indicated a requirement one way or another, so preserve 93 + * the old behavior 94 + * 2) console suspend is disabled, we want to see debug messages across 95 + * suspend/resume 96 + * 3) any registered driver indicates it needs a VT switch 97 + * 98 + * If none of these conditions is present, meaning we have at least one driver 99 + * that doesn't need the switch, and none that do, we can avoid it to make 100 + * resume look a little prettier (and suspend too, but that's usually hidden, 101 + * e.g. when closing the lid on a laptop). 102 + */ 103 + static bool pm_vt_switch(void) 104 + { 105 + struct pm_vt_switch *entry; 106 + bool ret = true; 107 + 108 + mutex_lock(&vt_switch_mutex); 109 + if (list_empty(&pm_vt_switch_list)) 110 + goto out; 111 + 112 + if (!console_suspend_enabled) 113 + goto out; 114 + 115 + list_for_each_entry(entry, &pm_vt_switch_list, head) { 116 + if (entry->required) 117 + goto out; 118 + } 119 + 120 + ret = false; 121 + out: 122 + mutex_unlock(&vt_switch_mutex); 123 + return ret; 124 + } 125 + 18 126 int pm_prepare_console(void) 19 127 { 128 + if (!pm_vt_switch()) 129 + return 0; 130 + 20 131 orig_fgconsole = vt_move_to_console(SUSPEND_CONSOLE, 1); 21 132 if (orig_fgconsole < 0) 22 133 return 1; ··· 139 26 140 27 void pm_restore_console(void) 141 28 { 29 + if (!pm_vt_switch()) 30 + return; 31 + 142 32 if (orig_fgconsole >= 0) { 143 33 vt_move_to_console(orig_fgconsole, 0); 144 34 vt_kmsg_redirect(orig_kmsg);