Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-intel-next-2014-10-24' of git://anongit.freedesktop.org/drm-intel into drm-next

- suspend/resume/freeze/thaw unification from Imre
- wa list improvements from Mika&Arun
- display pll precomputation from Ander Conselvan; this removed the last
->mode_set callbacks, a big step towards implementing atomic modesets
- more kerneldoc for the interrupt code
- 180 degree rotation for cursors (Ville&Sonika)
- ULT/ULX feature check macros cleaned up thanks to Damien
- piles and piles of fixes all over, bug team seems to work!

* tag 'drm-intel-next-2014-10-24' of git://anongit.freedesktop.org/drm-intel: (61 commits)
drm/i915: Update DRIVER_DATE to 20141024
drm/i915: add comments on what stage a given PM handler is called
drm/i915: unify switcheroo and legacy suspend/resume handlers
drm/i915: add poweroff_late handler
drm/i915: sanitize suspend/resume helper function names
drm/i915: unify S3 and S4 suspend/resume handlers
drm/i915: disable/re-enable PCI device around S4 freeze/thaw
drm/i915: enable output polling during S4 thaw
drm/i915: check for GT faults in all resume handlers and driver load time
drm/i915: remove unused restore_gtt_mappings optimization during suspend
drm/i915: fix S4 suspend while switcheroo state is off
drm/i915: vlv: fix switcheroo/legacy suspend/resume
drm/i915: propagate error from legacy resume handler
drm/i915: unify legacy S3 suspend and S4 freeze handlers
drm/i915: factor out i915_drm_suspend_late
drm/i915: Emit even number of dwords when emitting LRIs
drm/i915: Add rotation support for cursor plane (v5)
drm/i915: Correctly reject invalid flags for wait_ioctl
drm/i915: use macros to assign mmio access functions
drm/i915: only run hsw_power_well_post_enable when really needed
...

+1155 -946
+5
Documentation/DocBook/drm.tmpl
··· 3831 3831 !Fdrivers/gpu/drm/i915/i915_gem.c i915_gem_track_fb 3832 3832 </sect2> 3833 3833 <sect2> 3834 + <title>Display FIFO Underrun Reporting</title> 3835 + !Pdrivers/gpu/drm/i915/intel_fifo_underrun.c fifo underrun handling 3836 + !Idrivers/gpu/drm/i915/intel_fifo_underrun.c 3837 + </sect2> 3838 + <sect2> 3834 3839 <title>Plane Configuration</title> 3835 3840 <para> 3836 3841 This section covers plane configuration and composition with the
+1
drivers/gpu/drm/i915/Makefile
··· 45 45 # modesetting core code 46 46 i915-y += intel_bios.o \ 47 47 intel_display.o \ 48 + intel_fifo_underrun.o \ 48 49 intel_frontbuffer.o \ 49 50 intel_modes.o \ 50 51 intel_overlay.o \
+26 -11
drivers/gpu/drm/i915/i915_debugfs.c
··· 1848 1848 if (ret) 1849 1849 return ret; 1850 1850 1851 + intel_runtime_pm_get(dev_priv); 1852 + 1851 1853 for_each_ring(ring, dev_priv, ring_id) { 1852 1854 struct intel_ctx_submit_request *head_req = NULL; 1853 1855 int count = 0; ··· 1901 1899 seq_putc(m, '\n'); 1902 1900 } 1903 1901 1902 + intel_runtime_pm_put(dev_priv); 1904 1903 mutex_unlock(&dev->struct_mutex); 1905 1904 1906 1905 return 0; ··· 2658 2655 2659 2656 intel_runtime_pm_get(dev_priv); 2660 2657 2661 - seq_printf(m, "Workarounds applied: %d\n", dev_priv->num_wa_regs); 2662 - for (i = 0; i < dev_priv->num_wa_regs; ++i) { 2663 - u32 addr, mask; 2658 + seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count); 2659 + for (i = 0; i < dev_priv->workarounds.count; ++i) { 2660 + u32 addr, mask, value, read; 2661 + bool ok; 2664 2662 2665 - addr = dev_priv->intel_wa_regs[i].addr; 2666 - mask = dev_priv->intel_wa_regs[i].mask; 2667 - dev_priv->intel_wa_regs[i].value = I915_READ(addr) | mask; 2668 - if (dev_priv->intel_wa_regs[i].addr) 2669 - seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n", 2670 - dev_priv->intel_wa_regs[i].addr, 2671 - dev_priv->intel_wa_regs[i].value, 2672 - dev_priv->intel_wa_regs[i].mask); 2663 + addr = dev_priv->workarounds.reg[i].addr; 2664 + mask = dev_priv->workarounds.reg[i].mask; 2665 + value = dev_priv->workarounds.reg[i].value; 2666 + read = I915_READ(addr); 2667 + ok = (value & mask) == (read & mask); 2668 + seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n", 2669 + addr, value, mask, read, ok ? 
"OK" : "FAIL"); 2673 2670 } 2674 2671 2675 2672 intel_runtime_pm_put(dev_priv); ··· 3258 3255 { 3259 3256 struct drm_i915_private *dev_priv = dev->dev_private; 3260 3257 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 3258 + struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, 3259 + pipe)); 3261 3260 u32 val = 0; /* shut up gcc */ 3262 3261 int ret; 3263 3262 ··· 3294 3289 GFP_KERNEL); 3295 3290 if (!pipe_crc->entries) 3296 3291 return -ENOMEM; 3292 + 3293 + /* 3294 + * When IPS gets enabled, the pipe CRC changes. Since IPS gets 3295 + * enabled and disabled dynamically based on package C states, 3296 + * user space can't make reliable use of the CRCs, so let's just 3297 + * completely disable it. 3298 + */ 3299 + hsw_disable_ips(crtc); 3297 3300 3298 3301 spin_lock_irq(&pipe_crc->lock); 3299 3302 pipe_crc->head = 0; ··· 3341 3328 vlv_undo_pipe_scramble_reset(dev, pipe); 3342 3329 else if (IS_HASWELL(dev) && pipe == PIPE_A) 3343 3330 hsw_undo_trans_edp_pipe_A_crc_wa(dev); 3331 + 3332 + hsw_enable_ips(crtc); 3344 3333 } 3345 3334 3346 3335 return 0;
+7 -5
drivers/gpu/drm/i915/i915_dma.c
··· 1275 1275 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 1276 1276 /* i915 resume handler doesn't set to D0 */ 1277 1277 pci_set_power_state(dev->pdev, PCI_D0); 1278 - i915_resume(dev); 1278 + i915_resume_legacy(dev); 1279 1279 dev->switch_power_state = DRM_SWITCH_POWER_ON; 1280 1280 } else { 1281 1281 pr_err("switched off\n"); 1282 1282 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 1283 - i915_suspend(dev, pmm); 1283 + i915_suspend_legacy(dev, pmm); 1284 1284 dev->switch_power_state = DRM_SWITCH_POWER_OFF; 1285 1285 } 1286 1286 } ··· 1853 1853 1854 1854 acpi_video_unregister(); 1855 1855 1856 - if (drm_core_check_feature(dev, DRIVER_MODESET)) { 1856 + if (drm_core_check_feature(dev, DRIVER_MODESET)) 1857 1857 intel_fbdev_fini(dev); 1858 + 1859 + drm_vblank_cleanup(dev); 1860 + 1861 + if (drm_core_check_feature(dev, DRIVER_MODESET)) { 1858 1862 intel_modeset_cleanup(dev); 1859 1863 1860 1864 /* ··· 1898 1894 if (!I915_NEED_GFX_HWS(dev)) 1899 1895 i915_free_hws(dev); 1900 1896 } 1901 - 1902 - drm_vblank_cleanup(dev); 1903 1897 1904 1898 intel_teardown_gmbus(dev); 1905 1899 intel_teardown_mchbar(dev);
+88 -122
drivers/gpu/drm/i915/i915_drv.c
··· 463 463 dev_priv->pch_type = PCH_LPT; 464 464 DRM_DEBUG_KMS("Found LynxPoint PCH\n"); 465 465 WARN_ON(!IS_HASWELL(dev)); 466 - WARN_ON(IS_ULT(dev)); 466 + WARN_ON(IS_HSW_ULT(dev)); 467 467 } else if (IS_BROADWELL(dev)) { 468 468 dev_priv->pch_type = PCH_LPT; 469 469 dev_priv->pch_id = ··· 474 474 dev_priv->pch_type = PCH_LPT; 475 475 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); 476 476 WARN_ON(!IS_HASWELL(dev)); 477 - WARN_ON(!IS_ULT(dev)); 477 + WARN_ON(!IS_HSW_ULT(dev)); 478 478 } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) { 479 479 dev_priv->pch_type = PCH_SPT; 480 480 DRM_DEBUG_KMS("Found SunrisePoint PCH\n"); 481 481 WARN_ON(!IS_SKYLAKE(dev)); 482 - WARN_ON(IS_ULT(dev)); 483 482 } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) { 484 483 dev_priv->pch_type = PCH_SPT; 485 484 DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n"); 486 485 WARN_ON(!IS_SKYLAKE(dev)); 487 - WARN_ON(!IS_ULT(dev)); 488 486 } else 489 487 continue; 490 488 ··· 554 556 static int intel_resume_prepare(struct drm_i915_private *dev_priv, 555 557 bool rpm_resume); 556 558 557 - static int i915_drm_freeze(struct drm_device *dev) 559 + static int i915_drm_suspend(struct drm_device *dev) 558 560 { 559 561 struct drm_i915_private *dev_priv = dev->dev_private; 560 562 struct drm_crtc *crtc; ··· 630 632 return 0; 631 633 } 632 634 633 - int i915_suspend(struct drm_device *dev, pm_message_t state) 635 + static int i915_drm_suspend_late(struct drm_device *drm_dev) 636 + { 637 + struct drm_i915_private *dev_priv = drm_dev->dev_private; 638 + int ret; 639 + 640 + ret = intel_suspend_complete(dev_priv); 641 + 642 + if (ret) { 643 + DRM_ERROR("Suspend complete failed: %d\n", ret); 644 + 645 + return ret; 646 + } 647 + 648 + pci_disable_device(drm_dev->pdev); 649 + pci_set_power_state(drm_dev->pdev, PCI_D3hot); 650 + 651 + return 0; 652 + } 653 + 654 + int i915_suspend_legacy(struct drm_device *dev, pm_message_t state) 634 655 { 635 656 int error; 636 657 ··· 659 642 return -ENODEV; 660 643 } 661 644 662 - 
if (state.event == PM_EVENT_PRETHAW) 663 - return 0; 664 - 645 + if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND && 646 + state.event != PM_EVENT_FREEZE)) 647 + return -EINVAL; 665 648 666 649 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 667 650 return 0; 668 651 669 - error = i915_drm_freeze(dev); 652 + error = i915_drm_suspend(dev); 670 653 if (error) 671 654 return error; 672 655 673 - if (state.event == PM_EVENT_SUSPEND) { 674 - /* Shut down the device */ 675 - pci_disable_device(dev->pdev); 676 - pci_set_power_state(dev->pdev, PCI_D3hot); 677 - } 678 - 679 - return 0; 656 + return i915_drm_suspend_late(dev); 680 657 } 681 658 682 - static int i915_drm_thaw_early(struct drm_device *dev) 683 - { 684 - struct drm_i915_private *dev_priv = dev->dev_private; 685 - int ret; 686 - 687 - ret = intel_resume_prepare(dev_priv, false); 688 - if (ret) 689 - DRM_ERROR("Resume prepare failed: %d,Continuing resume\n", ret); 690 - 691 - intel_uncore_early_sanitize(dev, true); 692 - intel_uncore_sanitize(dev); 693 - intel_power_domains_init_hw(dev_priv); 694 - 695 - return ret; 696 - } 697 - 698 - static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) 659 + static int i915_drm_resume(struct drm_device *dev) 699 660 { 700 661 struct drm_i915_private *dev_priv = dev->dev_private; 701 662 702 - if (drm_core_check_feature(dev, DRIVER_MODESET) && 703 - restore_gtt_mappings) { 663 + if (drm_core_check_feature(dev, DRIVER_MODESET)) { 704 664 mutex_lock(&dev->struct_mutex); 705 665 i915_gem_restore_gtt_mappings(dev); 706 666 mutex_unlock(&dev->struct_mutex); ··· 736 742 737 743 intel_opregion_notify_adapter(dev, PCI_D0); 738 744 745 + drm_kms_helper_poll_enable(dev); 746 + 739 747 return 0; 740 748 } 741 749 742 - static int i915_drm_thaw(struct drm_device *dev) 750 + static int i915_drm_resume_early(struct drm_device *dev) 743 751 { 744 - if (drm_core_check_feature(dev, DRIVER_MODESET)) 745 - i915_check_and_clear_faults(dev); 746 - 747 - return 
__i915_drm_thaw(dev, true); 748 - } 749 - 750 - static int i915_resume_early(struct drm_device *dev) 751 - { 752 - if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 753 - return 0; 752 + struct drm_i915_private *dev_priv = dev->dev_private; 753 + int ret; 754 754 755 755 /* 756 756 * We have a resume ordering issue with the snd-hda driver also ··· 760 772 761 773 pci_set_master(dev->pdev); 762 774 763 - return i915_drm_thaw_early(dev); 775 + ret = intel_resume_prepare(dev_priv, false); 776 + if (ret) 777 + DRM_ERROR("Resume prepare failed: %d,Continuing resume\n", ret); 778 + 779 + intel_uncore_early_sanitize(dev, true); 780 + intel_uncore_sanitize(dev); 781 + intel_power_domains_init_hw(dev_priv); 782 + 783 + return ret; 764 784 } 765 785 766 - int i915_resume(struct drm_device *dev) 786 + int i915_resume_legacy(struct drm_device *dev) 767 787 { 768 - struct drm_i915_private *dev_priv = dev->dev_private; 769 788 int ret; 770 789 771 - /* 772 - * Platforms with opregion should have sane BIOS, older ones (gen3 and 773 - * earlier) need to restore the GTT mappings since the BIOS might clear 774 - * all our scratch PTEs. 
775 - */ 776 - ret = __i915_drm_thaw(dev, !dev_priv->opregion.header); 790 + if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 791 + return 0; 792 + 793 + ret = i915_drm_resume_early(dev); 777 794 if (ret) 778 795 return ret; 779 796 780 - drm_kms_helper_poll_enable(dev); 781 - return 0; 782 - } 783 - 784 - static int i915_resume_legacy(struct drm_device *dev) 785 - { 786 - i915_resume_early(dev); 787 - i915_resume(dev); 788 - 789 - return 0; 797 + return i915_drm_resume(dev); 790 798 } 791 799 792 800 /** ··· 934 950 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 935 951 return 0; 936 952 937 - return i915_drm_freeze(drm_dev); 953 + return i915_drm_suspend(drm_dev); 938 954 } 939 955 940 956 static int i915_pm_suspend_late(struct device *dev) 941 957 { 942 958 struct pci_dev *pdev = to_pci_dev(dev); 943 959 struct drm_device *drm_dev = pci_get_drvdata(pdev); 944 - struct drm_i915_private *dev_priv = drm_dev->dev_private; 945 - int ret; 946 960 947 961 /* 948 962 * We have a suspedn ordering issue with the snd-hda driver also ··· 954 972 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 955 973 return 0; 956 974 957 - ret = intel_suspend_complete(dev_priv); 958 - 959 - if (ret) 960 - DRM_ERROR("Suspend complete failed: %d\n", ret); 961 - else { 962 - pci_disable_device(pdev); 963 - pci_set_power_state(pdev, PCI_D3hot); 964 - } 965 - 966 - return ret; 975 + return i915_drm_suspend_late(drm_dev); 967 976 } 968 977 969 978 static int i915_pm_resume_early(struct device *dev) ··· 962 989 struct pci_dev *pdev = to_pci_dev(dev); 963 990 struct drm_device *drm_dev = pci_get_drvdata(pdev); 964 991 965 - return i915_resume_early(drm_dev); 992 + if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 993 + return 0; 994 + 995 + return i915_drm_resume_early(drm_dev); 966 996 } 967 997 968 998 static int i915_pm_resume(struct device *dev) ··· 973 997 struct pci_dev *pdev = to_pci_dev(dev); 974 998 struct drm_device *drm_dev = pci_get_drvdata(pdev); 975 999 
976 - return i915_resume(drm_dev); 977 - } 1000 + if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1001 + return 0; 978 1002 979 - static int i915_pm_freeze(struct device *dev) 980 - { 981 - struct pci_dev *pdev = to_pci_dev(dev); 982 - struct drm_device *drm_dev = pci_get_drvdata(pdev); 983 - 984 - if (!drm_dev || !drm_dev->dev_private) { 985 - dev_err(dev, "DRM not initialized, aborting suspend.\n"); 986 - return -ENODEV; 987 - } 988 - 989 - return i915_drm_freeze(drm_dev); 990 - } 991 - 992 - static int i915_pm_thaw_early(struct device *dev) 993 - { 994 - struct pci_dev *pdev = to_pci_dev(dev); 995 - struct drm_device *drm_dev = pci_get_drvdata(pdev); 996 - 997 - return i915_drm_thaw_early(drm_dev); 998 - } 999 - 1000 - static int i915_pm_thaw(struct device *dev) 1001 - { 1002 - struct pci_dev *pdev = to_pci_dev(dev); 1003 - struct drm_device *drm_dev = pci_get_drvdata(pdev); 1004 - 1005 - return i915_drm_thaw(drm_dev); 1006 - } 1007 - 1008 - static int i915_pm_poweroff(struct device *dev) 1009 - { 1010 - struct pci_dev *pdev = to_pci_dev(dev); 1011 - struct drm_device *drm_dev = pci_get_drvdata(pdev); 1012 - 1013 - return i915_drm_freeze(drm_dev); 1003 + return i915_drm_resume(drm_dev); 1014 1004 } 1015 1005 1016 1006 static int hsw_suspend_complete(struct drm_i915_private *dev_priv) ··· 1534 1592 } 1535 1593 1536 1594 static const struct dev_pm_ops i915_pm_ops = { 1595 + /* 1596 + * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND, 1597 + * PMSG_RESUME] 1598 + */ 1537 1599 .suspend = i915_pm_suspend, 1538 1600 .suspend_late = i915_pm_suspend_late, 1539 1601 .resume_early = i915_pm_resume_early, 1540 1602 .resume = i915_pm_resume, 1541 - .freeze = i915_pm_freeze, 1542 - .thaw_early = i915_pm_thaw_early, 1543 - .thaw = i915_pm_thaw, 1544 - .poweroff = i915_pm_poweroff, 1603 + 1604 + /* 1605 + * S4 event handlers 1606 + * @freeze, @freeze_late : called (1) before creating the 1607 + * hibernation image [PMSG_FREEZE] and 1608 + * (2) after 
rebooting, before restoring 1609 + * the image [PMSG_QUIESCE] 1610 + * @thaw, @thaw_early : called (1) after creating the hibernation 1611 + * image, before writing it [PMSG_THAW] 1612 + * and (2) after failing to create or 1613 + * restore the image [PMSG_RECOVER] 1614 + * @poweroff, @poweroff_late: called after writing the hibernation 1615 + * image, before rebooting [PMSG_HIBERNATE] 1616 + * @restore, @restore_early : called after rebooting and restoring the 1617 + * hibernation image [PMSG_RESTORE] 1618 + */ 1619 + .freeze = i915_pm_suspend, 1620 + .freeze_late = i915_pm_suspend_late, 1621 + .thaw_early = i915_pm_resume_early, 1622 + .thaw = i915_pm_resume, 1623 + .poweroff = i915_pm_suspend, 1624 + .poweroff_late = i915_pm_suspend_late, 1545 1625 .restore_early = i915_pm_resume_early, 1546 1626 .restore = i915_pm_resume, 1627 + 1628 + /* S0ix (via runtime suspend) event handlers */ 1547 1629 .runtime_suspend = intel_runtime_suspend, 1548 1630 .runtime_resume = intel_runtime_resume, 1549 1631 }; ··· 1609 1643 .set_busid = drm_pci_set_busid, 1610 1644 1611 1645 /* Used in place of i915_pm_ops for non-DRIVER_MODESET */ 1612 - .suspend = i915_suspend, 1646 + .suspend = i915_suspend_legacy, 1613 1647 .resume = i915_resume_legacy, 1614 1648 1615 1649 .device_is_agp = i915_driver_device_is_agp,
+38 -22
drivers/gpu/drm/i915/i915_drv.h
··· 55 55 56 56 #define DRIVER_NAME "i915" 57 57 #define DRIVER_DESC "Intel Graphics" 58 - #define DRIVER_DATE "20141003" 58 + #define DRIVER_DATE "20141024" 59 59 60 60 enum pipe { 61 61 INVALID_PIPE = -1, ··· 460 460 * Returns true on success, false on failure. 461 461 */ 462 462 bool (*find_dpll)(const struct intel_limit *limit, 463 - struct drm_crtc *crtc, 463 + struct intel_crtc *crtc, 464 464 int target, int refclk, 465 465 struct dpll *match_clock, 466 466 struct dpll *best_clock); ··· 476 476 struct intel_crtc_config *); 477 477 void (*get_plane_config)(struct intel_crtc *, 478 478 struct intel_plane_config *); 479 - int (*crtc_mode_set)(struct drm_crtc *crtc, 479 + int (*crtc_mode_set)(struct intel_crtc *crtc, 480 480 int x, int y, 481 481 struct drm_framebuffer *old_fb); 482 482 void (*crtc_enable)(struct drm_crtc *crtc); ··· 1448 1448 unsigned flip_bits; 1449 1449 }; 1450 1450 1451 + struct i915_wa_reg { 1452 + u32 addr; 1453 + u32 value; 1454 + /* bitmask representing WA bits */ 1455 + u32 mask; 1456 + }; 1457 + 1458 + #define I915_MAX_WA_REGS 16 1459 + 1460 + struct i915_workarounds { 1461 + struct i915_wa_reg reg[I915_MAX_WA_REGS]; 1462 + u32 count; 1463 + }; 1464 + 1451 1465 struct drm_i915_private { 1452 1466 struct drm_device *dev; 1453 1467 struct kmem_cache *slab; ··· 1541 1527 struct intel_opregion opregion; 1542 1528 struct intel_vbt_data vbt; 1543 1529 1530 + bool preserve_bios_swizzle; 1531 + 1544 1532 /* overlay */ 1545 1533 struct intel_overlay *overlay; 1546 1534 ··· 1606 1590 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; 1607 1591 int dpio_phy_iosf_port[I915_NUM_PHYS_VLV]; 1608 1592 1609 - /* 1610 - * workarounds are currently applied at different places and 1611 - * changes are being done to consolidate them so exact count is 1612 - * not clear at this point, use a max value for now. 
1613 - */ 1614 - #define I915_MAX_WA_REGS 16 1615 - struct { 1616 - u32 addr; 1617 - u32 value; 1618 - /* bitmask representing WA bits */ 1619 - u32 mask; 1620 - } intel_wa_regs[I915_MAX_WA_REGS]; 1621 - u32 num_wa_regs; 1593 + struct i915_workarounds workarounds; 1622 1594 1623 1595 /* Reclocking support */ 1624 1596 bool render_reclock_avail; ··· 2111 2107 (INTEL_DEVID(dev) & 0x00F0) == 0x0020) 2112 2108 #define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \ 2113 2109 (INTEL_DEVID(dev) & 0xFF00) == 0x0A00) 2114 - #define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev)) 2115 2110 #define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \ 2116 2111 (INTEL_DEVID(dev) & 0x00F0) == 0x0020) 2117 2112 /* ULX machines are also considered ULT. */ ··· 2144 2141 #define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING) 2145 2142 #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) 2146 2143 #define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \ 2147 - to_i915(dev)->ellc_size) 2144 + __I915__(dev)->ellc_size) 2148 2145 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) 2149 2146 2150 2147 #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) ··· 2181 2178 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) 2182 2179 #define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) 2183 2180 2184 - #define HAS_IPS(dev) (IS_ULT(dev) || IS_BROADWELL(dev)) 2181 + #define HAS_IPS(dev) (IS_HSW_ULT(dev) || IS_BROADWELL(dev)) 2185 2182 2186 2183 #define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi) 2187 2184 #define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) 2188 2185 #define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev)) 2189 2186 #define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \ 2190 2187 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev)) 2188 + #define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) 2189 + #define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) 2191 2190 2192 2191 #define INTEL_PCH_DEVICE_ID_MASK 0xff00 2193 2192 #define 
INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 ··· 2200 2195 #define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100 2201 2196 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00 2202 2197 2203 - #define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type) 2198 + #define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type) 2204 2199 #define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT) 2205 2200 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) 2206 2201 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) ··· 2221 2216 extern const struct drm_ioctl_desc i915_ioctls[]; 2222 2217 extern int i915_max_ioctl; 2223 2218 2224 - extern int i915_suspend(struct drm_device *dev, pm_message_t state); 2225 - extern int i915_resume(struct drm_device *dev); 2219 + extern int i915_suspend_legacy(struct drm_device *dev, pm_message_t state); 2220 + extern int i915_resume_legacy(struct drm_device *dev); 2226 2221 extern int i915_master_create(struct drm_device *dev, struct drm_master *master); 2227 2222 extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); 2228 2223 ··· 2317 2312 2318 2313 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv); 2319 2314 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv); 2315 + void 2316 + ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask); 2317 + void 2318 + ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask); 2319 + void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 2320 + uint32_t interrupt_mask, 2321 + uint32_t enabled_irq_mask); 2322 + #define ibx_enable_display_interrupt(dev_priv, bits) \ 2323 + ibx_display_interrupt_update((dev_priv), (bits), (bits)) 2324 + #define ibx_disable_display_interrupt(dev_priv, bits) \ 2325 + ibx_display_interrupt_update((dev_priv), (bits), 0) 2320 2326 2321 2327 /* i915_gem.c */ 2322 2328 int i915_gem_init_ioctl(struct drm_device *dev, void *data,
+19 -5
drivers/gpu/drm/i915/i915_gem.c
··· 1466 1466 * 1467 1467 * While the mapping holds a reference on the contents of the object, it doesn't 1468 1468 * imply a ref on the object itself. 1469 + * 1470 + * IMPORTANT: 1471 + * 1472 + * DRM driver writers who look a this function as an example for how to do GEM 1473 + * mmap support, please don't implement mmap support like here. The modern way 1474 + * to implement DRM mmap support is with an mmap offset ioctl (like 1475 + * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly. 1476 + * That way debug tooling like valgrind will understand what's going on, hiding 1477 + * the mmap call in a driver private ioctl will break that. The i915 driver only 1478 + * does cpu mmaps this way because we didn't know better. 1469 1479 */ 1470 1480 int 1471 1481 i915_gem_mmap_ioctl(struct drm_device *dev, void *data, ··· 2809 2799 unsigned reset_counter; 2810 2800 u32 seqno = 0; 2811 2801 int ret = 0; 2802 + 2803 + if (args->flags != 0) 2804 + return -EINVAL; 2812 2805 2813 2806 ret = i915_mutex_lock_interruptible(dev); 2814 2807 if (ret) ··· 5272 5259 struct drm_device *dev = dev_priv->dev; 5273 5260 struct drm_i915_gem_object *obj; 5274 5261 unsigned long timeout = msecs_to_jiffies(5000) + 1; 5275 - unsigned long pinned, bound, unbound, freed; 5262 + unsigned long pinned, bound, unbound, freed_pages; 5276 5263 bool was_interruptible; 5277 5264 bool unlock; 5278 5265 ··· 5289 5276 was_interruptible = dev_priv->mm.interruptible; 5290 5277 dev_priv->mm.interruptible = false; 5291 5278 5292 - freed = i915_gem_shrink_all(dev_priv); 5279 + freed_pages = i915_gem_shrink_all(dev_priv); 5293 5280 5294 5281 dev_priv->mm.interruptible = was_interruptible; 5295 5282 ··· 5320 5307 if (unlock) 5321 5308 mutex_unlock(&dev->struct_mutex); 5322 5309 5323 - pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n", 5324 - freed, pinned); 5310 + if (freed_pages || unbound || bound) 5311 + pr_info("Purging GPU memory, %lu bytes freed, %lu bytes 
still pinned.\n", 5312 + freed_pages << PAGE_SHIFT, pinned); 5325 5313 if (unbound || bound) 5326 5314 pr_err("%lu and %lu bytes still available in the " 5327 5315 "bound and unbound GPU page lists.\n", 5328 5316 bound, unbound); 5329 5317 5330 - *(unsigned long *)ptr += freed; 5318 + *(unsigned long *)ptr += freed_pages; 5331 5319 return NOTIFY_DONE; 5332 5320 } 5333 5321
+26 -15
drivers/gpu/drm/i915/i915_gem_tiling.c
··· 102 102 swizzle_x = I915_BIT_6_SWIZZLE_NONE; 103 103 swizzle_y = I915_BIT_6_SWIZZLE_NONE; 104 104 } else if (INTEL_INFO(dev)->gen >= 6) { 105 - uint32_t dimm_c0, dimm_c1; 106 - dimm_c0 = I915_READ(MAD_DIMM_C0); 107 - dimm_c1 = I915_READ(MAD_DIMM_C1); 108 - dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK; 109 - dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK; 110 - /* Enable swizzling when the channels are populated with 111 - * identically sized dimms. We don't need to check the 3rd 112 - * channel because no cpu with gpu attached ships in that 113 - * configuration. Also, swizzling only makes sense for 2 114 - * channels anyway. */ 115 - if (dimm_c0 == dimm_c1) { 116 - swizzle_x = I915_BIT_6_SWIZZLE_9_10; 117 - swizzle_y = I915_BIT_6_SWIZZLE_9; 105 + if (dev_priv->preserve_bios_swizzle) { 106 + if (I915_READ(DISP_ARB_CTL) & 107 + DISP_TILE_SURFACE_SWIZZLING) { 108 + swizzle_x = I915_BIT_6_SWIZZLE_9_10; 109 + swizzle_y = I915_BIT_6_SWIZZLE_9; 110 + } else { 111 + swizzle_x = I915_BIT_6_SWIZZLE_NONE; 112 + swizzle_y = I915_BIT_6_SWIZZLE_NONE; 113 + } 118 114 } else { 119 - swizzle_x = I915_BIT_6_SWIZZLE_NONE; 120 - swizzle_y = I915_BIT_6_SWIZZLE_NONE; 115 + uint32_t dimm_c0, dimm_c1; 116 + dimm_c0 = I915_READ(MAD_DIMM_C0); 117 + dimm_c1 = I915_READ(MAD_DIMM_C1); 118 + dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK; 119 + dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK; 120 + /* Enable swizzling when the channels are populated 121 + * with identically sized dimms. We don't need to check 122 + * the 3rd channel because no cpu with gpu attached 123 + * ships in that configuration. Also, swizzling only 124 + * makes sense for 2 channels anyway. 
*/ 125 + if (dimm_c0 == dimm_c1) { 126 + swizzle_x = I915_BIT_6_SWIZZLE_9_10; 127 + swizzle_y = I915_BIT_6_SWIZZLE_9; 128 + } else { 129 + swizzle_x = I915_BIT_6_SWIZZLE_NONE; 130 + swizzle_y = I915_BIT_6_SWIZZLE_NONE; 131 + } 121 132 } 122 133 } else if (IS_GEN5(dev)) { 123 134 /* On Ironlake whatever DRAM config, GPU always do
-2
drivers/gpu/drm/i915/i915_ioc32.c
··· 189 189 [DRM_I915_ALLOC] = compat_i915_alloc 190 190 }; 191 191 192 - #ifdef CONFIG_COMPAT 193 192 /** 194 193 * Called whenever a 32-bit process running under a 64-bit kernel 195 194 * performs an ioctl on /dev/dri/card<n>. ··· 217 218 218 219 return ret; 219 220 } 220 - #endif
+29 -337
drivers/gpu/drm/i915/i915_irq.c
··· 139 139 } while (0) 140 140 141 141 /* For display hotplug interrupt */ 142 - static void 142 + void 143 143 ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask) 144 144 { 145 145 assert_spin_locked(&dev_priv->irq_lock); ··· 154 154 } 155 155 } 156 156 157 - static void 157 + void 158 158 ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask) 159 159 { 160 160 assert_spin_locked(&dev_priv->irq_lock); ··· 238 238 snb_update_pm_irq(dev_priv, mask, 0); 239 239 } 240 240 241 - static bool ivb_can_enable_err_int(struct drm_device *dev) 242 - { 243 - struct drm_i915_private *dev_priv = dev->dev_private; 244 - struct intel_crtc *crtc; 245 - enum pipe pipe; 246 - 247 - assert_spin_locked(&dev_priv->irq_lock); 248 - 249 - for_each_pipe(dev_priv, pipe) { 250 - crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 251 - 252 - if (crtc->cpu_fifo_underrun_disabled) 253 - return false; 254 - } 255 - 256 - return true; 257 - } 258 - 259 241 /** 260 242 * bdw_update_pm_irq - update GT interrupt 2 261 243 * @dev_priv: driver private ··· 278 296 bdw_update_pm_irq(dev_priv, mask, 0); 279 297 } 280 298 281 - static bool cpt_can_enable_serr_int(struct drm_device *dev) 282 - { 283 - struct drm_i915_private *dev_priv = dev->dev_private; 284 - enum pipe pipe; 285 - struct intel_crtc *crtc; 286 - 287 - assert_spin_locked(&dev_priv->irq_lock); 288 - 289 - for_each_pipe(dev_priv, pipe) { 290 - crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 291 - 292 - if (crtc->pch_fifo_underrun_disabled) 293 - return false; 294 - } 295 - 296 - return true; 297 - } 298 - 299 - void i9xx_check_fifo_underruns(struct drm_device *dev) 300 - { 301 - struct drm_i915_private *dev_priv = dev->dev_private; 302 - struct intel_crtc *crtc; 303 - 304 - spin_lock_irq(&dev_priv->irq_lock); 305 - 306 - for_each_intel_crtc(dev, crtc) { 307 - u32 reg = PIPESTAT(crtc->pipe); 308 - u32 pipestat; 309 - 310 - if (crtc->cpu_fifo_underrun_disabled) 311 - continue; 312 - 
313 - pipestat = I915_READ(reg) & 0xffff0000; 314 - if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0) 315 - continue; 316 - 317 - I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS); 318 - POSTING_READ(reg); 319 - 320 - DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe)); 321 - } 322 - 323 - spin_unlock_irq(&dev_priv->irq_lock); 324 - } 325 - 326 - static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev, 327 - enum pipe pipe, 328 - bool enable, bool old) 329 - { 330 - struct drm_i915_private *dev_priv = dev->dev_private; 331 - u32 reg = PIPESTAT(pipe); 332 - u32 pipestat = I915_READ(reg) & 0xffff0000; 333 - 334 - assert_spin_locked(&dev_priv->irq_lock); 335 - 336 - if (enable) { 337 - I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS); 338 - POSTING_READ(reg); 339 - } else { 340 - if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS) 341 - DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); 342 - } 343 - } 344 - 345 - static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev, 346 - enum pipe pipe, bool enable) 347 - { 348 - struct drm_i915_private *dev_priv = dev->dev_private; 349 - uint32_t bit = (pipe == PIPE_A) ? 
DE_PIPEA_FIFO_UNDERRUN : 350 - DE_PIPEB_FIFO_UNDERRUN; 351 - 352 - if (enable) 353 - ironlake_enable_display_irq(dev_priv, bit); 354 - else 355 - ironlake_disable_display_irq(dev_priv, bit); 356 - } 357 - 358 - static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev, 359 - enum pipe pipe, 360 - bool enable, bool old) 361 - { 362 - struct drm_i915_private *dev_priv = dev->dev_private; 363 - if (enable) { 364 - I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe)); 365 - 366 - if (!ivb_can_enable_err_int(dev)) 367 - return; 368 - 369 - ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); 370 - } else { 371 - ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); 372 - 373 - if (old && 374 - I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) { 375 - DRM_ERROR("uncleared fifo underrun on pipe %c\n", 376 - pipe_name(pipe)); 377 - } 378 - } 379 - } 380 - 381 - static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev, 382 - enum pipe pipe, bool enable) 383 - { 384 - struct drm_i915_private *dev_priv = dev->dev_private; 385 - 386 - assert_spin_locked(&dev_priv->irq_lock); 387 - 388 - if (enable) 389 - dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN; 390 - else 391 - dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN; 392 - I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 393 - POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 394 - } 395 - 396 299 /** 397 300 * ibx_display_interrupt_update - update SDEIMR 398 301 * @dev_priv: driver private 399 302 * @interrupt_mask: mask of interrupt bits to update 400 303 * @enabled_irq_mask: mask of interrupt bits to enable 401 304 */ 402 - static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 403 - uint32_t interrupt_mask, 404 - uint32_t enabled_irq_mask) 305 + void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 306 + uint32_t interrupt_mask, 307 + uint32_t enabled_irq_mask) 405 308 { 406 309 uint32_t sdeimr = I915_READ(SDEIMR); 407 310 
sdeimr &= ~interrupt_mask; ··· 300 433 I915_WRITE(SDEIMR, sdeimr); 301 434 POSTING_READ(SDEIMR); 302 435 } 303 - #define ibx_enable_display_interrupt(dev_priv, bits) \ 304 - ibx_display_interrupt_update((dev_priv), (bits), (bits)) 305 - #define ibx_disable_display_interrupt(dev_priv, bits) \ 306 - ibx_display_interrupt_update((dev_priv), (bits), 0) 307 - 308 - static void ibx_set_fifo_underrun_reporting(struct drm_device *dev, 309 - enum transcoder pch_transcoder, 310 - bool enable) 311 - { 312 - struct drm_i915_private *dev_priv = dev->dev_private; 313 - uint32_t bit = (pch_transcoder == TRANSCODER_A) ? 314 - SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER; 315 - 316 - if (enable) 317 - ibx_enable_display_interrupt(dev_priv, bit); 318 - else 319 - ibx_disable_display_interrupt(dev_priv, bit); 320 - } 321 - 322 - static void cpt_set_fifo_underrun_reporting(struct drm_device *dev, 323 - enum transcoder pch_transcoder, 324 - bool enable, bool old) 325 - { 326 - struct drm_i915_private *dev_priv = dev->dev_private; 327 - 328 - if (enable) { 329 - I915_WRITE(SERR_INT, 330 - SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)); 331 - 332 - if (!cpt_can_enable_serr_int(dev)) 333 - return; 334 - 335 - ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT); 336 - } else { 337 - ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT); 338 - 339 - if (old && I915_READ(SERR_INT) & 340 - SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) { 341 - DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n", 342 - transcoder_name(pch_transcoder)); 343 - } 344 - } 345 - } 346 - 347 - /** 348 - * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages 349 - * @dev: drm device 350 - * @pipe: pipe 351 - * @enable: true if we want to report FIFO underrun errors, false otherwise 352 - * 353 - * This function makes us disable or enable CPU fifo underruns for a specific 354 - * pipe. Notice that on some Gens (e.g. 
IVB, HSW), disabling FIFO underrun 355 - * reporting for one pipe may also disable all the other CPU error interruts for 356 - * the other pipes, due to the fact that there's just one interrupt mask/enable 357 - * bit for all the pipes. 358 - * 359 - * Returns the previous state of underrun reporting. 360 - */ 361 - static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, 362 - enum pipe pipe, bool enable) 363 - { 364 - struct drm_i915_private *dev_priv = dev->dev_private; 365 - struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 366 - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 367 - bool old; 368 - 369 - assert_spin_locked(&dev_priv->irq_lock); 370 - 371 - old = !intel_crtc->cpu_fifo_underrun_disabled; 372 - intel_crtc->cpu_fifo_underrun_disabled = !enable; 373 - 374 - if (HAS_GMCH_DISPLAY(dev)) 375 - i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old); 376 - else if (IS_GEN5(dev) || IS_GEN6(dev)) 377 - ironlake_set_fifo_underrun_reporting(dev, pipe, enable); 378 - else if (IS_GEN7(dev)) 379 - ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old); 380 - else if (IS_GEN8(dev) || IS_GEN9(dev)) 381 - broadwell_set_fifo_underrun_reporting(dev, pipe, enable); 382 - 383 - return old; 384 - } 385 - 386 - bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, 387 - enum pipe pipe, bool enable) 388 - { 389 - struct drm_i915_private *dev_priv = dev->dev_private; 390 - unsigned long flags; 391 - bool ret; 392 - 393 - spin_lock_irqsave(&dev_priv->irq_lock, flags); 394 - ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable); 395 - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 396 - 397 - return ret; 398 - } 399 - 400 - static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev, 401 - enum pipe pipe) 402 - { 403 - struct drm_i915_private *dev_priv = dev->dev_private; 404 - struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 405 - struct intel_crtc *intel_crtc = 
to_intel_crtc(crtc); 406 - 407 - return !intel_crtc->cpu_fifo_underrun_disabled; 408 - } 409 - 410 - /** 411 - * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages 412 - * @dev: drm device 413 - * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older) 414 - * @enable: true if we want to report FIFO underrun errors, false otherwise 415 - * 416 - * This function makes us disable or enable PCH fifo underruns for a specific 417 - * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO 418 - * underrun reporting for one transcoder may also disable all the other PCH 419 - * error interruts for the other transcoders, due to the fact that there's just 420 - * one interrupt mask/enable bit for all the transcoders. 421 - * 422 - * Returns the previous state of underrun reporting. 423 - */ 424 - bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev, 425 - enum transcoder pch_transcoder, 426 - bool enable) 427 - { 428 - struct drm_i915_private *dev_priv = dev->dev_private; 429 - struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder]; 430 - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 431 - unsigned long flags; 432 - bool old; 433 - 434 - /* 435 - * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT 436 - * has only one pch transcoder A that all pipes can use. To avoid racy 437 - * pch transcoder -> pipe lookups from interrupt code simply store the 438 - * underrun statistics in crtc A. Since we never expose this anywhere 439 - * nor use it outside of the fifo underrun code here using the "wrong" 440 - * crtc on LPT won't cause issues. 
441 - */ 442 - 443 - spin_lock_irqsave(&dev_priv->irq_lock, flags); 444 - 445 - old = !intel_crtc->pch_fifo_underrun_disabled; 446 - intel_crtc->pch_fifo_underrun_disabled = !enable; 447 - 448 - if (HAS_PCH_IBX(dev)) 449 - ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable); 450 - else 451 - cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old); 452 - 453 - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 454 - return old; 455 - } 456 - 457 436 458 437 static void 459 438 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, ··· 1749 2036 * we need to be careful that we only handle what we want to 1750 2037 * handle. 1751 2038 */ 1752 - mask = 0; 1753 - if (__cpu_fifo_underrun_reporting_enabled(dev, pipe)) 1754 - mask |= PIPE_FIFO_UNDERRUN_STATUS; 2039 + 2040 + /* fifo underruns are filterered in the underrun handler. */ 2041 + mask = PIPE_FIFO_UNDERRUN_STATUS; 1755 2042 1756 2043 switch (pipe) { 1757 2044 case PIPE_A: ··· 1796 2083 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1797 2084 i9xx_pipe_crc_irq_handler(dev, pipe); 1798 2085 1799 - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && 1800 - intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 1801 - DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); 2086 + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2087 + intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1802 2088 } 1803 2089 1804 2090 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) ··· 1964 2252 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 1965 2253 1966 2254 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 1967 - if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 1968 - false)) 1969 - DRM_ERROR("PCH transcoder A FIFO underrun\n"); 2255 + intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 1970 2256 1971 2257 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 1972 - if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 1973 - false)) 1974 - DRM_ERROR("PCH transcoder B FIFO 
underrun\n"); 2258 + intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 1975 2259 } 1976 2260 1977 2261 static void ivb_err_int_handler(struct drm_device *dev) ··· 1980 2272 DRM_ERROR("Poison interrupt\n"); 1981 2273 1982 2274 for_each_pipe(dev_priv, pipe) { 1983 - if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) { 1984 - if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, 1985 - false)) 1986 - DRM_ERROR("Pipe %c FIFO underrun\n", 1987 - pipe_name(pipe)); 1988 - } 2275 + if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 2276 + intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1989 2277 1990 2278 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 1991 2279 if (IS_IVYBRIDGE(dev)) ··· 2003 2299 DRM_ERROR("PCH poison interrupt\n"); 2004 2300 2005 2301 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 2006 - if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 2007 - false)) 2008 - DRM_ERROR("PCH transcoder A FIFO underrun\n"); 2302 + intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 2009 2303 2010 2304 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 2011 - if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 2012 - false)) 2013 - DRM_ERROR("PCH transcoder B FIFO underrun\n"); 2305 + intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 2014 2306 2015 2307 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 2016 - if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C, 2017 - false)) 2018 - DRM_ERROR("PCH transcoder C FIFO underrun\n"); 2308 + intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C); 2019 2309 2020 2310 I915_WRITE(SERR_INT, serr_int); 2021 2311 } ··· 2075 2377 intel_check_page_flip(dev, pipe); 2076 2378 2077 2379 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2078 - if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 2079 - DRM_ERROR("Pipe %c FIFO underrun\n", 2080 - pipe_name(pipe)); 2380 + intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2081 2381 2082 2382 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2083 2383 
i9xx_pipe_crc_irq_handler(dev, pipe); ··· 2294 2598 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) 2295 2599 hsw_pipe_crc_irq_handler(dev, pipe); 2296 2600 2297 - if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) { 2298 - if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, 2299 - false)) 2300 - DRM_ERROR("Pipe %c FIFO underrun\n", 2301 - pipe_name(pipe)); 2302 - } 2601 + if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) 2602 + intel_cpu_fifo_underrun_irq_handler(dev_priv, 2603 + pipe); 2303 2604 2304 2605 2305 2606 if (IS_GEN9(dev)) ··· 3813 4120 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3814 4121 i9xx_pipe_crc_irq_handler(dev, pipe); 3815 4122 3816 - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && 3817 - intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 3818 - DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); 4123 + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4124 + intel_cpu_fifo_underrun_irq_handler(dev_priv, 4125 + pipe); 3819 4126 } 3820 4127 3821 4128 iir = new_iir; ··· 4007 4314 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4008 4315 i9xx_pipe_crc_irq_handler(dev, pipe); 4009 4316 4010 - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && 4011 - intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 4012 - DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); 4317 + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4318 + intel_cpu_fifo_underrun_irq_handler(dev_priv, 4319 + pipe); 4013 4320 } 4014 4321 4015 4322 if (blc_event || (iir & I915_ASLE_INTERRUPT)) ··· 4235 4542 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4236 4543 i9xx_pipe_crc_irq_handler(dev, pipe); 4237 4544 4238 - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && 4239 - intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 4240 - DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); 4545 + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4546 + intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 4241 4547 } 4242 4548 4243 4549 if (blc_event || (iir & 
I915_ASLE_INTERRUPT))
+13 -8
drivers/gpu/drm/i915/i915_reg.h
··· 883 883 #define _VLV_PCS23_DW11_CH0 0x042c 884 884 #define _VLV_PCS01_DW11_CH1 0x262c 885 885 #define _VLV_PCS23_DW11_CH1 0x282c 886 - #define VLV_PCS01_DW11(ch) _PORT(ch, _VLV_PCS01_DW0_CH0, _VLV_PCS01_DW0_CH1) 887 - #define VLV_PCS23_DW11(ch) _PORT(ch, _VLV_PCS23_DW0_CH0, _VLV_PCS23_DW0_CH1) 886 + #define VLV_PCS01_DW11(ch) _PORT(ch, _VLV_PCS01_DW11_CH0, _VLV_PCS01_DW11_CH1) 887 + #define VLV_PCS23_DW11(ch) _PORT(ch, _VLV_PCS23_DW11_CH0, _VLV_PCS23_DW11_CH1) 888 888 889 889 #define _VLV_PCS_DW12_CH0 0x8230 890 890 #define _VLV_PCS_DW12_CH1 0x8430 ··· 4054 4054 #define DSPFW_PLANEA_WM1_HI_MASK (1<<0) 4055 4055 4056 4056 /* drain latency register values*/ 4057 + #define DRAIN_LATENCY_PRECISION_16 16 4057 4058 #define DRAIN_LATENCY_PRECISION_32 32 4058 4059 #define DRAIN_LATENCY_PRECISION_64 64 4059 4060 #define VLV_DDL(pipe) (VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe)) 4060 - #define DDL_CURSOR_PRECISION_64 (1<<31) 4061 - #define DDL_CURSOR_PRECISION_32 (0<<31) 4061 + #define DDL_CURSOR_PRECISION_HIGH (1<<31) 4062 + #define DDL_CURSOR_PRECISION_LOW (0<<31) 4062 4063 #define DDL_CURSOR_SHIFT 24 4063 - #define DDL_SPRITE_PRECISION_64(sprite) (1<<(15+8*(sprite))) 4064 - #define DDL_SPRITE_PRECISION_32(sprite) (0<<(15+8*(sprite))) 4064 + #define DDL_SPRITE_PRECISION_HIGH(sprite) (1<<(15+8*(sprite))) 4065 + #define DDL_SPRITE_PRECISION_LOW(sprite) (0<<(15+8*(sprite))) 4065 4066 #define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite)) 4066 - #define DDL_PLANE_PRECISION_64 (1<<7) 4067 - #define DDL_PLANE_PRECISION_32 (0<<7) 4067 + #define DDL_PLANE_PRECISION_HIGH (1<<7) 4068 + #define DDL_PLANE_PRECISION_LOW (0<<7) 4068 4069 #define DDL_PLANE_SHIFT 0 4069 4070 #define DRAIN_LATENCY_MASK 0x7f 4070 4071 ··· 4208 4207 #define MCURSOR_PIPE_A 0x00 4209 4208 #define MCURSOR_PIPE_B (1 << 28) 4210 4209 #define MCURSOR_GAMMA_ENABLE (1 << 26) 4210 + #define CURSOR_ROTATE_180 (1<<15) 4211 4211 #define CURSOR_TRICKLE_FEED_DISABLE (1 << 14) 4212 4212 #define _CURABASE 0x70084 4213 4213 
#define _CURAPOS 0x70088 ··· 4581 4579 #define PLANE_CTL_ALPHA_DISABLE ( 0 << 4) 4582 4580 #define PLANE_CTL_ALPHA_SW_PREMULTIPLY ( 2 << 4) 4583 4581 #define PLANE_CTL_ALPHA_HW_PREMULTIPLY ( 3 << 4) 4582 + #define PLANE_CTL_ROTATE_MASK 0x3 4583 + #define PLANE_CTL_ROTATE_0 0x0 4584 + #define PLANE_CTL_ROTATE_180 0x2 4584 4585 #define _PLANE_STRIDE_1_A 0x70188 4585 4586 #define _PLANE_STRIDE_2_A 0x70288 4586 4587 #define _PLANE_STRIDE_3_A 0x70388
+19 -3
drivers/gpu/drm/i915/i915_sysfs.c
··· 139 139 static struct attribute *rc6_attrs[] = { 140 140 &dev_attr_rc6_enable.attr, 141 141 &dev_attr_rc6_residency_ms.attr, 142 - &dev_attr_rc6p_residency_ms.attr, 143 - &dev_attr_rc6pp_residency_ms.attr, 144 142 NULL 145 143 }; 146 144 147 145 static struct attribute_group rc6_attr_group = { 148 146 .name = power_group_name, 149 147 .attrs = rc6_attrs 148 + }; 149 + 150 + static struct attribute *rc6p_attrs[] = { 151 + &dev_attr_rc6p_residency_ms.attr, 152 + &dev_attr_rc6pp_residency_ms.attr, 153 + NULL 154 + }; 155 + 156 + static struct attribute_group rc6p_attr_group = { 157 + .name = power_group_name, 158 + .attrs = rc6p_attrs 150 159 }; 151 160 #endif 152 161 ··· 604 595 int ret; 605 596 606 597 #ifdef CONFIG_PM 607 - if (INTEL_INFO(dev)->gen >= 6) { 598 + if (HAS_RC6(dev)) { 608 599 ret = sysfs_merge_group(&dev->primary->kdev->kobj, 609 600 &rc6_attr_group); 610 601 if (ret) 611 602 DRM_ERROR("RC6 residency sysfs setup failed\n"); 603 + } 604 + if (HAS_RC6p(dev)) { 605 + ret = sysfs_merge_group(&dev->primary->kdev->kobj, 606 + &rc6p_attr_group); 607 + if (ret) 608 + DRM_ERROR("RC6p residency sysfs setup failed\n"); 612 609 } 613 610 #endif 614 611 if (HAS_L3_DPF(dev)) { ··· 655 640 device_remove_bin_file(dev->primary->kdev, &dpf_attrs); 656 641 #ifdef CONFIG_PM 657 642 sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group); 643 + sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6p_attr_group); 658 644 #endif 659 645 }
+1 -1
drivers/gpu/drm/i915/intel_crt.c
··· 775 775 I915_WRITE(crt->adpa_reg, adpa); 776 776 POSTING_READ(crt->adpa_reg); 777 777 778 - DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa); 778 + DRM_DEBUG_KMS("crt adpa set to 0x%x\n", adpa); 779 779 crt->force_hotplug_required = 1; 780 780 } 781 781
+1 -1
drivers/gpu/drm/i915/intel_ddi.c
··· 1291 1291 return 450000; 1292 1292 else if (freq == LCPLL_CLK_FREQ_450) 1293 1293 return 450000; 1294 - else if (IS_ULT(dev)) 1294 + else if (IS_HSW_ULT(dev)) 1295 1295 return 337500; 1296 1296 else 1297 1297 return 540000;
+294 -229
drivers/gpu/drm/i915/intel_display.c
··· 406 406 /** 407 407 * Returns whether any output on the specified pipe is of the specified type 408 408 */ 409 - static bool intel_pipe_has_type(struct drm_crtc *crtc, int type) 409 + static bool intel_pipe_has_type(struct intel_crtc *crtc, int type) 410 410 { 411 - struct drm_device *dev = crtc->dev; 411 + struct drm_device *dev = crtc->base.dev; 412 412 struct intel_encoder *encoder; 413 413 414 - for_each_encoder_on_crtc(dev, crtc, encoder) 414 + for_each_encoder_on_crtc(dev, &crtc->base, encoder) 415 415 if (encoder->type == type) 416 416 return true; 417 417 418 418 return false; 419 419 } 420 420 421 - static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, 421 + static const intel_limit_t *intel_ironlake_limit(struct intel_crtc *crtc, 422 422 int refclk) 423 423 { 424 - struct drm_device *dev = crtc->dev; 424 + struct drm_device *dev = crtc->base.dev; 425 425 const intel_limit_t *limit; 426 426 427 427 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { ··· 442 442 return limit; 443 443 } 444 444 445 - static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) 445 + static const intel_limit_t *intel_g4x_limit(struct intel_crtc *crtc) 446 446 { 447 - struct drm_device *dev = crtc->dev; 447 + struct drm_device *dev = crtc->base.dev; 448 448 const intel_limit_t *limit; 449 449 450 450 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { ··· 463 463 return limit; 464 464 } 465 465 466 - static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk) 466 + static const intel_limit_t *intel_limit(struct intel_crtc *crtc, int refclk) 467 467 { 468 - struct drm_device *dev = crtc->dev; 468 + struct drm_device *dev = crtc->base.dev; 469 469 const intel_limit_t *limit; 470 470 471 471 if (HAS_PCH_SPLIT(dev)) ··· 576 576 } 577 577 578 578 static bool 579 - i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, 579 + i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, 580 580 int target, int 
refclk, intel_clock_t *match_clock, 581 581 intel_clock_t *best_clock) 582 582 { 583 - struct drm_device *dev = crtc->dev; 583 + struct drm_device *dev = crtc->base.dev; 584 584 intel_clock_t clock; 585 585 int err = target; 586 586 ··· 637 637 } 638 638 639 639 static bool 640 - pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, 640 + pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, 641 641 int target, int refclk, intel_clock_t *match_clock, 642 642 intel_clock_t *best_clock) 643 643 { 644 - struct drm_device *dev = crtc->dev; 644 + struct drm_device *dev = crtc->base.dev; 645 645 intel_clock_t clock; 646 646 int err = target; 647 647 ··· 696 696 } 697 697 698 698 static bool 699 - g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, 699 + g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, 700 700 int target, int refclk, intel_clock_t *match_clock, 701 701 intel_clock_t *best_clock) 702 702 { 703 - struct drm_device *dev = crtc->dev; 703 + struct drm_device *dev = crtc->base.dev; 704 704 intel_clock_t clock; 705 705 int max_n; 706 706 bool found; ··· 753 753 } 754 754 755 755 static bool 756 - vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, 756 + vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, 757 757 int target, int refclk, intel_clock_t *match_clock, 758 758 intel_clock_t *best_clock) 759 759 { 760 - struct drm_device *dev = crtc->dev; 760 + struct drm_device *dev = crtc->base.dev; 761 761 intel_clock_t clock; 762 762 unsigned int bestppm = 1000000; 763 763 /* min update 19.2 MHz */ ··· 810 810 } 811 811 812 812 static bool 813 - chv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, 813 + chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, 814 814 int target, int refclk, intel_clock_t *match_clock, 815 815 intel_clock_t *best_clock) 816 816 { 817 - struct drm_device *dev = crtc->dev; 817 + struct 
drm_device *dev = crtc->base.dev; 818 818 intel_clock_t clock; 819 819 uint64_t m2; 820 820 int found = false; ··· 1567 1567 1568 1568 for_each_intel_crtc(dev, crtc) 1569 1569 count += crtc->active && 1570 - intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO); 1570 + intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO); 1571 1571 1572 1572 return count; 1573 1573 } ··· 1646 1646 1647 1647 /* Disable DVO 2x clock on both PLLs if necessary */ 1648 1648 if (IS_I830(dev) && 1649 - intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO) && 1649 + intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) && 1650 1650 intel_num_dvo_pipes(dev) == 1) { 1651 1651 I915_WRITE(DPLL(PIPE_B), 1652 1652 I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE); ··· 1884 1884 val &= ~TRANS_INTERLACE_MASK; 1885 1885 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) 1886 1886 if (HAS_PCH_IBX(dev_priv->dev) && 1887 - intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) 1887 + intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO)) 1888 1888 val |= TRANS_LEGACY_INTERLACED_ILK; 1889 1889 else 1890 1890 val |= TRANS_INTERLACED; ··· 2007 2007 * need the check. 
2008 2008 */ 2009 2009 if (!HAS_PCH_SPLIT(dev_priv->dev)) 2010 - if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DSI)) 2010 + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) 2011 2011 assert_dsi_pll_enabled(dev_priv); 2012 2012 else 2013 2013 assert_pll_enabled(dev_priv, pipe); ··· 2359 2359 struct intel_plane_config *plane_config) 2360 2360 { 2361 2361 struct drm_device *dev = intel_crtc->base.dev; 2362 + struct drm_i915_private *dev_priv = dev->dev_private; 2362 2363 struct drm_crtc *c; 2363 2364 struct intel_crtc *i; 2364 2365 struct drm_i915_gem_object *obj; ··· 2391 2390 continue; 2392 2391 2393 2392 if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) { 2393 + if (obj->tiling_mode != I915_TILING_NONE) 2394 + dev_priv->preserve_bios_swizzle = true; 2395 + 2394 2396 drm_framebuffer_reference(c->primary->fb); 2395 2397 intel_crtc->base.primary->fb = c->primary->fb; 2396 2398 obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe); ··· 2698 2694 } 2699 2695 2700 2696 plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE; 2697 + if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180)) 2698 + plane_ctl |= PLANE_CTL_ROTATE_180; 2701 2699 2702 2700 I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl); 2703 2701 ··· 2850 2844 ((adjusted_mode->crtc_hdisplay - 1) << 16) | 2851 2845 (adjusted_mode->crtc_vdisplay - 1)); 2852 2846 if (!crtc->config.pch_pfit.enabled && 2853 - (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) || 2854 - intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))) { 2847 + (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || 2848 + intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { 2855 2849 I915_WRITE(PF_CTL(crtc->pipe), 0); 2856 2850 I915_WRITE(PF_WIN_POS(crtc->pipe), 0); 2857 2851 I915_WRITE(PF_WIN_SZ(crtc->pipe), 0); ··· 3759 3753 3760 3754 /* For PCH DP, enable TRANS_DP_CTL */ 3761 3755 if (HAS_PCH_CPT(dev) && 3762 - (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || 3763 - intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { 3756 + 
(intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT) || 3757 + intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_EDP))) { 3764 3758 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; 3765 3759 reg = TRANS_DP_CTL(pipe); 3766 3760 temp = I915_READ(reg); ··· 4037 4031 return; 4038 4032 4039 4033 if (!HAS_PCH_SPLIT(dev_priv->dev)) { 4040 - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) 4034 + if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) 4041 4035 assert_dsi_pll_enabled(dev_priv); 4042 4036 else 4043 4037 assert_pll_enabled(dev_priv, pipe); ··· 4169 4163 4170 4164 intel_crtc->active = true; 4171 4165 4172 - intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); 4173 - intel_set_pch_fifo_underrun_reporting(dev, pipe, true); 4166 + intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 4167 + intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 4174 4168 4175 4169 for_each_encoder_on_crtc(dev, crtc, encoder) 4176 4170 if (encoder->pre_enable) ··· 4284 4278 4285 4279 intel_crtc->active = true; 4286 4280 4287 - intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); 4281 + intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 4288 4282 for_each_encoder_on_crtc(dev, crtc, encoder) 4289 4283 if (encoder->pre_enable) 4290 4284 encoder->pre_enable(encoder); 4291 4285 4292 4286 if (intel_crtc->config.has_pch_encoder) { 4293 - intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true); 4287 + intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, 4288 + true); 4294 4289 dev_priv->display.fdi_link_train(crtc); 4295 4290 } 4296 4291 ··· 4367 4360 encoder->disable(encoder); 4368 4361 4369 4362 if (intel_crtc->config.has_pch_encoder) 4370 - intel_set_pch_fifo_underrun_reporting(dev, pipe, false); 4363 + intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 4371 4364 4372 4365 intel_disable_pipe(intel_crtc); 4373 4366 ··· 4381 4374 ironlake_fdi_disable(crtc); 4382 4375 4383 4376 
ironlake_disable_pch_transcoder(dev_priv, pipe); 4384 - intel_set_pch_fifo_underrun_reporting(dev, pipe, true); 4377 + intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 4385 4378 4386 4379 if (HAS_PCH_CPT(dev)) { 4387 4380 /* disable TRANS_DP_CTL */ ··· 4434 4427 } 4435 4428 4436 4429 if (intel_crtc->config.has_pch_encoder) 4437 - intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false); 4430 + intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, 4431 + false); 4438 4432 intel_disable_pipe(intel_crtc); 4439 4433 4440 4434 if (intel_crtc->config.dp_encoder_is_mst) ··· 4449 4441 4450 4442 if (intel_crtc->config.has_pch_encoder) { 4451 4443 lpt_disable_pch_transcoder(dev_priv); 4452 - intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true); 4444 + intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, 4445 + true); 4453 4446 intel_ddi_fdi_disable(crtc); 4454 4447 } 4455 4448 ··· 4624 4615 struct drm_i915_private *dev_priv = dev->dev_private; 4625 4616 4626 4617 dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev); 4627 - DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz", 4618 + DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n", 4628 4619 dev_priv->vlv_cdclk_freq); 4629 4620 4630 4621 /* ··· 4827 4818 static void valleyview_crtc_enable(struct drm_crtc *crtc) 4828 4819 { 4829 4820 struct drm_device *dev = crtc->dev; 4821 + struct drm_i915_private *dev_priv = to_i915(dev); 4830 4822 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4831 4823 struct intel_encoder *encoder; 4832 4824 int pipe = intel_crtc->pipe; ··· 4838 4828 if (intel_crtc->active) 4839 4829 return; 4840 4830 4841 - is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI); 4831 + is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI); 4842 4832 4843 4833 if (!is_dsi) { 4844 4834 if (IS_CHERRYVIEW(dev)) ··· 4856 4846 4857 4847 intel_crtc->active = true; 4858 4848 4859 - intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); 4849 + 
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 4860 4850 4861 4851 for_each_encoder_on_crtc(dev, crtc, encoder) 4862 4852 if (encoder->pre_pll_enable) ··· 4889 4879 intel_crtc_enable_planes(crtc); 4890 4880 4891 4881 /* Underruns don't raise interrupts, so check manually. */ 4892 - i9xx_check_fifo_underruns(dev); 4882 + i9xx_check_fifo_underruns(dev_priv); 4893 4883 } 4894 4884 4895 4885 static void i9xx_set_pll_dividers(struct intel_crtc *crtc) ··· 4904 4894 static void i9xx_crtc_enable(struct drm_crtc *crtc) 4905 4895 { 4906 4896 struct drm_device *dev = crtc->dev; 4897 + struct drm_i915_private *dev_priv = to_i915(dev); 4907 4898 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4908 4899 struct intel_encoder *encoder; 4909 4900 int pipe = intel_crtc->pipe; ··· 4926 4915 intel_crtc->active = true; 4927 4916 4928 4917 if (!IS_GEN2(dev)) 4929 - intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); 4918 + intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 4930 4919 4931 4920 for_each_encoder_on_crtc(dev, crtc, encoder) 4932 4921 if (encoder->pre_enable) ··· 4957 4946 * but leave the pipe running. 4958 4947 */ 4959 4948 if (IS_GEN2(dev)) 4960 - intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); 4949 + intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 4961 4950 4962 4951 /* Underruns don't raise interrupts, so check manually. */ 4963 - i9xx_check_fifo_underruns(dev); 4952 + i9xx_check_fifo_underruns(dev_priv); 4964 4953 } 4965 4954 4966 4955 static void i9xx_pfit_disable(struct intel_crtc *crtc) ··· 4996 4985 * but leave the pipe running. 
4997 4986 */ 4998 4987 if (IS_GEN2(dev)) 4999 - intel_set_cpu_fifo_underrun_reporting(dev, pipe, false); 4988 + intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 5000 4989 5001 4990 /* 5002 4991 * Vblank time updates from the shadow to live plane control register ··· 5032 5021 if (encoder->post_disable) 5033 5022 encoder->post_disable(encoder); 5034 5023 5035 - if (!intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) { 5024 + if (!intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) { 5036 5025 if (IS_CHERRYVIEW(dev)) 5037 5026 chv_disable_pll(dev_priv, pipe); 5038 5027 else if (IS_VALLEYVIEW(dev)) ··· 5042 5031 } 5043 5032 5044 5033 if (!IS_GEN2(dev)) 5045 - intel_set_cpu_fifo_underrun_reporting(dev, pipe, false); 5034 + intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 5046 5035 5047 5036 intel_crtc->active = false; 5048 5037 intel_update_watermarks(crtc); ··· 5415 5404 * - LVDS dual channel mode 5416 5405 * - Double wide pipe 5417 5406 */ 5418 - if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && 5407 + if ((intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 5419 5408 intel_is_dual_link_lvds(dev)) || pipe_config->double_wide) 5420 5409 pipe_config->pipe_src_w &= ~1; 5421 5410 ··· 5603 5592 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 5604 5593 } 5605 5594 5606 - static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors) 5595 + static int i9xx_get_refclk(struct intel_crtc *crtc, int num_connectors) 5607 5596 { 5608 - struct drm_device *dev = crtc->dev; 5597 + struct drm_device *dev = crtc->base.dev; 5609 5598 struct drm_i915_private *dev_priv = dev->dev_private; 5610 5599 int refclk; 5611 5600 ··· 5653 5642 crtc->config.dpll_hw_state.fp0 = fp; 5654 5643 5655 5644 crtc->lowfreq_avail = false; 5656 - if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && 5645 + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 5657 5646 reduced_clock && i915.powersave) { 5658 5647 crtc->config.dpll_hw_state.fp1 = fp2; 5659 5648 
crtc->lowfreq_avail = true; ··· 5822 5811 5823 5812 /* Set HBR and RBR LPF coefficients */ 5824 5813 if (crtc->config.port_clock == 162000 || 5825 - intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) || 5826 - intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) 5814 + intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) || 5815 + intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) 5827 5816 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 5828 5817 0x009f0003); 5829 5818 else 5830 5819 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 5831 5820 0x00d0000f); 5832 5821 5833 - if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) || 5834 - intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) { 5822 + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP) || 5823 + intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { 5835 5824 /* Use SSC source */ 5836 5825 if (pipe == PIPE_A) 5837 5826 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), ··· 5851 5840 5852 5841 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); 5853 5842 coreclk = (coreclk & 0x0000ff00) | 0x01c00000; 5854 - if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) || 5855 - intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP)) 5843 + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || 5844 + intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) 5856 5845 coreclk |= 0x01000000; 5857 5846 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); 5858 5847 ··· 5922 5911 (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT)); 5923 5912 5924 5913 /* Loop filter */ 5925 - refclk = i9xx_get_refclk(&crtc->base, 0); 5914 + refclk = i9xx_get_refclk(crtc, 0); 5926 5915 loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT | 5927 5916 2 << DPIO_CHV_GAIN_CTRL_SHIFT; 5928 5917 if (refclk == 100000) ··· 5954 5943 5955 5944 i9xx_update_pll_dividers(crtc, reduced_clock); 5956 5945 5957 - is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) || 5958 - intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI); 5946 + is_sdvo = 
intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) || 5947 + intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI); 5959 5948 5960 5949 dpll = DPLL_VGA_MODE_DIS; 5961 5950 5962 - if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) 5951 + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 5963 5952 dpll |= DPLLB_MODE_LVDS; 5964 5953 else 5965 5954 dpll |= DPLLB_MODE_DAC_SERIAL; ··· 5972 5961 if (is_sdvo) 5973 5962 dpll |= DPLL_SDVO_HIGH_SPEED; 5974 5963 5975 - if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) 5964 + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) 5976 5965 dpll |= DPLL_SDVO_HIGH_SPEED; 5977 5966 5978 5967 /* compute bitmask from p1 value */ ··· 6002 5991 6003 5992 if (crtc->config.sdvo_tv_clock) 6004 5993 dpll |= PLL_REF_INPUT_TVCLKINBC; 6005 - else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && 5994 + else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 6006 5995 intel_panel_use_ssc(dev_priv) && num_connectors < 2) 6007 5996 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 6008 5997 else ··· 6031 6020 6032 6021 dpll = DPLL_VGA_MODE_DIS; 6033 6022 6034 - if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) { 6023 + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 6035 6024 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 6036 6025 } else { 6037 6026 if (clock->p1 == 2) ··· 6042 6031 dpll |= PLL_P2_DIVIDE_BY_4; 6043 6032 } 6044 6033 6045 - if (!IS_I830(dev) && intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO)) 6034 + if (!IS_I830(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO)) 6046 6035 dpll |= DPLL_DVO_2X_MODE; 6047 6036 6048 - if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && 6037 + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 6049 6038 intel_panel_use_ssc(dev_priv) && num_connectors < 2) 6050 6039 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 6051 6040 else ··· 6076 6065 crtc_vtotal -= 1; 6077 6066 crtc_vblank_end -= 1; 6078 6067 6079 - if (intel_pipe_has_type(&intel_crtc->base, 
INTEL_OUTPUT_SDVO)) 6068 + if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO)) 6080 6069 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; 6081 6070 else 6082 6071 vsyncshift = adjusted_mode->crtc_hsync_start - ··· 6234 6223 6235 6224 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 6236 6225 if (INTEL_INFO(dev)->gen < 4 || 6237 - intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO)) 6226 + intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO)) 6238 6227 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; 6239 6228 else 6240 6229 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; ··· 6248 6237 POSTING_READ(PIPECONF(intel_crtc->pipe)); 6249 6238 } 6250 6239 6251 - static int i9xx_crtc_mode_set(struct drm_crtc *crtc, 6240 + static int i9xx_crtc_mode_set(struct intel_crtc *crtc, 6252 6241 int x, int y, 6253 6242 struct drm_framebuffer *fb) 6254 6243 { 6255 - struct drm_device *dev = crtc->dev; 6244 + struct drm_device *dev = crtc->base.dev; 6256 6245 struct drm_i915_private *dev_priv = dev->dev_private; 6257 - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6258 6246 int refclk, num_connectors = 0; 6259 6247 intel_clock_t clock, reduced_clock; 6260 6248 bool ok, has_reduced_clock = false; ··· 6261 6251 struct intel_encoder *encoder; 6262 6252 const intel_limit_t *limit; 6263 6253 6264 - for_each_encoder_on_crtc(dev, crtc, encoder) { 6254 + for_each_encoder_on_crtc(dev, &crtc->base, encoder) { 6265 6255 switch (encoder->type) { 6266 6256 case INTEL_OUTPUT_LVDS: 6267 6257 is_lvds = true; ··· 6277 6267 if (is_dsi) 6278 6268 return 0; 6279 6269 6280 - if (!intel_crtc->config.clock_set) { 6270 + if (!crtc->config.clock_set) { 6281 6271 refclk = i9xx_get_refclk(crtc, num_connectors); 6282 6272 6283 6273 /* ··· 6288 6278 */ 6289 6279 limit = intel_limit(crtc, refclk); 6290 6280 ok = dev_priv->display.find_dpll(limit, crtc, 6291 - intel_crtc->config.port_clock, 6281 + crtc->config.port_clock, 6292 6282 refclk, NULL, &clock); 6293 6283 if (!ok) { 
6294 6284 DRM_ERROR("Couldn't find PLL settings for mode!\n"); ··· 6309 6299 &reduced_clock); 6310 6300 } 6311 6301 /* Compat-code for transition, will disappear. */ 6312 - intel_crtc->config.dpll.n = clock.n; 6313 - intel_crtc->config.dpll.m1 = clock.m1; 6314 - intel_crtc->config.dpll.m2 = clock.m2; 6315 - intel_crtc->config.dpll.p1 = clock.p1; 6316 - intel_crtc->config.dpll.p2 = clock.p2; 6302 + crtc->config.dpll.n = clock.n; 6303 + crtc->config.dpll.m1 = clock.m1; 6304 + crtc->config.dpll.m2 = clock.m2; 6305 + crtc->config.dpll.p1 = clock.p1; 6306 + crtc->config.dpll.p2 = clock.p2; 6317 6307 } 6318 6308 6319 6309 if (IS_GEN2(dev)) { 6320 - i8xx_update_pll(intel_crtc, 6310 + i8xx_update_pll(crtc, 6321 6311 has_reduced_clock ? &reduced_clock : NULL, 6322 6312 num_connectors); 6323 6313 } else if (IS_CHERRYVIEW(dev)) { 6324 - chv_update_pll(intel_crtc); 6314 + chv_update_pll(crtc); 6325 6315 } else if (IS_VALLEYVIEW(dev)) { 6326 - vlv_update_pll(intel_crtc); 6316 + vlv_update_pll(crtc); 6327 6317 } else { 6328 - i9xx_update_pll(intel_crtc, 6318 + i9xx_update_pll(crtc, 6329 6319 has_reduced_clock ? &reduced_clock : NULL, 6330 6320 num_connectors); 6331 6321 } ··· 7113 7103 { 7114 7104 struct drm_device *dev = crtc->dev; 7115 7105 struct drm_i915_private *dev_priv = dev->dev_private; 7116 - struct intel_encoder *intel_encoder; 7106 + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7117 7107 int refclk; 7118 7108 const intel_limit_t *limit; 7119 7109 bool ret, is_lvds = false; 7120 7110 7121 - for_each_encoder_on_crtc(dev, crtc, intel_encoder) { 7122 - switch (intel_encoder->type) { 7123 - case INTEL_OUTPUT_LVDS: 7124 - is_lvds = true; 7125 - break; 7126 - } 7127 - } 7111 + is_lvds = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_LVDS); 7128 7112 7129 7113 refclk = ironlake_get_refclk(crtc); 7130 7114 ··· 7127 7123 * refclk, or FALSE. The returned values represent the clock equation: 7128 7124 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 
7129 7125 */ 7130 - limit = intel_limit(crtc, refclk); 7131 - ret = dev_priv->display.find_dpll(limit, crtc, 7132 - to_intel_crtc(crtc)->config.port_clock, 7126 + limit = intel_limit(intel_crtc, refclk); 7127 + ret = dev_priv->display.find_dpll(limit, intel_crtc, 7128 + intel_crtc->config.port_clock, 7133 7129 refclk, NULL, clock); 7134 7130 if (!ret) 7135 7131 return false; ··· 7142 7138 * downclock feature. 7143 7139 */ 7144 7140 *has_reduced_clock = 7145 - dev_priv->display.find_dpll(limit, crtc, 7141 + dev_priv->display.find_dpll(limit, intel_crtc, 7146 7142 dev_priv->lvds_downclock, 7147 7143 refclk, clock, 7148 7144 reduced_clock); ··· 7252 7248 return dpll | DPLL_VCO_ENABLE; 7253 7249 } 7254 7250 7255 - static int ironlake_crtc_mode_set(struct drm_crtc *crtc, 7251 + static int ironlake_crtc_mode_set(struct intel_crtc *crtc, 7256 7252 int x, int y, 7257 7253 struct drm_framebuffer *fb) 7258 7254 { 7259 - struct drm_device *dev = crtc->dev; 7260 - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7261 - int num_connectors = 0; 7255 + struct drm_device *dev = crtc->base.dev; 7262 7256 intel_clock_t clock, reduced_clock; 7263 7257 u32 dpll = 0, fp = 0, fp2 = 0; 7264 7258 bool ok, has_reduced_clock = false; 7265 7259 bool is_lvds = false; 7266 - struct intel_encoder *encoder; 7267 7260 struct intel_shared_dpll *pll; 7268 7261 7269 - for_each_encoder_on_crtc(dev, crtc, encoder) { 7270 - switch (encoder->type) { 7271 - case INTEL_OUTPUT_LVDS: 7272 - is_lvds = true; 7273 - break; 7274 - } 7275 - 7276 - num_connectors++; 7277 - } 7262 + is_lvds = intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS); 7278 7263 7279 7264 WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)), 7280 7265 "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev)); 7281 7266 7282 - ok = ironlake_compute_clocks(crtc, &clock, 7267 + ok = ironlake_compute_clocks(&crtc->base, &clock, 7283 7268 &has_reduced_clock, &reduced_clock); 7284 - if (!ok && !intel_crtc->config.clock_set) { 7269 + if (!ok && 
!crtc->config.clock_set) { 7285 7270 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 7286 7271 return -EINVAL; 7287 7272 } 7288 7273 /* Compat-code for transition, will disappear. */ 7289 - if (!intel_crtc->config.clock_set) { 7290 - intel_crtc->config.dpll.n = clock.n; 7291 - intel_crtc->config.dpll.m1 = clock.m1; 7292 - intel_crtc->config.dpll.m2 = clock.m2; 7293 - intel_crtc->config.dpll.p1 = clock.p1; 7294 - intel_crtc->config.dpll.p2 = clock.p2; 7274 + if (!crtc->config.clock_set) { 7275 + crtc->config.dpll.n = clock.n; 7276 + crtc->config.dpll.m1 = clock.m1; 7277 + crtc->config.dpll.m2 = clock.m2; 7278 + crtc->config.dpll.p1 = clock.p1; 7279 + crtc->config.dpll.p2 = clock.p2; 7295 7280 } 7296 7281 7297 7282 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ 7298 - if (intel_crtc->config.has_pch_encoder) { 7299 - fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll); 7283 + if (crtc->config.has_pch_encoder) { 7284 + fp = i9xx_dpll_compute_fp(&crtc->config.dpll); 7300 7285 if (has_reduced_clock) 7301 7286 fp2 = i9xx_dpll_compute_fp(&reduced_clock); 7302 7287 7303 - dpll = ironlake_compute_dpll(intel_crtc, 7288 + dpll = ironlake_compute_dpll(crtc, 7304 7289 &fp, &reduced_clock, 7305 7290 has_reduced_clock ? 
&fp2 : NULL); 7306 7291 7307 - intel_crtc->config.dpll_hw_state.dpll = dpll; 7308 - intel_crtc->config.dpll_hw_state.fp0 = fp; 7292 + crtc->config.dpll_hw_state.dpll = dpll; 7293 + crtc->config.dpll_hw_state.fp0 = fp; 7309 7294 if (has_reduced_clock) 7310 - intel_crtc->config.dpll_hw_state.fp1 = fp2; 7295 + crtc->config.dpll_hw_state.fp1 = fp2; 7311 7296 else 7312 - intel_crtc->config.dpll_hw_state.fp1 = fp; 7297 + crtc->config.dpll_hw_state.fp1 = fp; 7313 7298 7314 - pll = intel_get_shared_dpll(intel_crtc); 7299 + pll = intel_get_shared_dpll(crtc); 7315 7300 if (pll == NULL) { 7316 7301 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", 7317 - pipe_name(intel_crtc->pipe)); 7302 + pipe_name(crtc->pipe)); 7318 7303 return -EINVAL; 7319 7304 } 7320 7305 } else 7321 - intel_put_shared_dpll(intel_crtc); 7306 + intel_put_shared_dpll(crtc); 7322 7307 7323 7308 if (is_lvds && has_reduced_clock && i915.powersave) 7324 - intel_crtc->lowfreq_avail = true; 7309 + crtc->lowfreq_avail = true; 7325 7310 else 7326 - intel_crtc->lowfreq_avail = false; 7311 + crtc->lowfreq_avail = false; 7327 7312 7328 7313 return 0; 7329 7314 } ··· 7806 7813 modeset_update_crtc_power_domains(dev); 7807 7814 } 7808 7815 7809 - static int haswell_crtc_mode_set(struct drm_crtc *crtc, 7816 + static int haswell_crtc_mode_set(struct intel_crtc *crtc, 7810 7817 int x, int y, 7811 7818 struct drm_framebuffer *fb) 7812 7819 { 7813 - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7814 - 7815 - if (!intel_ddi_pll_select(intel_crtc)) 7820 + if (!intel_ddi_pll_select(crtc)) 7816 7821 return -EINVAL; 7817 7822 7818 - intel_crtc->lowfreq_avail = false; 7823 + crtc->lowfreq_avail = false; 7819 7824 7820 7825 return 0; 7821 7826 } ··· 8053 8062 struct drm_display_mode *mode) 8054 8063 { 8055 8064 struct drm_i915_private *dev_priv = connector->dev->dev_private; 8065 + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8056 8066 uint8_t *eld = connector->eld; 8057 8067 uint32_t eldv; 8058 8068 uint32_t i; 
··· 8094 8102 8095 8103 eldv = AUDIO_ELD_VALID_A << (pipe * 4); 8096 8104 8097 - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { 8105 + if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT)) { 8098 8106 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); 8099 8107 eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ 8100 8108 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */ ··· 8137 8145 struct drm_display_mode *mode) 8138 8146 { 8139 8147 struct drm_i915_private *dev_priv = connector->dev->dev_private; 8148 + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8140 8149 uint8_t *eld = connector->eld; 8141 8150 uint32_t eldv; 8142 8151 uint32_t i; ··· 8191 8198 eldv = IBX_ELD_VALIDB << ((i - 1) * 4); 8192 8199 } 8193 8200 8194 - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { 8201 + if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT)) { 8195 8202 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); 8196 8203 eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ 8197 8204 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */ ··· 8343 8350 cntl |= CURSOR_PIPE_CSC_ENABLE; 8344 8351 } 8345 8352 8353 + if (to_intel_plane(crtc->cursor)->rotation == BIT(DRM_ROTATE_180)) 8354 + cntl |= CURSOR_ROTATE_180; 8355 + 8346 8356 if (intel_crtc->cursor_cntl != cntl) { 8347 8357 I915_WRITE(CURCNTR(pipe), cntl); 8348 8358 POSTING_READ(CURCNTR(pipe)); ··· 8403 8407 8404 8408 I915_WRITE(CURPOS(pipe), pos); 8405 8409 8410 + /* ILK+ do this automagically */ 8411 + if (HAS_GMCH_DISPLAY(dev) && 8412 + to_intel_plane(crtc->cursor)->rotation == BIT(DRM_ROTATE_180)) { 8413 + base += (intel_crtc->cursor_height * 8414 + intel_crtc->cursor_width - 1) * 4; 8415 + } 8416 + 8406 8417 if (IS_845G(dev) || IS_I865G(dev)) 8407 8418 i845_update_cursor(crtc, base); 8408 8419 else ··· 8453 8450 return true; 8454 8451 } 8455 8452 8456 - /* 8457 - * intel_crtc_cursor_set_obj - Set cursor to specified GEM object 8458 - * 8459 - * Note that the 
object's reference will be consumed if the update fails. If 8460 - * the update succeeds, the reference of the old object (if any) will be 8461 - * consumed. 8462 - */ 8463 8453 static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc, 8464 8454 struct drm_i915_gem_object *obj, 8465 8455 uint32_t width, uint32_t height) ··· 8461 8465 struct drm_i915_private *dev_priv = dev->dev_private; 8462 8466 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8463 8467 enum pipe pipe = intel_crtc->pipe; 8464 - unsigned old_width, stride; 8468 + unsigned old_width; 8465 8469 uint32_t addr; 8466 8470 int ret; 8467 8471 ··· 8473 8477 goto finish; 8474 8478 } 8475 8479 8476 - /* Check for which cursor types we support */ 8477 - if (!cursor_size_ok(dev, width, height)) { 8478 - DRM_DEBUG("Cursor dimension not supported\n"); 8479 - return -EINVAL; 8480 - } 8481 - 8482 - stride = roundup_pow_of_two(width) * 4; 8483 - if (obj->base.size < stride * height) { 8484 - DRM_DEBUG_KMS("buffer is too small\n"); 8485 - ret = -ENOMEM; 8486 - goto fail; 8487 - } 8488 - 8489 8480 /* we only need to pin inside GTT if cursor is non-phy */ 8490 8481 mutex_lock(&dev->struct_mutex); 8491 8482 if (!INTEL_INFO(dev)->cursor_needs_physical) { 8492 8483 unsigned alignment; 8493 - 8494 - if (obj->tiling_mode) { 8495 - DRM_DEBUG_KMS("cursor cannot be tiled\n"); 8496 - ret = -EINVAL; 8497 - goto fail_locked; 8498 - } 8499 8484 8500 8485 /* 8501 8486 * Global gtt pte registers are special registers which actually ··· 8553 8576 i915_gem_object_unpin_from_display_plane(obj); 8554 8577 fail_locked: 8555 8578 mutex_unlock(&dev->struct_mutex); 8556 - fail: 8557 - drm_gem_object_unreference_unlocked(&obj->base); 8558 8579 return ret; 8559 8580 } 8560 8581 ··· 10896 10921 10897 10922 crtc->scanline_offset = vtotal - 1; 10898 10923 } else if (HAS_DDI(dev) && 10899 - intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) { 10924 + intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) { 10900 10925 crtc->scanline_offset = 2; 
10901 10926 } else 10902 10927 crtc->scanline_offset = 1; ··· 11016 11041 crtc->x = x; 11017 11042 crtc->y = y; 11018 11043 11019 - ret = dev_priv->display.crtc_mode_set(&intel_crtc->base, 11020 - x, y, fb); 11044 + ret = dev_priv->display.crtc_mode_set(intel_crtc, x, y, fb); 11021 11045 if (ret) 11022 11046 goto done; 11023 11047 } ··· 11640 11666 struct drm_rect *dest = &state->dst; 11641 11667 struct drm_rect *src = &state->src; 11642 11668 const struct drm_rect *clip = &state->clip; 11669 + int ret; 11643 11670 11644 - return drm_plane_helper_check_update(plane, crtc, fb, 11671 + ret = drm_plane_helper_check_update(plane, crtc, fb, 11645 11672 src, dest, clip, 11646 11673 DRM_PLANE_HELPER_NO_SCALING, 11647 11674 DRM_PLANE_HELPER_NO_SCALING, 11648 11675 false, true, &state->visible); 11676 + if (ret) 11677 + return ret; 11678 + 11679 + /* no fb bound */ 11680 + if (state->visible && !fb) { 11681 + DRM_ERROR("No FB bound\n"); 11682 + return -EINVAL; 11683 + } 11684 + 11685 + return 0; 11649 11686 } 11650 11687 11651 11688 static int ··· 11668 11683 struct drm_device *dev = crtc->dev; 11669 11684 struct drm_i915_private *dev_priv = dev->dev_private; 11670 11685 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11686 + enum pipe pipe = intel_crtc->pipe; 11687 + struct drm_framebuffer *old_fb = plane->fb; 11671 11688 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 11672 11689 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb); 11673 11690 struct intel_plane *intel_plane = to_intel_plane(plane); ··· 11678 11691 11679 11692 intel_crtc_wait_for_pending_flips(crtc); 11680 11693 11681 - /* 11682 - * If clipping results in a non-visible primary plane, we'll disable 11683 - * the primary plane. Note that this is a bit different than what 11684 - * happens if userspace explicitly disables the plane by passing fb=0 11685 - * because plane->fb still gets set and pinned. 
11686 - */ 11687 - if (!state->visible) { 11688 - mutex_lock(&dev->struct_mutex); 11689 - 11690 - /* 11691 - * Try to pin the new fb first so that we can bail out if we 11692 - * fail. 11693 - */ 11694 - if (plane->fb != fb) { 11695 - ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); 11696 - if (ret) { 11697 - mutex_unlock(&dev->struct_mutex); 11698 - return ret; 11699 - } 11700 - } 11701 - 11702 - i915_gem_track_fb(old_obj, obj, 11703 - INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe)); 11704 - 11705 - if (intel_crtc->primary_enabled) 11706 - intel_disable_primary_hw_plane(plane, crtc); 11707 - 11708 - 11709 - if (plane->fb != fb) 11710 - if (plane->fb) 11711 - intel_unpin_fb_obj(old_obj); 11712 - 11713 - mutex_unlock(&dev->struct_mutex); 11714 - 11715 - } else { 11716 - if (intel_crtc && intel_crtc->active && 11717 - intel_crtc->primary_enabled) { 11718 - /* 11719 - * FBC does not work on some platforms for rotated 11720 - * planes, so disable it when rotation is not 0 and 11721 - * update it when rotation is set back to 0. 11722 - * 11723 - * FIXME: This is redundant with the fbc update done in 11724 - * the primary plane enable function except that that 11725 - * one is done too late. We eventually need to unify 11726 - * this. 
11727 - */ 11728 - if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) && 11729 - dev_priv->fbc.plane == intel_crtc->plane && 11730 - intel_plane->rotation != BIT(DRM_ROTATE_0)) { 11731 - intel_disable_fbc(dev); 11732 - } 11733 - } 11734 - ret = intel_pipe_set_base(crtc, src->x1, src->y1, fb); 11735 - if (ret) 11736 - return ret; 11737 - 11738 - if (!intel_crtc->primary_enabled) 11739 - intel_enable_primary_hw_plane(plane, crtc); 11694 + if (intel_crtc_has_pending_flip(crtc)) { 11695 + DRM_ERROR("pipe is still busy with an old pageflip\n"); 11696 + return -EBUSY; 11740 11697 } 11698 + 11699 + if (plane->fb != fb) { 11700 + mutex_lock(&dev->struct_mutex); 11701 + ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); 11702 + if (ret == 0) 11703 + i915_gem_track_fb(old_obj, obj, 11704 + INTEL_FRONTBUFFER_PRIMARY(pipe)); 11705 + mutex_unlock(&dev->struct_mutex); 11706 + if (ret != 0) { 11707 + DRM_DEBUG_KMS("pin & fence failed\n"); 11708 + return ret; 11709 + } 11710 + } 11711 + 11712 + crtc->primary->fb = fb; 11713 + crtc->x = src->x1; 11714 + crtc->y = src->y1; 11741 11715 11742 11716 intel_plane->crtc_x = state->orig_dst.x1; 11743 11717 intel_plane->crtc_y = state->orig_dst.y1; ··· 11709 11761 intel_plane->src_w = drm_rect_width(&state->orig_src); 11710 11762 intel_plane->src_h = drm_rect_height(&state->orig_src); 11711 11763 intel_plane->obj = obj; 11764 + 11765 + if (intel_crtc->active) { 11766 + /* 11767 + * FBC does not work on some platforms for rotated 11768 + * planes, so disable it when rotation is not 0 and 11769 + * update it when rotation is set back to 0. 11770 + * 11771 + * FIXME: This is redundant with the fbc update done in 11772 + * the primary plane enable function except that that 11773 + * one is done too late. We eventually need to unify 11774 + * this. 
11775 + */ 11776 + if (intel_crtc->primary_enabled && 11777 + INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) && 11778 + dev_priv->fbc.plane == intel_crtc->plane && 11779 + intel_plane->rotation != BIT(DRM_ROTATE_0)) { 11780 + intel_disable_fbc(dev); 11781 + } 11782 + 11783 + if (state->visible) { 11784 + bool was_enabled = intel_crtc->primary_enabled; 11785 + 11786 + /* FIXME: kill this fastboot hack */ 11787 + intel_update_pipe_size(intel_crtc); 11788 + 11789 + intel_crtc->primary_enabled = true; 11790 + 11791 + dev_priv->display.update_primary_plane(crtc, plane->fb, 11792 + crtc->x, crtc->y); 11793 + 11794 + /* 11795 + * BDW signals flip done immediately if the plane 11796 + * is disabled, even if the plane enable is already 11797 + * armed to occur at the next vblank :( 11798 + */ 11799 + if (IS_BROADWELL(dev) && !was_enabled) 11800 + intel_wait_for_vblank(dev, intel_crtc->pipe); 11801 + } else { 11802 + /* 11803 + * If clipping results in a non-visible primary plane, 11804 + * we'll disable the primary plane. Note that this is 11805 + * a bit different than what happens if userspace 11806 + * explicitly disables the plane by passing fb=0 11807 + * because plane->fb still gets set and pinned. 
11808 + */ 11809 + intel_disable_primary_hw_plane(plane, crtc); 11810 + } 11811 + 11812 + intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe)); 11813 + 11814 + mutex_lock(&dev->struct_mutex); 11815 + intel_update_fbc(dev); 11816 + mutex_unlock(&dev->struct_mutex); 11817 + } 11818 + 11819 + if (old_fb && old_fb != fb) { 11820 + if (intel_crtc->active) 11821 + intel_wait_for_vblank(dev, intel_crtc->pipe); 11822 + 11823 + mutex_lock(&dev->struct_mutex); 11824 + intel_unpin_fb_obj(old_obj); 11825 + mutex_unlock(&dev->struct_mutex); 11826 + } 11712 11827 11713 11828 return 0; 11714 11829 } ··· 11897 11886 struct intel_plane_state *state) 11898 11887 { 11899 11888 struct drm_crtc *crtc = state->crtc; 11889 + struct drm_device *dev = crtc->dev; 11900 11890 struct drm_framebuffer *fb = state->fb; 11901 11891 struct drm_rect *dest = &state->dst; 11902 11892 struct drm_rect *src = &state->src; 11903 11893 const struct drm_rect *clip = &state->clip; 11894 + struct drm_i915_gem_object *obj = intel_fb_obj(fb); 11895 + int crtc_w, crtc_h; 11896 + unsigned stride; 11897 + int ret; 11904 11898 11905 - return drm_plane_helper_check_update(plane, crtc, fb, 11899 + ret = drm_plane_helper_check_update(plane, crtc, fb, 11906 11900 src, dest, clip, 11907 11901 DRM_PLANE_HELPER_NO_SCALING, 11908 11902 DRM_PLANE_HELPER_NO_SCALING, 11909 11903 true, true, &state->visible); 11904 + if (ret) 11905 + return ret; 11906 + 11907 + 11908 + /* if we want to turn off the cursor ignore width and height */ 11909 + if (!obj) 11910 + return 0; 11911 + 11912 + /* Check for which cursor types we support */ 11913 + crtc_w = drm_rect_width(&state->orig_dst); 11914 + crtc_h = drm_rect_height(&state->orig_dst); 11915 + if (!cursor_size_ok(dev, crtc_w, crtc_h)) { 11916 + DRM_DEBUG("Cursor dimension not supported\n"); 11917 + return -EINVAL; 11918 + } 11919 + 11920 + stride = roundup_pow_of_two(crtc_w) * 4; 11921 + if (obj->base.size < stride * crtc_h) { 11922 + DRM_DEBUG_KMS("buffer is too small\n"); 
11923 + return -ENOMEM; 11924 + } 11925 + 11926 + if (fb == crtc->cursor->fb) 11927 + return 0; 11928 + 11929 + /* we only need to pin inside GTT if cursor is non-phy */ 11930 + mutex_lock(&dev->struct_mutex); 11931 + if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) { 11932 + DRM_DEBUG_KMS("cursor cannot be tiled\n"); 11933 + ret = -EINVAL; 11934 + } 11935 + mutex_unlock(&dev->struct_mutex); 11936 + 11937 + return ret; 11910 11938 } 11911 11939 11912 11940 static int ··· 12020 11970 .update_plane = intel_cursor_plane_update, 12021 11971 .disable_plane = intel_cursor_plane_disable, 12022 11972 .destroy = intel_plane_destroy, 11973 + .set_property = intel_plane_set_property, 12023 11974 }; 12024 11975 12025 11976 static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, ··· 12036 11985 cursor->max_downscale = 1; 12037 11986 cursor->pipe = pipe; 12038 11987 cursor->plane = pipe; 11988 + cursor->rotation = BIT(DRM_ROTATE_0); 12039 11989 12040 11990 drm_universal_plane_init(dev, &cursor->base, 0, 12041 11991 &intel_cursor_plane_funcs, 12042 11992 intel_cursor_formats, 12043 11993 ARRAY_SIZE(intel_cursor_formats), 12044 11994 DRM_PLANE_TYPE_CURSOR); 11995 + 11996 + if (INTEL_INFO(dev)->gen >= 4) { 11997 + if (!dev->mode_config.rotation_property) 11998 + dev->mode_config.rotation_property = 11999 + drm_mode_create_rotation_property(dev, 12000 + BIT(DRM_ROTATE_0) | 12001 + BIT(DRM_ROTATE_180)); 12002 + if (dev->mode_config.rotation_property) 12003 + drm_object_attach_property(&cursor->base.base, 12004 + dev->mode_config.rotation_property, 12005 + cursor->rotation); 12006 + } 12007 + 12045 12008 return &cursor->base; 12046 12009 } 12047 12010 ··· 12222 12157 if (INTEL_INFO(dev)->gen >= 9) 12223 12158 return false; 12224 12159 12225 - if (IS_ULT(dev)) 12160 + if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev)) 12226 12161 return false; 12227 12162 12228 12163 if (IS_CHERRYVIEW(dev))
+1 -9
drivers/gpu/drm/i915/intel_dp_mst.c
··· 278 278 } 279 279 280 280 static enum drm_connector_status 281 - intel_mst_port_dp_detect(struct drm_connector *connector) 281 + intel_dp_mst_detect(struct drm_connector *connector, bool force) 282 282 { 283 283 struct intel_connector *intel_connector = to_intel_connector(connector); 284 284 struct intel_dp *intel_dp = intel_connector->mst_port; 285 285 286 286 return drm_dp_mst_detect_port(&intel_dp->mst_mgr, intel_connector->port); 287 - } 288 - 289 - static enum drm_connector_status 290 - intel_dp_mst_detect(struct drm_connector *connector, bool force) 291 - { 292 - enum drm_connector_status status; 293 - status = intel_mst_port_dp_detect(connector); 294 - return status; 295 287 } 296 288 297 289 static int
+10 -4
drivers/gpu/drm/i915/intel_drv.h
··· 755 755 return INTEL_INFO(crtc->base.dev)->num_sprites[crtc->pipe] + 1; 756 756 } 757 757 758 - /* i915_irq.c */ 759 - bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, 758 + /* intel_fifo_underrun.c */ 759 + bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv, 760 760 enum pipe pipe, bool enable); 761 - bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev, 761 + bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv, 762 762 enum transcoder pch_transcoder, 763 763 bool enable); 764 + void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, 765 + enum pipe pipe); 766 + void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, 767 + enum transcoder pch_transcoder); 768 + void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv); 769 + 770 + /* i915_irq.c */ 764 771 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); 765 772 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); 766 773 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); ··· 786 779 } 787 780 788 781 int intel_get_crtc_scanline(struct intel_crtc *crtc); 789 - void i9xx_check_fifo_underruns(struct drm_device *dev); 790 782 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv); 791 783 792 784 /* intel_crt.c */
+381
drivers/gpu/drm/i915/intel_fifo_underrun.c
··· 1 + /* 2 + * Copyright © 2014 Intel Corporation 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice (including the next 12 + * paragraph) shall be included in all copies or substantial portions of the 13 + * Software. 14 + * 15 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 + * IN THE SOFTWARE. 22 + * 23 + * Authors: 24 + * Daniel Vetter <daniel.vetter@ffwll.ch> 25 + * 26 + */ 27 + 28 + #include "i915_drv.h" 29 + #include "intel_drv.h" 30 + 31 + /** 32 + * DOC: fifo underrun handling 33 + * 34 + * The i915 driver checks for display fifo underruns using the interrupt signals 35 + * provided by the hardware. This is enabled by default and fairly useful to 36 + * debug display issues, especially watermark settings. 37 + * 38 + * If an underrun is detected this is logged into dmesg. To avoid flooding logs 39 + * and occupying the cpu underrun interrupts are disabled after the first 40 + * occurrence until the next modeset on a given pipe. 
41 + * 42 + * Note that underrun detection on gmch platforms is a bit more ugly since there 43 + * is no interrupt (despite that the signalling bit is in the PIPESTAT pipe 44 + * interrupt register). Also on some other platforms underrun interrupts are 45 + * shared, which means that if we detect an underrun we need to disable underrun 46 + * reporting on all pipes. 47 + * 48 + * The code also supports underrun detection on the PCH transcoder. 49 + */ 50 + 51 + static bool ivb_can_enable_err_int(struct drm_device *dev) 52 + { 53 + struct drm_i915_private *dev_priv = dev->dev_private; 54 + struct intel_crtc *crtc; 55 + enum pipe pipe; 56 + 57 + assert_spin_locked(&dev_priv->irq_lock); 58 + 59 + for_each_pipe(dev_priv, pipe) { 60 + crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 61 + 62 + if (crtc->cpu_fifo_underrun_disabled) 63 + return false; 64 + } 65 + 66 + return true; 67 + } 68 + 69 + static bool cpt_can_enable_serr_int(struct drm_device *dev) 70 + { 71 + struct drm_i915_private *dev_priv = dev->dev_private; 72 + enum pipe pipe; 73 + struct intel_crtc *crtc; 74 + 75 + assert_spin_locked(&dev_priv->irq_lock); 76 + 77 + for_each_pipe(dev_priv, pipe) { 78 + crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 79 + 80 + if (crtc->pch_fifo_underrun_disabled) 81 + return false; 82 + } 83 + 84 + return true; 85 + } 86 + 87 + /** 88 + * i9xx_check_fifo_underruns - check for fifo underruns 89 + * @dev_priv: i915 device instance 90 + * 91 + * This function checks for fifo underruns on GMCH platforms. This needs to be 92 + * done manually on modeset to make sure that we catch all underruns since they 93 + * do not generate an interrupt by themselves on these platforms. 
94 + */ 95 + void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv) 96 + { 97 + struct intel_crtc *crtc; 98 + 99 + spin_lock_irq(&dev_priv->irq_lock); 100 + 101 + for_each_intel_crtc(dev_priv->dev, crtc) { 102 + u32 reg = PIPESTAT(crtc->pipe); 103 + u32 pipestat; 104 + 105 + if (crtc->cpu_fifo_underrun_disabled) 106 + continue; 107 + 108 + pipestat = I915_READ(reg) & 0xffff0000; 109 + if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0) 110 + continue; 111 + 112 + I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS); 113 + POSTING_READ(reg); 114 + 115 + DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe)); 116 + } 117 + 118 + spin_unlock_irq(&dev_priv->irq_lock); 119 + } 120 + 121 + static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev, 122 + enum pipe pipe, 123 + bool enable, bool old) 124 + { 125 + struct drm_i915_private *dev_priv = dev->dev_private; 126 + u32 reg = PIPESTAT(pipe); 127 + u32 pipestat = I915_READ(reg) & 0xffff0000; 128 + 129 + assert_spin_locked(&dev_priv->irq_lock); 130 + 131 + if (enable) { 132 + I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS); 133 + POSTING_READ(reg); 134 + } else { 135 + if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS) 136 + DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); 137 + } 138 + } 139 + 140 + static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev, 141 + enum pipe pipe, bool enable) 142 + { 143 + struct drm_i915_private *dev_priv = dev->dev_private; 144 + uint32_t bit = (pipe == PIPE_A) ? 
DE_PIPEA_FIFO_UNDERRUN : 145 + DE_PIPEB_FIFO_UNDERRUN; 146 + 147 + if (enable) 148 + ironlake_enable_display_irq(dev_priv, bit); 149 + else 150 + ironlake_disable_display_irq(dev_priv, bit); 151 + } 152 + 153 + static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev, 154 + enum pipe pipe, 155 + bool enable, bool old) 156 + { 157 + struct drm_i915_private *dev_priv = dev->dev_private; 158 + if (enable) { 159 + I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe)); 160 + 161 + if (!ivb_can_enable_err_int(dev)) 162 + return; 163 + 164 + ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); 165 + } else { 166 + ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); 167 + 168 + if (old && 169 + I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) { 170 + DRM_ERROR("uncleared fifo underrun on pipe %c\n", 171 + pipe_name(pipe)); 172 + } 173 + } 174 + } 175 + 176 + static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev, 177 + enum pipe pipe, bool enable) 178 + { 179 + struct drm_i915_private *dev_priv = dev->dev_private; 180 + 181 + assert_spin_locked(&dev_priv->irq_lock); 182 + 183 + if (enable) 184 + dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN; 185 + else 186 + dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN; 187 + I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 188 + POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 189 + } 190 + 191 + static void ibx_set_fifo_underrun_reporting(struct drm_device *dev, 192 + enum transcoder pch_transcoder, 193 + bool enable) 194 + { 195 + struct drm_i915_private *dev_priv = dev->dev_private; 196 + uint32_t bit = (pch_transcoder == TRANSCODER_A) ? 
197 + SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER; 198 + 199 + if (enable) 200 + ibx_enable_display_interrupt(dev_priv, bit); 201 + else 202 + ibx_disable_display_interrupt(dev_priv, bit); 203 + } 204 + 205 + static void cpt_set_fifo_underrun_reporting(struct drm_device *dev, 206 + enum transcoder pch_transcoder, 207 + bool enable, bool old) 208 + { 209 + struct drm_i915_private *dev_priv = dev->dev_private; 210 + 211 + if (enable) { 212 + I915_WRITE(SERR_INT, 213 + SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)); 214 + 215 + if (!cpt_can_enable_serr_int(dev)) 216 + return; 217 + 218 + ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT); 219 + } else { 220 + ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT); 221 + 222 + if (old && I915_READ(SERR_INT) & 223 + SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) { 224 + DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n", 225 + transcoder_name(pch_transcoder)); 226 + } 227 + } 228 + } 229 + 230 + static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, 231 + enum pipe pipe, bool enable) 232 + { 233 + struct drm_i915_private *dev_priv = dev->dev_private; 234 + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 235 + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 236 + bool old; 237 + 238 + assert_spin_locked(&dev_priv->irq_lock); 239 + 240 + old = !intel_crtc->cpu_fifo_underrun_disabled; 241 + intel_crtc->cpu_fifo_underrun_disabled = !enable; 242 + 243 + if (HAS_GMCH_DISPLAY(dev)) 244 + i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old); 245 + else if (IS_GEN5(dev) || IS_GEN6(dev)) 246 + ironlake_set_fifo_underrun_reporting(dev, pipe, enable); 247 + else if (IS_GEN7(dev)) 248 + ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old); 249 + else if (IS_GEN8(dev) || IS_GEN9(dev)) 250 + broadwell_set_fifo_underrun_reporting(dev, pipe, enable); 251 + 252 + return old; 253 + } 254 + 255 + /** 256 + * intel_set_cpu_fifo_underrun_reporting - set cpu fifo underrrun 
reporting state 257 + * @dev_priv: i915 device instance 258 + * @pipe: (CPU) pipe to set state for 259 + * @enable: whether underruns should be reported or not 260 + * 261 + * This function sets the fifo underrun state for @pipe. It is used in the 262 + * modeset code to avoid false positives since on many platforms underruns are 263 + * expected when disabling or enabling the pipe. 264 + * 265 + * Notice that on some platforms disabling underrun reports for one pipe 266 + * disables for all due to shared interrupts. Actual reporting is still per-pipe 267 + * though. 268 + * 269 + * Returns the previous state of underrun reporting. 270 + */ 271 + bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv, 272 + enum pipe pipe, bool enable) 273 + { 274 + unsigned long flags; 275 + bool ret; 276 + 277 + spin_lock_irqsave(&dev_priv->irq_lock, flags); 278 + ret = __intel_set_cpu_fifo_underrun_reporting(dev_priv->dev, pipe, 279 + enable); 280 + spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 281 + 282 + return ret; 283 + } 284 + 285 + static bool 286 + __cpu_fifo_underrun_reporting_enabled(struct drm_i915_private *dev_priv, 287 + enum pipe pipe) 288 + { 289 + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 290 + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 291 + 292 + return !intel_crtc->cpu_fifo_underrun_disabled; 293 + } 294 + 295 + /** 296 + * intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state 297 + * @dev_priv: i915 device instance 298 + * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older) 299 + * @enable: whether underruns should be reported or not 300 + * 301 + * This function makes us disable or enable PCH fifo underruns for a specific 302 + * PCH transcoder. Notice that on some PCHs (e.g. 
CPT/PPT), disabling FIFO 303 + * underrun reporting for one transcoder may also disable all the other PCH 304 + * error interruts for the other transcoders, due to the fact that there's just 305 + * one interrupt mask/enable bit for all the transcoders. 306 + * 307 + * Returns the previous state of underrun reporting. 308 + */ 309 + bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv, 310 + enum transcoder pch_transcoder, 311 + bool enable) 312 + { 313 + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder]; 314 + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 315 + unsigned long flags; 316 + bool old; 317 + 318 + /* 319 + * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT 320 + * has only one pch transcoder A that all pipes can use. To avoid racy 321 + * pch transcoder -> pipe lookups from interrupt code simply store the 322 + * underrun statistics in crtc A. Since we never expose this anywhere 323 + * nor use it outside of the fifo underrun code here using the "wrong" 324 + * crtc on LPT won't cause issues. 325 + */ 326 + 327 + spin_lock_irqsave(&dev_priv->irq_lock, flags); 328 + 329 + old = !intel_crtc->pch_fifo_underrun_disabled; 330 + intel_crtc->pch_fifo_underrun_disabled = !enable; 331 + 332 + if (HAS_PCH_IBX(dev_priv->dev)) 333 + ibx_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder, 334 + enable); 335 + else 336 + cpt_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder, 337 + enable, old); 338 + 339 + spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 340 + return old; 341 + } 342 + 343 + /** 344 + * intel_pch_fifo_underrun_irq_handler - handle PCH fifo underrun interrupt 345 + * @dev_priv: i915 device instance 346 + * @pipe: (CPU) pipe to set state for 347 + * 348 + * This handles a CPU fifo underrun interrupt, generating an underrun warning 349 + * into dmesg if underrun reporting is enabled and then disables the underrun 350 + * interrupt to avoid an irq storm. 
351 + */ 352 + void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, 353 + enum pipe pipe) 354 + { 355 + /* GMCH can't disable fifo underruns, filter them. */ 356 + if (HAS_GMCH_DISPLAY(dev_priv->dev) && 357 + !__cpu_fifo_underrun_reporting_enabled(dev_priv, pipe)) 358 + return; 359 + 360 + if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false)) 361 + DRM_ERROR("CPU pipe %c FIFO underrun\n", 362 + pipe_name(pipe)); 363 + } 364 + 365 + /** 366 + * intel_pch_fifo_underrun_irq_handler - handle PCH fifo underrun interrupt 367 + * @dev_priv: i915 device instance 368 + * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older) 369 + * 370 + * This handles a PCH fifo underrun interrupt, generating an underrun warning 371 + * into dmesg if underrun reporting is enabled and then disables the underrun 372 + * interrupt to avoid an irq storm. 373 + */ 374 + void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, 375 + enum transcoder pch_transcoder) 376 + { 377 + if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, 378 + false)) 379 + DRM_ERROR("PCH transcoder %c FIFO underrun\n", 380 + transcoder_name(pch_transcoder)); 381 + }
+3 -2
drivers/gpu/drm/i915/intel_panel.c
··· 775 775 if (panel->backlight.active_low_pwm) 776 776 pch_ctl1 |= BLM_PCH_POLARITY; 777 777 778 - /* BDW always uses the pch pwm controls. */ 779 - pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE; 778 + /* After LPT, override is the default. */ 779 + if (HAS_PCH_LPT(dev_priv)) 780 + pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE; 780 781 781 782 I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1); 782 783 POSTING_READ(BLC_PWM_PCH_CTL1);
+35 -30
drivers/gpu/drm/i915/intel_pm.c
··· 1345 1345 int *prec_mult, 1346 1346 int *drain_latency) 1347 1347 { 1348 + struct drm_device *dev = crtc->dev; 1348 1349 int entries; 1349 1350 int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock; 1350 1351 ··· 1356 1355 return false; 1357 1356 1358 1357 entries = DIV_ROUND_UP(clock, 1000) * pixel_size; 1359 - *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 : 1360 - DRAIN_LATENCY_PRECISION_32; 1358 + if (IS_CHERRYVIEW(dev)) 1359 + *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_32 : 1360 + DRAIN_LATENCY_PRECISION_16; 1361 + else 1362 + *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 : 1363 + DRAIN_LATENCY_PRECISION_32; 1361 1364 *drain_latency = (64 * (*prec_mult) * 4) / entries; 1362 1365 1363 1366 if (*drain_latency > DRAIN_LATENCY_MASK) ··· 1380 1375 1381 1376 static void vlv_update_drain_latency(struct drm_crtc *crtc) 1382 1377 { 1383 - struct drm_i915_private *dev_priv = crtc->dev->dev_private; 1378 + struct drm_device *dev = crtc->dev; 1379 + struct drm_i915_private *dev_priv = dev->dev_private; 1384 1380 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1385 1381 int pixel_size; 1386 1382 int drain_latency; 1387 1383 enum pipe pipe = intel_crtc->pipe; 1388 1384 int plane_prec, prec_mult, plane_dl; 1385 + const int high_precision = IS_CHERRYVIEW(dev) ? 
1386 + DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64; 1389 1387 1390 - plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_64 | 1391 - DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_64 | 1388 + plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_HIGH | 1389 + DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_HIGH | 1392 1390 (DRAIN_LATENCY_MASK << DDL_CURSOR_SHIFT)); 1393 1391 1394 1392 if (!intel_crtc_active(crtc)) { ··· 1402 1394 /* Primary plane Drain Latency */ 1403 1395 pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */ 1404 1396 if (vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) { 1405 - plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ? 1406 - DDL_PLANE_PRECISION_64 : 1407 - DDL_PLANE_PRECISION_32; 1397 + plane_prec = (prec_mult == high_precision) ? 1398 + DDL_PLANE_PRECISION_HIGH : 1399 + DDL_PLANE_PRECISION_LOW; 1408 1400 plane_dl |= plane_prec | drain_latency; 1409 1401 } 1410 1402 ··· 1416 1408 /* Program cursor DL only if it is enabled */ 1417 1409 if (intel_crtc->cursor_base && 1418 1410 vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) { 1419 - plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ? 1420 - DDL_CURSOR_PRECISION_64 : 1421 - DDL_CURSOR_PRECISION_32; 1411 + plane_prec = (prec_mult == high_precision) ? 1412 + DDL_CURSOR_PRECISION_HIGH : 1413 + DDL_CURSOR_PRECISION_LOW; 1422 1414 plane_dl |= plane_prec | (drain_latency << DDL_CURSOR_SHIFT); 1423 1415 } 1424 1416 ··· 1586 1578 int plane_prec; 1587 1579 int sprite_dl; 1588 1580 int prec_mult; 1581 + const int high_precision = IS_CHERRYVIEW(dev) ? 
1582 + DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64; 1589 1583 1590 - sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_64(sprite) | 1584 + sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_HIGH(sprite) | 1591 1585 (DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(sprite))); 1592 1586 1593 1587 if (enabled && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, 1594 1588 &drain_latency)) { 1595 - plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ? 1596 - DDL_SPRITE_PRECISION_64(sprite) : 1597 - DDL_SPRITE_PRECISION_32(sprite); 1589 + plane_prec = (prec_mult == high_precision) ? 1590 + DDL_SPRITE_PRECISION_HIGH(sprite) : 1591 + DDL_SPRITE_PRECISION_LOW(sprite); 1598 1592 sprite_dl |= plane_prec | 1599 1593 (drain_latency << DDL_SPRITE_SHIFT(sprite)); 1600 1594 } ··· 3639 3629 else 3640 3630 mode = 0; 3641 3631 } 3642 - DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n", 3643 - (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off", 3644 - (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off", 3645 - (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off"); 3632 + if (HAS_RC6p(dev)) 3633 + DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n", 3634 + (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off", 3635 + (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off", 3636 + (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off"); 3637 + 3638 + else 3639 + DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n", 3640 + (mode & GEN6_RC_CTL_RC6_ENABLE) ? 
"on" : "off"); 3646 3641 } 3647 3642 3648 3643 static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6) ··· 3664 3649 if (enable_rc6 >= 0) { 3665 3650 int mask; 3666 3651 3667 - if (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) 3652 + if (HAS_RC6p(dev)) 3668 3653 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE | 3669 3654 INTEL_RC6pp_ENABLE; 3670 3655 else ··· 5663 5648 I915_WRITE(WM3_LP_ILK, 0); 5664 5649 I915_WRITE(WM2_LP_ILK, 0); 5665 5650 I915_WRITE(WM1_LP_ILK, 0); 5666 - 5667 - /* FIXME(BDW): Check all the w/a, some might only apply to 5668 - * pre-production hw. */ 5669 - 5670 - 5671 - I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE)); 5672 - 5673 - I915_WRITE(_3D_CHICKEN3, 5674 - _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2))); 5675 - 5676 5651 5677 5652 /* WaSwitchSolVfFArbitrationPriority:bdw */ 5678 5653 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
+113 -89
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 665 665 return ret; 666 666 } 667 667 668 - static inline void intel_ring_emit_wa(struct intel_engine_cs *ring, 669 - u32 addr, u32 value) 668 + static int intel_ring_workarounds_emit(struct intel_engine_cs *ring) 670 669 { 670 + int ret, i; 671 671 struct drm_device *dev = ring->dev; 672 672 struct drm_i915_private *dev_priv = dev->dev_private; 673 + struct i915_workarounds *w = &dev_priv->workarounds; 673 674 674 - if (WARN_ON(dev_priv->num_wa_regs >= I915_MAX_WA_REGS)) 675 - return; 675 + if (WARN_ON(w->count == 0)) 676 + return 0; 676 677 677 - intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 678 - intel_ring_emit(ring, addr); 679 - intel_ring_emit(ring, value); 680 - 681 - dev_priv->intel_wa_regs[dev_priv->num_wa_regs].addr = addr; 682 - dev_priv->intel_wa_regs[dev_priv->num_wa_regs].mask = value & 0xFFFF; 683 - /* value is updated with the status of remaining bits of this 684 - * register when it is read from debugfs file 685 - */ 686 - dev_priv->intel_wa_regs[dev_priv->num_wa_regs].value = value; 687 - dev_priv->num_wa_regs++; 688 - 689 - return; 690 - } 691 - 692 - static int bdw_init_workarounds(struct intel_engine_cs *ring) 693 - { 694 - int ret; 695 - struct drm_device *dev = ring->dev; 696 - struct drm_i915_private *dev_priv = dev->dev_private; 697 - 698 - /* 699 - * workarounds applied in this fn are part of register state context, 700 - * they need to be re-initialized followed by gpu reset, suspend/resume, 701 - * module reload. 
702 - */ 703 - dev_priv->num_wa_regs = 0; 704 - memset(dev_priv->intel_wa_regs, 0, sizeof(dev_priv->intel_wa_regs)); 705 - 706 - /* 707 - * update the number of dwords required based on the 708 - * actual number of workarounds applied 709 - */ 710 - ret = intel_ring_begin(ring, 18); 678 + ring->gpu_caches_dirty = true; 679 + ret = intel_ring_flush_all_caches(ring); 711 680 if (ret) 712 681 return ret; 713 682 683 + ret = intel_ring_begin(ring, (w->count * 2 + 2)); 684 + if (ret) 685 + return ret; 686 + 687 + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count)); 688 + for (i = 0; i < w->count; i++) { 689 + intel_ring_emit(ring, w->reg[i].addr); 690 + intel_ring_emit(ring, w->reg[i].value); 691 + } 692 + intel_ring_emit(ring, MI_NOOP); 693 + 694 + intel_ring_advance(ring); 695 + 696 + ring->gpu_caches_dirty = true; 697 + ret = intel_ring_flush_all_caches(ring); 698 + if (ret) 699 + return ret; 700 + 701 + DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count); 702 + 703 + return 0; 704 + } 705 + 706 + static int wa_add(struct drm_i915_private *dev_priv, 707 + const u32 addr, const u32 val, const u32 mask) 708 + { 709 + const u32 idx = dev_priv->workarounds.count; 710 + 711 + if (WARN_ON(idx >= I915_MAX_WA_REGS)) 712 + return -ENOSPC; 713 + 714 + dev_priv->workarounds.reg[idx].addr = addr; 715 + dev_priv->workarounds.reg[idx].value = val; 716 + dev_priv->workarounds.reg[idx].mask = mask; 717 + 718 + dev_priv->workarounds.count++; 719 + 720 + return 0; 721 + } 722 + 723 + #define WA_REG(addr, val, mask) { \ 724 + const int r = wa_add(dev_priv, (addr), (val), (mask)); \ 725 + if (r) \ 726 + return r; \ 727 + } 728 + 729 + #define WA_SET_BIT_MASKED(addr, mask) \ 730 + WA_REG(addr, _MASKED_BIT_ENABLE(mask), (mask) & 0xffff) 731 + 732 + #define WA_CLR_BIT_MASKED(addr, mask) \ 733 + WA_REG(addr, _MASKED_BIT_DISABLE(mask), (mask) & 0xffff) 734 + 735 + #define WA_SET_BIT(addr, mask) WA_REG(addr, I915_READ(addr) | (mask), mask) 736 + #define WA_CLR_BIT(addr, mask) 
WA_REG(addr, I915_READ(addr) & ~(mask), mask) 737 + 738 + #define WA_WRITE(addr, val) WA_REG(addr, val, 0xffffffff) 739 + 740 + static int bdw_init_workarounds(struct intel_engine_cs *ring) 741 + { 742 + struct drm_device *dev = ring->dev; 743 + struct drm_i915_private *dev_priv = dev->dev_private; 744 + 714 745 /* WaDisablePartialInstShootdown:bdw */ 715 - /* WaDisableThreadStallDopClockGating:bdw */ 716 - /* FIXME: Unclear whether we really need this on production bdw. */ 717 - intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN, 718 - _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE 719 - | STALL_DOP_GATING_DISABLE)); 746 + /* WaDisableThreadStallDopClockGating:bdw (pre-production) */ 747 + WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, 748 + PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE | 749 + STALL_DOP_GATING_DISABLE); 720 750 721 - /* WaDisableDopClockGating:bdw May not be needed for production */ 722 - intel_ring_emit_wa(ring, GEN7_ROW_CHICKEN2, 723 - _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 751 + /* WaDisableDopClockGating:bdw */ 752 + WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, 753 + DOP_CLOCK_GATING_DISABLE); 724 754 725 - intel_ring_emit_wa(ring, HALF_SLICE_CHICKEN3, 726 - _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS)); 755 + WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, 756 + GEN8_SAMPLER_POWER_BYPASS_DIS); 727 757 728 758 /* Use Force Non-Coherent whenever executing a 3D context. This is a 729 759 * workaround for for a possible hang in the unlikely event a TLB 730 760 * invalidation occurs during a PSD flush. 731 761 */ 732 762 /* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */ 733 - intel_ring_emit_wa(ring, HDC_CHICKEN0, 734 - _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT | 735 - (IS_BDW_GT3(dev) ? 736 - HDC_FENCE_DEST_SLM_DISABLE : 0) 737 - )); 763 + WA_SET_BIT_MASKED(HDC_CHICKEN0, 764 + HDC_FORCE_NON_COHERENT | 765 + (IS_BDW_GT3(dev) ? 
HDC_FENCE_DEST_SLM_DISABLE : 0)); 738 766 739 767 /* Wa4x4STCOptimizationDisable:bdw */ 740 - intel_ring_emit_wa(ring, CACHE_MODE_1, 741 - _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE)); 768 + WA_SET_BIT_MASKED(CACHE_MODE_1, 769 + GEN8_4x4_STC_OPTIMIZATION_DISABLE); 742 770 743 771 /* 744 772 * BSpec recommends 8x4 when MSAA is used, ··· 776 748 * disable bit, which we don't touch here, but it's good 777 749 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 778 750 */ 779 - intel_ring_emit_wa(ring, GEN7_GT_MODE, 780 - GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4); 781 - 782 - intel_ring_advance(ring); 783 - 784 - DRM_DEBUG_DRIVER("Number of Workarounds applied: %d\n", 785 - dev_priv->num_wa_regs); 751 + WA_SET_BIT_MASKED(GEN7_GT_MODE, 752 + GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4); 786 753 787 754 return 0; 788 755 } 789 756 790 757 static int chv_init_workarounds(struct intel_engine_cs *ring) 791 758 { 792 - int ret; 793 759 struct drm_device *dev = ring->dev; 794 760 struct drm_i915_private *dev_priv = dev->dev_private; 795 761 796 - /* 797 - * workarounds applied in this fn are part of register state context, 798 - * they need to be re-initialized followed by gpu reset, suspend/resume, 799 - * module reload. 
800 - */ 801 - dev_priv->num_wa_regs = 0; 802 - memset(dev_priv->intel_wa_regs, 0, sizeof(dev_priv->intel_wa_regs)); 803 - 804 - ret = intel_ring_begin(ring, 12); 805 - if (ret) 806 - return ret; 807 - 808 762 /* WaDisablePartialInstShootdown:chv */ 809 - intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN, 810 - _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE)); 763 + WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, 764 + PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE); 811 765 812 766 /* WaDisableThreadStallDopClockGating:chv */ 813 - intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN, 814 - _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE)); 767 + WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, 768 + STALL_DOP_GATING_DISABLE); 815 769 816 770 /* WaDisableDopClockGating:chv (pre-production hw) */ 817 - intel_ring_emit_wa(ring, GEN7_ROW_CHICKEN2, 818 - _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 771 + WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, 772 + DOP_CLOCK_GATING_DISABLE); 819 773 820 774 /* WaDisableSamplerPowerBypass:chv (pre-production hw) */ 821 - intel_ring_emit_wa(ring, HALF_SLICE_CHICKEN3, 822 - _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS)); 775 + WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, 776 + GEN8_SAMPLER_POWER_BYPASS_DIS); 823 777 824 - intel_ring_advance(ring); 778 + return 0; 779 + } 780 + 781 + static int init_workarounds_ring(struct intel_engine_cs *ring) 782 + { 783 + struct drm_device *dev = ring->dev; 784 + struct drm_i915_private *dev_priv = dev->dev_private; 785 + 786 + WARN_ON(ring->id != RCS); 787 + 788 + dev_priv->workarounds.count = 0; 789 + 790 + if (IS_BROADWELL(dev)) 791 + return bdw_init_workarounds(ring); 792 + 793 + if (IS_CHERRYVIEW(dev)) 794 + return chv_init_workarounds(ring); 825 795 826 796 return 0; 827 797 } ··· 879 853 if (HAS_L3_DPF(dev)) 880 854 I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev)); 881 855 882 - return ret; 856 + return init_workarounds_ring(ring); 883 857 } 884 858 885 859 static void render_ring_cleanup(struct intel_engine_cs *ring) ··· 2325 2299 
dev_priv->semaphore_obj = obj; 2326 2300 } 2327 2301 } 2328 - if (IS_CHERRYVIEW(dev)) 2329 - ring->init_context = chv_init_workarounds; 2330 - else 2331 - ring->init_context = bdw_init_workarounds; 2302 + 2303 + ring->init_context = intel_ring_workarounds_emit; 2332 2304 ring->add_request = gen6_add_request; 2333 2305 ring->flush = gen8_render_ring_flush; 2334 2306 ring->irq_get = gen8_ring_get_irq;
+1 -1
drivers/gpu/drm/i915/intel_runtime_pm.c
··· 221 221 if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) & 222 222 HSW_PWR_WELL_STATE_ENABLED), 20)) 223 223 DRM_ERROR("Timeout enabling power well\n"); 224 + hsw_power_well_post_enable(dev_priv); 224 225 } 225 226 226 - hsw_power_well_post_enable(dev_priv); 227 227 } else { 228 228 if (enable_requested) { 229 229 I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
+3
drivers/gpu/drm/i915/intel_sprite.c
··· 162 162 plane_ctl &= ~PLANE_CTL_YUV422_ORDER_MASK; 163 163 plane_ctl &= ~PLANE_CTL_TILED_MASK; 164 164 plane_ctl &= ~PLANE_CTL_ALPHA_MASK; 165 + plane_ctl &= ~PLANE_CTL_ROTATE_MASK; 165 166 166 167 /* Trickle feed has to be enabled */ 167 168 plane_ctl &= ~PLANE_CTL_TRICKLE_FEED_DISABLE; ··· 218 217 default: 219 218 BUG(); 220 219 } 220 + if (intel_plane->rotation == BIT(DRM_ROTATE_180)) 221 + plane_ctl |= PLANE_CTL_ROTATE_180; 221 222 222 223 plane_ctl |= PLANE_CTL_ENABLE; 223 224 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
+41 -50
drivers/gpu/drm/i915/intel_uncore.c
··· 360 360 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 361 361 } 362 362 363 - void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake) 363 + static void __intel_uncore_early_sanitize(struct drm_device *dev, 364 + bool restore_forcewake) 364 365 { 365 366 struct drm_i915_private *dev_priv = dev->dev_private; 366 367 ··· 385 384 __raw_i915_read32(dev_priv, GTFIFODBG)); 386 385 387 386 intel_uncore_forcewake_reset(dev, restore_forcewake); 387 + } 388 + 389 + void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake) 390 + { 391 + __intel_uncore_early_sanitize(dev, restore_forcewake); 392 + i915_check_and_clear_faults(dev); 388 393 } 389 394 390 395 void intel_uncore_sanitize(struct drm_device *dev) ··· 830 823 #undef REG_WRITE_FOOTER 831 824 #undef REG_WRITE_HEADER 832 825 826 + #define ASSIGN_WRITE_MMIO_VFUNCS(x) \ 827 + do { \ 828 + dev_priv->uncore.funcs.mmio_writeb = x##_write8; \ 829 + dev_priv->uncore.funcs.mmio_writew = x##_write16; \ 830 + dev_priv->uncore.funcs.mmio_writel = x##_write32; \ 831 + dev_priv->uncore.funcs.mmio_writeq = x##_write64; \ 832 + } while (0) 833 + 834 + #define ASSIGN_READ_MMIO_VFUNCS(x) \ 835 + do { \ 836 + dev_priv->uncore.funcs.mmio_readb = x##_read8; \ 837 + dev_priv->uncore.funcs.mmio_readw = x##_read16; \ 838 + dev_priv->uncore.funcs.mmio_readl = x##_read32; \ 839 + dev_priv->uncore.funcs.mmio_readq = x##_read64; \ 840 + } while (0) 841 + 833 842 void intel_uncore_init(struct drm_device *dev) 834 843 { 835 844 struct drm_i915_private *dev_priv = dev->dev_private; ··· 853 830 setup_timer(&dev_priv->uncore.force_wake_timer, 854 831 gen6_force_wake_timer, (unsigned long)dev_priv); 855 832 856 - intel_uncore_early_sanitize(dev, false); 833 + __intel_uncore_early_sanitize(dev, false); 857 834 858 835 if (IS_VALLEYVIEW(dev)) { 859 836 dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get; ··· 902 879 switch (INTEL_INFO(dev)->gen) { 903 880 default: 904 881 if 
(IS_CHERRYVIEW(dev)) { 905 - dev_priv->uncore.funcs.mmio_writeb = chv_write8; 906 - dev_priv->uncore.funcs.mmio_writew = chv_write16; 907 - dev_priv->uncore.funcs.mmio_writel = chv_write32; 908 - dev_priv->uncore.funcs.mmio_writeq = chv_write64; 909 - dev_priv->uncore.funcs.mmio_readb = chv_read8; 910 - dev_priv->uncore.funcs.mmio_readw = chv_read16; 911 - dev_priv->uncore.funcs.mmio_readl = chv_read32; 912 - dev_priv->uncore.funcs.mmio_readq = chv_read64; 882 + ASSIGN_WRITE_MMIO_VFUNCS(chv); 883 + ASSIGN_READ_MMIO_VFUNCS(chv); 913 884 914 885 } else { 915 - dev_priv->uncore.funcs.mmio_writeb = gen8_write8; 916 - dev_priv->uncore.funcs.mmio_writew = gen8_write16; 917 - dev_priv->uncore.funcs.mmio_writel = gen8_write32; 918 - dev_priv->uncore.funcs.mmio_writeq = gen8_write64; 919 - dev_priv->uncore.funcs.mmio_readb = gen6_read8; 920 - dev_priv->uncore.funcs.mmio_readw = gen6_read16; 921 - dev_priv->uncore.funcs.mmio_readl = gen6_read32; 922 - dev_priv->uncore.funcs.mmio_readq = gen6_read64; 886 + ASSIGN_WRITE_MMIO_VFUNCS(gen8); 887 + ASSIGN_READ_MMIO_VFUNCS(gen6); 923 888 } 924 889 break; 925 890 case 7: 926 891 case 6: 927 892 if (IS_HASWELL(dev)) { 928 - dev_priv->uncore.funcs.mmio_writeb = hsw_write8; 929 - dev_priv->uncore.funcs.mmio_writew = hsw_write16; 930 - dev_priv->uncore.funcs.mmio_writel = hsw_write32; 931 - dev_priv->uncore.funcs.mmio_writeq = hsw_write64; 893 + ASSIGN_WRITE_MMIO_VFUNCS(hsw); 932 894 } else { 933 - dev_priv->uncore.funcs.mmio_writeb = gen6_write8; 934 - dev_priv->uncore.funcs.mmio_writew = gen6_write16; 935 - dev_priv->uncore.funcs.mmio_writel = gen6_write32; 936 - dev_priv->uncore.funcs.mmio_writeq = gen6_write64; 895 + ASSIGN_WRITE_MMIO_VFUNCS(gen6); 937 896 } 938 897 939 898 if (IS_VALLEYVIEW(dev)) { 940 - dev_priv->uncore.funcs.mmio_readb = vlv_read8; 941 - dev_priv->uncore.funcs.mmio_readw = vlv_read16; 942 - dev_priv->uncore.funcs.mmio_readl = vlv_read32; 943 - dev_priv->uncore.funcs.mmio_readq = vlv_read64; 899 + 
ASSIGN_READ_MMIO_VFUNCS(vlv); 944 900 } else { 945 - dev_priv->uncore.funcs.mmio_readb = gen6_read8; 946 - dev_priv->uncore.funcs.mmio_readw = gen6_read16; 947 - dev_priv->uncore.funcs.mmio_readl = gen6_read32; 948 - dev_priv->uncore.funcs.mmio_readq = gen6_read64; 901 + ASSIGN_READ_MMIO_VFUNCS(gen6); 949 902 } 950 903 break; 951 904 case 5: 952 - dev_priv->uncore.funcs.mmio_writeb = gen5_write8; 953 - dev_priv->uncore.funcs.mmio_writew = gen5_write16; 954 - dev_priv->uncore.funcs.mmio_writel = gen5_write32; 955 - dev_priv->uncore.funcs.mmio_writeq = gen5_write64; 956 - dev_priv->uncore.funcs.mmio_readb = gen5_read8; 957 - dev_priv->uncore.funcs.mmio_readw = gen5_read16; 958 - dev_priv->uncore.funcs.mmio_readl = gen5_read32; 959 - dev_priv->uncore.funcs.mmio_readq = gen5_read64; 905 + ASSIGN_WRITE_MMIO_VFUNCS(gen5); 906 + ASSIGN_READ_MMIO_VFUNCS(gen5); 960 907 break; 961 908 case 4: 962 909 case 3: 963 910 case 2: 964 - dev_priv->uncore.funcs.mmio_writeb = gen4_write8; 965 - dev_priv->uncore.funcs.mmio_writew = gen4_write16; 966 - dev_priv->uncore.funcs.mmio_writel = gen4_write32; 967 - dev_priv->uncore.funcs.mmio_writeq = gen4_write64; 968 - dev_priv->uncore.funcs.mmio_readb = gen4_read8; 969 - dev_priv->uncore.funcs.mmio_readw = gen4_read16; 970 - dev_priv->uncore.funcs.mmio_readl = gen4_read32; 971 - dev_priv->uncore.funcs.mmio_readq = gen4_read64; 911 + ASSIGN_WRITE_MMIO_VFUNCS(gen4); 912 + ASSIGN_READ_MMIO_VFUNCS(gen4); 972 913 break; 973 914 } 915 + 916 + i915_check_and_clear_faults(dev); 974 917 } 918 + #undef ASSIGN_WRITE_MMIO_VFUNCS 919 + #undef ASSIGN_READ_MMIO_VFUNCS 975 920 976 921 void intel_uncore_fini(struct drm_device *dev) 977 922 {