
Merge tag 'drm-intel-next-2014-02-07' of ssh://git.freedesktop.org/git/drm-intel into drm-next

- Yet more steps towards atomic modeset from Ville.
- DP panel power sequencing improvements from Paulo.
- IRQ code cleanups from Ville.
- 5.4 GHz DP lane clock support for bdw/hsw from Todd.
- Clock readout support for hsw/bdw (aka fastboot) from Jesse.
- Make pipe underruns report at ERROR level (Ville). This is to validate our
  improved watermark code.
- Full PPGTT support from Ben for gen7.
- More FBC fixes and improvements from Ville all over the place, unfortunately
  not yet enabled by default on more platforms.
- Workaround (w/a) cleanups from Ville.
- HiZ stall optimization settings (Chia-I Wu).
- Display register mmio offset refactor patch from Antti.
- RPS improvements for corner-cases from Jeff McGee.

* tag 'drm-intel-next-2014-02-07' of ssh://git.freedesktop.org/git/drm-intel: (166 commits)
drm/i915: Update rps interrupt limits
drm/i915: Restore rps/rc6 on reset
drm/i915: Prevent recursion by retiring requests when the ring is full
drm/i915: Generate a hang error code
drm/i915: unify FLIP_DONE macro names
drm/i915: vlv: s/spin_lock_irqsave/spin_lock/ in irq handler
drm/i915: factor out valleyview_pipestat_irq_handler
drm/i915: vlv: don't unmask IIR[DISPLAY_PIPE_A/B_VBLANK] interrupt
drm/i915: Reorganize display pipe register accesses
drm/i915: Treat using a purged buffer as a source of EFAULT
drm/i915: Convert EFAULT into a silent SIGBUS
drm/i915: release mutex in i915_gem_init()'s error path
drm/i915: check for oom when allocating private_default_ctx
drm/i915/vlv: WA to fix Voltage not getting dropped to Vmin when Gfx is power gated.
drm/i915: Get rid of acthd based guilty batch search
drm/i915: Use hangcheck score to find guilty context
drm/i915: Drop WaDisablePSDDualDispatchEnable:ivb for IVB GT2
drm/i915: Fix IVB GT2 WaDisableDopClockGating and WaDisablePSDDualDispatchEnable
drm/i915: Don't access snooped pages through the GTT (even for error capture)
drm/i915: Only print information for filing bug reports once
...

Conflicts:
drivers/gpu/drm/i915/intel_dp.c

Diffstat: +3355 -1689

drivers/gpu/drm/i915/Makefile (+1)
···
 	  i915_gem_gtt.o \
 	  i915_gem_stolen.o \
 	  i915_gem_tiling.o \
+	  i915_params.o \
 	  i915_sysfs.o \
 	  i915_trace_points.o \
 	  i915_ums.o \
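
The new i915_params.o object gathers the module parameters that this series deletes from i915_drv.c (see that file's diff below) behind the single struct i915_params i915 declared in i915_drv.h. The actual i915_params.c is not part of the excerpted diffs; what follows is a minimal sketch of its shape, reusing only defaults and help strings visible in the i915_drv.c hunk:

/* Sketch of i915_params.c (not shown in this excerpt): one parameter
 * struct instance plus module_param_named() wiring, mirroring the
 * defaults deleted from i915_drv.c below. */
#include "i915_drv.h"

struct i915_params i915 __read_mostly = {
	.modeset = -1,
	.panel_ignore_lid = 1,
	.powersave = 1,
	.semaphores = -1,
	.enable_rc6 = -1,
	.enable_fbc = -1,
	.enable_hangcheck = true,
	.enable_ppgtt = -1,
	.preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
	.reset = true,
};

module_param_named(modeset, i915.modeset, int, 0400);
MODULE_PARM_DESC(modeset,
	"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
	"1=on, -1=force vga console preference [default])");

module_param_named(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
MODULE_PARM_DESC(enable_hangcheck,
	"Periodically check GPU activity for detecting hangs. "
	"WARNING: Disabling this can cause system wide hangs. "
	"(default: true)");
/* ... one module_param_named()/MODULE_PARM_DESC() pair per field ... */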

drivers/gpu/drm/i915/i915_debugfs.c (+286 -15)
···
 {
 	if (obj->user_pin_count > 0)
 		return "P";
-	else if (obj->pin_count > 0)
+	else if (i915_gem_obj_is_pinned(obj))
 		return "p";
 	else
 		return " ";
···
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
+	int pin_count = 0;
+
 	seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
 		   &obj->base,
 		   get_pin_flag(obj),
···
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
 		seq_printf(m, " (name: %d)", obj->base.name);
-	if (obj->pin_count)
-		seq_printf(m, " (pinned x %d)", obj->pin_count);
+	list_for_each_entry(vma, &obj->vma_list, vma_link)
+		if (vma->pin_count > 0)
+			pin_count++;
+	seq_printf(m, " (pinned x %d)", pin_count);
 	if (obj->pin_display)
 		seq_printf(m, " (display)");
 	if (obj->fence_reg != I915_FENCE_REG_NONE)
···
 
 	total_obj_size = total_gtt_size = count = 0;
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		if (list == PINNED_LIST && obj->pin_count == 0)
+		if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
 			continue;
 
 		seq_puts(m, " ");
···
 		seq_printf(m, "Graphics Interrupt mask: %08x\n",
 			   I915_READ(GTIMR));
 	}
-	seq_printf(m, "Interrupts received: %d\n",
-		   atomic_read(&dev_priv->irq_received));
 	for_each_ring(ring, dev_priv, i) {
 		if (INTEL_INFO(dev)->gen >= 6) {
 			seq_printf(m,
···
 	return 0;
 }
 
+static int per_file_ctx(int id, void *ptr, void *data)
+{
+	struct i915_hw_context *ctx = ptr;
+	struct seq_file *m = data;
+	struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx);
+
+	ppgtt->debug_dump(ppgtt, m);
+
+	return 0;
+}
+
 static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
···
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;
+	struct drm_file *file;
 	int i;
 
 	if (INTEL_INFO(dev)->gen == 6)
···
 
 		seq_puts(m, "aliasing PPGTT:\n");
 		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
+
+		ppgtt->debug_dump(ppgtt, m);
+	} else
+		return;
+
+	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
+		struct drm_i915_file_private *file_priv = file->driver_priv;
+		struct i915_hw_ppgtt *pvt_ppgtt;
+
+		pvt_ppgtt = ctx_to_ppgtt(file_priv->private_default_ctx);
+		seq_printf(m, "proc: %s\n",
+			   get_pid_task(file->pid, PIDTYPE_PID)->comm);
+		seq_puts(m, " default context:\n");
+		idr_for_each(&file_priv->context_idr, per_file_ctx, m);
 	}
 	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
···
 
 	intel_runtime_pm_put(dev_priv);
 	return 0;
+}
+
+static int i915_sink_crc(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct intel_encoder *encoder;
+	struct intel_connector *connector;
+	struct intel_dp *intel_dp = NULL;
+	int ret;
+	u8 crc[6];
+
+	drm_modeset_lock_all(dev);
+	list_for_each_entry(connector, &dev->mode_config.connector_list,
+			    base.head) {
+
+		if (connector->base.dpms != DRM_MODE_DPMS_ON)
+			continue;
+
+		encoder = to_intel_encoder(connector->base.encoder);
+		if (encoder->type != INTEL_OUTPUT_EDP)
+			continue;
+
+		intel_dp = enc_to_intel_dp(&encoder->base);
+
+		ret = intel_dp_sink_crc(intel_dp, crc);
+		if (ret)
+			goto out;
+
+		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
+			   crc[0], crc[1], crc[2],
+			   crc[3], crc[4], crc[5]);
+		goto out;
+	}
+	ret = -ENODEV;
+out:
+	drm_modeset_unlock_all(dev);
+	return ret;
 }
 
 static int i915_energy_uJ(struct seq_file *m, void *data)
···
 	.write = display_crc_ctl_write
 };
 
+static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
+{
+	struct drm_device *dev = m->private;
+	int num_levels = IS_HASWELL(dev) || IS_BROADWELL(dev) ? 5 : 4;
+	int level;
+
+	drm_modeset_lock_all(dev);
+
+	for (level = 0; level < num_levels; level++) {
+		unsigned int latency = wm[level];
+
+		/* WM1+ latency values in 0.5us units */
+		if (level > 0)
+			latency *= 5;
+
+		seq_printf(m, "WM%d %u (%u.%u usec)\n",
+			   level, wm[level],
+			   latency / 10, latency % 10);
+	}
+
+	drm_modeset_unlock_all(dev);
+}
+
+static int pri_wm_latency_show(struct seq_file *m, void *data)
+{
+	struct drm_device *dev = m->private;
+
+	wm_latency_show(m, to_i915(dev)->wm.pri_latency);
+
+	return 0;
+}
+
+static int spr_wm_latency_show(struct seq_file *m, void *data)
+{
+	struct drm_device *dev = m->private;
+
+	wm_latency_show(m, to_i915(dev)->wm.spr_latency);
+
+	return 0;
+}
+
+static int cur_wm_latency_show(struct seq_file *m, void *data)
+{
+	struct drm_device *dev = m->private;
+
+	wm_latency_show(m, to_i915(dev)->wm.cur_latency);
+
+	return 0;
+}
+
+static int pri_wm_latency_open(struct inode *inode, struct file *file)
+{
+	struct drm_device *dev = inode->i_private;
+
+	if (!HAS_PCH_SPLIT(dev))
+		return -ENODEV;
+
+	return single_open(file, pri_wm_latency_show, dev);
+}
+
+static int spr_wm_latency_open(struct inode *inode, struct file *file)
+{
+	struct drm_device *dev = inode->i_private;
+
+	if (!HAS_PCH_SPLIT(dev))
+		return -ENODEV;
+
+	return single_open(file, spr_wm_latency_show, dev);
+}
+
+static int cur_wm_latency_open(struct inode *inode, struct file *file)
+{
+	struct drm_device *dev = inode->i_private;
+
+	if (!HAS_PCH_SPLIT(dev))
+		return -ENODEV;
+
+	return single_open(file, cur_wm_latency_show, dev);
+}
+
+static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
+				size_t len, loff_t *offp, uint16_t wm[5])
+{
+	struct seq_file *m = file->private_data;
+	struct drm_device *dev = m->private;
+	uint16_t new[5] = { 0 };
+	int num_levels = IS_HASWELL(dev) || IS_BROADWELL(dev) ? 5 : 4;
+	int level;
+	int ret;
+	char tmp[32];
+
+	if (len >= sizeof(tmp))
+		return -EINVAL;
+
+	if (copy_from_user(tmp, ubuf, len))
+		return -EFAULT;
+
+	tmp[len] = '\0';
+
+	ret = sscanf(tmp, "%hu %hu %hu %hu %hu", &new[0], &new[1], &new[2], &new[3], &new[4]);
+	if (ret != num_levels)
+		return -EINVAL;
+
+	drm_modeset_lock_all(dev);
+
+	for (level = 0; level < num_levels; level++)
+		wm[level] = new[level];
+
+	drm_modeset_unlock_all(dev);
+
+	return len;
+}
+
+
+static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
+				    size_t len, loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	struct drm_device *dev = m->private;
+
+	return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.pri_latency);
+}
+
+static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
+				    size_t len, loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	struct drm_device *dev = m->private;
+
+	return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.spr_latency);
+}
+
+static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
+				    size_t len, loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	struct drm_device *dev = m->private;
+
+	return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.cur_latency);
+}
+
+static const struct file_operations i915_pri_wm_latency_fops = {
+	.owner = THIS_MODULE,
+	.open = pri_wm_latency_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = pri_wm_latency_write
+};
+
+static const struct file_operations i915_spr_wm_latency_fops = {
+	.owner = THIS_MODULE,
+	.open = spr_wm_latency_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = spr_wm_latency_write
+};
+
+static const struct file_operations i915_cur_wm_latency_fops = {
+	.owner = THIS_MODULE,
+	.open = cur_wm_latency_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = cur_wm_latency_write
+};
+
 static int
 i915_wedged_get(void *data, u64 *val)
 {
···
 	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
 		list_for_each_entry_safe(vma, x, &vm->inactive_list,
 					 mm_list) {
-			if (vma->obj->pin_count)
+			if (vma->pin_count)
 				continue;
 
 			ret = i915_vma_unbind(vma);
···
 {
 	struct drm_device *dev = data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 rp_state_cap, hw_max, hw_min;
 	int ret;
 
 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
···
 	 */
 	if (IS_VALLEYVIEW(dev)) {
 		val = vlv_freq_opcode(dev_priv, val);
-		dev_priv->rps.max_delay = val;
-		valleyview_set_rps(dev, val);
+
+		hw_max = valleyview_rps_max_freq(dev_priv);
+		hw_min = valleyview_rps_min_freq(dev_priv);
 	} else {
 		do_div(val, GT_FREQUENCY_MULTIPLIER);
-		dev_priv->rps.max_delay = val;
-		gen6_set_rps(dev, val);
+
+		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+		hw_max = dev_priv->rps.hw_max;
+		hw_min = (rp_state_cap >> 16) & 0xff;
 	}
+
+	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
+		mutex_unlock(&dev_priv->rps.hw_lock);
+		return -EINVAL;
+	}
+
+	dev_priv->rps.max_delay = val;
+
+	if (IS_VALLEYVIEW(dev))
+		valleyview_set_rps(dev, val);
+	else
+		gen6_set_rps(dev, val);
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
···
 {
 	struct drm_device *dev = data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 rp_state_cap, hw_max, hw_min;
 	int ret;
 
 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
···
 	 */
 	if (IS_VALLEYVIEW(dev)) {
 		val = vlv_freq_opcode(dev_priv, val);
-		dev_priv->rps.min_delay = val;
-		valleyview_set_rps(dev, val);
+
+		hw_max = valleyview_rps_max_freq(dev_priv);
+		hw_min = valleyview_rps_min_freq(dev_priv);
 	} else {
 		do_div(val, GT_FREQUENCY_MULTIPLIER);
-		dev_priv->rps.min_delay = val;
-		gen6_set_rps(dev, val);
+
+		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+		hw_max = dev_priv->rps.hw_max;
+		hw_min = (rp_state_cap >> 16) & 0xff;
 	}
+
+	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
+		mutex_unlock(&dev_priv->rps.hw_lock);
+		return -EINVAL;
+	}
+
+	dev_priv->rps.min_delay = val;
+
+	if (IS_VALLEYVIEW(dev))
+		valleyview_set_rps(dev, val);
+	else
+		gen6_set_rps(dev, val);
+
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return 0;
···
 	{"i915_dpio", i915_dpio_info, 0},
 	{"i915_llc", i915_llc, 0},
 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
+	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
 	{"i915_energy_uJ", i915_energy_uJ, 0},
 	{"i915_pc8_status", i915_pc8_status, 0},
 	{"i915_power_domain_info", i915_power_domain_info, 0},
···
 	{"i915_error_state", &i915_error_state_fops},
 	{"i915_next_seqno", &i915_next_seqno_fops},
 	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
+	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
+	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
+	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
 };
 
 void intel_display_crc_init(struct drm_device *dev)
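
The three new wm latency debugfs files print the latency table back with the 0.5 usec scaling applied to WM1+, and accept a space-separated list of raw values that wm_latency_write() rejects unless sscanf() matches exactly num_levels entries (five on HSW/BDW, four otherwise). A small userspace sketch, assuming debugfs is mounted at /sys/kernel/debug and the i915 device is DRM minor 0:

/* Sketch: dump and rewrite the primary-plane watermark latencies.
 * The path below is an assumption about the debugfs mount point
 * and DRM minor number. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/dri/0/i915_pri_wm_latency";
	char line[64];
	FILE *f = fopen(path, "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "WM0 2 (0.2 usec)" */
	fclose(f);

	/* Five raw values for HSW/BDW (four elsewhere); anything that
	 * doesn't parse to exactly num_levels values gets -EINVAL. */
	f = fopen(path, "w");
	if (!f)
		return 1;
	fprintf(f, "2 4 8 16 32\n");
	return fclose(f) ? 1 : 0;
}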

drivers/gpu/drm/i915/i915_dma.c (+3 -3)
···
 		value = HAS_WT(dev);
 		break;
 	case I915_PARAM_HAS_ALIASING_PPGTT:
-		value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
+		value = dev_priv->mm.aliasing_ppgtt || USES_FULL_PPGTT(dev);
 		break;
 	case I915_PARAM_HAS_WAIT_TIMEOUT:
 		value = 1;
···
 	i915_gem_cleanup_ringbuffer(dev);
 	i915_gem_context_fini(dev);
 	mutex_unlock(&dev->struct_mutex);
-	i915_gem_cleanup_aliasing_ppgtt(dev);
+	WARN_ON(dev_priv->mm.aliasing_ppgtt);
 	drm_mm_takedown(&dev_priv->gtt.base.mm);
 cleanup_power:
 	intel_display_power_put(dev, POWER_DOMAIN_VGA);
···
 	i915_gem_free_all_phys_object(dev);
 	i915_gem_cleanup_ringbuffer(dev);
 	i915_gem_context_fini(dev);
+	WARN_ON(dev_priv->mm.aliasing_ppgtt);
 	mutex_unlock(&dev->struct_mutex);
-	i915_gem_cleanup_aliasing_ppgtt(dev);
 	i915_gem_cleanup_stolen(dev);
 
 	if (!I915_NEED_GFX_HWS(dev))
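
With full PPGTT the aliasing_ppgtt pointer can be NULL even though per-process GTT is in use, hence the getparam fix above. Userspace can observe the change through the standard I915_GETPARAM ioctl; a minimal check (the device node path is an assumption):

/* Sketch: query the PPGTT getparam changed above from userspace. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_ALIASING_PPGTT,
		.value = &value,
	};

	if (fd < 0 || ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 1;
	/* After this patch the value is also non-zero with full PPGTT. */
	printf("PPGTT: %d\n", value);
	return 0;
}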

drivers/gpu/drm/i915/i915_drv.c (+57 -125)
···
 #include <linux/module.h>
 #include <drm/drm_crtc_helper.h>
 
-static int i915_modeset __read_mostly = -1;
-module_param_named(modeset, i915_modeset, int, 0400);
-MODULE_PARM_DESC(modeset,
-		"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
-		"1=on, -1=force vga console preference [default])");
-
-unsigned int i915_fbpercrtc __always_unused = 0;
-module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
-
-int i915_panel_ignore_lid __read_mostly = 1;
-module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
-MODULE_PARM_DESC(panel_ignore_lid,
-		"Override lid status (0=autodetect, 1=autodetect disabled [default], "
-		"-1=force lid closed, -2=force lid open)");
-
-unsigned int i915_powersave __read_mostly = 1;
-module_param_named(powersave, i915_powersave, int, 0600);
-MODULE_PARM_DESC(powersave,
-		"Enable powersavings, fbc, downclocking, etc. (default: true)");
-
-int i915_semaphores __read_mostly = -1;
-module_param_named(semaphores, i915_semaphores, int, 0400);
-MODULE_PARM_DESC(semaphores,
-		"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
-
-int i915_enable_rc6 __read_mostly = -1;
-module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400);
-MODULE_PARM_DESC(i915_enable_rc6,
-		"Enable power-saving render C-state 6. "
-		"Different stages can be selected via bitmask values "
-		"(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
-		"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
-		"default: -1 (use per-chip default)");
-
-int i915_enable_fbc __read_mostly = -1;
-module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
-MODULE_PARM_DESC(i915_enable_fbc,
-		"Enable frame buffer compression for power savings "
-		"(default: -1 (use per-chip default))");
-
-unsigned int i915_lvds_downclock __read_mostly = 0;
-module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
-MODULE_PARM_DESC(lvds_downclock,
-		"Use panel (LVDS/eDP) downclocking for power savings "
-		"(default: false)");
-
-int i915_lvds_channel_mode __read_mostly;
-module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600);
-MODULE_PARM_DESC(lvds_channel_mode,
-		"Specify LVDS channel mode "
-		"(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
-
-int i915_panel_use_ssc __read_mostly = -1;
-module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
-MODULE_PARM_DESC(lvds_use_ssc,
-		"Use Spread Spectrum Clock with panels [LVDS/eDP] "
-		"(default: auto from VBT)");
-
-int i915_vbt_sdvo_panel_type __read_mostly = -1;
-module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
-MODULE_PARM_DESC(vbt_sdvo_panel_type,
-		"Override/Ignore selection of SDVO panel mode in the VBT "
-		"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
-
-static bool i915_try_reset __read_mostly = true;
-module_param_named(reset, i915_try_reset, bool, 0600);
-MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
-
-bool i915_enable_hangcheck __read_mostly = true;
-module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
-MODULE_PARM_DESC(enable_hangcheck,
-		"Periodically check GPU activity for detecting hangs. "
-		"WARNING: Disabling this can cause system wide hangs. "
-		"(default: true)");
-
-int i915_enable_ppgtt __read_mostly = -1;
-module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0400);
-MODULE_PARM_DESC(i915_enable_ppgtt,
-		"Enable PPGTT (default: true)");
-
-int i915_enable_psr __read_mostly = 0;
-module_param_named(enable_psr, i915_enable_psr, int, 0600);
-MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
-
-unsigned int i915_preliminary_hw_support __read_mostly = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT);
-module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
-MODULE_PARM_DESC(preliminary_hw_support,
-		"Enable preliminary hardware support.");
-
-int i915_disable_power_well __read_mostly = 1;
-module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
-MODULE_PARM_DESC(disable_power_well,
-		"Disable the power well when possible (default: true)");
-
-int i915_enable_ips __read_mostly = 1;
-module_param_named(enable_ips, i915_enable_ips, int, 0600);
-MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
-
-bool i915_fastboot __read_mostly = 0;
-module_param_named(fastboot, i915_fastboot, bool, 0600);
-MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time "
-		 "(default: false)");
-
-int i915_enable_pc8 __read_mostly = 1;
-module_param_named(enable_pc8, i915_enable_pc8, int, 0600);
-MODULE_PARM_DESC(enable_pc8, "Enable support for low power package C states (PC8+) (default: true)");
-
-int i915_pc8_timeout __read_mostly = 5000;
-module_param_named(pc8_timeout, i915_pc8_timeout, int, 0600);
-MODULE_PARM_DESC(pc8_timeout, "Number of msecs of idleness required to enter PC8+ (default: 5000)");
-
-bool i915_prefault_disable __read_mostly;
-module_param_named(prefault_disable, i915_prefault_disable, bool, 0600);
-MODULE_PARM_DESC(prefault_disable,
-		"Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only.");
-
 static struct drm_driver driver;
+
+#define GEN_DEFAULT_PIPEOFFSETS \
+	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
+			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
+	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
+			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
+	.dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET }, \
+	.dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \
+	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
+
 
 static const struct intel_device_info intel_i830_info = {
 	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_845g_info = {
 	.gen = 2, .num_pipes = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_i85x_info = {
···
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.has_fbc = 1,
 	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_i865g_info = {
 	.gen = 2, .num_pipes = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_i915g_info = {
 	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 static const struct intel_device_info intel_i915gm_info = {
 	.gen = 3, .is_mobile = 1, .num_pipes = 2,
···
 	.supports_tv = 1,
 	.has_fbc = 1,
 	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 static const struct intel_device_info intel_i945g_info = {
 	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 static const struct intel_device_info intel_i945gm_info = {
 	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
···
 	.supports_tv = 1,
 	.has_fbc = 1,
 	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_i965g_info = {
···
 	.has_hotplug = 1,
 	.has_overlay = 1,
 	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_i965gm_info = {
···
 	.has_overlay = 1,
 	.supports_tv = 1,
 	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_g33_info = {
···
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_overlay = 1,
 	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_g45_info = {
 	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
 	.has_pipe_cxsr = 1, .has_hotplug = 1,
 	.ring_mask = RENDER_RING | BSD_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_gm45_info = {
···
 	.has_pipe_cxsr = 1, .has_hotplug = 1,
 	.supports_tv = 1,
 	.ring_mask = RENDER_RING | BSD_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_pineview_info = {
 	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_overlay = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_ironlake_d_info = {
 	.gen = 5, .num_pipes = 2,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.ring_mask = RENDER_RING | BSD_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_ironlake_m_info = {
···
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_fbc = 1,
 	.ring_mask = RENDER_RING | BSD_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_sandybridge_d_info = {
···
 	.has_fbc = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
 	.has_llc = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_sandybridge_m_info = {
···
 	.has_fbc = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
 	.has_llc = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 #define GEN7_FEATURES \
···
 static const struct intel_device_info intel_ivybridge_d_info = {
 	GEN7_FEATURES,
 	.is_ivybridge = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_ivybridge_m_info = {
 	GEN7_FEATURES,
 	.is_ivybridge = 1,
 	.is_mobile = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_ivybridge_q_info = {
 	GEN7_FEATURES,
 	.is_ivybridge = 1,
 	.num_pipes = 0, /* legal, last one wins */
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_valleyview_m_info = {
···
 	.display_mmio_offset = VLV_DISPLAY_BASE,
 	.has_fbc = 0, /* legal, last one wins */
 	.has_llc = 0, /* legal, last one wins */
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_valleyview_d_info = {
···
 	.display_mmio_offset = VLV_DISPLAY_BASE,
 	.has_fbc = 0, /* legal, last one wins */
 	.has_llc = 0, /* legal, last one wins */
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_haswell_d_info = {
···
 	.has_ddi = 1,
 	.has_fpga_dbg = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_haswell_m_info = {
···
 	.has_ddi = 1,
 	.has_fpga_dbg = 1,
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_broadwell_d_info = {
···
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 	.has_llc = 1,
 	.has_ddi = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 static const struct intel_device_info intel_broadwell_m_info = {
···
 	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 	.has_llc = 1,
 	.has_ddi = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
 };
 
 /*
···
 
 	/* Until we get further testing... */
 	if (IS_GEN8(dev)) {
-		WARN_ON(!i915_preliminary_hw_support);
+		WARN_ON(!i915.preliminary_hw_support);
 		return false;
 	}
 
-	if (i915_semaphores >= 0)
-		return i915_semaphores;
+	if (i915.semaphores >= 0)
+		return i915.semaphores;
 
 #ifdef CONFIG_INTEL_IOMMU
 	/* Enable semaphores on SNB when IO remapping is off */
···
 	/* KMS EnterVT equivalent */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		intel_init_pch_refclk(dev);
+		drm_mode_config_reset(dev);
 
 		mutex_lock(&dev->struct_mutex);
 
···
 		intel_modeset_init_hw(dev);
 
 		drm_modeset_lock_all(dev);
-		drm_mode_config_reset(dev);
 		intel_modeset_setup_hw_state(dev, true);
 		drm_modeset_unlock_all(dev);
 
···
 	bool simulated;
 	int ret;
 
-	if (!i915_try_reset)
+	if (!i915.reset)
 		return 0;
 
 	mutex_lock(&dev->struct_mutex);
···
 
 		drm_irq_uninstall(dev);
 		drm_irq_install(dev);
+
+		/* rps/rc6 re-init is necessary to restore state lost after the
+		 * reset and the re-install of drm irq. Skip for ironlake per
+		 * previous concerns that it doesn't respond well to some forms
+		 * of re-init after reset. */
+		if (INTEL_INFO(dev)->gen > 5) {
+			mutex_lock(&dev->struct_mutex);
+			intel_enable_gt_powersave(dev);
+			mutex_unlock(&dev->struct_mutex);
+		}
+
 		intel_hpd_init(dev);
 	} else {
 		mutex_unlock(&dev->struct_mutex);
···
 	struct intel_device_info *intel_info =
 		(struct intel_device_info *) ent->driver_data;
 
-	if (IS_PRELIMINARY_HW(intel_info) && !i915_preliminary_hw_support) {
+	if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
 		DRM_INFO("This hardware requires preliminary hardware support.\n"
 			 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
 		return -ENODEV;
···
 	 * the default behavior.
 	 */
 #if defined(CONFIG_DRM_I915_KMS)
-	if (i915_modeset != 0)
+	if (i915.modeset != 0)
 		driver.driver_features |= DRIVER_MODESET;
 #endif
-	if (i915_modeset == 1)
+	if (i915.modeset == 1)
 		driver.driver_features |= DRIVER_MODESET;
 
 #ifdef CONFIG_VGA_CONSOLE
-	if (vgacon_text_force() && i915_modeset == -1)
+	if (vgacon_text_force() && i915.modeset == -1)
 		driver.driver_features &= ~DRIVER_MODESET;
 #endif
 
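
GEN_DEFAULT_PIPEOFFSETS feeds Antti's display register mmio offset refactor from the tag summary: per-pipe register addresses become table lookups in intel_device_info rather than fixed-stride arithmetic, which is what lets the eDP transcoder's oddball offsets fit the same scheme. A sketch of the lookup shape; PIPE_REG() is a made-up name for illustration, not the macro the refactor actually introduces:

/* Illustrative only: the idea behind GEN_DEFAULT_PIPEOFFSETS is to
 * rebase a pipe A register onto any pipe via the per-device table
 * instead of multiplying by a fixed stride. */
#define PIPE_REG(dev, pipe, reg)				\
	((reg) - INTEL_INFO(dev)->pipe_offsets[PIPE_A] +	\
	 INTEL_INFO(dev)->pipe_offsets[(pipe)] +		\
	 INTEL_INFO(dev)->display_mmio_offset)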

drivers/gpu/drm/i915/i915_drv.h (+289 -140)
···
 	PIPE_A = 0,
 	PIPE_B,
 	PIPE_C,
-	I915_MAX_PIPES
+	_PIPE_EDP,
+	I915_MAX_PIPES = _PIPE_EDP
 };
 #define pipe_name(p) ((p) + 'A')
 
···
 	TRANSCODER_A = 0,
 	TRANSCODER_B,
 	TRANSCODER_C,
-	TRANSCODER_EDP = 0xF,
+	TRANSCODER_EDP,
+	I915_MAX_TRANSCODERS
 };
 #define transcoder_name(t) ((t) + 'A')
 
···
 
 struct drm_i915_error_state {
 	struct kref ref;
+	struct timeval time;
+
+	/* Generic register state */
 	u32 eir;
 	u32 pgtbl_er;
 	u32 ier;
 	u32 ccid;
 	u32 derrmr;
 	u32 forcewake;
-	bool waiting[I915_NUM_RINGS];
-	u32 pipestat[I915_MAX_PIPES];
-	u32 tail[I915_NUM_RINGS];
-	u32 head[I915_NUM_RINGS];
-	u32 ctl[I915_NUM_RINGS];
-	u32 ipeir[I915_NUM_RINGS];
-	u32 ipehr[I915_NUM_RINGS];
-	u32 instdone[I915_NUM_RINGS];
-	u32 acthd[I915_NUM_RINGS];
-	u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
-	u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
-	u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
-	/* our own tracking of ring head and tail */
-	u32 cpu_ring_head[I915_NUM_RINGS];
-	u32 cpu_ring_tail[I915_NUM_RINGS];
 	u32 error; /* gen6+ */
 	u32 err_int; /* gen7 */
-	u32 bbstate[I915_NUM_RINGS];
-	u32 instpm[I915_NUM_RINGS];
-	u32 instps[I915_NUM_RINGS];
-	u32 extra_instdone[I915_NUM_INSTDONE_REG];
-	u32 seqno[I915_NUM_RINGS];
-	u64 bbaddr[I915_NUM_RINGS];
-	u32 fault_reg[I915_NUM_RINGS];
 	u32 done_reg;
-	u32 faddr[I915_NUM_RINGS];
+	u32 gac_eco;
+	u32 gam_ecochk;
+	u32 gab_ctl;
+	u32 gfx_mode;
+	u32 extra_instdone[I915_NUM_INSTDONE_REG];
+	u32 pipestat[I915_MAX_PIPES];
 	u64 fence[I915_MAX_NUM_FENCES];
-	struct timeval time;
+	struct intel_overlay_error_state *overlay;
+	struct intel_display_error_state *display;
+
 	struct drm_i915_error_ring {
 		bool valid;
+		/* Software tracked state */
+		bool waiting;
+		int hangcheck_score;
+		enum intel_ring_hangcheck_action hangcheck_action;
+		int num_requests;
+
+		/* our own tracking of ring head and tail */
+		u32 cpu_ring_head;
+		u32 cpu_ring_tail;
+
+		u32 semaphore_seqno[I915_NUM_RINGS - 1];
+
+		/* Register state */
+		u32 tail;
+		u32 head;
+		u32 ctl;
+		u32 hws;
+		u32 ipeir;
+		u32 ipehr;
+		u32 instdone;
+		u32 acthd;
+		u32 bbstate;
+		u32 instpm;
+		u32 instps;
+		u32 seqno;
+		u64 bbaddr;
+		u32 fault_reg;
+		u32 faddr;
+		u32 rc_psmi; /* sleep state */
+		u32 semaphore_mboxes[I915_NUM_RINGS - 1];
+
 		struct drm_i915_error_object {
 			int page_count;
 			u32 gtt_offset;
 			u32 *pages[0];
-		} *ringbuffer, *batchbuffer, *ctx;
+		} *ringbuffer, *batchbuffer, *ctx, *hws_page;
+
 		struct drm_i915_error_request {
 			long jiffies;
 			u32 seqno;
 			u32 tail;
 		} *requests;
-		int num_requests;
+
+		struct {
+			u32 gfx_mode;
+			union {
+				u64 pdp[4];
+				u32 pp_dir_base;
+			};
+		} vm_info;
 	} ring[I915_NUM_RINGS];
 	struct drm_i915_error_buffer {
 		u32 size;
···
 		s32 ring:4;
 		u32 cache_level:3;
 	} **active_bo, **pinned_bo;
+
 	u32 *active_bo_count, *pinned_bo_count;
-	struct intel_overlay_error_state *overlay;
-	struct intel_display_error_state *display;
-	int hangcheck_score[I915_NUM_RINGS];
-	enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
 };
 
 struct intel_connector;
···
 	u8 gen;
 	u8 ring_mask; /* Rings supported by the HW */
 	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
+	/* Register offsets for the various display pipes and transcoders */
+	int pipe_offsets[I915_MAX_TRANSCODERS];
+	int trans_offsets[I915_MAX_TRANSCODERS];
+	int dpll_offsets[I915_MAX_PIPES];
+	int dpll_md_offsets[I915_MAX_PIPES];
+	int palette_offsets[I915_MAX_PIPES];
 };
 
 #undef DEFINE_FLAG
···
 };
 
 typedef uint32_t gen6_gtt_pte_t;
+
+/**
+ * A VMA represents a GEM BO that is bound into an address space. Therefore, a
+ * VMA's presence cannot be guaranteed before binding, or after unbinding the
+ * object into/from the address space.
+ *
+ * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
+ * will always be <= an objects lifetime. So object refcounting should cover us.
+ */
+struct i915_vma {
+	struct drm_mm_node node;
+	struct drm_i915_gem_object *obj;
+	struct i915_address_space *vm;
+
+	/** This object's place on the active/inactive lists */
+	struct list_head mm_list;
+
+	struct list_head vma_link; /* Link in the object's VMA list */
+
+	/** This vma's place in the batchbuffer or on the eviction list */
+	struct list_head exec_list;
+
+	/**
+	 * Used for performing relocations during execbuffer insertion.
+	 */
+	struct hlist_node exec_node;
+	unsigned long exec_handle;
+	struct drm_i915_gem_exec_object2 *exec_entry;
+
+	/**
+	 * How many users have pinned this object in GTT space. The following
+	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
+	 * (via user_pin_count), execbuffer (objects are not allowed multiple
+	 * times for the same batchbuffer), and the framebuffer code. When
+	 * switching/pageflipping, the framebuffer code has at most two buffers
+	 * pinned per crtc.
+	 *
+	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+	 * bits with absolutely no headroom. So use 4 bits. */
+	unsigned int pin_count:4;
+#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
+
+	/** Unmap an object from an address space. This usually consists of
+	 * setting the valid PTE entries to a reserved scratch page. */
+	void (*unbind_vma)(struct i915_vma *vma);
+	/* Map an object into an address space with the given cache flags. */
+#define GLOBAL_BIND (1<<0)
+	void (*bind_vma)(struct i915_vma *vma,
+			 enum i915_cache_level cache_level,
+			 u32 flags);
+};
 
 struct i915_address_space {
 	struct drm_mm mm;
···
 
 struct i915_hw_ppgtt {
 	struct i915_address_space base;
+	struct kref ref;
+	struct drm_mm_node node;
 	unsigned num_pd_entries;
 	union {
 		struct page **pt_pages;
···
 	dma_addr_t *pt_dma_addr;
 	dma_addr_t *gen8_pt_dma_addr[4];
 	};
-	int (*enable)(struct drm_device *dev);
-};
 
-/**
- * A VMA represents a GEM BO that is bound into an address space. Therefore, a
- * VMA's presence cannot be guaranteed before binding, or after unbinding the
- * object into/from the address space.
- *
- * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
- * will always be <= an objects lifetime. So object refcounting should cover us.
- */
-struct i915_vma {
-	struct drm_mm_node node;
-	struct drm_i915_gem_object *obj;
-	struct i915_address_space *vm;
-
-	/** This object's place on the active/inactive lists */
-	struct list_head mm_list;
-
-	struct list_head vma_link; /* Link in the object's VMA list */
-
-	/** This vma's place in the batchbuffer or on the eviction list */
-	struct list_head exec_list;
-
-	/**
-	 * Used for performing relocations during execbuffer insertion.
-	 */
-	struct hlist_node exec_node;
-	unsigned long exec_handle;
-	struct drm_i915_gem_exec_object2 *exec_entry;
-
+	int (*enable)(struct i915_hw_ppgtt *ppgtt);
+	int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
+			 struct intel_ring_buffer *ring,
+			 bool synchronous);
+	void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
 };
 
 struct i915_ctx_hang_stats {
···
 	bool is_initialized;
 	uint8_t remap_slice;
 	struct drm_i915_file_private *file_priv;
-	struct intel_ring_buffer *ring;
+	struct intel_ring_buffer *last_ring;
 	struct drm_i915_gem_object *obj;
 	struct i915_ctx_hang_stats hang_stats;
+	struct i915_address_space *vm;
 
 	struct list_head link;
 };
···
 	u32 savePFIT_CONTROL;
 	u32 save_palette_a[256];
 	u32 save_palette_b[256];
-	u32 saveDPFC_CB_BASE;
-	u32 saveFBC_CFB_BASE;
-	u32 saveFBC_LL_BASE;
 	u32 saveFBC_CONTROL;
-	u32 saveFBC_CONTROL2;
 	u32 saveIER;
 	u32 saveIIR;
 	u32 saveIMR;
···
 	struct work_struct work;
 	u32 pm_iir;
 
-	/* The below variables an all the rps hw state are protected by
-	 * dev->struct mutext. */
 	u8 cur_delay;
 	u8 min_delay;
 	u8 max_delay;
···
 	u8 rp1_delay;
 	u8 rp0_delay;
 	u8 hw_max;
+
+	bool rp_up_masked;
+	bool rp_down_masked;
 
 	int last_adj;
 	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
···
 	drm_dma_handle_t *status_page_dmah;
 	struct resource mch_res;
 
-	atomic_t irq_received;
-
 	/* protects the irq masks */
 	spinlock_t irq_lock;
 
···
 	 */
 	unsigned int fence_dirty:1;
 
-	/** How many users have pinned this object in GTT space. The following
-	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
-	 * (via user_pin_count), execbuffer (objects are not allowed multiple
-	 * times for the same batchbuffer), and the framebuffer code. When
-	 * switching/pageflipping, the framebuffer code has at most two buffers
-	 * pinned per crtc.
-	 *
-	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
-	 * bits with absolutely no headroom. So use 4 bits. */
-	unsigned int pin_count:4;
-#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
-
 	/**
 	 * Is the object at the current location in the gtt mappable and
 	 * fenceable? Used to avoid costly recalculations.
···
 	} mm;
 	struct idr context_idr;
 
-	struct i915_ctx_hang_stats hang_stats;
+	struct i915_hw_context *private_default_ctx;
 	atomic_t rps_wait_boost;
 };
···
 #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
 
 #define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6)
-#define HAS_ALIASING_PPGTT(dev)	(INTEL_INFO(dev)->gen >=6 && !IS_VALLEYVIEW(dev))
+#define HAS_ALIASING_PPGTT(dev)	(INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev))
+#define HAS_PPGTT(dev)		(INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev) \
+				 && !IS_BROADWELL(dev))
+#define USES_PPGTT(dev)		intel_enable_ppgtt(dev, false)
+#define USES_FULL_PPGTT(dev)	intel_enable_ppgtt(dev, true)
 
 #define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
 #define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)
···
 
 extern const struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
-extern unsigned int i915_fbpercrtc __always_unused;
-extern int i915_panel_ignore_lid __read_mostly;
-extern unsigned int i915_powersave __read_mostly;
-extern int i915_semaphores __read_mostly;
-extern unsigned int i915_lvds_downclock __read_mostly;
-extern int i915_lvds_channel_mode __read_mostly;
-extern int i915_panel_use_ssc __read_mostly;
-extern int i915_vbt_sdvo_panel_type __read_mostly;
-extern int i915_enable_rc6 __read_mostly;
-extern int i915_enable_fbc __read_mostly;
-extern bool i915_enable_hangcheck __read_mostly;
-extern int i915_enable_ppgtt __read_mostly;
-extern int i915_enable_psr __read_mostly;
-extern unsigned int i915_preliminary_hw_support __read_mostly;
-extern int i915_disable_power_well __read_mostly;
-extern int i915_enable_ips __read_mostly;
-extern bool i915_fastboot __read_mostly;
-extern int i915_enable_pc8 __read_mostly;
-extern int i915_pc8_timeout __read_mostly;
-extern bool i915_prefault_disable __read_mostly;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
 extern int i915_resume(struct drm_device *dev);
 extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
 extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
+
+/* i915_params.c */
+struct i915_params {
+	int modeset;
+	int panel_ignore_lid;
+	unsigned int powersave;
+	int semaphores;
+	unsigned int lvds_downclock;
+	int lvds_channel_mode;
+	int panel_use_ssc;
+	int vbt_sdvo_panel_type;
+	int enable_rc6;
+	int enable_fbc;
+	bool enable_hangcheck;
+	int enable_ppgtt;
+	int enable_psr;
+	unsigned int preliminary_hw_support;
+	int disable_power_well;
+	int enable_ips;
+	bool fastboot;
+	int enable_pc8;
+	int pc8_timeout;
+	bool prefault_disable;
+	bool reset;
+	int invert_brightness;
+};
+extern struct i915_params i915 __read_mostly;
 
 /* i915_dma.c */
 void i915_update_dri1_breadcrumb(struct drm_device *dev);
···
 void i915_queue_hangcheck(struct drm_device *dev);
 void i915_handle_error(struct drm_device *dev, bool wedged);
 
+void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir,
+							int new_delay);
 extern void intel_irq_init(struct drm_device *dev);
 extern void intel_hpd_init(struct drm_device *dev);
 
···
 			 const struct drm_i915_gem_object_ops *ops);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size);
+void i915_init_vm(struct drm_i915_private *dev_priv,
+		  struct i915_address_space *vm);
 void i915_gem_free_object(struct drm_gem_object *obj);
 void i915_gem_vma_destroy(struct i915_vma *vma);
 
···
 				     uint32_t alignment,
 				     bool map_and_fenceable,
 				     bool nonblocking);
-void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
+void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
 int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
···
 				  struct i915_address_space *vm);
 
 struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
+static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) {
+	struct i915_vma *vma;
+	list_for_each_entry(vma, &obj->vma_list, vma_link)
+		if (vma->pin_count > 0)
+			return true;
+	return false;
+}
 
 /* Some GGTT VM helpers */
 #define obj_to_ggtt(obj) \
···
 }
 
 /* i915_gem_context.c */
+#define ctx_to_ppgtt(ctx) container_of((ctx)->vm, struct i915_hw_ppgtt, base)
 int __must_check i915_gem_context_init(struct drm_device *dev);
 void i915_gem_context_fini(struct drm_device *dev);
+void i915_gem_context_reset(struct drm_device *dev);
+int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
+int i915_gem_context_enable(struct drm_i915_private *dev_priv);
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
 int i915_switch_context(struct intel_ring_buffer *ring,
-			struct drm_file *file, int to_id);
+			struct drm_file *file, struct i915_hw_context *to);
+struct i915_hw_context *
+i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
 void i915_gem_context_free(struct kref *ctx_ref);
 static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
 {
-	kref_get(&ctx->ref);
+	if (ctx->obj && HAS_HW_CONTEXTS(ctx->obj->base.dev))
+		kref_get(&ctx->ref);
 }
 
 static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
 {
-	kref_put(&ctx->ref, i915_gem_context_free);
+	if (ctx->obj && HAS_HW_CONTEXTS(ctx->obj->base.dev))
+		kref_put(&ctx->ref, i915_gem_context_free);
 }
 
-struct i915_ctx_hang_stats * __must_check
-i915_gem_context_get_hang_stats(struct drm_device *dev,
-				struct drm_file *file,
-				u32 id);
+static inline bool i915_gem_context_is_default(const struct i915_hw_context *c)
+{
+	return c->id == DEFAULT_CONTEXT_ID;
+}
+
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file);
 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
 				   struct drm_file *file);
-
-/* i915_gem_gtt.c */
-void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
-void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
-			    struct drm_i915_gem_object *obj,
-			    enum i915_cache_level cache_level);
-void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
-			      struct drm_i915_gem_object *obj);
-
-void i915_check_and_clear_faults(struct drm_device *dev);
-void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
-void i915_gem_restore_gtt_mappings(struct drm_device *dev);
-int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
-			      enum i915_cache_level cache_level);
-void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
-void i915_gem_init_global_gtt(struct drm_device *dev);
-void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
-			       unsigned long mappable_end, unsigned long end);
-int i915_gem_gtt_init(struct drm_device *dev);
-static inline void i915_gem_chipset_flush(struct drm_device *dev)
-{
-	if (INTEL_INFO(dev)->gen < 6)
-		intel_gtt_chipset_flush();
-}
-
 
 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct drm_device *dev,
···
 					  bool nonblock);
 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 int i915_gem_evict_everything(struct drm_device *dev);
+
+/* i915_gem_gtt.c */
+void i915_check_and_clear_faults(struct drm_device *dev);
+void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
+void i915_gem_restore_gtt_mappings(struct drm_device *dev);
+int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
+void i915_gem_init_global_gtt(struct drm_device *dev);
+void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
+			       unsigned long mappable_end, unsigned long end);
+int i915_gem_gtt_init(struct drm_device *dev);
+static inline void i915_gem_chipset_flush(struct drm_device *dev)
+{
+	if (INTEL_INFO(dev)->gen < 6)
+		intel_gtt_chipset_flush();
+}
+int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
+static inline bool intel_enable_ppgtt(struct drm_device *dev, bool full)
+{
+	if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
+		return false;
+
+	if (i915.enable_ppgtt == 1 && full)
+		return false;
+
+#ifdef CONFIG_INTEL_IOMMU
+	/* Disable ppgtt on SNB if VT-d is on. */
+	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
+		DRM_INFO("Disabling PPGTT because VT-d is on\n");
+		return false;
+	}
+#endif
+
+	if (full)
+		return HAS_PPGTT(dev);
+	else
+		return HAS_ALIASING_PPGTT(dev);
+}
+
+static inline void ppgtt_release(struct kref *kref)
+{
+	struct i915_hw_ppgtt *ppgtt = container_of(kref, struct i915_hw_ppgtt, ref);
+	struct drm_device *dev = ppgtt->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_address_space *vm = &ppgtt->base;
+
+	if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
+	    (list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
+		ppgtt->base.cleanup(&ppgtt->base);
+		return;
+	}
+
+	/*
+	 * Make sure vmas are unbound before we take down the drm_mm
+	 *
+	 * FIXME: Proper refcounting should take care of this, this shouldn't be
+	 * needed at all.
+	 */
+	if (!list_empty(&vm->active_list)) {
+		struct i915_vma *vma;
+
+		list_for_each_entry(vma, &vm->active_list, mm_list)
+			if (WARN_ON(list_empty(&vma->vma_link) ||
+				    list_is_singular(&vma->vma_link)))
+				break;
+
+		i915_gem_evict_vm(&ppgtt->base, true);
+	} else {
+		i915_gem_retire_requests(dev);
+		i915_gem_evict_vm(&ppgtt->base, false);
+	}
+
+	ppgtt->base.cleanup(&ppgtt->base);
+}
 
 /* i915_gem_stolen.c */
 int i915_gem_init_stolen(struct drm_device *dev);
···
 	unsigned long j = timespec_to_jiffies(value);
 
 	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
+}
+
+/*
+ * If you need to wait X milliseconds between events A and B, but event B
+ * doesn't happen exactly after event A, you record the timestamp (jiffies) of
+ * when event A happened, then just before event B you call this function and
+ * pass the timestamp as the first argument, and X as the second argument.
+ */
+static inline void
+wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
+{
+	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
+
+	/*
+	 * Don't re-read the value of "jiffies" every time since it may change
+	 * behind our back and break the math.
+	 */
+	tmp_jiffies = jiffies;
+	target_jiffies = timestamp_jiffies +
+			 msecs_to_jiffies_timeout(to_wait_ms);
+
+	if (time_after(target_jiffies, tmp_jiffies)) {
+		remaining_jiffies = target_jiffies - tmp_jiffies;
+		while (remaining_jiffies)
+			remaining_jiffies =
+			    schedule_timeout_uninterruptible(remaining_jiffies);
+	}
 }
 
 #endif
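
wait_remaining_ms_from_jiffies() above supports the DP panel power sequencing work mentioned in the tag: record jiffies when the panel was last power-cycled, then enforce only the remaining part of the required delay when the next power-on actually happens. A sketch of the calling pattern; the structure and field names here are illustrative stand-ins, not the real intel_dp members:

/* Sketch of the calling pattern; panel_example and its fields are
 * illustrative, not actual i915 structures. */
struct panel_example {
	unsigned long last_power_cycle;	/* jiffies recorded at event A */
};

static void panel_example_power_on(struct panel_example *p)
{
	/* Event B: sleep only for whatever part of the required 300 ms
	 * has not already elapsed since the last power cycle. */
	wait_remaining_ms_from_jiffies(p->last_power_cycle, 300);

	/* ... program the panel power sequencer here ... */

	p->last_power_cycle = jiffies;	/* becomes event A for next call */
}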

drivers/gpu/drm/i915/i915_gem.c (+167 -245)
···
     pinned = 0;
     mutex_lock(&dev->struct_mutex);
     list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
-        if (obj->pin_count)
+        if (i915_gem_obj_is_pinned(obj))
             pinned += i915_gem_obj_ggtt_size(obj);
     mutex_unlock(&dev->struct_mutex);
···
 
     mutex_unlock(&dev->struct_mutex);
 
-    if (likely(!i915_prefault_disable) && !prefaulted) {
+    if (likely(!i915.prefault_disable) && !prefaulted) {
         ret = fault_in_multipages_writeable(user_data, remain);
         /* Userspace is tricking us, but we've already clobbered
          * its pages with the prefault and promised to write the
···
 }
 
 out_unpin:
-    i915_gem_object_unpin(obj);
+    i915_gem_object_ggtt_unpin(obj);
 out:
     return ret;
 }
···
                args->size))
         return -EFAULT;
 
-    if (likely(!i915_prefault_disable)) {
+    if (likely(!i915.prefault_disable)) {
         ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
                            args->size);
         if (ret)
···
     /* Finally, remap it using the new GTT offset */
     ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
 unpin:
-    i915_gem_object_unpin(obj);
+    i915_gem_object_ggtt_unpin(obj);
 unlock:
     mutex_unlock(&dev->struct_mutex);
 out:
···
         ret = VM_FAULT_OOM;
         break;
     case -ENOSPC:
+    case -EFAULT:
         ret = VM_FAULT_SIGBUS;
         break;
     default:
···
 
     if (obj->madv != I915_MADV_WILLNEED) {
         DRM_ERROR("Attempting to mmap a purgeable buffer\n");
-        ret = -EINVAL;
+        ret = -EFAULT;
         goto out;
     }
···
 
     if (obj->madv != I915_MADV_WILLNEED) {
         DRM_ERROR("Attempting to obtain a purgeable object\n");
-        return -EINVAL;
+        return -EFAULT;
     }
 
     BUG_ON(obj->pages_pin_count);
···
 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
     struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-    struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
-    struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
+    struct i915_address_space *vm;
+    struct i915_vma *vma;
 
     BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
     BUG_ON(!obj->active);
 
-    list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list);
+    list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+        vma = i915_gem_obj_to_vma(obj, vm);
+        if (vma && !list_empty(&vma->mm_list))
+            list_move_tail(&vma->mm_list, &vm->inactive_list);
+    }
 
     list_del_init(&obj->ring_list);
     obj->ring = NULL;
···
     spin_unlock(&file_priv->mm.lock);
 }
 
-static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
-                    struct i915_address_space *vm)
+static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
+                   const struct i915_hw_context *ctx)
 {
-    if (acthd >= i915_gem_obj_offset(obj, vm) &&
-        acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
-        return true;
+    unsigned long elapsed;
 
-    return false;
-}
+    elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
 
-static bool i915_head_inside_request(const u32 acthd_unmasked,
-                     const u32 request_start,
-                     const u32 request_end)
-{
-    const u32 acthd = acthd_unmasked & HEAD_ADDR;
-
-    if (request_start < request_end) {
-        if (acthd >= request_start && acthd < request_end)
-            return true;
-    } else if (request_start > request_end) {
-        if (acthd >= request_start || acthd < request_end)
-            return true;
-    }
-
-    return false;
-}
-
-static struct i915_address_space *
-request_to_vm(struct drm_i915_gem_request *request)
-{
-    struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
-    struct i915_address_space *vm;
-
-    vm = &dev_priv->gtt.base;
-
-    return vm;
-}
-
-static bool i915_request_guilty(struct drm_i915_gem_request *request,
-                const u32 acthd, bool *inside)
-{
-    /* There is a possibility that unmasked head address
-     * pointing inside the ring, matches the batch_obj address range.
-     * However this is extremely unlikely.
-     */
-    if (request->batch_obj) {
-        if (i915_head_inside_object(acthd, request->batch_obj,
-                        request_to_vm(request))) {
-            *inside = true;
-            return true;
-        }
-    }
-
-    if (i915_head_inside_request(acthd, request->head, request->tail)) {
-        *inside = false;
-        return true;
-    }
-
-    return false;
-}
-
-static bool i915_context_is_banned(const struct i915_ctx_hang_stats *hs)
-{
-    const unsigned long elapsed = get_seconds() - hs->guilty_ts;
-
-    if (hs->banned)
+    if (ctx->hang_stats.banned)
         return true;
 
     if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
-        DRM_ERROR("context hanging too fast, declaring banned!\n");
+        if (dev_priv->gpu_error.stop_rings == 0 &&
+            i915_gem_context_is_default(ctx)) {
+            DRM_ERROR("gpu hanging too fast, banning!\n");
+        } else {
+            DRM_DEBUG("context hanging too fast, banning!\n");
+        }
+
         return true;
     }
 
     return false;
 }
 
-static void i915_set_reset_status(struct intel_ring_buffer *ring,
-                  struct drm_i915_gem_request *request,
-                  u32 acthd)
+static void i915_set_reset_status(struct drm_i915_private *dev_priv,
+                  struct i915_hw_context *ctx,
+                  const bool guilty)
 {
-    struct i915_ctx_hang_stats *hs = NULL;
-    bool inside, guilty;
-    unsigned long offset = 0;
+    struct i915_ctx_hang_stats *hs;
 
-    /* Innocent until proven guilty */
-    guilty = false;
+    if (WARN_ON(!ctx))
+        return;
 
-    if (request->batch_obj)
-        offset = i915_gem_obj_offset(request->batch_obj,
-                         request_to_vm(request));
+    hs = &ctx->hang_stats;
 
-    if (ring->hangcheck.action != HANGCHECK_WAIT &&
-        i915_request_guilty(request, acthd, &inside)) {
-        DRM_DEBUG("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
-              ring->name,
-              inside ? "inside" : "flushing",
-              offset,
-              request->ctx ? request->ctx->id : 0,
-              acthd);
-
-        guilty = true;
-    }
-
-    /* If contexts are disabled or this is the default context, use
-     * file_priv->reset_state
-     */
-    if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
-        hs = &request->ctx->hang_stats;
-    else if (request->file_priv)
-        hs = &request->file_priv->hang_stats;
-
-    if (hs) {
-        if (guilty) {
-            hs->banned = i915_context_is_banned(hs);
-            hs->batch_active++;
-            hs->guilty_ts = get_seconds();
-        } else {
-            hs->batch_pending++;
-        }
+    if (guilty) {
+        hs->banned = i915_context_is_banned(dev_priv, ctx);
+        hs->batch_active++;
+        hs->guilty_ts = get_seconds();
+    } else {
+        hs->batch_pending++;
     }
 }
···
     kfree(request);
 }
 
-static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
-                       struct intel_ring_buffer *ring)
+static struct drm_i915_gem_request *
+i915_gem_find_first_non_complete(struct intel_ring_buffer *ring)
 {
-    u32 completed_seqno = ring->get_seqno(ring, false);
-    u32 acthd = intel_ring_get_active_head(ring);
     struct drm_i915_gem_request *request;
+    const u32 completed_seqno = ring->get_seqno(ring, false);
 
     list_for_each_entry(request, &ring->request_list, list) {
         if (i915_seqno_passed(completed_seqno, request->seqno))
             continue;
 
-        i915_set_reset_status(ring, request, acthd);
+        return request;
     }
+
+    return NULL;
+}
+
+static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
+                       struct intel_ring_buffer *ring)
+{
+    struct drm_i915_gem_request *request;
+    bool ring_hung;
+
+    request = i915_gem_find_first_non_complete(ring);
+
+    if (request == NULL)
+        return;
+
+    ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
+
+    i915_set_reset_status(dev_priv, request->ctx, ring_hung);
+
+    list_for_each_entry_continue(request, &ring->request_list, list)
+        i915_set_reset_status(dev_priv, request->ctx, false);
 }
 
 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
···
 
     i915_gem_cleanup_ringbuffer(dev);
 
+    i915_gem_context_reset(dev);
+
     i915_gem_restore_fences(dev);
 }
···
     WARN_ON(i915_verify_lists(ring->dev));
 
     seqno = ring->get_seqno(ring, true);
+
+    /* Move any buffers on the active list that are no longer referenced
+     * by the ringbuffer to the flushing/inactive lists as appropriate,
+     * before we free the context associated with the requests.
+     */
+    while (!list_empty(&ring->active_list)) {
+        struct drm_i915_gem_object *obj;
+
+        obj = list_first_entry(&ring->active_list,
+                       struct drm_i915_gem_object,
+                       ring_list);
+
+        if (!i915_seqno_passed(seqno, obj->last_read_seqno))
+            break;
+
+        i915_gem_object_move_to_inactive(obj);
+    }
+
 
     while (!list_empty(&ring->request_list)) {
         struct drm_i915_gem_request *request;
···
         ring->last_retired_head = request->tail;
 
         i915_gem_free_request(request);
-    }
-
-    /* Move any buffers on the active list that are no longer referenced
-     * by the ringbuffer to the flushing/inactive lists as appropriate.
-     */
-    while (!list_empty(&ring->active_list)) {
-        struct drm_i915_gem_object *obj;
-
-        obj = list_first_entry(&ring->active_list,
-                       struct drm_i915_gem_object,
-                       ring_list);
-
-        if (!i915_seqno_passed(seqno, obj->last_read_seqno))
-            break;
-
-        i915_gem_object_move_to_inactive(obj);
     }
 
     if (unlikely(ring->trace_irq_seqno &&
···
     drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
     int ret;
 
-    /* For now we only ever use 1 vma per object */
-    WARN_ON(!list_is_singular(&obj->vma_list));
-
     if (list_empty(&vma->vma_link))
         return 0;
···
         return 0;
     }
 
-    if (obj->pin_count)
+    if (vma->pin_count)
         return -EBUSY;
 
     BUG_ON(obj->pages == NULL);
···
 
     trace_i915_vma_unbind(vma);
 
-    if (obj->has_global_gtt_mapping)
-        i915_gem_gtt_unbind_object(obj);
-    if (obj->has_aliasing_ppgtt_mapping) {
-        i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
-        obj->has_aliasing_ppgtt_mapping = 0;
-    }
+    vma->unbind_vma(vma);
+
     i915_gem_gtt_finish_object(obj);
 
     list_del(&vma->mm_list);
···
     if (!i915_gem_obj_ggtt_bound(obj))
         return 0;
 
-    if (obj->pin_count)
+    if (i915_gem_obj_to_ggtt(obj)->pin_count)
         return -EBUSY;
 
     BUG_ON(obj->pages == NULL);
···
 
     /* Flush everything onto the inactive list.
      */
     for_each_ring(ring, dev_priv, i) {
-        ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
+        ret = i915_switch_context(ring, NULL, ring->default_context);
         if (ret)
             return ret;
···
 
     i915_gem_object_pin_pages(obj);
 
-    BUG_ON(!i915_is_ggtt(vm));
-
     vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
     if (IS_ERR(vma)) {
         ret = PTR_ERR(vma);
         goto err_unpin;
     }
-
-    /* For now we only ever use 1 vma per object */
-    WARN_ON(!list_is_singular(&obj->vma_list));
 
 search_free:
     ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
···
               enum i915_cache_level cache_level)
 {
     struct drm_device *dev = obj->base.dev;
-    drm_i915_private_t *dev_priv = dev->dev_private;
     struct i915_vma *vma;
     int ret;
 
     if (obj->cache_level == cache_level)
         return 0;
 
-    if (obj->pin_count) {
+    if (i915_gem_obj_is_pinned(obj)) {
         DRM_DEBUG("can not change the cache level of pinned objects\n");
         return -EBUSY;
     }
···
             return ret;
         }
 
-        if (obj->has_global_gtt_mapping)
-            i915_gem_gtt_bind_object(obj, cache_level);
-        if (obj->has_aliasing_ppgtt_mapping)
-            i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
-                           obj, cache_level);
+        list_for_each_entry(vma, &obj->vma_list, vma_link)
+            vma->bind_vma(vma, cache_level, 0);
     }
 
     list_for_each_entry(vma, &obj->vma_list, vma_link)
···
      * subtracting the potential reference by the user, any pin_count
      * remains, it must be due to another use by the display engine.
      */
-    return obj->pin_count - !!obj->user_pin_count;
+    return i915_gem_obj_to_ggtt(obj)->pin_count - !!obj->user_pin_count;
 }
···
 void
 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
 {
-    i915_gem_object_unpin(obj);
+    i915_gem_object_ggtt_unpin(obj);
     obj->pin_display = is_pin_display(obj);
 }
···
             bool map_and_fenceable,
             bool nonblocking)
 {
+    const u32 flags = map_and_fenceable ? GLOBAL_BIND : 0;
     struct i915_vma *vma;
     int ret;
-
-    if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
-        return -EBUSY;
 
     WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
 
     vma = i915_gem_obj_to_vma(obj, vm);
 
     if (vma) {
+        if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
+            return -EBUSY;
+
         if ((alignment &&
              vma->node.start & (alignment - 1)) ||
             (map_and_fenceable && !obj->map_and_fenceable)) {
-            WARN(obj->pin_count,
+            WARN(vma->pin_count,
                  "bo is already pinned with incorrect alignment:"
                  " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
                  " obj->map_and_fenceable=%d\n",
···
     }
 
     if (!i915_gem_obj_bound(obj, vm)) {
-        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-
         ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
                          map_and_fenceable,
                          nonblocking);
         if (ret)
             return ret;
 
-        if (!dev_priv->mm.aliasing_ppgtt)
-            i915_gem_gtt_bind_object(obj, obj->cache_level);
     }
 
-    if (!obj->has_global_gtt_mapping && map_and_fenceable)
-        i915_gem_gtt_bind_object(obj, obj->cache_level);
+    vma = i915_gem_obj_to_vma(obj, vm);
 
-    obj->pin_count++;
+    vma->bind_vma(vma, obj->cache_level, flags);
+
+    i915_gem_obj_to_vma(obj, vm)->pin_count++;
     obj->pin_mappable |= map_and_fenceable;
 
     return 0;
 }
 
 void
-i915_gem_object_unpin(struct drm_i915_gem_object *obj)
+i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
 {
-    BUG_ON(obj->pin_count == 0);
-    BUG_ON(!i915_gem_obj_bound_any(obj));
+    struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
 
-    if (--obj->pin_count == 0)
+    BUG_ON(!vma);
+    BUG_ON(vma->pin_count == 0);
+    BUG_ON(!i915_gem_obj_ggtt_bound(obj));
+
+    if (--vma->pin_count == 0)
         obj->pin_mappable = false;
 }
···
     struct drm_i915_gem_pin *args = data;
     struct drm_i915_gem_object *obj;
     int ret;
+
+    if (INTEL_INFO(dev)->gen >= 6)
+        return -ENODEV;
 
     ret = i915_mutex_lock_interruptible(dev);
     if (ret)
···
 
     if (obj->madv != I915_MADV_WILLNEED) {
         DRM_ERROR("Attempting to pin a purgeable buffer\n");
-        ret = -EINVAL;
+        ret = -EFAULT;
         goto out;
     }
···
     obj->user_pin_count--;
     if (obj->user_pin_count == 0) {
         obj->pin_filp = NULL;
-        i915_gem_object_unpin(obj);
+        i915_gem_object_ggtt_unpin(obj);
     }
 
 out:
···
         goto unlock;
     }
 
-    if (obj->pin_count) {
+    if (i915_gem_obj_is_pinned(obj)) {
         ret = -EINVAL;
         goto out;
     }
···
     if (obj->phys_obj)
         i915_gem_detach_phys_object(dev, obj);
 
-    obj->pin_count = 0;
-    /* NB: 0 or 1 elements */
-    WARN_ON(!list_empty(&obj->vma_list) &&
-        !list_is_singular(&obj->vma_list));
     list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
-        int ret = i915_vma_unbind(vma);
+        int ret;
+
+        vma->pin_count = 0;
+        ret = i915_vma_unbind(vma);
         if (WARN_ON(ret == -ERESTARTSYS)) {
             bool was_interruptible;
···
         return vma;
 
     return NULL;
-}
-
-static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
-                          struct i915_address_space *vm)
-{
-    struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
-    if (vma == NULL)
-        return ERR_PTR(-ENOMEM);
-
-    INIT_LIST_HEAD(&vma->vma_link);
-    INIT_LIST_HEAD(&vma->mm_list);
-    INIT_LIST_HEAD(&vma->exec_list);
-    vma->vm = vm;
-    vma->obj = obj;
-
-    /* Keep GGTT vmas first to make debug easier */
-    if (i915_is_ggtt(vm))
-        list_add(&vma->vma_link, &obj->vma_list);
-    else
-        list_add_tail(&vma->vma_link, &obj->vma_list);
-
-    return vma;
-}
-
-struct i915_vma *
-i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
-                  struct i915_address_space *vm)
-{
-    struct i915_vma *vma;
-
-    vma = i915_gem_obj_to_vma(obj, vm);
-    if (!vma)
-        vma = __i915_gem_vma_create(obj, vm);
-
-    return vma;
 }
 
 void i915_gem_vma_destroy(struct i915_vma *vma)
···
                LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
 
     if (HAS_PCH_NOP(dev)) {
-        u32 temp = I915_READ(GEN7_MSG_CTL);
-        temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
-        I915_WRITE(GEN7_MSG_CTL, temp);
+        if (IS_IVYBRIDGE(dev)) {
+            u32 temp = I915_READ(GEN7_MSG_CTL);
+            temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
+            I915_WRITE(GEN7_MSG_CTL, temp);
+        } else if (INTEL_INFO(dev)->gen >= 7) {
+            u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
+            temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
+            I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
+        }
     }
 
     i915_gem_init_swizzling(dev);
···
         i915_gem_l3_remap(&dev_priv->ring[RCS], i);
 
     /*
-     * XXX: There was some w/a described somewhere suggesting loading
-     * contexts before PPGTT.
+     * XXX: Contexts should only be initialized once. Doing a switch to the
+     * default context switch however is something we'd like to do after
+     * reset or thaw (the latter may not actually be necessary for HW, but
+     * goes with our code better). Context switching requires rings (for
+     * the do_switch), but before enabling PPGTT. So don't move this.
      */
-    ret = i915_gem_context_init(dev);
+    ret = i915_gem_context_enable(dev_priv);
     if (ret) {
-        i915_gem_cleanup_ringbuffer(dev);
-        DRM_ERROR("Context initialization failed %d\n", ret);
-        return ret;
-    }
-
-    if (dev_priv->mm.aliasing_ppgtt) {
-        ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
-        if (ret) {
-            i915_gem_cleanup_aliasing_ppgtt(dev);
-            DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
-        }
+        DRM_ERROR("Context enable failed %d\n", ret);
+        goto err_out;
     }
 
     return 0;
+
+err_out:
+    i915_gem_cleanup_ringbuffer(dev);
+    return ret;
 }
 
 int i915_gem_init(struct drm_device *dev)
···
 
     i915_gem_init_global_gtt(dev);
 
+    ret = i915_gem_context_init(dev);
+    if (ret) {
+        mutex_unlock(&dev->struct_mutex);
+        return ret;
+    }
+
     ret = i915_gem_init_hw(dev);
     mutex_unlock(&dev->struct_mutex);
     if (ret) {
-        i915_gem_cleanup_aliasing_ppgtt(dev);
+        WARN_ON(dev_priv->mm.aliasing_ppgtt);
+        i915_gem_context_fini(dev);
+        drm_mm_takedown(&dev_priv->gtt.base.mm);
         return ret;
     }
···
     INIT_LIST_HEAD(&ring->request_list);
 }
 
-static void i915_init_vm(struct drm_i915_private *dev_priv,
-             struct i915_address_space *vm)
+void i915_init_vm(struct drm_i915_private *dev_priv,
+          struct i915_address_space *vm)
 {
+    if (!i915_is_ggtt(vm))
+        drm_mm_init(&vm->mm, vm->start, vm->total);
     vm->dev = dev_priv->dev;
     INIT_LIST_HEAD(&vm->active_list);
     INIT_LIST_HEAD(&vm->inactive_list);
     INIT_LIST_HEAD(&vm->global_link);
-    list_add(&vm->global_link, &dev_priv->vm_list);
+    list_add_tail(&vm->global_link, &dev_priv->vm_list);
 }
···
 int i915_gem_open(struct drm_device *dev, struct drm_file *file)
 {
     struct drm_i915_file_private *file_priv;
+    int ret;
 
     DRM_DEBUG_DRIVER("\n");
···
     INIT_DELAYED_WORK(&file_priv->mm.idle_work,
               i915_gem_file_idle_work_handler);
 
-    idr_init(&file_priv->context_idr);
+    ret = i915_gem_context_open(dev, file);
+    if (ret)
+        kfree(file_priv);
 
-    return 0;
+    return ret;
 }
 
 static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
···
         if (obj->active)
             continue;
 
-        if (obj->pin_count == 0 && obj->pages_pin_count == 0)
+        if (!i915_gem_obj_is_pinned(obj) && obj->pages_pin_count == 0)
             count += obj->base.size >> PAGE_SHIFT;
     }
···
     struct drm_i915_private *dev_priv = o->base.dev->dev_private;
     struct i915_vma *vma;
 
-    if (vm == &dev_priv->mm.aliasing_ppgtt->base)
+    if (!dev_priv->mm.aliasing_ppgtt ||
+        vm == &dev_priv->mm.aliasing_ppgtt->base)
         vm = &dev_priv->gtt.base;
 
     BUG_ON(list_empty(&o->vma_list));
···
     struct drm_i915_private *dev_priv = o->base.dev->dev_private;
     struct i915_vma *vma;
 
-    if (vm == &dev_priv->mm.aliasing_ppgtt->base)
+    if (!dev_priv->mm.aliasing_ppgtt ||
+        vm == &dev_priv->mm.aliasing_ppgtt->base)
         vm = &dev_priv->gtt.base;
 
     BUG_ON(list_empty(&o->vma_list));
···
         return NULL;
 
     vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
-    if (WARN_ON(vma->vm != obj_to_ggtt(obj)))
+    if (vma->vm != obj_to_ggtt(obj))
        return NULL;
 
     return vma;
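The recurring change in this file is that the pin count moves from the object onto each per-address-space VMA, with i915_gem_obj_is_pinned() scanning the object's vma list. A minimal userspace sketch of that accounting, using toy stand-in types (toy_vma, toy_object are illustrative, not kernel API; only the scan-all-VMAs rule is taken from the diff):

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_vma {
        int pin_count;          /* pin count now lives on the VMA */
        struct toy_vma *next;   /* obj->vma_list analogue */
    };

    struct toy_object {
        struct toy_vma *vma_list;
    };

    /* Mirrors the idea behind i915_gem_obj_is_pinned(): an object is
     * pinned if any of its VMAs holds a pin. */
    static bool toy_obj_is_pinned(const struct toy_object *obj)
    {
        const struct toy_vma *vma;

        for (vma = obj->vma_list; vma; vma = vma->next)
            if (vma->pin_count > 0)
                return true;
        return false;
    }

    int main(void)
    {
        struct toy_vma ggtt = { .pin_count = 0, .next = NULL };
        struct toy_vma ppgtt = { .pin_count = 1, .next = &ggtt };
        struct toy_object obj = { .vma_list = &ppgtt };

        /* Pinned through its PPGTT VMA even though the GGTT VMA is free. */
        printf("pinned: %s\n", toy_obj_is_pinned(&obj) ? "yes" : "no");
        return 0;
    }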
+322 -113
drivers/gpu/drm/i915/i915_gem_context.c
···
  * I've seen in a spec to date, and that was a workaround for a non-shipping
  * part. It should be safe to decrease this, but it's more future proof as is.
  */
-#define CONTEXT_ALIGN (64<<10)
+#define GEN6_CONTEXT_ALIGN (64<<10)
+#define GEN7_CONTEXT_ALIGN 4096
 
-static struct i915_hw_context *
-i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
-static int do_switch(struct i915_hw_context *to);
+static int do_switch(struct intel_ring_buffer *ring,
+             struct i915_hw_context *to);
+
+static size_t get_context_alignment(struct drm_device *dev)
+{
+    if (IS_GEN6(dev))
+        return GEN6_CONTEXT_ALIGN;
+
+    return GEN7_CONTEXT_ALIGN;
+}
 
 static int get_context_size(struct drm_device *dev)
 {
···
 {
     struct i915_hw_context *ctx = container_of(ctx_ref,
                            typeof(*ctx), ref);
+    struct i915_hw_ppgtt *ppgtt = NULL;
 
-    list_del(&ctx->link);
+    /* We refcount even the aliasing PPGTT to keep the code symmetric */
+    if (USES_PPGTT(ctx->obj->base.dev))
+        ppgtt = ctx_to_ppgtt(ctx);
+
+    /* XXX: Free up the object before tearing down the address space, in
+     * case we're bound in the PPGTT */
     drm_gem_object_unreference(&ctx->obj->base);
+
+    if (ppgtt)
+        kref_put(&ppgtt->ref, ppgtt_release);
+    list_del(&ctx->link);
     kfree(ctx);
 }
 
+static struct i915_hw_ppgtt *
+create_vm_for_ctx(struct drm_device *dev, struct i915_hw_context *ctx)
+{
+    struct i915_hw_ppgtt *ppgtt;
+    int ret;
+
+    ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+    if (!ppgtt)
+        return ERR_PTR(-ENOMEM);
+
+    ret = i915_gem_init_ppgtt(dev, ppgtt);
+    if (ret) {
+        kfree(ppgtt);
+        return ERR_PTR(ret);
+    }
+
+    return ppgtt;
+}
+
 static struct i915_hw_context *
-create_hw_context(struct drm_device *dev,
+__create_hw_context(struct drm_device *dev,
           struct drm_i915_file_private *file_priv)
 {
     struct drm_i915_private *dev_priv = dev->dev_private;
···
         goto err_out;
     }
 
-    /* The ring associated with the context object is handled by the normal
-     * object tracking code. We give an initial ring value simple to pass an
-     * assertion in the context switch code.
-     */
-    ctx->ring = &dev_priv->ring[RCS];
     list_add_tail(&ctx->link, &dev_priv->context_list);
 
     /* Default context will never have a file_priv */
     if (file_priv == NULL)
         return ctx;
 
-    ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID + 1, 0,
+    ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID, 0,
             GFP_KERNEL);
     if (ret < 0)
         goto err_out;
···
     return ERR_PTR(ret);
 }
 
-static inline bool is_default_context(struct i915_hw_context *ctx)
-{
-    return (ctx == ctx->ring->default_context);
-}
-
 /**
  * The default context needs to exist per ring that uses contexts. It stores the
  * context state of the GPU for applications that don't utilize HW contexts, as
  * well as an idle case.
  */
-static int create_default_context(struct drm_i915_private *dev_priv)
+static struct i915_hw_context *
+i915_gem_create_context(struct drm_device *dev,
+            struct drm_i915_file_private *file_priv,
+            bool create_vm)
 {
+    const bool is_global_default_ctx = file_priv == NULL;
+    struct drm_i915_private *dev_priv = dev->dev_private;
     struct i915_hw_context *ctx;
-    int ret;
+    int ret = 0;
 
-    BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+    BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 
-    ctx = create_hw_context(dev_priv->dev, NULL);
+    ctx = __create_hw_context(dev, file_priv);
     if (IS_ERR(ctx))
-        return PTR_ERR(ctx);
+        return ctx;
 
-    /* We may need to do things with the shrinker which require us to
-     * immediately switch back to the default context. This can cause a
-     * problem as pinning the default context also requires GTT space which
-     * may not be available. To avoid this we always pin the
-     * default context.
-     */
-    ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
-    if (ret) {
-        DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
-        goto err_destroy;
+    if (is_global_default_ctx) {
+        /* We may need to do things with the shrinker which
+         * require us to immediately switch back to the default
+         * context. This can cause a problem as pinning the
+         * default context also requires GTT space which may not
+         * be available. To avoid this we always pin the default
+         * context.
+         */
+        ret = i915_gem_obj_ggtt_pin(ctx->obj,
+                        get_context_alignment(dev),
+                        false, false);
+        if (ret) {
+            DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
+            goto err_destroy;
+        }
     }
 
-    ret = do_switch(ctx);
-    if (ret) {
-        DRM_DEBUG_DRIVER("Switch failed %d\n", ret);
-        goto err_unpin;
-    }
+    if (create_vm) {
+        struct i915_hw_ppgtt *ppgtt = create_vm_for_ctx(dev, ctx);
 
-    dev_priv->ring[RCS].default_context = ctx;
+        if (IS_ERR_OR_NULL(ppgtt)) {
+            DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
+                     PTR_ERR(ppgtt));
+            ret = PTR_ERR(ppgtt);
+            goto err_unpin;
+        } else
+            ctx->vm = &ppgtt->base;
 
-    DRM_DEBUG_DRIVER("Default HW context loaded\n");
-    return 0;
+        /* This case is reserved for the global default context and
+         * should only happen once. */
+        if (is_global_default_ctx) {
+            if (WARN_ON(dev_priv->mm.aliasing_ppgtt)) {
+                ret = -EEXIST;
+                goto err_unpin;
+            }
+
+            dev_priv->mm.aliasing_ppgtt = ppgtt;
+        }
+    } else if (USES_PPGTT(dev)) {
+        /* For platforms which only have aliasing PPGTT, we fake the
+         * address space and refcounting. */
+        ctx->vm = &dev_priv->mm.aliasing_ppgtt->base;
+        kref_get(&dev_priv->mm.aliasing_ppgtt->ref);
+    } else
+        ctx->vm = &dev_priv->gtt.base;
+
+    return ctx;
 
 err_unpin:
-    i915_gem_object_unpin(ctx->obj);
+    if (is_global_default_ctx)
+        i915_gem_object_ggtt_unpin(ctx->obj);
 err_destroy:
     i915_gem_context_unreference(ctx);
-    return ret;
+    return ERR_PTR(ret);
+}
+
+void i915_gem_context_reset(struct drm_device *dev)
+{
+    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct intel_ring_buffer *ring;
+    int i;
+
+    if (!HAS_HW_CONTEXTS(dev))
+        return;
+
+    /* Prevent the hardware from restoring the last context (which hung) on
+     * the next switch */
+    for (i = 0; i < I915_NUM_RINGS; i++) {
+        struct i915_hw_context *dctx;
+        if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
+            continue;
+
+        /* Do a fake switch to the default context */
+        ring = &dev_priv->ring[i];
+        dctx = ring->default_context;
+        if (WARN_ON(!dctx))
+            continue;
+
+        if (!ring->last_context)
+            continue;
+
+        if (ring->last_context == dctx)
+            continue;
+
+        if (i == RCS) {
+            WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj,
+                              get_context_alignment(dev),
+                              false, false));
+            /* Fake a finish/inactive */
+            dctx->obj->base.write_domain = 0;
+            dctx->obj->active = 0;
+        }
+
+        i915_gem_context_unreference(ring->last_context);
+        i915_gem_context_reference(dctx);
+        ring->last_context = dctx;
+    }
 }
 
 int i915_gem_context_init(struct drm_device *dev)
 {
     struct drm_i915_private *dev_priv = dev->dev_private;
-    int ret;
+    struct intel_ring_buffer *ring;
+    int i;
 
     if (!HAS_HW_CONTEXTS(dev))
         return 0;
 
-    /* If called from reset, or thaw... we've been here already */
-    if (dev_priv->ring[RCS].default_context)
+    /* Init should only be called once per module load. Eventually the
+     * restriction on the context_disabled check can be loosened. */
+    if (WARN_ON(dev_priv->ring[RCS].default_context))
         return 0;
 
     dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
···
         return -E2BIG;
     }
 
-    ret = create_default_context(dev_priv);
-    if (ret) {
-        DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %d\n",
-                 ret);
-        return ret;
+    dev_priv->ring[RCS].default_context =
+        i915_gem_create_context(dev, NULL, USES_PPGTT(dev));
+
+    if (IS_ERR_OR_NULL(dev_priv->ring[RCS].default_context)) {
+        DRM_DEBUG_DRIVER("Disabling HW Contexts; create failed %ld\n",
+                 PTR_ERR(dev_priv->ring[RCS].default_context));
+        return PTR_ERR(dev_priv->ring[RCS].default_context);
+    }
+
+    for (i = RCS + 1; i < I915_NUM_RINGS; i++) {
+        if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
+            continue;
+
+        ring = &dev_priv->ring[i];
+
+        /* NB: RCS will hold a ref for all rings */
+        ring->default_context = dev_priv->ring[RCS].default_context;
     }
 
     DRM_DEBUG_DRIVER("HW context support initialized\n");
···
 {
     struct drm_i915_private *dev_priv = dev->dev_private;
     struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
+    int i;
 
     if (!HAS_HW_CONTEXTS(dev))
         return;
···
     if (dev_priv->ring[RCS].last_context == dctx) {
         /* Fake switch to NULL context */
         WARN_ON(dctx->obj->active);
-        i915_gem_object_unpin(dctx->obj);
+        i915_gem_object_ggtt_unpin(dctx->obj);
         i915_gem_context_unreference(dctx);
+        dev_priv->ring[RCS].last_context = NULL;
     }
 
-    i915_gem_object_unpin(dctx->obj);
+    for (i = 0; i < I915_NUM_RINGS; i++) {
+        struct intel_ring_buffer *ring = &dev_priv->ring[i];
+        if (!(INTEL_INFO(dev)->ring_mask & (1<<i)))
+            continue;
+
+        if (ring->last_context)
+            i915_gem_context_unreference(ring->last_context);
+
+        ring->default_context = NULL;
+        ring->last_context = NULL;
+    }
+
+    i915_gem_object_ggtt_unpin(dctx->obj);
     i915_gem_context_unreference(dctx);
-    dev_priv->ring[RCS].default_context = NULL;
-    dev_priv->ring[RCS].last_context = NULL;
+    dev_priv->mm.aliasing_ppgtt = NULL;
+}
+
+int i915_gem_context_enable(struct drm_i915_private *dev_priv)
+{
+    struct intel_ring_buffer *ring;
+    int ret, i;
+
+    if (!HAS_HW_CONTEXTS(dev_priv->dev))
+        return 0;
+
+    /* This is the only place the aliasing PPGTT gets enabled, which means
+     * it has to happen before we bail on reset */
+    if (dev_priv->mm.aliasing_ppgtt) {
+        struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+        ppgtt->enable(ppgtt);
+    }
+
+    /* FIXME: We should make this work, even in reset */
+    if (i915_reset_in_progress(&dev_priv->gpu_error))
+        return 0;
+
+    BUG_ON(!dev_priv->ring[RCS].default_context);
+
+    for_each_ring(ring, dev_priv, i) {
+        ret = do_switch(ring, ring->default_context);
+        if (ret)
+            return ret;
+    }
+
+    return 0;
 }
 
 static int context_idr_cleanup(int id, void *p, void *data)
 {
     struct i915_hw_context *ctx = p;
 
-    BUG_ON(id == DEFAULT_CONTEXT_ID);
+    /* Ignore the default context because close will handle it */
+    if (i915_gem_context_is_default(ctx))
+        return 0;
 
     i915_gem_context_unreference(ctx);
     return 0;
 }
 
-struct i915_ctx_hang_stats *
-i915_gem_context_get_hang_stats(struct drm_device *dev,
-                struct drm_file *file,
-                u32 id)
+int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
 {
     struct drm_i915_file_private *file_priv = file->driver_priv;
-    struct i915_hw_context *ctx;
+    struct drm_i915_private *dev_priv = dev->dev_private;
 
-    if (id == DEFAULT_CONTEXT_ID)
-        return &file_priv->hang_stats;
+    if (!HAS_HW_CONTEXTS(dev)) {
+        /* Cheat for hang stats */
+        file_priv->private_default_ctx =
+            kzalloc(sizeof(struct i915_hw_context), GFP_KERNEL);
 
-    if (!HAS_HW_CONTEXTS(dev))
-        return ERR_PTR(-ENOENT);
+        if (file_priv->private_default_ctx == NULL)
+            return -ENOMEM;
 
-    ctx = i915_gem_context_get(file->driver_priv, id);
-    if (ctx == NULL)
-        return ERR_PTR(-ENOENT);
+        file_priv->private_default_ctx->vm = &dev_priv->gtt.base;
+        return 0;
+    }
 
-    return &ctx->hang_stats;
+    idr_init(&file_priv->context_idr);
+
+    mutex_lock(&dev->struct_mutex);
+    file_priv->private_default_ctx =
+        i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
+    mutex_unlock(&dev->struct_mutex);
+
+    if (IS_ERR(file_priv->private_default_ctx)) {
+        idr_destroy(&file_priv->context_idr);
+        return PTR_ERR(file_priv->private_default_ctx);
+    }
+
+    return 0;
 }
 
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
 {
     struct drm_i915_file_private *file_priv = file->driver_priv;
 
+    if (!HAS_HW_CONTEXTS(dev)) {
+        kfree(file_priv->private_default_ctx);
+        return;
+    }
+
     idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
+    i915_gem_context_unreference(file_priv->private_default_ctx);
     idr_destroy(&file_priv->context_idr);
 }
 
-static struct i915_hw_context *
+struct i915_hw_context *
 i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
 {
-    return (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
+    struct i915_hw_context *ctx;
+
+    if (!HAS_HW_CONTEXTS(file_priv->dev_priv->dev))
+        return file_priv->private_default_ctx;
+
+    ctx = (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
+    if (!ctx)
+        return ERR_PTR(-ENOENT);
+
+    return ctx;
 }
 
 static inline int
···
             MI_SAVE_EXT_STATE_EN |
             MI_RESTORE_EXT_STATE_EN |
             hw_flags);
-    /* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP */
+    /*
+     * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
+     * WaMiSetContext_Hang:snb,ivb,vlv
+     */
     intel_ring_emit(ring, MI_NOOP);
 
     if (IS_GEN7(ring->dev))
···
     return ret;
 }
 
-static int do_switch(struct i915_hw_context *to)
+static int do_switch(struct intel_ring_buffer *ring,
+             struct i915_hw_context *to)
 {
-    struct intel_ring_buffer *ring = to->ring;
+    struct drm_i915_private *dev_priv = ring->dev->dev_private;
     struct i915_hw_context *from = ring->last_context;
+    struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
     u32 hw_flags = 0;
     int ret, i;
 
-    BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);
+    if (from != NULL && ring == &dev_priv->ring[RCS]) {
+        BUG_ON(from->obj == NULL);
+        BUG_ON(!i915_gem_obj_is_pinned(from->obj));
+    }
 
-    if (from == to && !to->remap_slice)
+    if (from == to && from->last_ring == ring && !to->remap_slice)
         return 0;
 
-    ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
-    if (ret)
-        return ret;
+    /* Trying to pin first makes error handling easier. */
+    if (ring == &dev_priv->ring[RCS]) {
+        ret = i915_gem_obj_ggtt_pin(to->obj,
+                        get_context_alignment(ring->dev),
+                        false, false);
+        if (ret)
+            return ret;
+    }
 
     /*
      * Pin can switch back to the default context if we end up calling into
···
      * switches to the default context. Hence we need to reload from here.
      */
     from = ring->last_context;
+
+    if (USES_FULL_PPGTT(ring->dev)) {
+        ret = ppgtt->switch_mm(ppgtt, ring, false);
+        if (ret)
+            goto unpin_out;
+    }
+
+    if (ring != &dev_priv->ring[RCS]) {
+        if (from)
+            i915_gem_context_unreference(from);
+        goto done;
+    }
 
     /*
      * Clear this page out of any CPU caches for coherent swap-in/out. Note
···
      * XXX: We need a real interface to do this instead of trickery.
      */
     ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
-    if (ret) {
-        i915_gem_object_unpin(to->obj);
-        return ret;
+    if (ret)
+        goto unpin_out;
+
+    if (!to->obj->has_global_gtt_mapping) {
+        struct i915_vma *vma = i915_gem_obj_to_vma(to->obj,
+                               &dev_priv->gtt.base);
+        vma->bind_vma(vma, to->obj->cache_level, GLOBAL_BIND);
     }
 
-    if (!to->obj->has_global_gtt_mapping)
-        i915_gem_gtt_bind_object(to->obj, to->obj->cache_level);
-
-    if (!to->is_initialized || is_default_context(to))
+    if (!to->is_initialized || i915_gem_context_is_default(to))
         hw_flags |= MI_RESTORE_INHIBIT;
 
     ret = mi_set_context(ring, to, hw_flags);
-    if (ret) {
-        i915_gem_object_unpin(to->obj);
-        return ret;
-    }
+    if (ret)
+        goto unpin_out;
 
     for (i = 0; i < MAX_L3_SLICES; i++) {
         if (!(to->remap_slice & (1<<i)))
···
         BUG_ON(from->obj->ring != ring);
 
         /* obj is kept alive until the next request by its active ref */
-        i915_gem_object_unpin(from->obj);
+        i915_gem_object_ggtt_unpin(from->obj);
         i915_gem_context_unreference(from);
     }
 
-    i915_gem_context_reference(to);
-    ring->last_context = to;
     to->is_initialized = true;
 
+done:
+    i915_gem_context_reference(to);
+    ring->last_context = to;
+    to->last_ring = ring;
+
     return 0;
+
+unpin_out:
+    if (ring->id == RCS)
+        i915_gem_object_ggtt_unpin(to->obj);
+    return ret;
 }
 
 /**
···
  */
 int i915_switch_context(struct intel_ring_buffer *ring,
             struct drm_file *file,
-            int to_id)
+            struct i915_hw_context *to)
 {
     struct drm_i915_private *dev_priv = ring->dev->dev_private;
-    struct i915_hw_context *to;
-
-    if (!HAS_HW_CONTEXTS(ring->dev))
-        return 0;
 
     WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
-    if (ring != &dev_priv->ring[RCS])
+    BUG_ON(file && to == NULL);
+
+    /* We have the fake context, but don't supports switching. */
+    if (!HAS_HW_CONTEXTS(ring->dev))
         return 0;
 
-    if (to_id == DEFAULT_CONTEXT_ID) {
-        to = ring->default_context;
-    } else {
-        if (file == NULL)
-            return -EINVAL;
-
-        to = i915_gem_context_get(file->driver_priv, to_id);
-        if (to == NULL)
-            return -ENOENT;
-    }
-
-    return do_switch(to);
+    return do_switch(ring, to);
 }
 
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
···
     if (ret)
         return ret;
 
-    ctx = create_hw_context(dev, file_priv);
+    ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
     mutex_unlock(&dev->struct_mutex);
     if (IS_ERR(ctx))
         return PTR_ERR(ctx);
···
     if (!(dev->driver->driver_features & DRIVER_GEM))
         return -ENODEV;
 
+    if (args->ctx_id == DEFAULT_CONTEXT_ID)
+        return -ENOENT;
+
     ret = i915_mutex_lock_interruptible(dev);
     if (ret)
         return ret;
 
     ctx = i915_gem_context_get(file_priv, args->ctx_id);
-    if (!ctx) {
+    if (IS_ERR(ctx)) {
         mutex_unlock(&dev->struct_mutex);
-        return -ENOENT;
+        return PTR_ERR(ctx);
     }
 
     idr_remove(&ctx->file_priv->context_idr, ctx->id);
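A context now carries its own address space (ctx->vm), and on aliasing-only platforms several contexts share the one aliasing PPGTT via kref_get()/kref_put(). A toy userspace model of that sharing, with illustrative stand-in types and a plain counter in place of the kernel's kref:

    #include <stdio.h>
    #include <stdlib.h>

    struct toy_vm {
        int refcount;   /* kref analogue */
    };

    struct toy_ctx {
        struct toy_vm *vm;
    };

    static struct toy_vm *vm_get(struct toy_vm *vm)
    {
        vm->refcount++;             /* kref_get() analogue */
        return vm;
    }

    static void vm_put(struct toy_vm *vm)
    {
        if (--vm->refcount == 0)    /* kref_put() releases on last ref */
            free(vm);
    }

    int main(void)
    {
        struct toy_vm *aliasing = malloc(sizeof(*aliasing));
        aliasing->refcount = 1;     /* held by the default context */

        /* A second context on an aliasing-only platform shares the VM. */
        struct toy_ctx ctx2 = { .vm = vm_get(aliasing) };

        vm_put(ctx2.vm);    /* context destruction drops its reference */
        vm_put(aliasing);   /* the default context drops the last one */
        printf("done\n");
        return 0;
    }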
+38 -11
drivers/gpu/drm/i915/i915_gem_evict.c
···
 static bool
 mark_free(struct i915_vma *vma, struct list_head *unwind)
 {
-    if (vma->obj->pin_count)
+    if (vma->pin_count)
         return false;
 
     if (WARN_ON(!list_empty(&vma->exec_list)))
···
     return drm_mm_scan_add_block(&vma->node);
 }
 
+/**
+ * i915_gem_evict_something - Evict vmas to make room for binding a new one
+ * @dev: drm_device
+ * @vm: address space to evict from
+ * @size: size of the desired free space
+ * @alignment: alignment constraint of the desired free space
+ * @cache_level: cache_level for the desired space
+ * @mappable: whether the free space must be mappable
+ * @nonblocking: whether evicting active objects is allowed or not
+ *
+ * This function will try to evict vmas until a free space satisfying the
+ * requirements is found. Callers must check first whether any such hole exists
+ * already before calling this function.
+ *
+ * This function is used by the object/vma binding code.
+ *
+ * To clarify: This is for freeing up virtual address space, not for freeing
+ * memory in e.g. the shrinker.
+ */
 int
 i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
              int min_size, unsigned alignment, unsigned cache_level,
···
 }
 
 /**
- * i915_gem_evict_vm - Try to free up VM space
+ * i915_gem_evict_vm - Evict all idle vmas from a vm
  *
- * @vm: Address space to evict from
+ * @vm: Address space to cleanse
  * @do_idle: Boolean directing whether to idle first.
  *
- * VM eviction is about freeing up virtual address space. If one wants fine
- * grained eviction, they should see evict something for more details. In terms
- * of freeing up actual system memory, this function may not accomplish the
- * desired result. An object may be shared in multiple address space, and this
- * function will not assert those objects be freed.
+ * This function evicts all idles vmas from a vm. If all unpinned vmas should be
+ * evicted the @do_idle needs to be set to true.
  *
- * Using do_idle will result in a more complete eviction because it retires, and
- * inactivates current BOs.
+ * This is used by the execbuf code as a last-ditch effort to defragment the
+ * address space.
+ *
+ * To clarify: This is for freeing up virtual address space, not for freeing
+ * memory in e.g. the shrinker.
  */
 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
 {
···
     }
 
     list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
-        if (vma->obj->pin_count == 0)
+        if (vma->pin_count == 0)
             WARN_ON(i915_vma_unbind(vma));
 
     return 0;
 }
 
+/**
+ * i915_gem_evict_everything - Try to evict all objects
+ * @dev: Device to evict objects for
+ *
+ * This functions tries to evict all gem objects from all address spaces. Used
+ * by the shrinker as a last-ditch effort and for suspend, before releasing the
+ * backing storage of all unbound objects.
+ */
 int
 i915_gem_evict_everything(struct drm_device *dev)
 {
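A toy model of the eviction rule the new kerneldoc describes: walk the inactive list, skip VMAs whose (now per-VMA) pin count is non-zero, and unbind until the requested span of address space is free. All names here are illustrative stand-ins; only the skip-pinned rule is taken from the diff:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct toy_vma {
        size_t size;
        int pin_count;   /* per-VMA pin count, as in mark_free() */
        bool bound;
    };

    static size_t evict_something(struct toy_vma *inactive, size_t n,
                                  size_t need)
    {
        size_t freed = 0;

        for (size_t i = 0; i < n && freed < need; i++) {
            if (inactive[i].pin_count)   /* pinned VMAs are never evicted */
                continue;
            inactive[i].bound = false;   /* i915_vma_unbind() analogue */
            freed += inactive[i].size;
        }
        return freed;
    }

    int main(void)
    {
        struct toy_vma list[] = {
            { .size = 4096, .pin_count = 1, .bound = true },  /* stays */
            { .size = 8192, .pin_count = 0, .bound = true },  /* evicted */
        };
        printf("freed %zu bytes\n", evict_something(list, 2, 8192));
        return 0;
    }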
+84 -80
drivers/gpu/drm/i915/i915_gem_execbuffer.c
···
                struct i915_address_space *vm,
                struct drm_file *file)
 {
+    struct drm_i915_private *dev_priv = vm->dev->dev_private;
     struct drm_i915_gem_object *obj;
     struct list_head objects;
     int i, ret;
···
     i = 0;
     while (!list_empty(&objects)) {
         struct i915_vma *vma;
+        struct i915_address_space *bind_vm = vm;
+
+        if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT &&
+            USES_FULL_PPGTT(vm->dev)) {
+            ret = -EINVAL;
+            goto err;
+        }
+
+        /* If we have secure dispatch, or the userspace assures us that
+         * they know what they're doing, use the GGTT VM.
+         */
+        if (((args->flags & I915_EXEC_SECURE) &&
+            (i == (args->buffer_count - 1))))
+            bind_vm = &dev_priv->gtt.base;
 
         obj = list_first_entry(&objects,
                        struct drm_i915_gem_object,
···
          * from the (obj, vm) we don't run the risk of creating
          * duplicated vmas for the same vm.
          */
-        vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
+        vma = i915_gem_obj_lookup_or_create_vma(obj, bind_vm);
         if (IS_ERR(vma)) {
             DRM_DEBUG("Failed to lookup VMA\n");
             ret = PTR_ERR(vma);
···
         i915_gem_object_unpin_fence(obj);
 
     if (entry->flags & __EXEC_OBJECT_HAS_PIN)
-        i915_gem_object_unpin(obj);
+        vma->pin_count--;
 
     entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
 }
···
 static int
 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
                    struct eb_vmas *eb,
-                   struct drm_i915_gem_relocation_entry *reloc,
-                   struct i915_address_space *vm)
+                   struct drm_i915_gem_relocation_entry *reloc)
 {
     struct drm_device *dev = obj->base.dev;
     struct drm_gem_object *target_obj;
···
     if (unlikely(IS_GEN6(dev) &&
         reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
         !target_i915_obj->has_global_gtt_mapping)) {
-        i915_gem_gtt_bind_object(target_i915_obj,
-                     target_i915_obj->cache_level);
+        struct i915_vma *vma =
+            list_first_entry(&target_i915_obj->vma_list,
+                     typeof(*vma), vma_link);
+        vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
     }
 
     /* Validate that the target is in a valid r/w GPU domain */
···
     do {
         u64 offset = r->presumed_offset;
 
-        ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r,
-                             vma->vm);
+        ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
         if (ret)
             return ret;
···
     int i, ret;
 
     for (i = 0; i < entry->relocation_count; i++) {
-        ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i],
-                             vma->vm);
+        ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
         if (ret)
             return ret;
     }
···
                 struct intel_ring_buffer *ring,
                 bool *need_reloc)
 {
-    struct drm_i915_private *dev_priv = ring->dev->dev_private;
+    struct drm_i915_gem_object *obj = vma->obj;
     struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
     bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
     bool need_fence, need_mappable;
-    struct drm_i915_gem_object *obj = vma->obj;
+    u32 flags = (entry->flags & EXEC_OBJECT_NEEDS_GTT) &&
+        !vma->obj->has_global_gtt_mapping ? GLOBAL_BIND : 0;
     int ret;
 
     need_fence =
···
         }
     }
 
-    /* Ensure ppgtt mapping exists if needed */
-    if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
-        i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
-                       obj, obj->cache_level);
-
-        obj->has_aliasing_ppgtt_mapping = 1;
-    }
-
     if (entry->offset != vma->node.start) {
         entry->offset = vma->node.start;
         *need_reloc = true;
···
         obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
     }
 
-    if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
-        !obj->has_global_gtt_mapping)
-        i915_gem_gtt_bind_object(obj, obj->cache_level);
+    vma->bind_vma(vma, obj->cache_level, flags);
 
     return 0;
 }
···
     if (!access_ok(VERIFY_WRITE, ptr, length))
         return -EFAULT;
 
-    if (likely(!i915_prefault_disable)) {
+    if (likely(!i915.prefault_disable)) {
         if (fault_in_multipages_readable(ptr, length))
             return -EFAULT;
     }
···
     return 0;
 }
 
-static int
+static struct i915_hw_context *
 i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
-              const u32 ctx_id)
+              struct intel_ring_buffer *ring, const u32 ctx_id)
 {
+    struct i915_hw_context *ctx = NULL;
     struct i915_ctx_hang_stats *hs;
 
-    hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
-    if (IS_ERR(hs))
-        return PTR_ERR(hs);
+    if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID)
+        return ERR_PTR(-EINVAL);
 
+    ctx = i915_gem_context_get(file->driver_priv, ctx_id);
+    if (IS_ERR(ctx))
+        return ctx;
+
+    hs = &ctx->hang_stats;
     if (hs->banned) {
         DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
-        return -EIO;
+        return ERR_PTR(-EIO);
     }
 
-    return 0;
+    return ctx;
 }
 
 static void
···
         if (obj->base.write_domain) {
             obj->dirty = 1;
             obj->last_write_seqno = intel_ring_get_seqno(ring);
-            if (obj->pin_count) /* check for potential scanout */
+            /* check for potential scanout */
+            if (i915_gem_obj_ggtt_bound(obj) &&
+                i915_gem_obj_to_ggtt(obj)->pin_count)
                 intel_mark_fb_busy(obj, ring);
         }
···
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                struct drm_file *file,
                struct drm_i915_gem_execbuffer2 *args,
-               struct drm_i915_gem_exec_object2 *exec,
-               struct i915_address_space *vm)
+               struct drm_i915_gem_exec_object2 *exec)
 {
     drm_i915_private_t *dev_priv = dev->dev_private;
     struct eb_vmas *eb;
     struct drm_i915_gem_object *batch_obj;
     struct drm_clip_rect *cliprects = NULL;
     struct intel_ring_buffer *ring;
+    struct i915_hw_context *ctx;
+    struct i915_address_space *vm;
     const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
-    u32 exec_start, exec_len;
+    u32 exec_start = args->batch_start_offset, exec_len;
     u32 mask, flags;
     int ret, mode, i;
     bool need_relocs;
···
     if (args->flags & I915_EXEC_IS_PINNED)
         flags |= I915_DISPATCH_PINNED;
 
-    switch (args->flags & I915_EXEC_RING_MASK) {
-    case I915_EXEC_DEFAULT:
-    case I915_EXEC_RENDER:
-        ring = &dev_priv->ring[RCS];
-        break;
-    case I915_EXEC_BSD:
-        ring = &dev_priv->ring[VCS];
-        if (ctx_id != DEFAULT_CONTEXT_ID) {
-            DRM_DEBUG("Ring %s doesn't support contexts\n",
-                  ring->name);
-            return -EPERM;
-        }
-        break;
-    case I915_EXEC_BLT:
-        ring = &dev_priv->ring[BCS];
-        if (ctx_id != DEFAULT_CONTEXT_ID) {
-            DRM_DEBUG("Ring %s doesn't support contexts\n",
-                  ring->name);
-            return -EPERM;
-        }
-        break;
-    case I915_EXEC_VEBOX:
-        ring = &dev_priv->ring[VECS];
-        if (ctx_id != DEFAULT_CONTEXT_ID) {
-            DRM_DEBUG("Ring %s doesn't support contexts\n",
-                  ring->name);
-            return -EPERM;
-        }
-        break;
-
-    default:
+    if ((args->flags & I915_EXEC_RING_MASK) > I915_NUM_RINGS) {
         DRM_DEBUG("execbuf with unknown ring: %d\n",
               (int)(args->flags & I915_EXEC_RING_MASK));
         return -EINVAL;
     }
+
+    if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
+        ring = &dev_priv->ring[RCS];
+    else
+        ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
+
     if (!intel_ring_initialized(ring)) {
         DRM_DEBUG("execbuf with invalid ring: %d\n",
               (int)(args->flags & I915_EXEC_RING_MASK));
···
         goto pre_mutex_err;
     }
 
-    ret = i915_gem_validate_context(dev, file, ctx_id);
-    if (ret) {
+    ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
+    if (IS_ERR(ctx)) {
         mutex_unlock(&dev->struct_mutex);
+        ret = PTR_ERR(ctx);
         goto pre_mutex_err;
     }
 
+    i915_gem_context_reference(ctx);
+
+    vm = ctx->vm;
+    if (!USES_FULL_PPGTT(dev))
+        vm = &dev_priv->gtt.base;
+
     eb = eb_create(args);
     if (eb == NULL) {
···
     /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
      * batch" bit. Hence we need to pin secure batches into the global gtt.
      * hsw should have this fixed, but bdw mucks it up again. */
-    if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
-        i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
+    if (flags & I915_DISPATCH_SECURE &&
+        !batch_obj->has_global_gtt_mapping) {
+        /* When we have multiple VMs, we'll need to make sure that we
+         * allocate space first */
+        struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj);
+        BUG_ON(!vma);
+        vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
+    }
+
+    if (flags & I915_DISPATCH_SECURE)
+        exec_start += i915_gem_obj_ggtt_offset(batch_obj);
+    else
+        exec_start += i915_gem_obj_offset(batch_obj, vm);
 
     ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
     if (ret)
         goto err;
 
-    ret = i915_switch_context(ring, file, ctx_id);
+    ret = i915_switch_context(ring, file, ctx);
     if (ret)
         goto err;
···
         goto err;
     }
 
-    exec_start = i915_gem_obj_offset(batch_obj, vm) +
-        args->batch_start_offset;
+
     exec_len = args->batch_len;
     if (cliprects) {
         for (i = 0; i < args->num_cliprects; i++) {
···
     i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
 
 err:
+    /* the request owns the ref now */
+    i915_gem_context_unreference(ctx);
     eb_destroy(eb);
 
     mutex_unlock(&dev->struct_mutex);
···
 i915_gem_execbuffer(struct drm_device *dev, void *data,
             struct drm_file *file)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
     struct drm_i915_gem_execbuffer *args = data;
     struct drm_i915_gem_execbuffer2 exec2;
     struct drm_i915_gem_exec_object *exec_list = NULL;
···
     exec2.flags = I915_EXEC_RENDER;
     i915_execbuffer2_set_context_id(exec2, 0);
 
-    ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list,
-                     &dev_priv->gtt.base);
+    ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
     if (!ret) {
         /* Copy the new buffer offsets back to the user's exec list. */
         for (i = 0; i < args->buffer_count; i++)
···
 i915_gem_execbuffer2(struct drm_device *dev, void *data,
              struct drm_file *file)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
     struct drm_i915_gem_execbuffer2 *args = data;
     struct drm_i915_gem_exec_object2 *exec2_list = NULL;
     int ret;
···
         return -EFAULT;
     }
 
-    ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list,
-                     &dev_priv->gtt.base);
+    ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
     if (!ret) {
         /* Copy the new buffer offsets back to the user's exec list. */
         ret = copy_to_user(to_user_ptr(args->buffers_ptr),
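The per-ring switch statement in i915_gem_do_execbuffer() collapses into index arithmetic: execbuf ring values 1..N map to rings[0..N-1], with 0 meaning "default" (the render ring). A small sketch of that mapping, with illustrative constant values (the real I915_EXEC_* definitions live in the uapi header):

    #include <stdio.h>

    #define I915_EXEC_RING_MASK 0x7   /* illustrative */
    #define I915_EXEC_DEFAULT   0
    #define I915_NUM_RINGS      4

    static const char *rings[I915_NUM_RINGS] = { "RCS", "VCS", "BCS", "VECS" };

    static const char *pick_ring(unsigned flags)
    {
        unsigned id = flags & I915_EXEC_RING_MASK;

        if (id > I915_NUM_RINGS)      /* mirrors the diff's range check */
            return NULL;
        if (id == I915_EXEC_DEFAULT)
            return rings[0];          /* default goes to the render ring */
        return rings[id - 1];         /* 1..N -> rings[0..N-1] */
    }

    int main(void)
    {
        for (unsigned f = 0; f <= 4; f++)
            printf("flags=%u -> %s\n", f, pick_ring(f));
        return 0;
    }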
+519 -172
drivers/gpu/drm/i915/i915_gem_gtt.c
···
  *
  */
 
+#include <linux/seq_file.h>
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
···
 #define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
 #define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */
 #define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */
+
+static void ppgtt_bind_vma(struct i915_vma *vma,
+               enum i915_cache_level cache_level,
+               u32 flags);
+static void ppgtt_unbind_vma(struct i915_vma *vma);
+static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt);
 
 static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
                          enum i915_cache_level level,
···
 
 /* Broadwell Page Directory Pointer Descriptors */
 static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry,
-              uint64_t val)
+              uint64_t val, bool synchronous)
 {
+    struct drm_i915_private *dev_priv = ring->dev->dev_private;
     int ret;
 
     BUG_ON(entry >= 4);
+
+    if (synchronous) {
+        I915_WRITE(GEN8_RING_PDP_UDW(ring, entry), val >> 32);
+        I915_WRITE(GEN8_RING_PDP_LDW(ring, entry), (u32)val);
+        return 0;
+    }
 
     ret = intel_ring_begin(ring, 6);
     if (ret)
···
     return 0;
 }
 
-static int gen8_ppgtt_enable(struct drm_device *dev)
+static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
+              struct intel_ring_buffer *ring,
+              bool synchronous)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
-    struct intel_ring_buffer *ring;
-    struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-    int i, j, ret;
+    int i, ret;
 
     /* bit of a hack to find the actual last used pd */
     int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
 
-    for_each_ring(ring, dev_priv, j) {
-        I915_WRITE(RING_MODE_GEN7(ring),
-               _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-    }
-
     for (i = used_pd - 1; i >= 0; i--) {
         dma_addr_t addr = ppgtt->pd_dma_addr[i];
-        for_each_ring(ring, dev_priv, j) {
-            ret = gen8_write_pdp(ring, i, addr);
-            if (ret)
-                goto err_out;
-        }
+        ret = gen8_write_pdp(ring, i, addr, synchronous);
+        if (ret)
+            return ret;
     }
-    return 0;
 
-err_out:
-    for_each_ring(ring, dev_priv, j)
-        I915_WRITE(RING_MODE_GEN7(ring),
-               _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
-    return ret;
+    return 0;
 }
 
 static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
···
         container_of(vm, struct i915_hw_ppgtt, base);
     int i, j;
 
+    list_del(&vm->global_link);
     drm_mm_takedown(&vm->mm);
 
     for (i = 0; i < ppgtt->num_pd_pages ; i++) {
···
     ppgtt->num_pt_pages = 1 << get_order(num_pt_pages << PAGE_SHIFT);
     ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
     ppgtt->enable = gen8_ppgtt_enable;
+    ppgtt->switch_mm = gen8_mm_switch;
     ppgtt->base.clear_range = gen8_ppgtt_clear_range;
     ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
     ppgtt->base.cleanup = gen8_ppgtt_cleanup;
···
     return ret;
 }
 
+static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
+{
+    struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
+    struct i915_address_space *vm = &ppgtt->base;
+    gen6_gtt_pte_t __iomem *pd_addr;
+    gen6_gtt_pte_t scratch_pte;
+    uint32_t pd_entry;
+    int pte, pde;
+
+    scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
+
+    pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
+        ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
+
+    seq_printf(m, " VM %p (pd_offset %x-%x):\n", vm,
+           ppgtt->pd_offset, ppgtt->pd_offset + ppgtt->num_pd_entries);
+    for (pde = 0; pde < ppgtt->num_pd_entries; pde++) {
+        u32 expected;
+        gen6_gtt_pte_t *pt_vaddr;
+        dma_addr_t pt_addr = ppgtt->pt_dma_addr[pde];
+        pd_entry = readl(pd_addr + pde);
+        expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
+
+        if (pd_entry != expected)
+            seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
+                   pde,
+                   pd_entry,
+                   expected);
+        seq_printf(m, "\tPDE: %x\n", pd_entry);
+
+        pt_vaddr = kmap_atomic(ppgtt->pt_pages[pde]);
+        for (pte = 0; pte < I915_PPGTT_PT_ENTRIES; pte+=4) {
+            unsigned long va =
+                (pde * PAGE_SIZE * I915_PPGTT_PT_ENTRIES) +
+                (pte * PAGE_SIZE);
+            int i;
+            bool found = false;
+            for (i = 0; i < 4; i++)
+                if (pt_vaddr[pte + i] != scratch_pte)
+                    found = true;
+            if (!found)
+                continue;
+
+            seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
+            for (i = 0; i < 4; i++) {
+                if (pt_vaddr[pte + i] != scratch_pte)
+                    seq_printf(m, " %08x", pt_vaddr[pte + i]);
+                else
+                    seq_puts(m, " SCRATCH ");
+            }
+            seq_puts(m, "\n");
+        }
+        kunmap_atomic(pt_vaddr);
+    }
+}
+
 static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
 {
     struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
···
     readl(pd_addr);
 }
 
-static int gen6_ppgtt_enable(struct drm_device *dev)
+static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 {
-    drm_i915_private_t *dev_priv = dev->dev_private;
-    uint32_t pd_offset;
-    struct intel_ring_buffer *ring;
-    struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-    int i;
-
     BUG_ON(ppgtt->pd_offset & 0x3f);
 
-    gen6_write_pdes(ppgtt);
+    return (ppgtt->pd_offset / 64) << 16;
+}
 
-    pd_offset = ppgtt->pd_offset;
-    pd_offset /= 64; /* in cachelines, */
-    pd_offset <<= 16;
+static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
+             struct intel_ring_buffer *ring,
+             bool synchronous)
+{
+    struct drm_device *dev = ppgtt->base.dev;
+    struct drm_i915_private *dev_priv = dev->dev_private;
+    int ret;
 
-    if (INTEL_INFO(dev)->gen == 6) {
-        uint32_t ecochk, gab_ctl, ecobits;
-
-        ecobits = I915_READ(GAC_ECO_BITS);
-        I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
-               ECOBITS_PPGTT_CACHE64B);
-
-        gab_ctl = I915_READ(GAB_CTL);
-        I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
-
-        ecochk = I915_READ(GAM_ECOCHK);
-        I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
-               ECOCHK_PPGTT_CACHE64B);
-        I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
-    } else if (INTEL_INFO(dev)->gen >= 7) {
-        uint32_t ecochk, ecobits;
-
-        ecobits = I915_READ(GAC_ECO_BITS);
-        I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
-
-        ecochk = I915_READ(GAM_ECOCHK);
-        if (IS_HASWELL(dev)) {
-            ecochk |= ECOCHK_PPGTT_WB_HSW;
-        } else {
-            ecochk |= ECOCHK_PPGTT_LLC_IVB;
-            ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
-        }
-        I915_WRITE(GAM_ECOCHK, ecochk);
-        /* GFX_MODE is per-ring on gen7+ */
+    /* If we're in reset, we can assume the GPU is sufficiently idle to
+     * manually frob these bits. Ideally we could use the ring functions,
+     * except our error handling makes it quite difficult (can't use
+     * intel_ring_begin, ring->flush, or intel_ring_advance)
+     *
+     * FIXME: We should try not to special case reset
+     */
+    if (synchronous ||
+        i915_reset_in_progress(&dev_priv->gpu_error)) {
+        WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
+        I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+        I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+        POSTING_READ(RING_PP_DIR_BASE(ring));
+        return 0;
     }
+
+    /* NB: TLBs must be flushed and invalidated before a switch */
+    ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+    if (ret)
+        return ret;
+
+    ret = intel_ring_begin(ring, 6);
+    if (ret)
+        return ret;
+
+    intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
+    intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
+    intel_ring_emit(ring, PP_DIR_DCLV_2G);
+    intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
+    intel_ring_emit(ring, get_pd_offset(ppgtt));
+    intel_ring_emit(ring, MI_NOOP);
+    intel_ring_advance(ring);
+
+    return 0;
+}
+
+static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
+              struct intel_ring_buffer *ring,
+              bool synchronous)
+{
+    struct drm_device *dev = ppgtt->base.dev;
+    struct drm_i915_private *dev_priv = dev->dev_private;
+    int ret;
+
+    /* If we're in reset, we can assume the GPU is sufficiently idle to
+     * manually frob these bits. Ideally we could use the ring functions,
+     * except our error handling makes it quite difficult (can't use
+     * intel_ring_begin, ring->flush, or intel_ring_advance)
+     *
+     * FIXME: We should try not to special case reset
+     */
+    if (synchronous ||
+        i915_reset_in_progress(&dev_priv->gpu_error)) {
+        WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
+        I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+        I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+        POSTING_READ(RING_PP_DIR_BASE(ring));
+        return 0;
+    }
+
+    /* NB: TLBs must be flushed and invalidated before a switch */
+    ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+    if (ret)
+        return ret;
+
+    ret = intel_ring_begin(ring, 6);
+    if (ret)
+        return ret;
+
+    intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
+    intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
+    intel_ring_emit(ring, PP_DIR_DCLV_2G);
+    intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
+    intel_ring_emit(ring, get_pd_offset(ppgtt));
+    intel_ring_emit(ring, MI_NOOP);
+    intel_ring_advance(ring);
+
+    /* XXX: RCS is the only one to auto invalidate the TLBs?
*/ 576 + if (ring->id != RCS) { 577 + ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); 578 + if (ret) 579 + return ret; 580 + } 581 + 582 + return 0; 583 + } 584 + 585 + static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt, 586 + struct intel_ring_buffer *ring, 587 + bool synchronous) 588 + { 589 + struct drm_device *dev = ppgtt->base.dev; 590 + struct drm_i915_private *dev_priv = dev->dev_private; 591 + 592 + if (!synchronous) 593 + return 0; 594 + 595 + I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); 596 + I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt)); 597 + 598 + POSTING_READ(RING_PP_DIR_DCLV(ring)); 599 + 600 + return 0; 601 + } 602 + 603 + static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt) 604 + { 605 + struct drm_device *dev = ppgtt->base.dev; 606 + struct drm_i915_private *dev_priv = dev->dev_private; 607 + struct intel_ring_buffer *ring; 608 + int j, ret; 609 + 610 + for_each_ring(ring, dev_priv, j) { 611 + I915_WRITE(RING_MODE_GEN7(ring), 612 + _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); 613 + 614 + /* We promise to do a switch later with FULL PPGTT. If this is 615 + * aliasing, this is the one and only switch we'll do */ 616 + if (USES_FULL_PPGTT(dev)) 617 + continue; 618 + 619 + ret = ppgtt->switch_mm(ppgtt, ring, true); 620 + if (ret) 621 + goto err_out; 622 + } 623 + 624 + return 0; 625 + 626 + err_out: 627 + for_each_ring(ring, dev_priv, j) 628 + I915_WRITE(RING_MODE_GEN7(ring), 629 + _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE)); 630 + return ret; 631 + } 632 + 633 + static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt) 634 + { 635 + struct drm_device *dev = ppgtt->base.dev; 636 + drm_i915_private_t *dev_priv = dev->dev_private; 637 + struct intel_ring_buffer *ring; 638 + uint32_t ecochk, ecobits; 639 + int i; 640 + 641 + ecobits = I915_READ(GAC_ECO_BITS); 642 + I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B); 643 + 644 + ecochk = I915_READ(GAM_ECOCHK); 645 + if (IS_HASWELL(dev)) { 646 + ecochk |= ECOCHK_PPGTT_WB_HSW; 647 + } else { 648 + ecochk |= ECOCHK_PPGTT_LLC_IVB; 649 + ecochk &= ~ECOCHK_PPGTT_GFDT_IVB; 650 + } 651 + I915_WRITE(GAM_ECOCHK, ecochk); 588 652 589 653 for_each_ring(ring, dev_priv, i) { 590 - if (INTEL_INFO(dev)->gen >= 7) 591 - I915_WRITE(RING_MODE_GEN7(ring), 592 - _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); 654 + int ret; 655 + /* GFX_MODE is per-ring on gen7+ */ 656 + I915_WRITE(RING_MODE_GEN7(ring), 657 + _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); 593 658 594 - I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); 595 - I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset); 659 + /* We promise to do a switch later with FULL PPGTT. 
If this is 660 + * aliasing, this is the one and only switch we'll do */ 661 + if (USES_FULL_PPGTT(dev)) 662 + continue; 663 + 664 + ret = ppgtt->switch_mm(ppgtt, ring, true); 665 + if (ret) 666 + return ret; 596 667 } 668 + 669 + return 0; 670 + } 671 + 672 + static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt) 673 + { 674 + struct drm_device *dev = ppgtt->base.dev; 675 + drm_i915_private_t *dev_priv = dev->dev_private; 676 + struct intel_ring_buffer *ring; 677 + uint32_t ecochk, gab_ctl, ecobits; 678 + int i; 679 + 680 + ecobits = I915_READ(GAC_ECO_BITS); 681 + I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT | 682 + ECOBITS_PPGTT_CACHE64B); 683 + 684 + gab_ctl = I915_READ(GAB_CTL); 685 + I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT); 686 + 687 + ecochk = I915_READ(GAM_ECOCHK); 688 + I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B); 689 + 690 + I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); 691 + 692 + for_each_ring(ring, dev_priv, i) { 693 + int ret = ppgtt->switch_mm(ppgtt, ring, true); 694 + if (ret) 695 + return ret; 696 + } 697 + 597 698 return 0; 598 699 } 599 700 ··· 827 608 container_of(vm, struct i915_hw_ppgtt, base); 828 609 int i; 829 610 611 + list_del(&vm->global_link); 830 612 drm_mm_takedown(&ppgtt->base.mm); 613 + drm_mm_remove_node(&ppgtt->node); 831 614 832 615 if (ppgtt->pt_dma_addr) { 833 616 for (i = 0; i < ppgtt->num_pd_entries; i++) ··· 847 626 848 627 static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) 849 628 { 629 + #define GEN6_PD_ALIGN (PAGE_SIZE * 16) 630 + #define GEN6_PD_SIZE (GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE) 850 631 struct drm_device *dev = ppgtt->base.dev; 851 632 struct drm_i915_private *dev_priv = dev->dev_private; 852 - unsigned first_pd_entry_in_global_pt; 853 - int i; 854 - int ret = -ENOMEM; 633 + bool retried = false; 634 + int i, ret; 855 635 856 - /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024 857 - * entries. For aliasing ppgtt support we just steal them at the end for 858 - * now. */ 859 - first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt); 636 + /* PPGTT PDEs reside in the GGTT and consists of 512 entries. The 637 + * allocator works in address space sizes, so it's multiplied by page 638 + * size. We allocate at the top of the GTT to avoid fragmentation. 
639 + */ 640 + BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm)); 641 + alloc: 642 + ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm, 643 + &ppgtt->node, GEN6_PD_SIZE, 644 + GEN6_PD_ALIGN, 0, 645 + 0, dev_priv->gtt.base.total, 646 + DRM_MM_SEARCH_DEFAULT); 647 + if (ret == -ENOSPC && !retried) { 648 + ret = i915_gem_evict_something(dev, &dev_priv->gtt.base, 649 + GEN6_PD_SIZE, GEN6_PD_ALIGN, 650 + I915_CACHE_NONE, false, true); 651 + if (ret) 652 + return ret; 653 + 654 + retried = true; 655 + goto alloc; 656 + } 657 + 658 + if (ppgtt->node.start < dev_priv->gtt.mappable_end) 659 + DRM_DEBUG("Forced to use aperture for PDEs\n"); 860 660 861 661 ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode; 862 662 ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES; 863 - ppgtt->enable = gen6_ppgtt_enable; 663 + if (IS_GEN6(dev)) { 664 + ppgtt->enable = gen6_ppgtt_enable; 665 + ppgtt->switch_mm = gen6_mm_switch; 666 + } else if (IS_HASWELL(dev)) { 667 + ppgtt->enable = gen7_ppgtt_enable; 668 + ppgtt->switch_mm = hsw_mm_switch; 669 + } else if (IS_GEN7(dev)) { 670 + ppgtt->enable = gen7_ppgtt_enable; 671 + ppgtt->switch_mm = gen7_mm_switch; 672 + } else 673 + BUG(); 864 674 ppgtt->base.clear_range = gen6_ppgtt_clear_range; 865 675 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries; 866 676 ppgtt->base.cleanup = gen6_ppgtt_cleanup; ··· 900 648 ppgtt->base.total = GEN6_PPGTT_PD_ENTRIES * I915_PPGTT_PT_ENTRIES * PAGE_SIZE; 901 649 ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *), 902 650 GFP_KERNEL); 903 - if (!ppgtt->pt_pages) 651 + if (!ppgtt->pt_pages) { 652 + drm_mm_remove_node(&ppgtt->node); 904 653 return -ENOMEM; 654 + } 905 655 906 656 for (i = 0; i < ppgtt->num_pd_entries; i++) { 907 657 ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL); ··· 932 678 933 679 ppgtt->base.clear_range(&ppgtt->base, 0, 934 680 ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true); 681 + ppgtt->debug_dump = gen6_dump_ppgtt; 935 682 936 - ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t); 683 + DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n", 684 + ppgtt->node.size >> 20, 685 + ppgtt->node.start / PAGE_SIZE); 686 + ppgtt->pd_offset = 687 + ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t); 937 688 938 689 return 0; 939 690 ··· 955 696 __free_page(ppgtt->pt_pages[i]); 956 697 } 957 698 kfree(ppgtt->pt_pages); 699 + drm_mm_remove_node(&ppgtt->node); 958 700 959 701 return ret; 960 702 } 961 703 962 - static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) 704 + int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) 963 705 { 964 706 struct drm_i915_private *dev_priv = dev->dev_private; 965 - struct i915_hw_ppgtt *ppgtt; 966 - int ret; 967 - 968 - ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); 969 - if (!ppgtt) 970 - return -ENOMEM; 707 + int ret = 0; 971 708 972 709 ppgtt->base.dev = dev; 973 710 ··· 974 719 else 975 720 BUG(); 976 721 977 - if (ret) 978 - kfree(ppgtt); 979 - else { 980 - dev_priv->mm.aliasing_ppgtt = ppgtt; 722 + if (!ret) { 723 + struct drm_i915_private *dev_priv = dev->dev_private; 724 + kref_init(&ppgtt->ref); 981 725 drm_mm_init(&ppgtt->base.mm, ppgtt->base.start, 982 726 ppgtt->base.total); 727 + i915_init_vm(dev_priv, &ppgtt->base); 728 + if (INTEL_INFO(dev)->gen < 8) { 729 + gen6_write_pdes(ppgtt); 730 + DRM_DEBUG("Adding PPGTT at offset %x\n", 731 + ppgtt->pd_offset << 10); 732 + } 983 733 } 984 734 985 735 return ret; 986 736 } 987 737 988 - void i915_gem_cleanup_aliasing_ppgtt(struct 
drm_device *dev) 738 + static void 739 + ppgtt_bind_vma(struct i915_vma *vma, 740 + enum i915_cache_level cache_level, 741 + u32 flags) 989 742 { 990 - struct drm_i915_private *dev_priv = dev->dev_private; 991 - struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 743 + const unsigned long entry = vma->node.start >> PAGE_SHIFT; 992 744 993 - if (!ppgtt) 994 - return; 745 + WARN_ON(flags); 995 746 996 - ppgtt->base.cleanup(&ppgtt->base); 997 - dev_priv->mm.aliasing_ppgtt = NULL; 747 + vma->vm->insert_entries(vma->vm, vma->obj->pages, entry, cache_level); 998 748 } 999 749 1000 - void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, 1001 - struct drm_i915_gem_object *obj, 1002 - enum i915_cache_level cache_level) 750 + static void ppgtt_unbind_vma(struct i915_vma *vma) 1003 751 { 1004 - ppgtt->base.insert_entries(&ppgtt->base, obj->pages, 1005 - i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, 1006 - cache_level); 1007 - } 752 + const unsigned long entry = vma->node.start >> PAGE_SHIFT; 1008 753 1009 - void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, 1010 - struct drm_i915_gem_object *obj) 1011 - { 1012 - ppgtt->base.clear_range(&ppgtt->base, 1013 - i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, 1014 - obj->base.size >> PAGE_SHIFT, 1015 - true); 754 + vma->vm->clear_range(vma->vm, 755 + entry, 756 + vma->obj->base.size >> PAGE_SHIFT, 757 + true); 1016 758 } 1017 759 1018 760 extern int intel_iommu_gfx_mapped; ··· 1101 849 { 1102 850 struct drm_i915_private *dev_priv = dev->dev_private; 1103 851 struct drm_i915_gem_object *obj; 852 + struct i915_address_space *vm; 1104 853 1105 854 i915_check_and_clear_faults(dev); 1106 855 ··· 1112 859 true); 1113 860 1114 861 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 862 + struct i915_vma *vma = i915_gem_obj_to_vma(obj, 863 + &dev_priv->gtt.base); 864 + if (!vma) 865 + continue; 866 + 1115 867 i915_gem_clflush_object(obj, obj->pin_display); 1116 - i915_gem_gtt_bind_object(obj, obj->cache_level); 868 + /* The bind_vma code tries to be smart about tracking mappings. 869 + * Unfortunately above, we've just wiped out the mappings 870 + * without telling our object about it. So we need to fake it. 871 + */ 872 + obj->has_global_gtt_mapping = 0; 873 + vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND); 874 + } 875 + 876 + 877 + if (INTEL_INFO(dev)->gen >= 8) 878 + return; 879 + 880 + list_for_each_entry(vm, &dev_priv->vm_list, global_link) { 881 + /* TODO: Perhaps it shouldn't be gen6 specific */ 882 + if (i915_is_ggtt(vm)) { 883 + if (dev_priv->mm.aliasing_ppgtt) 884 + gen6_write_pdes(dev_priv->mm.aliasing_ppgtt); 885 + continue; 886 + } 887 + 888 + gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base)); 1117 889 } 1118 890 1119 891 i915_gem_chipset_flush(dev); ··· 1295 1017 readl(gtt_base); 1296 1018 } 1297 1019 1298 - static void i915_ggtt_insert_entries(struct i915_address_space *vm, 1299 - struct sg_table *st, 1300 - unsigned int pg_start, 1301 - enum i915_cache_level cache_level) 1020 + 1021 + static void i915_ggtt_bind_vma(struct i915_vma *vma, 1022 + enum i915_cache_level cache_level, 1023 + u32 unused) 1302 1024 { 1025 + const unsigned long entry = vma->node.start >> PAGE_SHIFT; 1303 1026 unsigned int flags = (cache_level == I915_CACHE_NONE) ? 
1304 1027 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; 1305 1028 1306 - intel_gtt_insert_sg_entries(st, pg_start, flags); 1307 - 1029 + BUG_ON(!i915_is_ggtt(vma->vm)); 1030 + intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags); 1031 + vma->obj->has_global_gtt_mapping = 1; 1308 1032 } 1309 1033 1310 1034 static void i915_ggtt_clear_range(struct i915_address_space *vm, ··· 1317 1037 intel_gtt_clear_range(first_entry, num_entries); 1318 1038 } 1319 1039 1320 - 1321 - void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, 1322 - enum i915_cache_level cache_level) 1040 + static void i915_ggtt_unbind_vma(struct i915_vma *vma) 1323 1041 { 1324 - struct drm_device *dev = obj->base.dev; 1325 - struct drm_i915_private *dev_priv = dev->dev_private; 1326 - const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT; 1042 + const unsigned int first = vma->node.start >> PAGE_SHIFT; 1043 + const unsigned int size = vma->obj->base.size >> PAGE_SHIFT; 1327 1044 1328 - dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages, 1329 - entry, 1330 - cache_level); 1331 - 1332 - obj->has_global_gtt_mapping = 1; 1045 + BUG_ON(!i915_is_ggtt(vma->vm)); 1046 + vma->obj->has_global_gtt_mapping = 0; 1047 + intel_gtt_clear_range(first, size); 1333 1048 } 1334 1049 1335 - void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) 1050 + static void ggtt_bind_vma(struct i915_vma *vma, 1051 + enum i915_cache_level cache_level, 1052 + u32 flags) 1336 1053 { 1337 - struct drm_device *dev = obj->base.dev; 1054 + struct drm_device *dev = vma->vm->dev; 1338 1055 struct drm_i915_private *dev_priv = dev->dev_private; 1339 - const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT; 1056 + struct drm_i915_gem_object *obj = vma->obj; 1057 + const unsigned long entry = vma->node.start >> PAGE_SHIFT; 1340 1058 1341 - dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, 1342 - entry, 1343 - obj->base.size >> PAGE_SHIFT, 1344 - true); 1059 + /* If there is no aliasing PPGTT, or the caller needs a global mapping, 1060 + * or we have a global mapping already but the cacheability flags have 1061 + * changed, set the global PTEs. 1062 + * 1063 + * If there is an aliasing PPGTT it is anecdotally faster, so use that 1064 + * instead if none of the above hold true. 1065 + * 1066 + * NB: A global mapping should only be needed for special regions like 1067 + * "gtt mappable", SNB errata, or if specified via special execbuf 1068 + * flags. At all other times, the GPU will use the aliasing PPGTT. 
1069 + */ 1070 + if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) { 1071 + if (!obj->has_global_gtt_mapping || 1072 + (cache_level != obj->cache_level)) { 1073 + vma->vm->insert_entries(vma->vm, obj->pages, entry, 1074 + cache_level); 1075 + obj->has_global_gtt_mapping = 1; 1076 + } 1077 + } 1345 1078 1346 - obj->has_global_gtt_mapping = 0; 1079 + if (dev_priv->mm.aliasing_ppgtt && 1080 + (!obj->has_aliasing_ppgtt_mapping || 1081 + (cache_level != obj->cache_level))) { 1082 + struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt; 1083 + appgtt->base.insert_entries(&appgtt->base, 1084 + vma->obj->pages, entry, cache_level); 1085 + vma->obj->has_aliasing_ppgtt_mapping = 1; 1086 + } 1087 + } 1088 + 1089 + static void ggtt_unbind_vma(struct i915_vma *vma) 1090 + { 1091 + struct drm_device *dev = vma->vm->dev; 1092 + struct drm_i915_private *dev_priv = dev->dev_private; 1093 + struct drm_i915_gem_object *obj = vma->obj; 1094 + const unsigned long entry = vma->node.start >> PAGE_SHIFT; 1095 + 1096 + if (obj->has_global_gtt_mapping) { 1097 + vma->vm->clear_range(vma->vm, entry, 1098 + vma->obj->base.size >> PAGE_SHIFT, 1099 + true); 1100 + obj->has_global_gtt_mapping = 0; 1101 + } 1102 + 1103 + if (obj->has_aliasing_ppgtt_mapping) { 1104 + struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt; 1105 + appgtt->base.clear_range(&appgtt->base, 1106 + entry, 1107 + obj->base.size >> PAGE_SHIFT, 1108 + true); 1109 + obj->has_aliasing_ppgtt_mapping = 0; 1110 + } 1347 1111 } 1348 1112 1349 1113 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj) ··· 1479 1155 ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true); 1480 1156 } 1481 1157 1482 - static bool 1483 - intel_enable_ppgtt(struct drm_device *dev) 1484 - { 1485 - if (i915_enable_ppgtt >= 0) 1486 - return i915_enable_ppgtt; 1487 - 1488 - #ifdef CONFIG_INTEL_IOMMU 1489 - /* Disable ppgtt on SNB if VT-d is on. */ 1490 - if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) 1491 - return false; 1492 - #endif 1493 - 1494 - return true; 1495 - } 1496 - 1497 1158 void i915_gem_init_global_gtt(struct drm_device *dev) 1498 1159 { 1499 1160 struct drm_i915_private *dev_priv = dev->dev_private; ··· 1487 1178 gtt_size = dev_priv->gtt.base.total; 1488 1179 mappable_size = dev_priv->gtt.mappable_end; 1489 1180 1490 - if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) { 1491 - int ret; 1492 - 1493 - if (INTEL_INFO(dev)->gen <= 7) { 1494 - /* PPGTT pdes are stolen from global gtt ptes, so shrink the 1495 - * aperture accordingly when using aliasing ppgtt. 
*/ 1496 - gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE; 1497 - } 1498 - 1499 - i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); 1500 - 1501 - ret = i915_gem_init_aliasing_ppgtt(dev); 1502 - if (!ret) 1503 - return; 1504 - 1505 - DRM_ERROR("Aliased PPGTT setup failed %d\n", ret); 1506 - drm_mm_takedown(&dev_priv->gtt.base.mm); 1507 - if (INTEL_INFO(dev)->gen < 8) 1508 - gtt_size += GEN6_PPGTT_PD_ENTRIES*PAGE_SIZE; 1509 - } 1510 1181 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); 1511 1182 } 1512 1183 ··· 1542 1253 if (bdw_gmch_ctl) 1543 1254 bdw_gmch_ctl = 1 << bdw_gmch_ctl; 1544 1255 if (bdw_gmch_ctl > 4) { 1545 - WARN_ON(!i915_preliminary_hw_support); 1256 + WARN_ON(!i915.preliminary_hw_support); 1546 1257 return 4<<20; 1547 1258 } 1548 1259 ··· 1727 1438 1728 1439 dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev); 1729 1440 dev_priv->gtt.base.clear_range = i915_ggtt_clear_range; 1730 - dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries; 1731 1441 1732 1442 if (unlikely(dev_priv->gtt.do_idle_maps)) 1733 1443 DRM_INFO("applying Ironlake quirks for intel_iommu\n"); ··· 1780 1492 DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20); 1781 1493 1782 1494 return 0; 1495 + } 1496 + 1497 + static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj, 1498 + struct i915_address_space *vm) 1499 + { 1500 + struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL); 1501 + if (vma == NULL) 1502 + return ERR_PTR(-ENOMEM); 1503 + 1504 + INIT_LIST_HEAD(&vma->vma_link); 1505 + INIT_LIST_HEAD(&vma->mm_list); 1506 + INIT_LIST_HEAD(&vma->exec_list); 1507 + vma->vm = vm; 1508 + vma->obj = obj; 1509 + 1510 + switch (INTEL_INFO(vm->dev)->gen) { 1511 + case 8: 1512 + case 7: 1513 + case 6: 1514 + if (i915_is_ggtt(vm)) { 1515 + vma->unbind_vma = ggtt_unbind_vma; 1516 + vma->bind_vma = ggtt_bind_vma; 1517 + } else { 1518 + vma->unbind_vma = ppgtt_unbind_vma; 1519 + vma->bind_vma = ppgtt_bind_vma; 1520 + } 1521 + break; 1522 + case 5: 1523 + case 4: 1524 + case 3: 1525 + case 2: 1526 + BUG_ON(!i915_is_ggtt(vm)); 1527 + vma->unbind_vma = i915_ggtt_unbind_vma; 1528 + vma->bind_vma = i915_ggtt_bind_vma; 1529 + break; 1530 + default: 1531 + BUG(); 1532 + } 1533 + 1534 + /* Keep GGTT vmas first to make debug easier */ 1535 + if (i915_is_ggtt(vm)) 1536 + list_add(&vma->vma_link, &obj->vma_list); 1537 + else 1538 + list_add_tail(&vma->vma_link, &obj->vma_list); 1539 + 1540 + return vma; 1541 + } 1542 + 1543 + struct i915_vma * 1544 + i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, 1545 + struct i915_address_space *vm) 1546 + { 1547 + struct i915_vma *vma; 1548 + 1549 + vma = i915_gem_obj_to_vma(obj, vm); 1550 + if (!vma) 1551 + vma = __i915_gem_vma_create(obj, vm); 1552 + 1553 + return vma; 1783 1554 }
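Note on the VMA bind/unbind refactor above: objects are no longer mapped through i915_gem_gtt_bind_object()/i915_ppgtt_bind_object(); callers obtain a VMA for the target address space and invoke its bind_vma/unbind_vma hooks, which __i915_gem_vma_create() wires up per generation. A minimal sketch of the intended call pattern, assuming the patch as shown (the wrapper function and its error handling are illustrative, not part of the series):

/* Sketch only: i915_gem_obj_lookup_or_create_vma() and vma->bind_vma()
 * are introduced by the patch above; this caller is hypothetical. */
static int example_bind(struct drm_i915_gem_object *obj,
			struct i915_address_space *vm,
			enum i915_cache_level cache_level,
			bool needs_global)
{
	struct i915_vma *vma = i915_gem_obj_lookup_or_create_vma(obj, vm);

	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* GLOBAL_BIND forces GGTT PTEs even when an aliasing PPGTT
	 * exists; otherwise ggtt_bind_vma() prefers the PPGTT mapping. */
	vma->bind_vma(vma, cache_level, needs_global ? GLOBAL_BIND : 0);
	return 0;
}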
+1 -1
drivers/gpu/drm/i915/i915_gem_tiling.c
··· 308 308 return -EINVAL; 309 309 } 310 310 311 - if (obj->pin_count || obj->framebuffer_references) { 311 + if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) { 312 312 drm_gem_object_unreference_unlocked(&obj->base); 313 313 return -EBUSY; 314 314 }
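The obj->pin_count check here (and in the debugfs and error-capture hunks) is replaced by i915_gem_obj_is_pinned(): with full PPGTT, pin state is tracked per VMA rather than per object. A plausible shape for the helper, assuming a vma->pin_count field added elsewhere in the series (the exact definition is not shown in this diff):

/* Assumed implementation sketch: an object counts as pinned if any of
 * its VMAs holds a pin. Field names follow the patch's conventions but
 * are not quoted from it. */
static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, vma_link)
		if (vma->pin_count > 0)
			return true;

	return false;
}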
+315 -127
drivers/gpu/drm/i915/i915_gpu_error.c
··· 238 238 239 239 static void i915_ring_error_state(struct drm_i915_error_state_buf *m, 240 240 struct drm_device *dev, 241 - struct drm_i915_error_state *error, 242 - unsigned ring) 241 + struct drm_i915_error_ring *ring) 243 242 { 244 - BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */ 245 - if (!error->ring[ring].valid) 243 + if (!ring->valid) 246 244 return; 247 245 248 - err_printf(m, "%s command stream:\n", ring_str(ring)); 249 - err_printf(m, " HEAD: 0x%08x\n", error->head[ring]); 250 - err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]); 251 - err_printf(m, " CTL: 0x%08x\n", error->ctl[ring]); 252 - err_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]); 253 - err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]); 254 - err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]); 255 - err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]); 246 + err_printf(m, " HEAD: 0x%08x\n", ring->head); 247 + err_printf(m, " TAIL: 0x%08x\n", ring->tail); 248 + err_printf(m, " CTL: 0x%08x\n", ring->ctl); 249 + err_printf(m, " HWS: 0x%08x\n", ring->hws); 250 + err_printf(m, " ACTHD: 0x%08x\n", ring->acthd); 251 + err_printf(m, " IPEIR: 0x%08x\n", ring->ipeir); 252 + err_printf(m, " IPEHR: 0x%08x\n", ring->ipehr); 253 + err_printf(m, " INSTDONE: 0x%08x\n", ring->instdone); 256 254 if (INTEL_INFO(dev)->gen >= 4) { 257 - err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr[ring]); 258 - err_printf(m, " BB_STATE: 0x%08x\n", error->bbstate[ring]); 259 - err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); 255 + err_printf(m, " BBADDR: 0x%08llx\n", ring->bbaddr); 256 + err_printf(m, " BB_STATE: 0x%08x\n", ring->bbstate); 257 + err_printf(m, " INSTPS: 0x%08x\n", ring->instps); 260 258 } 261 - err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); 262 - err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]); 259 + err_printf(m, " INSTPM: 0x%08x\n", ring->instpm); 260 + err_printf(m, " FADDR: 0x%08x\n", ring->faddr); 263 261 if (INTEL_INFO(dev)->gen >= 6) { 264 - err_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]); 265 - err_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]); 262 + err_printf(m, " RC PSMI: 0x%08x\n", ring->rc_psmi); 263 + err_printf(m, " FAULT_REG: 0x%08x\n", ring->fault_reg); 266 264 err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n", 267 - error->semaphore_mboxes[ring][0], 268 - error->semaphore_seqno[ring][0]); 265 + ring->semaphore_mboxes[0], 266 + ring->semaphore_seqno[0]); 269 267 err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n", 270 - error->semaphore_mboxes[ring][1], 271 - error->semaphore_seqno[ring][1]); 268 + ring->semaphore_mboxes[1], 269 + ring->semaphore_seqno[1]); 272 270 if (HAS_VEBOX(dev)) { 273 271 err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n", 274 - error->semaphore_mboxes[ring][2], 275 - error->semaphore_seqno[ring][2]); 272 + ring->semaphore_mboxes[2], 273 + ring->semaphore_seqno[2]); 276 274 } 277 275 } 278 - err_printf(m, " seqno: 0x%08x\n", error->seqno[ring]); 279 - err_printf(m, " waiting: %s\n", yesno(error->waiting[ring])); 280 - err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]); 281 - err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]); 276 + if (USES_PPGTT(dev)) { 277 + err_printf(m, " GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode); 278 + 279 + if (INTEL_INFO(dev)->gen >= 8) { 280 + int i; 281 + for (i = 0; i < 4; i++) 282 + err_printf(m, " PDP%d: 0x%016llx\n", 283 + i, ring->vm_info.pdp[i]); 284 + } else { 285 + err_printf(m, " PP_DIR_BASE: 0x%08x\n", 286 + ring->vm_info.pp_dir_base); 
287 + } 288 + } 289 + err_printf(m, " seqno: 0x%08x\n", ring->seqno); 290 + err_printf(m, " waiting: %s\n", yesno(ring->waiting)); 291 + err_printf(m, " ring->head: 0x%08x\n", ring->cpu_ring_head); 292 + err_printf(m, " ring->tail: 0x%08x\n", ring->cpu_ring_tail); 282 293 err_printf(m, " hangcheck: %s [%d]\n", 283 - hangcheck_action_to_str(error->hangcheck_action[ring]), 284 - error->hangcheck_score[ring]); 294 + hangcheck_action_to_str(ring->hangcheck_action), 295 + ring->hangcheck_score); 285 296 } 286 297 287 298 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...) ··· 344 333 if (INTEL_INFO(dev)->gen == 7) 345 334 err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); 346 335 347 - for (i = 0; i < ARRAY_SIZE(error->ring); i++) 348 - i915_ring_error_state(m, dev, error, i); 336 + for (i = 0; i < ARRAY_SIZE(error->ring); i++) { 337 + err_printf(m, "%s command stream:\n", ring_str(i)); 338 + i915_ring_error_state(m, dev, &error->ring[i]); 339 + } 349 340 350 341 if (error->active_bo) 351 342 print_error_buffers(m, "Active", ··· 400 387 obj->pages[page][elt]); 401 388 offset += 4; 402 389 } 390 + } 391 + } 392 + 393 + if ((obj = error->ring[i].hws_page)) { 394 + err_printf(m, "%s --- HW Status = 0x%08x\n", 395 + dev_priv->ring[i].name, 396 + obj->gtt_offset); 397 + offset = 0; 398 + for (elt = 0; elt < PAGE_SIZE/16; elt += 4) { 399 + err_printf(m, "[%04x] %08x %08x %08x %08x\n", 400 + offset, 401 + obj->pages[0][elt], 402 + obj->pages[0][elt+1], 403 + obj->pages[0][elt+2], 404 + obj->pages[0][elt+3]); 405 + offset += 16; 403 406 } 404 407 } 405 408 ··· 501 472 for (i = 0; i < ARRAY_SIZE(error->ring); i++) { 502 473 i915_error_object_free(error->ring[i].batchbuffer); 503 474 i915_error_object_free(error->ring[i].ringbuffer); 475 + i915_error_object_free(error->ring[i].hws_page); 504 476 i915_error_object_free(error->ring[i].ctx); 505 477 kfree(error->ring[i].requests); 506 478 } ··· 515 485 static struct drm_i915_error_object * 516 486 i915_error_object_create_sized(struct drm_i915_private *dev_priv, 517 487 struct drm_i915_gem_object *src, 488 + struct i915_address_space *vm, 518 489 const int num_pages) 519 490 { 520 491 struct drm_i915_error_object *dst; ··· 529 498 if (dst == NULL) 530 499 return NULL; 531 500 532 - reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src); 501 + reloc_offset = dst->gtt_offset = i915_gem_obj_offset(src, vm); 533 502 for (i = 0; i < num_pages; i++) { 534 503 unsigned long flags; 535 504 void *d; ··· 539 508 goto unwind; 540 509 541 510 local_irq_save(flags); 542 - if (reloc_offset < dev_priv->gtt.mappable_end && 543 - src->has_global_gtt_mapping) { 511 + if (src->cache_level == I915_CACHE_NONE && 512 + reloc_offset < dev_priv->gtt.mappable_end && 513 + src->has_global_gtt_mapping && 514 + i915_is_ggtt(vm)) { 544 515 void __iomem *s; 545 516 546 517 /* Simply ignore tiling or any overlapping fence. 
··· 592 559 kfree(dst); 593 560 return NULL; 594 561 } 595 - #define i915_error_object_create(dev_priv, src) \ 596 - i915_error_object_create_sized((dev_priv), (src), \ 562 + #define i915_error_object_create(dev_priv, src, vm) \ 563 + i915_error_object_create_sized((dev_priv), (src), (vm), \ 564 + (src)->base.size>>PAGE_SHIFT) 565 + 566 + #define i915_error_ggtt_object_create(dev_priv, src) \ 567 + i915_error_object_create_sized((dev_priv), (src), &(dev_priv)->gtt.base, \ 597 568 (src)->base.size>>PAGE_SHIFT) 598 569 599 570 static void capture_bo(struct drm_i915_error_buffer *err, ··· 612 575 err->write_domain = obj->base.write_domain; 613 576 err->fence_reg = obj->fence_reg; 614 577 err->pinned = 0; 615 - if (obj->pin_count > 0) 578 + if (i915_gem_obj_is_pinned(obj)) 616 579 err->pinned = 1; 617 580 if (obj->user_pin_count > 0) 618 581 err->pinned = -1; ··· 645 608 int i = 0; 646 609 647 610 list_for_each_entry(obj, head, global_list) { 648 - if (obj->pin_count == 0) 611 + if (!i915_gem_obj_is_pinned(obj)) 649 612 continue; 650 613 651 614 capture_bo(err++, obj); ··· 654 617 } 655 618 656 619 return i; 620 + } 621 + 622 + /* Generate a semi-unique error code. The code is not meant to have meaning, The 623 + * code's only purpose is to try to prevent false duplicated bug reports by 624 + * grossly estimating a GPU error state. 625 + * 626 + * TODO Ideally, hashing the batchbuffer would be a very nice way to determine 627 + * the hang if we could strip the GTT offset information from it. 628 + * 629 + * It's only a small step better than a random number in its current form. 630 + */ 631 + static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv, 632 + struct drm_i915_error_state *error) 633 + { 634 + uint32_t error_code = 0; 635 + int i; 636 + 637 + /* IPEHR would be an ideal way to detect errors, as it's the gross 638 + * measure of "the command that hung." However, has some very common 639 + * synchronization commands which almost always appear in the case 640 + * strictly a client bug. Use instdone to differentiate those some. 641 + */ 642 + for (i = 0; i < I915_NUM_RINGS; i++) 643 + if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) 644 + return error->ring[i].ipehr ^ error->ring[i].instdone; 645 + 646 + return error_code; 657 647 } 658 648 659 649 static void i915_gem_record_fences(struct drm_device *dev, ··· 716 652 } 717 653 } 718 654 655 + /* This assumes all batchbuffers are executed from the PPGTT. It might have to 656 + * change in the future. */ 657 + static bool is_active_vm(struct i915_address_space *vm, 658 + struct intel_ring_buffer *ring) 659 + { 660 + struct drm_device *dev = vm->dev; 661 + struct drm_i915_private *dev_priv = dev->dev_private; 662 + struct i915_hw_ppgtt *ppgtt; 663 + 664 + if (INTEL_INFO(dev)->gen < 7) 665 + return i915_is_ggtt(vm); 666 + 667 + /* FIXME: This ignores that the global gtt vm is also on this list. 
*/ 668 + ppgtt = container_of(vm, struct i915_hw_ppgtt, base); 669 + 670 + if (INTEL_INFO(dev)->gen >= 8) { 671 + u64 pdp0 = (u64)I915_READ(GEN8_RING_PDP_UDW(ring, 0)) << 32; 672 + pdp0 |= I915_READ(GEN8_RING_PDP_LDW(ring, 0)); 673 + return pdp0 == ppgtt->pd_dma_addr[0]; 674 + } else { 675 + u32 pp_db; 676 + pp_db = I915_READ(RING_PP_DIR_BASE(ring)); 677 + return (pp_db >> 10) == ppgtt->pd_offset; 678 + } 679 + } 680 + 719 681 static struct drm_i915_error_object * 720 682 i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, 721 683 struct intel_ring_buffer *ring) ··· 749 659 struct i915_address_space *vm; 750 660 struct i915_vma *vma; 751 661 struct drm_i915_gem_object *obj; 662 + bool found_active = false; 752 663 u32 seqno; 753 664 754 665 if (!ring->get_seqno) ··· 765 674 if (obj != NULL && 766 675 acthd >= i915_gem_obj_ggtt_offset(obj) && 767 676 acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size) 768 - return i915_error_object_create(dev_priv, obj); 677 + return i915_error_ggtt_object_create(dev_priv, obj); 769 678 } 770 679 771 680 seqno = ring->get_seqno(ring, false); 772 681 list_for_each_entry(vm, &dev_priv->vm_list, global_link) { 682 + if (!is_active_vm(vm, ring)) 683 + continue; 684 + 685 + found_active = true; 686 + 773 687 list_for_each_entry(vma, &vm->active_list, mm_list) { 774 688 obj = vma->obj; 775 689 if (obj->ring != ring) ··· 789 693 /* We need to copy these to an anonymous buffer as the simplest 790 694 * method to avoid being overwritten by userspace. 791 695 */ 792 - return i915_error_object_create(dev_priv, obj); 696 + return i915_error_object_create(dev_priv, obj, vm); 793 697 } 794 698 } 795 699 700 + WARN_ON(!found_active); 796 701 return NULL; 797 702 } 798 703 799 704 static void i915_record_ring_state(struct drm_device *dev, 800 - struct drm_i915_error_state *error, 801 - struct intel_ring_buffer *ring) 705 + struct intel_ring_buffer *ring, 706 + struct drm_i915_error_ring *ering) 802 707 { 803 708 struct drm_i915_private *dev_priv = dev->dev_private; 804 709 805 710 if (INTEL_INFO(dev)->gen >= 6) { 806 - error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50); 807 - error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); 808 - error->semaphore_mboxes[ring->id][0] 711 + ering->rc_psmi = I915_READ(ring->mmio_base + 0x50); 712 + ering->fault_reg = I915_READ(RING_FAULT_REG(ring)); 713 + ering->semaphore_mboxes[0] 809 714 = I915_READ(RING_SYNC_0(ring->mmio_base)); 810 - error->semaphore_mboxes[ring->id][1] 715 + ering->semaphore_mboxes[1] 811 716 = I915_READ(RING_SYNC_1(ring->mmio_base)); 812 - error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0]; 813 - error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1]; 717 + ering->semaphore_seqno[0] = ring->sync_seqno[0]; 718 + ering->semaphore_seqno[1] = ring->sync_seqno[1]; 814 719 } 815 720 816 721 if (HAS_VEBOX(dev)) { 817 - error->semaphore_mboxes[ring->id][2] = 722 + ering->semaphore_mboxes[2] = 818 723 I915_READ(RING_SYNC_2(ring->mmio_base)); 819 - error->semaphore_seqno[ring->id][2] = ring->sync_seqno[2]; 724 + ering->semaphore_seqno[2] = ring->sync_seqno[2]; 820 725 } 821 726 822 727 if (INTEL_INFO(dev)->gen >= 4) { 823 - error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); 824 - error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); 825 - error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); 826 - error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); 827 - error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); 828 
- error->bbaddr[ring->id] = I915_READ(RING_BBADDR(ring->mmio_base)); 728 + ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base)); 729 + ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base)); 730 + ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 731 + ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base)); 732 + ering->instps = I915_READ(RING_INSTPS(ring->mmio_base)); 733 + ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base)); 829 734 if (INTEL_INFO(dev)->gen >= 8) 830 - error->bbaddr[ring->id] |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32; 831 - error->bbstate[ring->id] = I915_READ(RING_BBSTATE(ring->mmio_base)); 735 + ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32; 736 + ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base)); 832 737 } else { 833 - error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); 834 - error->ipeir[ring->id] = I915_READ(IPEIR); 835 - error->ipehr[ring->id] = I915_READ(IPEHR); 836 - error->instdone[ring->id] = I915_READ(INSTDONE); 738 + ering->faddr = I915_READ(DMA_FADD_I8XX); 739 + ering->ipeir = I915_READ(IPEIR); 740 + ering->ipehr = I915_READ(IPEHR); 741 + ering->instdone = I915_READ(INSTDONE); 837 742 } 838 743 839 - error->waiting[ring->id] = waitqueue_active(&ring->irq_queue); 840 - error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); 841 - error->seqno[ring->id] = ring->get_seqno(ring, false); 842 - error->acthd[ring->id] = intel_ring_get_active_head(ring); 843 - error->head[ring->id] = I915_READ_HEAD(ring); 844 - error->tail[ring->id] = I915_READ_TAIL(ring); 845 - error->ctl[ring->id] = I915_READ_CTL(ring); 744 + ering->waiting = waitqueue_active(&ring->irq_queue); 745 + ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base)); 746 + ering->seqno = ring->get_seqno(ring, false); 747 + ering->acthd = intel_ring_get_active_head(ring); 748 + ering->head = I915_READ_HEAD(ring); 749 + ering->tail = I915_READ_TAIL(ring); 750 + ering->ctl = I915_READ_CTL(ring); 846 751 847 - error->cpu_ring_head[ring->id] = ring->head; 848 - error->cpu_ring_tail[ring->id] = ring->tail; 752 + if (I915_NEED_GFX_HWS(dev)) { 753 + int mmio; 849 754 850 - error->hangcheck_score[ring->id] = ring->hangcheck.score; 851 - error->hangcheck_action[ring->id] = ring->hangcheck.action; 755 + if (IS_GEN7(dev)) { 756 + switch (ring->id) { 757 + default: 758 + case RCS: 759 + mmio = RENDER_HWS_PGA_GEN7; 760 + break; 761 + case BCS: 762 + mmio = BLT_HWS_PGA_GEN7; 763 + break; 764 + case VCS: 765 + mmio = BSD_HWS_PGA_GEN7; 766 + break; 767 + case VECS: 768 + mmio = VEBOX_HWS_PGA_GEN7; 769 + break; 770 + } 771 + } else if (IS_GEN6(ring->dev)) { 772 + mmio = RING_HWS_PGA_GEN6(ring->mmio_base); 773 + } else { 774 + /* XXX: gen8 returns to sanity */ 775 + mmio = RING_HWS_PGA(ring->mmio_base); 776 + } 777 + 778 + ering->hws = I915_READ(mmio); 779 + } 780 + 781 + ering->cpu_ring_head = ring->head; 782 + ering->cpu_ring_tail = ring->tail; 783 + 784 + ering->hangcheck_score = ring->hangcheck.score; 785 + ering->hangcheck_action = ring->hangcheck.action; 786 + 787 + if (USES_PPGTT(dev)) { 788 + int i; 789 + 790 + ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring)); 791 + 792 + switch (INTEL_INFO(dev)->gen) { 793 + case 8: 794 + for (i = 0; i < 4; i++) { 795 + ering->vm_info.pdp[i] = 796 + I915_READ(GEN8_RING_PDP_UDW(ring, i)); 797 + ering->vm_info.pdp[i] <<= 32; 798 + ering->vm_info.pdp[i] |= 799 + I915_READ(GEN8_RING_PDP_LDW(ring, i)); 800 + } 801 + break; 802 + case 7: 803 + ering->vm_info.pp_dir_base = RING_PP_DIR_BASE(ring); 804 + 
break; 805 + case 6: 806 + ering->vm_info.pp_dir_base = RING_PP_DIR_BASE_READ(ring); 807 + break; 808 + } 809 + } 852 810 } 853 811 854 812 ··· 920 770 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 921 771 if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) { 922 772 ering->ctx = i915_error_object_create_sized(dev_priv, 923 - obj, 1); 773 + obj, 774 + &dev_priv->gtt.base, 775 + 1); 924 776 break; 925 777 } 926 778 } ··· 943 791 944 792 error->ring[i].valid = true; 945 793 946 - i915_record_ring_state(dev, error, ring); 794 + i915_record_ring_state(dev, ring, &error->ring[i]); 947 795 948 796 error->ring[i].batchbuffer = 949 797 i915_error_first_batchbuffer(dev_priv, ring); 950 798 951 799 error->ring[i].ringbuffer = 952 - i915_error_object_create(dev_priv, ring->obj); 800 + i915_error_ggtt_object_create(dev_priv, ring->obj); 953 801 802 + if (ring->status_page.obj) 803 + error->ring[i].hws_page = 804 + i915_error_ggtt_object_create(dev_priv, ring->status_page.obj); 954 805 955 806 i915_gem_record_active_context(ring, error, &error->ring[i]); 956 807 ··· 1000 845 i++; 1001 846 error->active_bo_count[ndx] = i; 1002 847 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) 1003 - if (obj->pin_count) 848 + if (i915_gem_obj_is_pinned(obj)) 1004 849 i++; 1005 850 error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx]; 1006 851 ··· 1034 879 list_for_each_entry(vm, &dev_priv->vm_list, global_link) 1035 880 cnt++; 1036 881 1037 - if (WARN(cnt > 1, "Multiple VMs not yet supported\n")) 1038 - cnt = 1; 1039 - 1040 - vm = &dev_priv->gtt.base; 1041 - 1042 882 error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC); 1043 883 error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC); 1044 884 error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count), ··· 1043 893 1044 894 list_for_each_entry(vm, &dev_priv->vm_list, global_link) 1045 895 i915_gem_capture_vm(dev_priv, error, vm, i++); 896 + } 897 + 898 + /* Capture all registers which don't fit into another category. */ 899 + static void i915_capture_reg_state(struct drm_i915_private *dev_priv, 900 + struct drm_i915_error_state *error) 901 + { 902 + struct drm_device *dev = dev_priv->dev; 903 + int pipe; 904 + 905 + /* General organization 906 + * 1. Registers specific to a single generation 907 + * 2. Registers which belong to multiple generations 908 + * 3. Feature specific registers. 909 + * 4. Everything else 910 + * Please try to follow the order. 
911 + */ 912 + 913 + /* 1: Registers specific to a single generation */ 914 + if (IS_VALLEYVIEW(dev)) { 915 + error->ier = I915_READ(GTIER) | I915_READ(VLV_IER); 916 + error->forcewake = I915_READ(FORCEWAKE_VLV); 917 + } 918 + 919 + if (IS_GEN7(dev)) 920 + error->err_int = I915_READ(GEN7_ERR_INT); 921 + 922 + if (IS_GEN6(dev)) { 923 + error->forcewake = I915_READ(FORCEWAKE); 924 + error->gab_ctl = I915_READ(GAB_CTL); 925 + error->gfx_mode = I915_READ(GFX_MODE); 926 + } 927 + 928 + if (IS_GEN2(dev)) 929 + error->ier = I915_READ16(IER); 930 + 931 + /* 2: Registers which belong to multiple generations */ 932 + if (INTEL_INFO(dev)->gen >= 7) 933 + error->forcewake = I915_READ(FORCEWAKE_MT); 934 + 935 + if (INTEL_INFO(dev)->gen >= 6) { 936 + error->derrmr = I915_READ(DERRMR); 937 + error->error = I915_READ(ERROR_GEN6); 938 + error->done_reg = I915_READ(DONE_REG); 939 + } 940 + 941 + /* 3: Feature specific registers */ 942 + if (IS_GEN6(dev) || IS_GEN7(dev)) { 943 + error->gam_ecochk = I915_READ(GAM_ECOCHK); 944 + error->gac_eco = I915_READ(GAC_ECO_BITS); 945 + } 946 + 947 + /* 4: Everything else */ 948 + if (HAS_HW_CONTEXTS(dev)) 949 + error->ccid = I915_READ(CCID); 950 + 951 + if (HAS_PCH_SPLIT(dev)) 952 + error->ier = I915_READ(DEIER) | I915_READ(GTIER); 953 + else { 954 + error->ier = I915_READ(IER); 955 + for_each_pipe(pipe) 956 + error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); 957 + } 958 + 959 + /* 4: Everything else */ 960 + error->eir = I915_READ(EIR); 961 + error->pgtbl_er = I915_READ(PGTBL_ER); 962 + 963 + i915_get_extra_instdone(dev, error->extra_instdone); 1046 964 } 1047 965 1048 966 /** ··· 1124 906 */ 1125 907 void i915_capture_error_state(struct drm_device *dev) 1126 908 { 909 + static bool warned; 1127 910 struct drm_i915_private *dev_priv = dev->dev_private; 1128 911 struct drm_i915_error_state *error; 1129 912 unsigned long flags; 1130 - int pipe; 913 + uint32_t ecode; 1131 914 1132 915 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); 1133 916 error = dev_priv->gpu_error.first_error; ··· 1145 926 1146 927 DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", 1147 928 dev->primary->index); 1148 - DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n"); 1149 - DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n"); 1150 - DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n"); 1151 - DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n"); 1152 - 1153 929 kref_init(&error->ref); 1154 - error->eir = I915_READ(EIR); 1155 - error->pgtbl_er = I915_READ(PGTBL_ER); 1156 - if (HAS_HW_CONTEXTS(dev)) 1157 - error->ccid = I915_READ(CCID); 1158 930 1159 - if (HAS_PCH_SPLIT(dev)) 1160 - error->ier = I915_READ(DEIER) | I915_READ(GTIER); 1161 - else if (IS_VALLEYVIEW(dev)) 1162 - error->ier = I915_READ(GTIER) | I915_READ(VLV_IER); 1163 - else if (IS_GEN2(dev)) 1164 - error->ier = I915_READ16(IER); 1165 - else 1166 - error->ier = I915_READ(IER); 1167 - 1168 - if (INTEL_INFO(dev)->gen >= 6) 1169 - error->derrmr = I915_READ(DERRMR); 1170 - 1171 - if (IS_VALLEYVIEW(dev)) 1172 - error->forcewake = I915_READ(FORCEWAKE_VLV); 1173 - else if (INTEL_INFO(dev)->gen >= 7) 1174 - error->forcewake = I915_READ(FORCEWAKE_MT); 1175 - else if (INTEL_INFO(dev)->gen == 6) 1176 - error->forcewake = I915_READ(FORCEWAKE); 1177 - 1178 - if (!HAS_PCH_SPLIT(dev)) 1179 - for_each_pipe(pipe) 1180 - error->pipestat[pipe] = 
I915_READ(PIPESTAT(pipe)); 1181 - 1182 - if (INTEL_INFO(dev)->gen >= 6) { 1183 - error->error = I915_READ(ERROR_GEN6); 1184 - error->done_reg = I915_READ(DONE_REG); 1185 - } 1186 - 1187 - if (INTEL_INFO(dev)->gen == 7) 1188 - error->err_int = I915_READ(GEN7_ERR_INT); 1189 - 1190 - i915_get_extra_instdone(dev, error->extra_instdone); 1191 - 931 + i915_capture_reg_state(dev_priv, error); 1192 932 i915_gem_capture_buffers(dev_priv, error); 1193 933 i915_gem_record_fences(dev, error); 1194 934 i915_gem_record_rings(dev, error); 935 + ecode = i915_error_generate_code(dev_priv, error); 936 + 937 + if (!warned) { 938 + DRM_INFO("GPU HANG [%x]\n", ecode); 939 + DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n"); 940 + DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n"); 941 + DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n"); 942 + DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n"); 943 + warned = true; 944 + } 1195 945 1196 946 do_gettimeofday(&error->time); 1197 947
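Two details of the error-capture changes above are easy to miss. First, is_active_vm() recovers the page-directory offset from PP_DIR_BASE by shifting right 10: get_pd_offset() in the GTT changes writes (pd_offset / 64) << 16, i.e. the cacheline index in bits 31:16, which for a 64-byte-aligned offset equals pd_offset << 10. Second, i915_error_generate_code() hashes only the first hung ring (IPEHR xor INSTDONE), so the one-time "GPU HANG [%x]" tag stays stable across repeats of the same hang. A standalone sanity sketch of the offset round-trip (the value is invented for the example, not taken from hardware):

/* Round-trip of the PP_DIR_BASE encoding used by the mm_switch
 * functions and checked by is_active_vm(). */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t pd_offset = 0x1f000;		/* example, 64B aligned */
	uint32_t pp_db = (pd_offset / 64) << 16; /* value written to HW */

	assert((pp_db >> 10) == pd_offset);	/* check done on capture */
	return 0;
}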
+138 -121
drivers/gpu/drm/i915/i915_irq.c
··· 232 232 return true; 233 233 } 234 234 235 + static void i9xx_clear_fifo_underrun(struct drm_device *dev, enum pipe pipe) 236 + { 237 + struct drm_i915_private *dev_priv = dev->dev_private; 238 + u32 reg = PIPESTAT(pipe); 239 + u32 pipestat = I915_READ(reg) & 0x7fff0000; 240 + 241 + assert_spin_locked(&dev_priv->irq_lock); 242 + 243 + I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS); 244 + POSTING_READ(reg); 245 + } 246 + 235 247 static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev, 236 248 enum pipe pipe, bool enable) 237 249 { ··· 405 393 406 394 intel_crtc->cpu_fifo_underrun_disabled = !enable; 407 395 408 - if (IS_GEN5(dev) || IS_GEN6(dev)) 396 + if (enable && (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))) 397 + i9xx_clear_fifo_underrun(dev, pipe); 398 + else if (IS_GEN5(dev) || IS_GEN6(dev)) 409 399 ironlake_set_fifo_underrun_reporting(dev, pipe, enable); 410 400 else if (IS_GEN7(dev)) 411 401 ivybridge_set_fifo_underrun_reporting(dev, pipe, enable); ··· 929 915 drm_kms_helper_hotplug_event(dev); 930 916 } 931 917 918 + static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv) 919 + { 920 + del_timer_sync(&dev_priv->hotplug_reenable_timer); 921 + } 922 + 932 923 static void ironlake_rps_change_irq_handler(struct drm_device *dev) 933 924 { 934 925 drm_i915_private_t *dev_priv = dev->dev_private; ··· 983 964 984 965 wake_up_all(&ring->irq_queue); 985 966 i915_queue_hangcheck(dev); 967 + } 968 + 969 + void gen6_set_pm_mask(struct drm_i915_private *dev_priv, 970 + u32 pm_iir, int new_delay) 971 + { 972 + if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 973 + if (new_delay >= dev_priv->rps.max_delay) { 974 + /* Mask UP THRESHOLD Interrupts */ 975 + I915_WRITE(GEN6_PMINTRMSK, 976 + I915_READ(GEN6_PMINTRMSK) | 977 + GEN6_PM_RP_UP_THRESHOLD); 978 + dev_priv->rps.rp_up_masked = true; 979 + } 980 + if (dev_priv->rps.rp_down_masked) { 981 + /* UnMask DOWN THRESHOLD Interrupts */ 982 + I915_WRITE(GEN6_PMINTRMSK, 983 + I915_READ(GEN6_PMINTRMSK) & 984 + ~GEN6_PM_RP_DOWN_THRESHOLD); 985 + dev_priv->rps.rp_down_masked = false; 986 + } 987 + } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { 988 + if (new_delay <= dev_priv->rps.min_delay) { 989 + /* Mask DOWN THRESHOLD Interrupts */ 990 + I915_WRITE(GEN6_PMINTRMSK, 991 + I915_READ(GEN6_PMINTRMSK) | 992 + GEN6_PM_RP_DOWN_THRESHOLD); 993 + dev_priv->rps.rp_down_masked = true; 994 + } 995 + 996 + if (dev_priv->rps.rp_up_masked) { 997 + /* UnMask UP THRESHOLD Interrupts */ 998 + I915_WRITE(GEN6_PMINTRMSK, 999 + I915_READ(GEN6_PMINTRMSK) & 1000 + ~GEN6_PM_RP_UP_THRESHOLD); 1001 + dev_priv->rps.rp_up_masked = false; 1002 + } 1003 + } 986 1004 } 987 1005 988 1006 static void gen6_pm_rps_work(struct work_struct *work) ··· 1079 1023 */ 1080 1024 new_delay = clamp_t(int, new_delay, 1081 1025 dev_priv->rps.min_delay, dev_priv->rps.max_delay); 1026 + 1027 + gen6_set_pm_mask(dev_priv, pm_iir, new_delay); 1082 1028 dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay; 1083 1029 1084 1030 if (IS_VALLEYVIEW(dev_priv->dev)) ··· 1294 1236 if (!hotplug_trigger) 1295 1237 return; 1296 1238 1239 + DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 1240 + hotplug_trigger); 1241 + 1297 1242 spin_lock(&dev_priv->irq_lock); 1298 1243 for (i = 1; i < HPD_NUM_PINS; i++) { 1299 1244 ··· 1476 1415 } 1477 1416 } 1478 1417 1418 + static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir) 1419 + { 1420 + struct drm_i915_private *dev_priv = dev->dev_private; 1421 + u32 pipe_stats[I915_MAX_PIPES]; 1422 + int pipe; 1423 
+ 1424 + spin_lock(&dev_priv->irq_lock); 1425 + for_each_pipe(pipe) { 1426 + int reg = PIPESTAT(pipe); 1427 + pipe_stats[pipe] = I915_READ(reg); 1428 + 1429 + /* 1430 + * Clear the PIPE*STAT regs before the IIR 1431 + */ 1432 + if (pipe_stats[pipe] & 0x8000ffff) 1433 + I915_WRITE(reg, pipe_stats[pipe]); 1434 + } 1435 + spin_unlock(&dev_priv->irq_lock); 1436 + 1437 + for_each_pipe(pipe) { 1438 + if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 1439 + drm_handle_vblank(dev, pipe); 1440 + 1441 + if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) { 1442 + intel_prepare_page_flip(dev, pipe); 1443 + intel_finish_page_flip(dev, pipe); 1444 + } 1445 + 1446 + if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1447 + i9xx_pipe_crc_irq_handler(dev, pipe); 1448 + 1449 + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && 1450 + intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 1451 + DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); 1452 + } 1453 + 1454 + if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1455 + gmbus_irq_handler(dev); 1456 + } 1457 + 1479 1458 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 1480 1459 { 1481 1460 struct drm_device *dev = (struct drm_device *) arg; 1482 1461 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1483 1462 u32 iir, gt_iir, pm_iir; 1484 1463 irqreturn_t ret = IRQ_NONE; 1485 - unsigned long irqflags; 1486 - int pipe; 1487 - u32 pipe_stats[I915_MAX_PIPES]; 1488 - 1489 - atomic_inc(&dev_priv->irq_received); 1490 1464 1491 1465 while (true) { 1492 1466 iir = I915_READ(VLV_IIR); ··· 1535 1439 1536 1440 snb_gt_irq_handler(dev, dev_priv, gt_iir); 1537 1441 1538 - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1539 - for_each_pipe(pipe) { 1540 - int reg = PIPESTAT(pipe); 1541 - pipe_stats[pipe] = I915_READ(reg); 1542 - 1543 - /* 1544 - * Clear the PIPE*STAT regs before the IIR 1545 - */ 1546 - if (pipe_stats[pipe] & 0x8000ffff) { 1547 - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1548 - DRM_DEBUG_DRIVER("pipe %c underrun\n", 1549 - pipe_name(pipe)); 1550 - I915_WRITE(reg, pipe_stats[pipe]); 1551 - } 1552 - } 1553 - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1554 - 1555 - for_each_pipe(pipe) { 1556 - if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 1557 - drm_handle_vblank(dev, pipe); 1558 - 1559 - if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) { 1560 - intel_prepare_page_flip(dev, pipe); 1561 - intel_finish_page_flip(dev, pipe); 1562 - } 1563 - 1564 - if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1565 - i9xx_pipe_crc_irq_handler(dev, pipe); 1566 - } 1442 + valleyview_pipestat_irq_handler(dev, iir); 1567 1443 1568 1444 /* Consume port. 
Then clear IIR or we'll miss events */ 1569 1445 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 1570 1446 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 1571 1447 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1572 - 1573 - DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 1574 - hotplug_status); 1575 1448 1576 1449 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 1577 1450 ··· 1551 1486 I915_READ(PORT_HOTPLUG_STAT); 1552 1487 } 1553 1488 1554 - if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1555 - gmbus_irq_handler(dev); 1556 1489 1557 1490 if (pm_iir) 1558 1491 gen6_rps_irq_handler(dev_priv, pm_iir); ··· 1609 1546 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 1610 1547 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 1611 1548 false)) 1612 - DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); 1549 + DRM_ERROR("PCH transcoder A FIFO underrun\n"); 1613 1550 1614 1551 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 1615 1552 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 1616 1553 false)) 1617 - DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); 1554 + DRM_ERROR("PCH transcoder B FIFO underrun\n"); 1618 1555 } 1619 1556 1620 1557 static void ivb_err_int_handler(struct drm_device *dev) ··· 1630 1567 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) { 1631 1568 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, 1632 1569 false)) 1633 - DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n", 1634 - pipe_name(pipe)); 1570 + DRM_ERROR("Pipe %c FIFO underrun\n", 1571 + pipe_name(pipe)); 1635 1572 } 1636 1573 1637 1574 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { ··· 1656 1593 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 1657 1594 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 1658 1595 false)) 1659 - DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); 1596 + DRM_ERROR("PCH transcoder A FIFO underrun\n"); 1660 1597 1661 1598 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 1662 1599 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 1663 1600 false)) 1664 - DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); 1601 + DRM_ERROR("PCH transcoder B FIFO underrun\n"); 1665 1602 1666 1603 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 1667 1604 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C, 1668 1605 false)) 1669 - DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n"); 1606 + DRM_ERROR("PCH transcoder C FIFO underrun\n"); 1670 1607 1671 1608 I915_WRITE(SERR_INT, serr_int); 1672 1609 } ··· 1728 1665 1729 1666 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 1730 1667 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 1731 - DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n", 1732 - pipe_name(pipe)); 1668 + DRM_ERROR("Pipe %c FIFO underrun\n", 1669 + pipe_name(pipe)); 1733 1670 1734 1671 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 1735 1672 i9xx_pipe_crc_irq_handler(dev, pipe); ··· 1801 1738 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 1802 1739 irqreturn_t ret = IRQ_NONE; 1803 1740 1804 - atomic_inc(&dev_priv->irq_received); 1805 - 1806 1741 /* We get interrupts on unclaimed registers, so check for this before we 1807 1742 * do any I915_{READ,WRITE}. 
*/ 1808 1743 intel_uncore_check_errors(dev); ··· 1869 1808 uint32_t tmp = 0; 1870 1809 enum pipe pipe; 1871 1810 1872 - atomic_inc(&dev_priv->irq_received); 1873 - 1874 1811 master_ctl = I915_READ(GEN8_MASTER_IRQ); 1875 1812 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 1876 1813 if (!master_ctl) ··· 1930 1871 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) { 1931 1872 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, 1932 1873 false)) 1933 - DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n", 1934 - pipe_name(pipe)); 1874 + DRM_ERROR("Pipe %c FIFO underrun\n", 1875 + pipe_name(pipe)); 1935 1876 } 1936 1877 1937 1878 if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) { ··· 2303 2244 { 2304 2245 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2305 2246 unsigned long irqflags; 2306 - u32 imr; 2307 2247 2308 2248 if (!i915_pipe_enabled(dev, pipe)) 2309 2249 return -EINVAL; 2310 2250 2311 2251 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2312 - imr = I915_READ(VLV_IMR); 2313 - if (pipe == PIPE_A) 2314 - imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 2315 - else 2316 - imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2317 - I915_WRITE(VLV_IMR, imr); 2318 2252 i915_enable_pipestat(dev_priv, pipe, 2319 2253 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2320 2254 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); ··· 2365 2313 { 2366 2314 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2367 2315 unsigned long irqflags; 2368 - u32 imr; 2369 2316 2370 2317 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2371 2318 i915_disable_pipestat(dev_priv, pipe, 2372 2319 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2373 - imr = I915_READ(VLV_IMR); 2374 - if (pipe == PIPE_A) 2375 - imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 2376 - else 2377 - imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2378 - I915_WRITE(VLV_IMR, imr); 2379 2320 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2380 2321 } 2381 2322 ··· 2524 2479 #define BUSY 1 2525 2480 #define KICK 5 2526 2481 #define HUNG 20 2527 - #define FIRE 30 2528 2482 2529 - if (!i915_enable_hangcheck) 2483 + if (!i915.enable_hangcheck) 2530 2484 return; 2531 2485 2532 2486 for_each_ring(ring, dev_priv, i) { ··· 2607 2563 } 2608 2564 2609 2565 for_each_ring(ring, dev_priv, i) { 2610 - if (ring->hangcheck.score > FIRE) { 2566 + if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { 2611 2567 DRM_INFO("%s on %s\n", 2612 2568 stuck[i] ? 
"stuck" : "no progress", 2613 2569 ring->name); ··· 2627 2583 void i915_queue_hangcheck(struct drm_device *dev) 2628 2584 { 2629 2585 struct drm_i915_private *dev_priv = dev->dev_private; 2630 - if (!i915_enable_hangcheck) 2586 + if (!i915.enable_hangcheck) 2631 2587 return; 2632 2588 2633 2589 mod_timer(&dev_priv->gpu_error.hangcheck_timer, ··· 2676 2632 { 2677 2633 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2678 2634 2679 - atomic_set(&dev_priv->irq_received, 0); 2680 - 2681 2635 I915_WRITE(HWSTAM, 0xeffe); 2682 2636 2683 2637 I915_WRITE(DEIMR, 0xffffffff); ··· 2691 2649 { 2692 2650 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2693 2651 int pipe; 2694 - 2695 - atomic_set(&dev_priv->irq_received, 0); 2696 2652 2697 2653 /* VLV magic */ 2698 2654 I915_WRITE(VLV_IMR, 0); ··· 2720 2680 { 2721 2681 struct drm_i915_private *dev_priv = dev->dev_private; 2722 2682 int pipe; 2723 - 2724 - atomic_set(&dev_priv->irq_received, 0); 2725 2683 2726 2684 I915_WRITE(GEN8_MASTER_IRQ, 0); 2727 2685 POSTING_READ(GEN8_MASTER_IRQ); ··· 3045 3007 if (!dev_priv) 3046 3008 return; 3047 3009 3048 - atomic_set(&dev_priv->irq_received, 0); 3049 - 3050 3010 I915_WRITE(GEN8_MASTER_IRQ, 0); 3051 3011 3052 3012 #define GEN8_IRQ_FINI_NDX(type, which) do { \ ··· 3085 3049 if (!dev_priv) 3086 3050 return; 3087 3051 3088 - del_timer_sync(&dev_priv->hotplug_reenable_timer); 3052 + intel_hpd_irq_uninstall(dev_priv); 3089 3053 3090 3054 for_each_pipe(pipe) 3091 3055 I915_WRITE(PIPESTAT(pipe), 0xffff); ··· 3108 3072 if (!dev_priv) 3109 3073 return; 3110 3074 3111 - del_timer_sync(&dev_priv->hotplug_reenable_timer); 3075 + intel_hpd_irq_uninstall(dev_priv); 3112 3076 3113 3077 I915_WRITE(HWSTAM, 0xffffffff); 3114 3078 ··· 3136 3100 { 3137 3101 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3138 3102 int pipe; 3139 - 3140 - atomic_set(&dev_priv->irq_received, 0); 3141 3103 3142 3104 for_each_pipe(pipe) 3143 3105 I915_WRITE(PIPESTAT(pipe), 0); ··· 3221 3187 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3222 3188 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3223 3189 3224 - atomic_inc(&dev_priv->irq_received); 3225 - 3226 3190 iir = I915_READ16(IIR); 3227 3191 if (iir == 0) 3228 3192 return IRQ_NONE; ··· 3242 3210 /* 3243 3211 * Clear the PIPE*STAT regs before the IIR 3244 3212 */ 3245 - if (pipe_stats[pipe] & 0x8000ffff) { 3246 - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3247 - DRM_DEBUG_DRIVER("pipe %c underrun\n", 3248 - pipe_name(pipe)); 3213 + if (pipe_stats[pipe] & 0x8000ffff) 3249 3214 I915_WRITE(reg, pipe_stats[pipe]); 3250 - } 3251 3215 } 3252 3216 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3253 3217 ··· 3266 3238 3267 3239 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3268 3240 i9xx_pipe_crc_irq_handler(dev, pipe); 3241 + 3242 + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && 3243 + intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 3244 + DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); 3269 3245 } 3270 3246 3271 3247 iir = new_iir; ··· 3297 3265 { 3298 3266 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3299 3267 int pipe; 3300 - 3301 - atomic_set(&dev_priv->irq_received, 0); 3302 3268 3303 3269 if (I915_HAS_HOTPLUG(dev)) { 3304 3270 I915_WRITE(PORT_HOTPLUG_EN, 0); ··· 3403 3373 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3404 3374 int pipe, ret = IRQ_NONE; 3405 3375 3406 - atomic_inc(&dev_priv->irq_received); 3407 - 3408 3376 iir = I915_READ(IIR); 3409 3377 do { 3410 3378 
bool irq_received = (iir & ~flip_mask) != 0; ··· 3423 3395 3424 3396 /* Clear the PIPE*STAT regs before the IIR */ 3425 3397 if (pipe_stats[pipe] & 0x8000ffff) { 3426 - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3427 - DRM_DEBUG_DRIVER("pipe %c underrun\n", 3428 - pipe_name(pipe)); 3429 3398 I915_WRITE(reg, pipe_stats[pipe]); 3430 3399 irq_received = true; 3431 3400 } ··· 3437 3412 (iir & I915_DISPLAY_PORT_INTERRUPT)) { 3438 3413 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 3439 3414 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 3440 - 3441 - DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 3442 - hotplug_status); 3443 3415 3444 3416 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 3445 3417 ··· 3464 3442 3465 3443 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3466 3444 i9xx_pipe_crc_irq_handler(dev, pipe); 3445 + 3446 + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && 3447 + intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 3448 + DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); 3467 3449 } 3468 3450 3469 3451 if (blc_event || (iir & I915_ASLE_INTERRUPT)) ··· 3502 3476 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3503 3477 int pipe; 3504 3478 3505 - del_timer_sync(&dev_priv->hotplug_reenable_timer); 3479 + intel_hpd_irq_uninstall(dev_priv); 3506 3480 3507 3481 if (I915_HAS_HOTPLUG(dev)) { 3508 3482 I915_WRITE(PORT_HOTPLUG_EN, 0); ··· 3525 3499 { 3526 3500 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3527 3501 int pipe; 3528 - 3529 - atomic_set(&dev_priv->irq_received, 0); 3530 3502 3531 3503 I915_WRITE(PORT_HOTPLUG_EN, 0); 3532 3504 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); ··· 3634 3610 u32 iir, new_iir; 3635 3611 u32 pipe_stats[I915_MAX_PIPES]; 3636 3612 unsigned long irqflags; 3637 - int irq_received; 3638 3613 int ret = IRQ_NONE, pipe; 3639 3614 u32 flip_mask = 3640 3615 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3641 3616 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3642 3617 3643 - atomic_inc(&dev_priv->irq_received); 3644 - 3645 3618 iir = I915_READ(IIR); 3646 3619 3647 3620 for (;;) { 3621 + bool irq_received = (iir & ~flip_mask) != 0; 3648 3622 bool blc_event = false; 3649 - 3650 - irq_received = (iir & ~flip_mask) != 0; 3651 3623 3652 3624 /* Can't rely on pipestat interrupt bit in iir as it might 3653 3625 * have been cleared after the pipestat interrupt was received. ··· 3662 3642 * Clear the PIPE*STAT regs before the IIR 3663 3643 */ 3664 3644 if (pipe_stats[pipe] & 0x8000ffff) { 3665 - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3666 - DRM_DEBUG_DRIVER("pipe %c underrun\n", 3667 - pipe_name(pipe)); 3668 3645 I915_WRITE(reg, pipe_stats[pipe]); 3669 - irq_received = 1; 3646 + irq_received = true; 3670 3647 } 3671 3648 } 3672 3649 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); ··· 3679 3662 u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ? 3680 3663 HOTPLUG_INT_STATUS_G4X : 3681 3664 HOTPLUG_INT_STATUS_I915); 3682 - 3683 - DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 3684 - hotplug_status); 3685 3665 3686 3666 intel_hpd_irq_handler(dev, hotplug_trigger, 3687 3667 IS_G4X(dev) ? 
hpd_status_g4x : hpd_status_i915); ··· 3709 3695 3710 3696 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3711 3697 i9xx_pipe_crc_irq_handler(dev, pipe); 3712 - } 3713 3698 3699 + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && 3700 + intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 3701 + DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); 3702 + } 3714 3703 3715 3704 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3716 3705 intel_opregion_asle_intr(dev); ··· 3752 3735 if (!dev_priv) 3753 3736 return; 3754 3737 3755 - del_timer_sync(&dev_priv->hotplug_reenable_timer); 3738 + intel_hpd_irq_uninstall(dev_priv); 3756 3739 3757 3740 I915_WRITE(PORT_HOTPLUG_EN, 0); 3758 3741 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); ··· 3769 3752 I915_WRITE(IIR, I915_READ(IIR)); 3770 3753 } 3771 3754 3772 - static void i915_reenable_hotplug_timer_func(unsigned long data) 3755 + static void intel_hpd_irq_reenable(unsigned long data) 3773 3756 { 3774 3757 drm_i915_private_t *dev_priv = (drm_i915_private_t *)data; 3775 3758 struct drm_device *dev = dev_priv->dev; ··· 3816 3799 setup_timer(&dev_priv->gpu_error.hangcheck_timer, 3817 3800 i915_hangcheck_elapsed, 3818 3801 (unsigned long) dev); 3819 - setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func, 3802 + setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable, 3820 3803 (unsigned long) dev_priv); 3821 3804 3822 3805 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
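The i915_irq.c hunks above repeat one pattern several times: PIPESTAT bits are snapshotted and acked under irq_lock, then acted on with the lock dropped, and FIFO underruns are first disarmed via intel_set_cpu_fifo_underrun_reporting() so the now-louder DRM_ERROR report cannot storm the log. Below is a minimal sketch of that two-phase shape as a standalone user-space mock — the names, bit layout of the mock, and locking primitive are stand-ins, not the driver code:

#include <stdint.h>
#include <stdio.h>
#include <pthread.h>

#define NUM_PIPES   2
#define STATUS_MASK 0x8000ffffu              /* bits that must be acked */

static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t pipestat_reg[NUM_PIPES];     /* stands in for PIPESTAT(pipe) */

static void pipestat_irq_handler(void)
{
	uint32_t pipe_stats[NUM_PIPES];

	/* phase 1: snapshot and clear under the lock */
	pthread_mutex_lock(&irq_lock);
	for (int pipe = 0; pipe < NUM_PIPES; pipe++) {
		pipe_stats[pipe] = pipestat_reg[pipe];
		/* hardware clears status bits when they are written back;
		 * the mock just drops them */
		if (pipe_stats[pipe] & STATUS_MASK)
			pipestat_reg[pipe] &= ~STATUS_MASK;
	}
	pthread_mutex_unlock(&irq_lock);

	/* phase 2: act on the cached snapshot with the lock dropped */
	for (int pipe = 0; pipe < NUM_PIPES; pipe++) {
		if (pipe_stats[pipe] & (1u << 1))    /* vblank status, bit 1 */
			printf("pipe %c: vblank\n", 'A' + pipe);
		if (pipe_stats[pipe] & (1u << 31))   /* FIFO underrun, bit 31 */
			fprintf(stderr, "pipe %c: underrun\n", 'A' + pipe);
	}
}

int main(void)
{
	pipestat_reg[0] = (1u << 31) | (1u << 1);  /* fake pending events */
	pipestat_irq_handler();
	return 0;
}

Acking before handling matters because the status bits are cleared by writing them back: an event that fires while the snapshot is being processed raises a fresh interrupt instead of being lost.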
+155
drivers/gpu/drm/i915/i915_params.c
··· 1 + /* 2 + * Copyright © 2014 Intel Corporation 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the 6 + * "Software"), to deal in the Software without restriction, including 7 + * without limitation the rights to use, copy, modify, merge, publish, 8 + * distribute, sub license, and/or sell copies of the Software, and to 9 + * permit persons to whom the Software is furnished to do so, subject to 10 + * the following conditions: 11 + * 12 + * The above copyright notice and this permission notice (including the 13 + * next paragraph) shall be included in all copies or substantial portions 14 + * of the Software. 15 + * 16 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 22 + * IN THE SOFTWARE. 23 + */ 24 + 25 + #include "i915_drv.h" 26 + 27 + struct i915_params i915 __read_mostly = { 28 + .modeset = -1, 29 + .panel_ignore_lid = 1, 30 + .powersave = 1, 31 + .semaphores = -1, 32 + .lvds_downclock = 0, 33 + .lvds_channel_mode = 0, 34 + .panel_use_ssc = -1, 35 + .vbt_sdvo_panel_type = -1, 36 + .enable_rc6 = -1, 37 + .enable_fbc = -1, 38 + .enable_hangcheck = true, 39 + .enable_ppgtt = -1, 40 + .enable_psr = 0, 41 + .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT), 42 + .disable_power_well = 1, 43 + .enable_ips = 1, 44 + .fastboot = 0, 45 + .enable_pc8 = 1, 46 + .pc8_timeout = 5000, 47 + .prefault_disable = 0, 48 + .reset = true, 49 + .invert_brightness = 0, 50 + }; 51 + 52 + module_param_named(modeset, i915.modeset, int, 0400); 53 + MODULE_PARM_DESC(modeset, 54 + "Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, " 55 + "1=on, -1=force vga console preference [default])"); 56 + 57 + module_param_named(panel_ignore_lid, i915.panel_ignore_lid, int, 0600); 58 + MODULE_PARM_DESC(panel_ignore_lid, 59 + "Override lid status (0=autodetect, 1=autodetect disabled [default], " 60 + "-1=force lid closed, -2=force lid open)"); 61 + 62 + module_param_named(powersave, i915.powersave, int, 0600); 63 + MODULE_PARM_DESC(powersave, 64 + "Enable powersavings, fbc, downclocking, etc. (default: true)"); 65 + 66 + module_param_named(semaphores, i915.semaphores, int, 0400); 67 + MODULE_PARM_DESC(semaphores, 68 + "Use semaphores for inter-ring sync " 69 + "(default: -1 (use per-chip defaults))"); 70 + 71 + module_param_named(enable_rc6, i915.enable_rc6, int, 0400); 72 + MODULE_PARM_DESC(enable_rc6, 73 + "Enable power-saving render C-state 6. " 74 + "Different stages can be selected via bitmask values " 75 + "(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). " 76 + "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. 
" 77 + "default: -1 (use per-chip default)"); 78 + 79 + module_param_named(enable_fbc, i915.enable_fbc, int, 0600); 80 + MODULE_PARM_DESC(enable_fbc, 81 + "Enable frame buffer compression for power savings " 82 + "(default: -1 (use per-chip default))"); 83 + 84 + module_param_named(lvds_downclock, i915.lvds_downclock, int, 0400); 85 + MODULE_PARM_DESC(lvds_downclock, 86 + "Use panel (LVDS/eDP) downclocking for power savings " 87 + "(default: false)"); 88 + 89 + module_param_named(lvds_channel_mode, i915.lvds_channel_mode, int, 0600); 90 + MODULE_PARM_DESC(lvds_channel_mode, 91 + "Specify LVDS channel mode " 92 + "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)"); 93 + 94 + module_param_named(lvds_use_ssc, i915.panel_use_ssc, int, 0600); 95 + MODULE_PARM_DESC(lvds_use_ssc, 96 + "Use Spread Spectrum Clock with panels [LVDS/eDP] " 97 + "(default: auto from VBT)"); 98 + 99 + module_param_named(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0600); 100 + MODULE_PARM_DESC(vbt_sdvo_panel_type, 101 + "Override/Ignore selection of SDVO panel mode in the VBT " 102 + "(-2=ignore, -1=auto [default], index in VBT BIOS table)"); 103 + 104 + module_param_named(reset, i915.reset, bool, 0600); 105 + MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)"); 106 + 107 + module_param_named(enable_hangcheck, i915.enable_hangcheck, bool, 0644); 108 + MODULE_PARM_DESC(enable_hangcheck, 109 + "Periodically check GPU activity for detecting hangs. " 110 + "WARNING: Disabling this can cause system wide hangs. " 111 + "(default: true)"); 112 + 113 + module_param_named(enable_ppgtt, i915.enable_ppgtt, int, 0400); 114 + MODULE_PARM_DESC(enable_ppgtt, 115 + "Override PPGTT usage. " 116 + "(-1=auto [default], 0=disabled, 1=aliasing, 2=full)"); 117 + 118 + module_param_named(enable_psr, i915.enable_psr, int, 0600); 119 + MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)"); 120 + 121 + module_param_named(preliminary_hw_support, i915.preliminary_hw_support, int, 0600); 122 + MODULE_PARM_DESC(preliminary_hw_support, 123 + "Enable preliminary hardware support."); 124 + 125 + module_param_named(disable_power_well, i915.disable_power_well, int, 0600); 126 + MODULE_PARM_DESC(disable_power_well, 127 + "Disable the power well when possible (default: true)"); 128 + 129 + module_param_named(enable_ips, i915.enable_ips, int, 0600); 130 + MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)"); 131 + 132 + module_param_named(fastboot, i915.fastboot, bool, 0600); 133 + MODULE_PARM_DESC(fastboot, 134 + "Try to skip unnecessary mode sets at boot time (default: false)"); 135 + 136 + module_param_named(enable_pc8, i915.enable_pc8, int, 0600); 137 + MODULE_PARM_DESC(enable_pc8, 138 + "Enable support for low power package C states (PC8+) (default: true)"); 139 + 140 + module_param_named(pc8_timeout, i915.pc8_timeout, int, 0600); 141 + MODULE_PARM_DESC(pc8_timeout, 142 + "Number of msecs of idleness required to enter PC8+ (default: 5000)"); 143 + 144 + module_param_named(prefault_disable, i915.prefault_disable, bool, 0600); 145 + MODULE_PARM_DESC(prefault_disable, 146 + "Disable page prefaulting for pread/pwrite/reloc (default:false). 
" 147 + "For developers only."); 148 + 149 + module_param_named(invert_brightness, i915.invert_brightness, int, 0600); 150 + MODULE_PARM_DESC(invert_brightness, 151 + "Invert backlight brightness " 152 + "(-1 force normal, 0 machine defaults, 1 force inversion), please " 153 + "report PCI device ID, subsystem vendor and subsystem device ID " 154 + "to dri-devel@lists.freedesktop.org, if your machine needs it. " 155 + "It will then be included in an upcoming module version.");
+192 -138
drivers/gpu/drm/i915/i915_reg.h
··· 26 26 #define _I915_REG_H_ 27 27 28 28 #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) 29 - #define _PIPE_INC(pipe, base, inc) ((base) + (pipe)*(inc)) 30 29 #define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a))) 31 30 32 31 #define _PORT(port, a, b) ((a) + (port)*((b)-(a))) ··· 72 73 #define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0) 73 74 #define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) 74 75 #define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) 75 - #define LBB 0xf4 76 + #define PCI_LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */ 77 + 76 78 77 79 /* Graphics reset regs */ 78 80 #define I965_GDRST 0xc0 /* PCI config register */ ··· 934 934 #define ECO_GATING_CX_ONLY (1<<3) 935 935 #define ECO_FLIP_DONE (1<<0) 936 936 937 + #define CACHE_MODE_0_GEN7 0x7000 /* IVB+ */ 938 + #define HIZ_RAW_STALL_OPT_DISABLE (1<<2) 937 939 #define CACHE_MODE_1 0x7004 /* IVB+ */ 938 940 #define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6) 939 941 ··· 1048 1046 #define FBC_CTL_IDLE_LINE (2<<2) 1049 1047 #define FBC_CTL_IDLE_DEBUG (3<<2) 1050 1048 #define FBC_CTL_CPU_FENCE (1<<1) 1051 - #define FBC_CTL_PLANEA (0<<0) 1052 - #define FBC_CTL_PLANEB (1<<0) 1053 - #define FBC_FENCE_OFF 0x0321b 1049 + #define FBC_CTL_PLANE(plane) ((plane)<<0) 1050 + #define FBC_FENCE_OFF 0x03218 /* BSpec typo has 321Bh */ 1054 1051 #define FBC_TAG 0x03300 1055 1052 1056 1053 #define FBC_LL_SIZE (1536) ··· 1058 1057 #define DPFC_CB_BASE 0x3200 1059 1058 #define DPFC_CONTROL 0x3208 1060 1059 #define DPFC_CTL_EN (1<<31) 1061 - #define DPFC_CTL_PLANEA (0<<30) 1062 - #define DPFC_CTL_PLANEB (1<<30) 1063 - #define IVB_DPFC_CTL_PLANE_SHIFT (29) 1060 + #define DPFC_CTL_PLANE(plane) ((plane)<<30) 1061 + #define IVB_DPFC_CTL_PLANE(plane) ((plane)<<29) 1064 1062 #define DPFC_CTL_FENCE_EN (1<<29) 1065 1063 #define IVB_DPFC_CTL_FENCE_EN (1<<28) 1066 1064 #define DPFC_CTL_PERSISTENT_MODE (1<<25) ··· 1202 1202 /* 1203 1203 * Clock control & power management 1204 1204 */ 1205 + #define DPLL_A_OFFSET 0x6014 1206 + #define DPLL_B_OFFSET 0x6018 1207 + #define DPLL(pipe) (dev_priv->info->dpll_offsets[pipe] + \ 1208 + dev_priv->info->display_mmio_offset) 1205 1209 1206 1210 #define VGA0 0x6000 1207 1211 #define VGA1 0x6004 ··· 1218 1214 #define VGA1_PD_P1_DIV_2 (1 << 13) 1219 1215 #define VGA1_PD_P1_SHIFT 8 1220 1216 #define VGA1_PD_P1_MASK (0x1f << 8) 1221 - #define _DPLL_A (dev_priv->info->display_mmio_offset + 0x6014) 1222 - #define _DPLL_B (dev_priv->info->display_mmio_offset + 0x6018) 1223 - #define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B) 1224 1217 #define DPLL_VCO_ENABLE (1 << 31) 1225 1218 #define DPLL_SDVO_HIGH_SPEED (1 << 30) 1226 1219 #define DPLL_DVO_2X_MODE (1 << 30) ··· 1279 1278 #define SDVO_MULTIPLIER_MASK 0x000000ff 1280 1279 #define SDVO_MULTIPLIER_SHIFT_HIRES 4 1281 1280 #define SDVO_MULTIPLIER_SHIFT_VGA 0 1282 - #define _DPLL_A_MD (dev_priv->info->display_mmio_offset + 0x601c) /* 965+ only */ 1281 + 1282 + #define DPLL_A_MD_OFFSET 0x601c /* 965+ only */ 1283 + #define DPLL_B_MD_OFFSET 0x6020 /* 965+ only */ 1284 + #define DPLL_MD(pipe) (dev_priv->info->dpll_md_offsets[pipe] + \ 1285 + dev_priv->info->display_mmio_offset) 1286 + 1283 1287 /* 1284 1288 * UDI pixel divider, controlling how many pixels are stuffed into a packet. 
1285 1289 * ··· 1321 1315 */ 1322 1316 #define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f 1323 1317 #define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 1324 - #define _DPLL_B_MD (dev_priv->info->display_mmio_offset + 0x6020) /* 965+ only */ 1325 - #define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD) 1326 1318 1327 1319 #define _FPA0 0x06040 1328 1320 #define _FPA1 0x06044 ··· 1476 1472 /* 1477 1473 * Palette regs 1478 1474 */ 1479 - 1480 - #define _PALETTE_A (dev_priv->info->display_mmio_offset + 0xa000) 1481 - #define _PALETTE_B (dev_priv->info->display_mmio_offset + 0xa800) 1482 - #define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B) 1475 + #define PALETTE_A_OFFSET 0xa000 1476 + #define PALETTE_B_OFFSET 0xa800 1477 + #define PALETTE(pipe) (dev_priv->info->palette_offsets[pipe] + \ 1478 + dev_priv->info->display_mmio_offset) 1483 1479 1484 1480 /* MCH MMIO space */ 1485 1481 ··· 1866 1862 */ 1867 1863 1868 1864 /* Pipe A CRC regs */ 1869 - #define _PIPE_CRC_CTL_A (dev_priv->info->display_mmio_offset + 0x60050) 1865 + #define _PIPE_CRC_CTL_A 0x60050 1870 1866 #define PIPE_CRC_ENABLE (1 << 31) 1871 1867 /* ivb+ source selection */ 1872 1868 #define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29) ··· 1906 1902 #define _PIPE_CRC_RES_4_A_IVB 0x60070 1907 1903 #define _PIPE_CRC_RES_5_A_IVB 0x60074 1908 1904 1909 - #define _PIPE_CRC_RES_RED_A (dev_priv->info->display_mmio_offset + 0x60060) 1910 - #define _PIPE_CRC_RES_GREEN_A (dev_priv->info->display_mmio_offset + 0x60064) 1911 - #define _PIPE_CRC_RES_BLUE_A (dev_priv->info->display_mmio_offset + 0x60068) 1912 - #define _PIPE_CRC_RES_RES1_A_I915 (dev_priv->info->display_mmio_offset + 0x6006c) 1913 - #define _PIPE_CRC_RES_RES2_A_G4X (dev_priv->info->display_mmio_offset + 0x60080) 1905 + #define _PIPE_CRC_RES_RED_A 0x60060 1906 + #define _PIPE_CRC_RES_GREEN_A 0x60064 1907 + #define _PIPE_CRC_RES_BLUE_A 0x60068 1908 + #define _PIPE_CRC_RES_RES1_A_I915 0x6006c 1909 + #define _PIPE_CRC_RES_RES2_A_G4X 0x60080 1914 1910 1915 1911 /* Pipe B CRC regs */ 1916 1912 #define _PIPE_CRC_RES_1_B_IVB 0x61064 ··· 1919 1915 #define _PIPE_CRC_RES_4_B_IVB 0x61070 1920 1916 #define _PIPE_CRC_RES_5_B_IVB 0x61074 1921 1917 1922 - #define PIPE_CRC_CTL(pipe) _PIPE_INC(pipe, _PIPE_CRC_CTL_A, 0x01000) 1918 + #define PIPE_CRC_CTL(pipe) _TRANSCODER2(pipe, _PIPE_CRC_CTL_A) 1923 1919 #define PIPE_CRC_RES_1_IVB(pipe) \ 1924 - _PIPE(pipe, _PIPE_CRC_RES_1_A_IVB, _PIPE_CRC_RES_1_B_IVB) 1920 + _TRANSCODER2(pipe, _PIPE_CRC_RES_1_A_IVB) 1925 1921 #define PIPE_CRC_RES_2_IVB(pipe) \ 1926 - _PIPE(pipe, _PIPE_CRC_RES_2_A_IVB, _PIPE_CRC_RES_2_B_IVB) 1922 + _TRANSCODER2(pipe, _PIPE_CRC_RES_2_A_IVB) 1927 1923 #define PIPE_CRC_RES_3_IVB(pipe) \ 1928 - _PIPE(pipe, _PIPE_CRC_RES_3_A_IVB, _PIPE_CRC_RES_3_B_IVB) 1924 + _TRANSCODER2(pipe, _PIPE_CRC_RES_3_A_IVB) 1929 1925 #define PIPE_CRC_RES_4_IVB(pipe) \ 1930 - _PIPE(pipe, _PIPE_CRC_RES_4_A_IVB, _PIPE_CRC_RES_4_B_IVB) 1926 + _TRANSCODER2(pipe, _PIPE_CRC_RES_4_A_IVB) 1931 1927 #define PIPE_CRC_RES_5_IVB(pipe) \ 1932 - _PIPE(pipe, _PIPE_CRC_RES_5_A_IVB, _PIPE_CRC_RES_5_B_IVB) 1928 + _TRANSCODER2(pipe, _PIPE_CRC_RES_5_A_IVB) 1933 1929 1934 1930 #define PIPE_CRC_RES_RED(pipe) \ 1935 - _PIPE_INC(pipe, _PIPE_CRC_RES_RED_A, 0x01000) 1931 + _TRANSCODER2(pipe, _PIPE_CRC_RES_RED_A) 1936 1932 #define PIPE_CRC_RES_GREEN(pipe) \ 1937 - _PIPE_INC(pipe, _PIPE_CRC_RES_GREEN_A, 0x01000) 1933 + _TRANSCODER2(pipe, _PIPE_CRC_RES_GREEN_A) 1938 1934 #define PIPE_CRC_RES_BLUE(pipe) \ 1939 - _PIPE_INC(pipe, _PIPE_CRC_RES_BLUE_A, 0x01000) 1935 + _TRANSCODER2(pipe, 
_PIPE_CRC_RES_BLUE_A) 1940 1936 #define PIPE_CRC_RES_RES1_I915(pipe) \ 1941 - _PIPE_INC(pipe, _PIPE_CRC_RES_RES1_A_I915, 0x01000) 1937 + _TRANSCODER2(pipe, _PIPE_CRC_RES_RES1_A_I915) 1942 1938 #define PIPE_CRC_RES_RES2_G4X(pipe) \ 1943 - _PIPE_INC(pipe, _PIPE_CRC_RES_RES2_A_G4X, 0x01000) 1939 + _TRANSCODER2(pipe, _PIPE_CRC_RES_RES2_A_G4X) 1944 1940 1945 1941 /* Pipe A timing regs */ 1946 - #define _HTOTAL_A (dev_priv->info->display_mmio_offset + 0x60000) 1947 - #define _HBLANK_A (dev_priv->info->display_mmio_offset + 0x60004) 1948 - #define _HSYNC_A (dev_priv->info->display_mmio_offset + 0x60008) 1949 - #define _VTOTAL_A (dev_priv->info->display_mmio_offset + 0x6000c) 1950 - #define _VBLANK_A (dev_priv->info->display_mmio_offset + 0x60010) 1951 - #define _VSYNC_A (dev_priv->info->display_mmio_offset + 0x60014) 1952 - #define _PIPEASRC (dev_priv->info->display_mmio_offset + 0x6001c) 1953 - #define _BCLRPAT_A (dev_priv->info->display_mmio_offset + 0x60020) 1954 - #define _VSYNCSHIFT_A (dev_priv->info->display_mmio_offset + 0x60028) 1942 + #define _HTOTAL_A 0x60000 1943 + #define _HBLANK_A 0x60004 1944 + #define _HSYNC_A 0x60008 1945 + #define _VTOTAL_A 0x6000c 1946 + #define _VBLANK_A 0x60010 1947 + #define _VSYNC_A 0x60014 1948 + #define _PIPEASRC 0x6001c 1949 + #define _BCLRPAT_A 0x60020 1950 + #define _VSYNCSHIFT_A 0x60028 1955 1951 1956 1952 /* Pipe B timing regs */ 1957 - #define _HTOTAL_B (dev_priv->info->display_mmio_offset + 0x61000) 1958 - #define _HBLANK_B (dev_priv->info->display_mmio_offset + 0x61004) 1959 - #define _HSYNC_B (dev_priv->info->display_mmio_offset + 0x61008) 1960 - #define _VTOTAL_B (dev_priv->info->display_mmio_offset + 0x6100c) 1961 - #define _VBLANK_B (dev_priv->info->display_mmio_offset + 0x61010) 1962 - #define _VSYNC_B (dev_priv->info->display_mmio_offset + 0x61014) 1963 - #define _PIPEBSRC (dev_priv->info->display_mmio_offset + 0x6101c) 1964 - #define _BCLRPAT_B (dev_priv->info->display_mmio_offset + 0x61020) 1965 - #define _VSYNCSHIFT_B (dev_priv->info->display_mmio_offset + 0x61028) 1953 + #define _HTOTAL_B 0x61000 1954 + #define _HBLANK_B 0x61004 1955 + #define _HSYNC_B 0x61008 1956 + #define _VTOTAL_B 0x6100c 1957 + #define _VBLANK_B 0x61010 1958 + #define _VSYNC_B 0x61014 1959 + #define _PIPEBSRC 0x6101c 1960 + #define _BCLRPAT_B 0x61020 1961 + #define _VSYNCSHIFT_B 0x61028 1966 1962 1967 - #define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B) 1968 - #define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B) 1969 - #define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B) 1970 - #define VTOTAL(trans) _TRANSCODER(trans, _VTOTAL_A, _VTOTAL_B) 1971 - #define VBLANK(trans) _TRANSCODER(trans, _VBLANK_A, _VBLANK_B) 1972 - #define VSYNC(trans) _TRANSCODER(trans, _VSYNC_A, _VSYNC_B) 1973 - #define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B) 1974 - #define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B) 1963 + #define TRANSCODER_A_OFFSET 0x60000 1964 + #define TRANSCODER_B_OFFSET 0x61000 1965 + #define TRANSCODER_C_OFFSET 0x62000 1966 + #define TRANSCODER_EDP_OFFSET 0x6f000 1967 + 1968 + #define _TRANSCODER2(pipe, reg) (dev_priv->info->trans_offsets[(pipe)] - \ 1969 + dev_priv->info->trans_offsets[TRANSCODER_A] + (reg) + \ 1970 + dev_priv->info->display_mmio_offset) 1971 + 1972 + #define HTOTAL(trans) _TRANSCODER2(trans, _HTOTAL_A) 1973 + #define HBLANK(trans) _TRANSCODER2(trans, _HBLANK_A) 1974 + #define HSYNC(trans) _TRANSCODER2(trans, _HSYNC_A) 1975 + #define VTOTAL(trans) _TRANSCODER2(trans, _VTOTAL_A) 1976 + #define 
VBLANK(trans) _TRANSCODER2(trans, _VBLANK_A) 1977 + #define VSYNC(trans) _TRANSCODER2(trans, _VSYNC_A) 1978 + #define BCLRPAT(trans) _TRANSCODER2(trans, _BCLRPAT_A) 1979 + #define VSYNCSHIFT(trans) _TRANSCODER2(trans, _VSYNCSHIFT_A) 1980 + #define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC) 1975 1981 1976 1982 /* HSW+ eDP PSR registers */ 1977 1983 #define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800) ··· 3192 3178 /* Display & cursor control */ 3193 3179 3194 3180 /* Pipe A */ 3195 - #define _PIPEADSL (dev_priv->info->display_mmio_offset + 0x70000) 3181 + #define _PIPEADSL 0x70000 3196 3182 #define DSL_LINEMASK_GEN2 0x00000fff 3197 3183 #define DSL_LINEMASK_GEN3 0x00001fff 3198 - #define _PIPEACONF (dev_priv->info->display_mmio_offset + 0x70008) 3184 + #define _PIPEACONF 0x70008 3199 3185 #define PIPECONF_ENABLE (1<<31) 3200 3186 #define PIPECONF_DISABLE 0 3201 3187 #define PIPECONF_DOUBLE_WIDE (1<<30) ··· 3238 3224 #define PIPECONF_DITHER_TYPE_ST1 (1<<2) 3239 3225 #define PIPECONF_DITHER_TYPE_ST2 (2<<2) 3240 3226 #define PIPECONF_DITHER_TYPE_TEMP (3<<2) 3241 - #define _PIPEASTAT (dev_priv->info->display_mmio_offset + 0x70024) 3227 + #define _PIPEASTAT 0x70024 3242 3228 #define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) 3243 - #define SPRITE1_FLIPDONE_INT_EN_VLV (1UL<<30) 3229 + #define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL<<30) 3244 3230 #define PIPE_CRC_ERROR_ENABLE (1UL<<29) 3245 3231 #define PIPE_CRC_DONE_ENABLE (1UL<<28) 3246 3232 #define PIPE_GMBUS_EVENT_ENABLE (1UL<<27) ··· 3258 3244 #define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17) 3259 3245 #define PIPEA_HBLANK_INT_EN_VLV (1UL<<16) 3260 3246 #define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16) 3261 - #define SPRITE1_FLIPDONE_INT_STATUS_VLV (1UL<<15) 3262 - #define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<14) 3247 + #define SPRITE1_FLIP_DONE_INT_STATUS_VLV (1UL<<15) 3248 + #define SPRITE0_FLIP_DONE_INT_STATUS_VLV (1UL<<14) 3263 3249 #define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) 3264 3250 #define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12) 3265 3251 #define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11) 3266 - #define PLANE_FLIPDONE_INT_STATUS_VLV (1UL<<10) 3252 + #define PLANE_FLIP_DONE_INT_STATUS_VLV (1UL<<10) 3267 3253 #define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10) 3268 3254 #define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9) 3269 3255 #define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) ··· 3276 3262 #define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1) 3277 3263 #define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0) 3278 3264 3279 - #define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC) 3280 - #define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF) 3281 - #define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL) 3282 - #define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH) 3283 - #define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL) 3284 - #define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT) 3265 + #define PIPE_A_OFFSET 0x70000 3266 + #define PIPE_B_OFFSET 0x71000 3267 + #define PIPE_C_OFFSET 0x72000 3268 + /* 3269 + * There's actually no pipe EDP. Some pipe registers have 3270 + * simply shifted from the pipe to the transcoder, while 3271 + * keeping their original offset. Thus we need PIPE_EDP_OFFSET 3272 + * to access such registers in transcoder EDP. 
3273 + */ 3274 + #define PIPE_EDP_OFFSET 0x7f000 3275 + 3276 + #define _PIPE2(pipe, reg) (dev_priv->info->pipe_offsets[pipe] - \ 3277 + dev_priv->info->pipe_offsets[PIPE_A] + (reg) + \ 3278 + dev_priv->info->display_mmio_offset) 3279 + 3280 + #define PIPECONF(pipe) _PIPE2(pipe, _PIPEACONF) 3281 + #define PIPEDSL(pipe) _PIPE2(pipe, _PIPEADSL) 3282 + #define PIPEFRAME(pipe) _PIPE2(pipe, _PIPEAFRAMEHIGH) 3283 + #define PIPEFRAMEPIXEL(pipe) _PIPE2(pipe, _PIPEAFRAMEPIXEL) 3284 + #define PIPESTAT(pipe) _PIPE2(pipe, _PIPEASTAT) 3285 3285 3286 3286 #define _PIPE_MISC_A 0x70030 3287 3287 #define _PIPE_MISC_B 0x71030 ··· 3307 3279 #define PIPEMISC_DITHER_ENABLE (1<<4) 3308 3280 #define PIPEMISC_DITHER_TYPE_MASK (3<<2) 3309 3281 #define PIPEMISC_DITHER_TYPE_SP (0<<2) 3310 - #define PIPEMISC(pipe) _PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B) 3282 + #define PIPEMISC(pipe) _PIPE2(pipe, _PIPE_MISC_A) 3311 3283 3312 3284 #define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028) 3313 3285 #define PIPEB_LINE_COMPARE_INT_EN (1<<29) 3314 3286 #define PIPEB_HLINE_INT_EN (1<<28) 3315 3287 #define PIPEB_VBLANK_INT_EN (1<<27) 3316 - #define SPRITED_FLIPDONE_INT_EN (1<<26) 3317 - #define SPRITEC_FLIPDONE_INT_EN (1<<25) 3318 - #define PLANEB_FLIPDONE_INT_EN (1<<24) 3288 + #define SPRITED_FLIP_DONE_INT_EN (1<<26) 3289 + #define SPRITEC_FLIP_DONE_INT_EN (1<<25) 3290 + #define PLANEB_FLIP_DONE_INT_EN (1<<24) 3319 3291 #define PIPEA_LINE_COMPARE_INT_EN (1<<21) 3320 3292 #define PIPEA_HLINE_INT_EN (1<<20) 3321 3293 #define PIPEA_VBLANK_INT_EN (1<<19) 3322 - #define SPRITEB_FLIPDONE_INT_EN (1<<18) 3323 - #define SPRITEA_FLIPDONE_INT_EN (1<<17) 3294 + #define SPRITEB_FLIP_DONE_INT_EN (1<<18) 3295 + #define SPRITEA_FLIP_DONE_INT_EN (1<<17) 3324 3296 #define PLANEA_FLIPDONE_INT_EN (1<<16) 3325 3297 3326 3298 #define DPINVGTT (VLV_DISPLAY_BASE + 0x7002c) /* VLV only */ ··· 3548 3520 #define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB) 3549 3521 3550 3522 /* Display A control */ 3551 - #define _DSPACNTR (dev_priv->info->display_mmio_offset + 0x70180) 3523 + #define _DSPACNTR 0x70180 3552 3524 #define DISPLAY_PLANE_ENABLE (1<<31) 3553 3525 #define DISPLAY_PLANE_DISABLE 0 3554 3526 #define DISPPLANE_GAMMA_ENABLE (1<<30) ··· 3582 3554 #define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) 3583 3555 #define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */ 3584 3556 #define DISPPLANE_TILED (1<<10) 3585 - #define _DSPAADDR (dev_priv->info->display_mmio_offset + 0x70184) 3586 - #define _DSPASTRIDE (dev_priv->info->display_mmio_offset + 0x70188) 3587 - #define _DSPAPOS (dev_priv->info->display_mmio_offset + 0x7018C) /* reserved */ 3588 - #define _DSPASIZE (dev_priv->info->display_mmio_offset + 0x70190) 3589 - #define _DSPASURF (dev_priv->info->display_mmio_offset + 0x7019C) /* 965+ only */ 3590 - #define _DSPATILEOFF (dev_priv->info->display_mmio_offset + 0x701A4) /* 965+ only */ 3591 - #define _DSPAOFFSET (dev_priv->info->display_mmio_offset + 0x701A4) /* HSW */ 3592 - #define _DSPASURFLIVE (dev_priv->info->display_mmio_offset + 0x701AC) 3557 + #define _DSPAADDR 0x70184 3558 + #define _DSPASTRIDE 0x70188 3559 + #define _DSPAPOS 0x7018C /* reserved */ 3560 + #define _DSPASIZE 0x70190 3561 + #define _DSPASURF 0x7019C /* 965+ only */ 3562 + #define _DSPATILEOFF 0x701A4 /* 965+ only */ 3563 + #define _DSPAOFFSET 0x701A4 /* HSW */ 3564 + #define _DSPASURFLIVE 0x701AC 3593 3565 3594 - #define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR) 3595 - #define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR) 3596 - #define DSPSTRIDE(plane) 
_PIPE(plane, _DSPASTRIDE, _DSPBSTRIDE) 3597 - #define DSPPOS(plane) _PIPE(plane, _DSPAPOS, _DSPBPOS) 3598 - #define DSPSIZE(plane) _PIPE(plane, _DSPASIZE, _DSPBSIZE) 3599 - #define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF) 3600 - #define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF) 3566 + #define DSPCNTR(plane) _PIPE2(plane, _DSPACNTR) 3567 + #define DSPADDR(plane) _PIPE2(plane, _DSPAADDR) 3568 + #define DSPSTRIDE(plane) _PIPE2(plane, _DSPASTRIDE) 3569 + #define DSPPOS(plane) _PIPE2(plane, _DSPAPOS) 3570 + #define DSPSIZE(plane) _PIPE2(plane, _DSPASIZE) 3571 + #define DSPSURF(plane) _PIPE2(plane, _DSPASURF) 3572 + #define DSPTILEOFF(plane) _PIPE2(plane, _DSPATILEOFF) 3601 3573 #define DSPLINOFF(plane) DSPADDR(plane) 3602 - #define DSPOFFSET(plane) _PIPE(plane, _DSPAOFFSET, _DSPBOFFSET) 3603 - #define DSPSURFLIVE(plane) _PIPE(plane, _DSPASURFLIVE, _DSPBSURFLIVE) 3574 + #define DSPOFFSET(plane) _PIPE2(plane, _DSPAOFFSET) 3575 + #define DSPSURFLIVE(plane) _PIPE2(plane, _DSPASURFLIVE) 3604 3576 3605 3577 /* Display/Sprite base address macros */ 3606 3578 #define DISP_BASEADDR_MASK (0xfffff000) ··· 3894 3866 #define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff 3895 3867 3896 3868 3897 - #define _PIPEA_DATA_M1 (dev_priv->info->display_mmio_offset + 0x60030) 3869 + #define _PIPEA_DATA_M1 0x60030 3898 3870 #define PIPE_DATA_M1_OFFSET 0 3899 - #define _PIPEA_DATA_N1 (dev_priv->info->display_mmio_offset + 0x60034) 3871 + #define _PIPEA_DATA_N1 0x60034 3900 3872 #define PIPE_DATA_N1_OFFSET 0 3901 3873 3902 - #define _PIPEA_DATA_M2 (dev_priv->info->display_mmio_offset + 0x60038) 3874 + #define _PIPEA_DATA_M2 0x60038 3903 3875 #define PIPE_DATA_M2_OFFSET 0 3904 - #define _PIPEA_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6003c) 3876 + #define _PIPEA_DATA_N2 0x6003c 3905 3877 #define PIPE_DATA_N2_OFFSET 0 3906 3878 3907 - #define _PIPEA_LINK_M1 (dev_priv->info->display_mmio_offset + 0x60040) 3879 + #define _PIPEA_LINK_M1 0x60040 3908 3880 #define PIPE_LINK_M1_OFFSET 0 3909 - #define _PIPEA_LINK_N1 (dev_priv->info->display_mmio_offset + 0x60044) 3881 + #define _PIPEA_LINK_N1 0x60044 3910 3882 #define PIPE_LINK_N1_OFFSET 0 3911 3883 3912 - #define _PIPEA_LINK_M2 (dev_priv->info->display_mmio_offset + 0x60048) 3884 + #define _PIPEA_LINK_M2 0x60048 3913 3885 #define PIPE_LINK_M2_OFFSET 0 3914 - #define _PIPEA_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6004c) 3886 + #define _PIPEA_LINK_N2 0x6004c 3915 3887 #define PIPE_LINK_N2_OFFSET 0 3916 3888 3917 3889 /* PIPEB timing regs are same start from 0x61000 */ 3918 3890 3919 - #define _PIPEB_DATA_M1 (dev_priv->info->display_mmio_offset + 0x61030) 3920 - #define _PIPEB_DATA_N1 (dev_priv->info->display_mmio_offset + 0x61034) 3891 + #define _PIPEB_DATA_M1 0x61030 3892 + #define _PIPEB_DATA_N1 0x61034 3893 + #define _PIPEB_DATA_M2 0x61038 3894 + #define _PIPEB_DATA_N2 0x6103c 3895 + #define _PIPEB_LINK_M1 0x61040 3896 + #define _PIPEB_LINK_N1 0x61044 3897 + #define _PIPEB_LINK_M2 0x61048 3898 + #define _PIPEB_LINK_N2 0x6104c 3921 3899 3922 - #define _PIPEB_DATA_M2 (dev_priv->info->display_mmio_offset + 0x61038) 3923 - #define _PIPEB_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6103c) 3924 - 3925 - #define _PIPEB_LINK_M1 (dev_priv->info->display_mmio_offset + 0x61040) 3926 - #define _PIPEB_LINK_N1 (dev_priv->info->display_mmio_offset + 0x61044) 3927 - 3928 - #define _PIPEB_LINK_M2 (dev_priv->info->display_mmio_offset + 0x61048) 3929 - #define _PIPEB_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6104c) 3930 - 3931 - #define 
PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1) 3932 - #define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1) 3933 - #define PIPE_DATA_M2(tran) _TRANSCODER(tran, _PIPEA_DATA_M2, _PIPEB_DATA_M2) 3934 - #define PIPE_DATA_N2(tran) _TRANSCODER(tran, _PIPEA_DATA_N2, _PIPEB_DATA_N2) 3935 - #define PIPE_LINK_M1(tran) _TRANSCODER(tran, _PIPEA_LINK_M1, _PIPEB_LINK_M1) 3936 - #define PIPE_LINK_N1(tran) _TRANSCODER(tran, _PIPEA_LINK_N1, _PIPEB_LINK_N1) 3937 - #define PIPE_LINK_M2(tran) _TRANSCODER(tran, _PIPEA_LINK_M2, _PIPEB_LINK_M2) 3938 - #define PIPE_LINK_N2(tran) _TRANSCODER(tran, _PIPEA_LINK_N2, _PIPEB_LINK_N2) 3900 + #define PIPE_DATA_M1(tran) _TRANSCODER2(tran, _PIPEA_DATA_M1) 3901 + #define PIPE_DATA_N1(tran) _TRANSCODER2(tran, _PIPEA_DATA_N1) 3902 + #define PIPE_DATA_M2(tran) _TRANSCODER2(tran, _PIPEA_DATA_M2) 3903 + #define PIPE_DATA_N2(tran) _TRANSCODER2(tran, _PIPEA_DATA_N2) 3904 + #define PIPE_LINK_M1(tran) _TRANSCODER2(tran, _PIPEA_LINK_M1) 3905 + #define PIPE_LINK_N1(tran) _TRANSCODER2(tran, _PIPEA_LINK_N1) 3906 + #define PIPE_LINK_M2(tran) _TRANSCODER2(tran, _PIPEA_LINK_M2) 3907 + #define PIPE_LINK_N2(tran) _TRANSCODER2(tran, _PIPEA_LINK_N2) 3939 3908 3940 3909 /* CPU panel fitter */ 3941 3910 /* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */ ··· 4145 4120 #define GEN7_MSG_CTL 0x45010 4146 4121 #define WAIT_FOR_PCH_RESET_ACK (1<<1) 4147 4122 #define WAIT_FOR_PCH_FLR_ACK (1<<0) 4123 + #define HSW_NDE_RSTWRN_OPT 0x46408 4124 + #define RESET_PCH_HANDSHAKE_ENABLE (1<<4) 4148 4125 4149 4126 /* GEN7 chicken */ 4150 4127 #define GEN7_COMMON_SLICE_CHICKEN1 0x7010 4151 4128 # define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26)) 4152 4129 #define COMMON_SLICE_CHICKEN2 0x7014 4153 4130 # define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0) 4131 + 4132 + #define GEN7_L3SQCREG1 0xB010 4133 + #define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000 4154 4134 4155 4135 #define GEN7_L3CNTLREG1 0xB01C 4156 4136 #define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C ··· 4466 4436 #define HSW_VIDEO_DIP_GCP_B 0x61210 4467 4437 4468 4438 #define HSW_TVIDEO_DIP_CTL(trans) \ 4469 - _TRANSCODER(trans, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B) 4439 + _TRANSCODER2(trans, HSW_VIDEO_DIP_CTL_A) 4470 4440 #define HSW_TVIDEO_DIP_AVI_DATA(trans) \ 4471 - _TRANSCODER(trans, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B) 4441 + _TRANSCODER2(trans, HSW_VIDEO_DIP_AVI_DATA_A) 4472 4442 #define HSW_TVIDEO_DIP_VS_DATA(trans) \ 4473 - _TRANSCODER(trans, HSW_VIDEO_DIP_VS_DATA_A, HSW_VIDEO_DIP_VS_DATA_B) 4443 + _TRANSCODER2(trans, HSW_VIDEO_DIP_VS_DATA_A) 4474 4444 #define HSW_TVIDEO_DIP_SPD_DATA(trans) \ 4475 - _TRANSCODER(trans, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B) 4445 + _TRANSCODER2(trans, HSW_VIDEO_DIP_SPD_DATA_A) 4476 4446 #define HSW_TVIDEO_DIP_GCP(trans) \ 4477 - _TRANSCODER(trans, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B) 4447 + _TRANSCODER2(trans, HSW_VIDEO_DIP_GCP_A) 4478 4448 #define HSW_TVIDEO_DIP_VSC_DATA(trans) \ 4479 - _TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B) 4449 + _TRANSCODER2(trans, HSW_VIDEO_DIP_VSC_DATA_A) 4480 4450 4481 4451 #define HSW_STEREO_3D_CTL_A 0x70020 4482 4452 #define S3D_ENABLE (1<<31) 4483 4453 #define HSW_STEREO_3D_CTL_B 0x71020 4484 4454 4485 4455 #define HSW_STEREO_3D_CTL(trans) \ 4486 - _TRANSCODER(trans, HSW_STEREO_3D_CTL_A, HSW_STEREO_3D_CTL_A) 4456 + _PIPE2(trans, HSW_STEREO_3D_CTL_A) 4487 4457 4488 4458 #define _PCH_TRANS_HTOTAL_B 0xe1000 4489 4459 #define _PCH_TRANS_HBLANK_B 0xe1004 ··· 
4975 4945 GEN6_PM_RP_DOWN_THRESHOLD | \ 4976 4946 GEN6_PM_RP_DOWN_TIMEOUT) 4977 4947 4948 + #define VLV_GTLC_SURVIVABILITY_REG 0x130098 4949 + #define VLV_GFX_CLK_STATUS_BIT (1<<3) 4950 + #define VLV_GFX_CLK_FORCE_ON_BIT (1<<2) 4951 + 4978 4952 #define GEN6_GT_GFX_RC6_LOCKED 0x138104 4979 4953 #define VLV_COUNTER_CONTROL 0x138104 4980 4954 #define VLV_COUNT_RANGE_HIGH (1<<15) ··· 5212 5178 #define TRANS_DDI_FUNC_CTL_B 0x61400 5213 5179 #define TRANS_DDI_FUNC_CTL_C 0x62400 5214 5180 #define TRANS_DDI_FUNC_CTL_EDP 0x6F400 5215 - #define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER(tran, TRANS_DDI_FUNC_CTL_A, \ 5216 - TRANS_DDI_FUNC_CTL_B) 5181 + #define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER2(tran, TRANS_DDI_FUNC_CTL_A) 5182 + 5217 5183 #define TRANS_DDI_FUNC_ENABLE (1<<31) 5218 5184 /* Those bits are ignored by pipe EDP since it can only connect to DDI A */ 5219 5185 #define TRANS_DDI_PORT_MASK (7<<28) ··· 5345 5311 #define SPLL_PLL_ENABLE (1<<31) 5346 5312 #define SPLL_PLL_SSC (1<<28) 5347 5313 #define SPLL_PLL_NON_SSC (2<<28) 5314 + #define SPLL_PLL_LCPLL (3<<28) 5315 + #define SPLL_PLL_REF_MASK (3<<28) 5348 5316 #define SPLL_PLL_FREQ_810MHz (0<<26) 5349 5317 #define SPLL_PLL_FREQ_1350MHz (1<<26) 5318 + #define SPLL_PLL_FREQ_2700MHz (2<<26) 5319 + #define SPLL_PLL_FREQ_MASK (3<<26) 5350 5320 5351 5321 /* WRPLL */ 5352 5322 #define WRPLL_CTL1 0x46040 ··· 5361 5323 #define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28) 5362 5324 /* WRPLL divider programming */ 5363 5325 #define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0) 5326 + #define WRPLL_DIVIDER_REF_MASK (0xff) 5364 5327 #define WRPLL_DIVIDER_POST(x) ((x)<<8) 5328 + #define WRPLL_DIVIDER_POST_MASK (0x3f<<8) 5329 + #define WRPLL_DIVIDER_POST_SHIFT 8 5365 5330 #define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16) 5331 + #define WRPLL_DIVIDER_FB_SHIFT 16 5332 + #define WRPLL_DIVIDER_FB_MASK (0xff<<16) 5366 5333 5367 5334 /* Port clock selection */ 5368 5335 #define PORT_CLK_SEL_A 0x46100 ··· 5380 5337 #define PORT_CLK_SEL_WRPLL1 (4<<29) 5381 5338 #define PORT_CLK_SEL_WRPLL2 (5<<29) 5382 5339 #define PORT_CLK_SEL_NONE (7<<29) 5340 + #define PORT_CLK_SEL_MASK (7<<29) 5383 5341 5384 5342 /* Transcoder clock selection */ 5385 5343 #define TRANS_CLK_SEL_A 0x46140 ··· 5390 5346 #define TRANS_CLK_SEL_DISABLED (0x0<<29) 5391 5347 #define TRANS_CLK_SEL_PORT(x) ((x+1)<<29) 5392 5348 5393 - #define _TRANSA_MSA_MISC 0x60410 5394 - #define _TRANSB_MSA_MISC 0x61410 5395 - #define TRANS_MSA_MISC(tran) _TRANSCODER(tran, _TRANSA_MSA_MISC, \ 5396 - _TRANSB_MSA_MISC) 5349 + #define TRANSA_MSA_MISC 0x60410 5350 + #define TRANSB_MSA_MISC 0x61410 5351 + #define TRANSC_MSA_MISC 0x62410 5352 + #define TRANS_EDP_MSA_MISC 0x6f410 5353 + #define TRANS_MSA_MISC(tran) _TRANSCODER2(tran, TRANSA_MSA_MISC) 5354 + 5397 5355 #define TRANS_MSA_SYNC_CLK (1<<0) 5398 5356 #define TRANS_MSA_6_BPC (0<<5) 5399 5357 #define TRANS_MSA_8_BPC (1<<5) ··· 5902 5856 #define _MIPIB_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb938) 5903 5857 #define MIPI_READ_DATA_VALID(pipe) _PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID) 5904 5858 #define READ_DATA_VALID(n) (1 << (n)) 5859 + 5860 + /* For UMS only (deprecated): */ 5861 + #define _PALETTE_A (dev_priv->info->display_mmio_offset + 0xa000) 5862 + #define _PALETTE_B (dev_priv->info->display_mmio_offset + 0xa800) 5863 + #define _DPLL_A (dev_priv->info->display_mmio_offset + 0x6014) 5864 + #define _DPLL_B (dev_priv->info->display_mmio_offset + 0x6018) 5865 + #define _DPLL_A_MD (dev_priv->info->display_mmio_offset + 0x601c) 5866 + #define _DPLL_B_MD 
(dev_priv->info->display_mmio_offset + 0x6020) 5905 5867 5906 5868 #endif /* _I915_REG_H_ */
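Most of the i915_reg.h churn above is one refactor: per-pipe and per-transcoder registers move from `_PIPE(pipe, a, b)` pairs and `_PIPE_INC` strides to base offsets looked up in device-info tables (pipe_offsets[], trans_offsets[], dpll_offsets[]), so irregular layouts such as the eDP transcoder at 0x6f000 need no special casing. A sketch of the lookup as standalone C — the table values below are illustrative, with the display base chosen to resemble the VLV case:

#include <stdint.h>
#include <stdio.h>

enum pipe { PIPE_A, PIPE_B, PIPE_C, NUM_PIPES };

struct device_info {
	uint32_t pipe_offsets[NUM_PIPES];
	uint32_t display_mmio_offset;
};

static const struct device_info info = {
	.pipe_offsets        = { 0x70000, 0x71000, 0x72000 },
	.display_mmio_offset = 0x180000,   /* e.g. a VLV-style display base */
};

/* mirrors _PIPE2(pipe, reg): rebase a pipe-A register definition onto
 * the target pipe via the table, then add the display MMIO base */
static uint32_t pipe_reg(enum pipe pipe, uint32_t reg)
{
	return info.pipe_offsets[pipe] - info.pipe_offsets[PIPE_A] +
	       reg + info.display_mmio_offset;
}

int main(void)
{
	/* _PIPEACONF is 0x70008, so pipe B's PIPECONF resolves to
	 * 0x71008 plus the display base: 0x1f1008 */
	printf("PIPECONF(B) = %#x\n", pipe_reg(PIPE_B, 0x70008));
	return 0;
}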
+7 -33
drivers/gpu/drm/i915/i915_suspend.c
··· 236 236 dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR); 237 237 } 238 238 239 - /* Only regfile.save FBC state on the platform that supports FBC */ 240 - if (HAS_FBC(dev)) { 241 - if (HAS_PCH_SPLIT(dev)) { 242 - dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE); 243 - } else if (IS_GM45(dev)) { 244 - dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); 245 - } else { 246 - dev_priv->regfile.saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); 247 - dev_priv->regfile.saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); 248 - dev_priv->regfile.saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); 249 - dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL); 250 - } 251 - } 239 + /* save FBC interval */ 240 + if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev)) 241 + dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL); 252 242 253 243 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 254 244 i915_save_vga(dev); ··· 290 300 291 301 /* only restore FBC info on the platform that supports FBC*/ 292 302 intel_disable_fbc(dev); 293 - if (HAS_FBC(dev)) { 294 - if (HAS_PCH_SPLIT(dev)) { 295 - I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE); 296 - } else if (IS_GM45(dev)) { 297 - I915_WRITE(DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE); 298 - } else { 299 - I915_WRITE(FBC_CFB_BASE, dev_priv->regfile.saveFBC_CFB_BASE); 300 - I915_WRITE(FBC_LL_BASE, dev_priv->regfile.saveFBC_LL_BASE); 301 - I915_WRITE(FBC_CONTROL2, dev_priv->regfile.saveFBC_CONTROL2); 302 - I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL); 303 - } 304 - } 303 + 304 + /* restore FBC interval */ 305 + if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev)) 306 + I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL); 305 307 306 308 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 307 309 i915_restore_vga(dev); ··· 305 323 { 306 324 struct drm_i915_private *dev_priv = dev->dev_private; 307 325 int i; 308 - 309 - if (INTEL_INFO(dev)->gen <= 4) 310 - pci_read_config_byte(dev->pdev, LBB, 311 - &dev_priv->regfile.saveLBB); 312 326 313 327 mutex_lock(&dev->struct_mutex); 314 328 ··· 354 376 { 355 377 struct drm_i915_private *dev_priv = dev->dev_private; 356 378 int i; 357 - 358 - if (INTEL_INFO(dev)->gen <= 4) 359 - pci_write_config_byte(dev->pdev, LBB, 360 - dev_priv->regfile.saveLBB); 361 379 362 380 mutex_lock(&dev->struct_mutex); 363 381
+10
drivers/gpu/drm/i915/i915_sysfs.c
··· 357 357 else 358 358 gen6_set_rps(dev, val); 359 359 } 360 + else if (!IS_VALLEYVIEW(dev)) 361 + /* We still need gen6_set_rps to process the new max_delay 362 + and update the interrupt limits even though frequency 363 + request is unchanged. */ 364 + gen6_set_rps(dev, dev_priv->rps.cur_delay); 360 365 361 366 mutex_unlock(&dev_priv->rps.hw_lock); 362 367 ··· 431 426 else 432 427 gen6_set_rps(dev, val); 433 428 } 429 + else if (!IS_VALLEYVIEW(dev)) 430 + /* We still need gen6_set_rps to process the new min_delay 431 + and update the interrupt limits even though frequency 432 + request is unchanged. */ 433 + gen6_set_rps(dev, dev_priv->rps.cur_delay); 434 434 435 435 mutex_unlock(&dev_priv->rps.hw_lock); 436 436
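The i915_sysfs.c hunks cover a corner case of the rps interrupt-limit rework: writing a new min/max that leaves the current frequency request untouched must still go through gen6_set_rps(), because that path now also reprograms the up/down interrupt thresholds. A standalone sketch of the control flow, with hypothetical names:

#include <stdio.h>

struct rps { int cur_delay, min_delay, max_delay; };

/* stands in for gen6_set_rps(): programs the frequency request AND the
 * interrupt limits derived from the current min/max */
static void set_rps(struct rps *rps, int val)
{
	printf("request=%d, limits=[%d,%d]\n",
	       val, rps->min_delay, rps->max_delay);
	rps->cur_delay = val;
}

static void store_max(struct rps *rps, int new_max)
{
	rps->max_delay = new_max;
	if (rps->cur_delay > new_max)
		set_rps(rps, new_max);          /* clamp the current request */
	else
		set_rps(rps, rps->cur_delay);   /* unchanged request, new limits */
}

int main(void)
{
	struct rps rps = { .cur_delay = 5, .min_delay = 1, .max_delay = 10 };
	store_max(&rps, 8);   /* request stays 5, limits must still be re-sent */
	return 0;
}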
+8
drivers/gpu/drm/i915/i915_ums.c
··· 271 271 /* FIXME: regfile.save TV & SDVO state */ 272 272 273 273 /* Backlight */ 274 + if (INTEL_INFO(dev)->gen <= 4) 275 + pci_read_config_byte(dev->pdev, PCI_LBPC, 276 + &dev_priv->regfile.saveLBB); 277 + 274 278 if (HAS_PCH_SPLIT(dev)) { 275 279 dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1); 276 280 dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2); ··· 297 293 int i; 298 294 299 295 /* Backlight */ 296 + if (INTEL_INFO(dev)->gen <= 4) 297 + pci_write_config_byte(dev->pdev, PCI_LBPC, 298 + dev_priv->regfile.saveLBB); 299 + 300 300 if (HAS_PCH_SPLIT(dev)) { 301 301 I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL); 302 302 I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
+2 -2
drivers/gpu/drm/i915/intel_bios.c
··· 259 259 downclock = dvo_timing->clock; 260 260 } 261 261 262 - if (downclock < panel_dvo_timing->clock && i915_lvds_downclock) { 262 + if (downclock < panel_dvo_timing->clock && i915.lvds_downclock) { 263 263 dev_priv->lvds_downclock_avail = 1; 264 264 dev_priv->lvds_downclock = downclock * 10; 265 265 DRM_DEBUG_KMS("LVDS downclock is found in VBT. " ··· 318 318 struct drm_display_mode *panel_fixed_mode; 319 319 int index; 320 320 321 - index = i915_vbt_sdvo_panel_type; 321 + index = i915.vbt_sdvo_panel_type; 322 322 if (index == -2) { 323 323 DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n"); 324 324 return;
+2
drivers/gpu/drm/i915/intel_crt.c
··· 857 857 858 858 dev_priv->fdi_rx_config = I915_READ(_FDI_RXA_CTL) & fdi_config; 859 859 } 860 + 861 + intel_crt_reset(connector); 860 862 }
+97 -4
drivers/gpu/drm/i915/intel_ddi.c
··· 633 633 /* Otherwise a < c && b >= d, do nothing */ 634 634 } 635 635 636 + static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, 637 + int reg) 638 + { 639 + int refclk = LC_FREQ; 640 + int n, p, r; 641 + u32 wrpll; 642 + 643 + wrpll = I915_READ(reg); 644 + switch (wrpll & SPLL_PLL_REF_MASK) { 645 + case SPLL_PLL_SSC: 646 + case SPLL_PLL_NON_SSC: 647 + /* 648 + * We could calculate spread here, but our checking 649 + * code only cares about 5% accuracy, and spread is a max of 650 + * 0.5% downspread. 651 + */ 652 + refclk = 135; 653 + break; 654 + case SPLL_PLL_LCPLL: 655 + refclk = LC_FREQ; 656 + break; 657 + default: 658 + WARN(1, "bad wrpll refclk\n"); 659 + return 0; 660 + } 661 + 662 + r = wrpll & WRPLL_DIVIDER_REF_MASK; 663 + p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT; 664 + n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT; 665 + 666 + /* Convert to KHz, p & r have a fixed point portion */ 667 + return (refclk * n * 100) / (p * r); 668 + } 669 + 670 + static void intel_ddi_clock_get(struct intel_encoder *encoder, 671 + struct intel_crtc_config *pipe_config) 672 + { 673 + struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 674 + enum port port = intel_ddi_get_encoder_port(encoder); 675 + int link_clock = 0; 676 + u32 val, pll; 677 + 678 + val = I915_READ(PORT_CLK_SEL(port)); 679 + switch (val & PORT_CLK_SEL_MASK) { 680 + case PORT_CLK_SEL_LCPLL_810: 681 + link_clock = 81000; 682 + break; 683 + case PORT_CLK_SEL_LCPLL_1350: 684 + link_clock = 135000; 685 + break; 686 + case PORT_CLK_SEL_LCPLL_2700: 687 + link_clock = 270000; 688 + break; 689 + case PORT_CLK_SEL_WRPLL1: 690 + link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL1); 691 + break; 692 + case PORT_CLK_SEL_WRPLL2: 693 + link_clock = intel_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL2); 694 + break; 695 + case PORT_CLK_SEL_SPLL: 696 + pll = I915_READ(SPLL_CTL) & SPLL_PLL_FREQ_MASK; 697 + if (pll == SPLL_PLL_FREQ_810MHz) 698 + link_clock = 81000; 699 + else if (pll == SPLL_PLL_FREQ_1350MHz) 700 + link_clock = 135000; 701 + else if (pll == SPLL_PLL_FREQ_2700MHz) 702 + link_clock = 270000; 703 + else { 704 + WARN(1, "bad spll freq\n"); 705 + return; 706 + } 707 + break; 708 + default: 709 + WARN(1, "bad port clock sel\n"); 710 + return; 711 + } 712 + 713 + pipe_config->port_clock = link_clock * 2; 714 + 715 + if (pipe_config->has_pch_encoder) 716 + pipe_config->adjusted_mode.crtc_clock = 717 + intel_dotclock_calculate(pipe_config->port_clock, 718 + &pipe_config->fdi_m_n); 719 + else if (pipe_config->has_dp_encoder) 720 + pipe_config->adjusted_mode.crtc_clock = 721 + intel_dotclock_calculate(pipe_config->port_clock, 722 + &pipe_config->dp_m_n); 723 + else 724 + pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock; 725 + } 726 + 636 727 static void 637 728 intel_ddi_calculate_wrpll(int clock /* in Hz */, 638 729 unsigned *r2_out, unsigned *n2_out, unsigned *p_out) ··· 1291 1200 1292 1201 if (type == INTEL_OUTPUT_EDP) { 1293 1202 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1294 - ironlake_edp_panel_on(intel_dp); 1203 + intel_edp_panel_on(intel_dp); 1295 1204 } 1296 1205 1297 1206 WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE); ··· 1335 1244 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { 1336 1245 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1337 1246 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); 1338 - ironlake_edp_panel_off(intel_dp); 1247 + intel_edp_panel_off(intel_dp); 1339 1248 } 1340 1249 
1341 1250 I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE); ··· 1370 1279 if (port == PORT_A) 1371 1280 intel_dp_stop_link_train(intel_dp); 1372 1281 1373 - ironlake_edp_backlight_on(intel_dp); 1282 + intel_edp_backlight_on(intel_dp); 1374 1283 intel_edp_psr_enable(intel_dp); 1375 1284 } 1376 1285 ··· 1403 1312 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1404 1313 1405 1314 intel_edp_psr_disable(intel_dp); 1406 - ironlake_edp_backlight_off(intel_dp); 1315 + intel_edp_backlight_off(intel_dp); 1407 1316 } 1408 1317 } 1409 1318 ··· 1600 1509 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp); 1601 1510 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp; 1602 1511 } 1512 + 1513 + intel_ddi_clock_get(encoder, pipe_config); 1603 1514 } 1604 1515 1605 1516 static void intel_ddi_destroy(struct drm_encoder *encoder)
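intel_ddi_calc_wrpll_link() above recovers the link clock from the WRPLL divider fields as refclk * n * 100 / (p * r), in kHz, and intel_ddi_clock_get() then doubles that into port_clock. A worked instance as standalone C — the divider values here are purely illustrative, not read from real hardware:

#include <stdio.h>

/* mirrors the readout math above; per the driver comment, p and r carry
 * a fixed-point portion, hence the extra factor of 100 */
static int wrpll_link_khz(int refclk, int n, int p, int r)
{
	return (refclk * n * 100) / (p * r);
}

int main(void)
{
	/* hypothetical dividers with the 135 (SSC/non-SSC) reference */
	int link = wrpll_link_khz(135, 32, 2, 2);

	/* prints: link_clock = 108000 kHz, port_clock = 216000 kHz */
	printf("link_clock = %d kHz, port_clock = %d kHz\n", link, 2 * link);
	return 0;
}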
+162 -54
drivers/gpu/drm/i915/intel_display.c
··· 2372 2372 * whether the platform allows pfit disable with pipe active, and only 2373 2373 * then update the pipesrc and pfit state, even on the flip path. 2374 2374 */ 2375 - if (i915_fastboot) { 2375 + if (i915.fastboot) { 2376 2376 const struct drm_display_mode *adjusted_mode = 2377 2377 &intel_crtc->config.adjusted_mode; 2378 2378 ··· 4088 4088 /* Looks like the 200MHz CDclk freq doesn't work on some configs */ 4089 4089 } 4090 4090 4091 - static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv, 4092 - unsigned modeset_pipes, 4093 - struct intel_crtc_config *pipe_config) 4091 + /* compute the max pixel clock for new configuration */ 4092 + static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv) 4094 4093 { 4095 4094 struct drm_device *dev = dev_priv->dev; 4096 4095 struct intel_crtc *intel_crtc; ··· 4097 4098 4098 4099 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, 4099 4100 base.head) { 4100 - if (modeset_pipes & (1 << intel_crtc->pipe)) 4101 + if (intel_crtc->new_enabled) 4101 4102 max_pixclk = max(max_pixclk, 4102 - pipe_config->adjusted_mode.crtc_clock); 4103 - else if (intel_crtc->base.enabled) 4104 - max_pixclk = max(max_pixclk, 4105 - intel_crtc->config.adjusted_mode.crtc_clock); 4103 + intel_crtc->new_config->adjusted_mode.crtc_clock); 4106 4104 } 4107 4105 4108 4106 return max_pixclk; 4109 4107 } 4110 4108 4111 4109 static void valleyview_modeset_global_pipes(struct drm_device *dev, 4112 - unsigned *prepare_pipes, 4113 - unsigned modeset_pipes, 4114 - struct intel_crtc_config *pipe_config) 4110 + unsigned *prepare_pipes) 4115 4111 { 4116 4112 struct drm_i915_private *dev_priv = dev->dev_private; 4117 4113 struct intel_crtc *intel_crtc; 4118 - int max_pixclk = intel_mode_max_pixclk(dev_priv, modeset_pipes, 4119 - pipe_config); 4114 + int max_pixclk = intel_mode_max_pixclk(dev_priv); 4120 4115 int cur_cdclk = valleyview_cur_cdclk(dev_priv); 4121 4116 4122 4117 if (valleyview_calc_cdclk(dev_priv, max_pixclk) == cur_cdclk) 4123 4118 return; 4124 4119 4120 + /* disable/enable all currently active pipes while we change cdclk */ 4125 4121 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, 4126 4122 base.head) 4127 4123 if (intel_crtc->base.enabled) ··· 4126 4132 static void valleyview_modeset_global_resources(struct drm_device *dev) 4127 4133 { 4128 4134 struct drm_i915_private *dev_priv = dev->dev_private; 4129 - int max_pixclk = intel_mode_max_pixclk(dev_priv, 0, NULL); 4135 + int max_pixclk = intel_mode_max_pixclk(dev_priv); 4130 4136 int cur_cdclk = valleyview_cur_cdclk(dev_priv); 4131 4137 int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk); 4132 4138 ··· 4170 4176 4171 4177 intel_update_watermarks(crtc); 4172 4178 intel_enable_pipe(dev_priv, pipe, false, is_dsi); 4179 + intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); 4173 4180 intel_enable_primary_plane(dev_priv, plane, pipe); 4174 4181 intel_enable_planes(crtc); 4175 4182 intel_crtc_update_cursor(crtc, true); ··· 4209 4214 4210 4215 intel_update_watermarks(crtc); 4211 4216 intel_enable_pipe(dev_priv, pipe, false, false); 4217 + intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); 4212 4218 intel_enable_primary_plane(dev_priv, plane, pipe); 4213 4219 intel_enable_planes(crtc); 4214 4220 /* The fixup needs to happen before cursor is enabled */ ··· 4268 4272 intel_disable_planes(crtc); 4269 4273 intel_disable_primary_plane(dev_priv, plane, pipe); 4270 4274 4275 + intel_set_cpu_fifo_underrun_reporting(dev, pipe, false); 4271 4276 intel_disable_pipe(dev_priv, 
pipe); 4272 4277 4273 4278 i9xx_pfit_disable(intel_crtc); ··· 4580 4583 static void hsw_compute_ips_config(struct intel_crtc *crtc, 4581 4584 struct intel_crtc_config *pipe_config) 4582 4585 { 4583 - pipe_config->ips_enabled = i915_enable_ips && 4586 + pipe_config->ips_enabled = i915.enable_ips && 4584 4587 hsw_crtc_supports_ips(crtc) && 4585 4588 pipe_config->pipe_bpp <= 24; 4586 4589 } ··· 4781 4784 4782 4785 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) 4783 4786 { 4784 - if (i915_panel_use_ssc >= 0) 4785 - return i915_panel_use_ssc != 0; 4787 + if (i915.panel_use_ssc >= 0) 4788 + return i915.panel_use_ssc != 0; 4786 4789 return dev_priv->vbt.lvds_use_ssc 4787 4790 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 4788 4791 } ··· 4841 4844 4842 4845 crtc->lowfreq_avail = false; 4843 4846 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && 4844 - reduced_clock && i915_powersave) { 4847 + reduced_clock && i915.powersave) { 4845 4848 I915_WRITE(FP1(pipe), fp2); 4846 4849 crtc->config.dpll_hw_state.fp1 = fp2; 4847 4850 crtc->lowfreq_avail = true; ··· 6345 6348 if (intel_crtc->config.has_dp_encoder) 6346 6349 intel_dp_set_m_n(intel_crtc); 6347 6350 6348 - if (is_lvds && has_reduced_clock && i915_powersave) 6351 + if (is_lvds && has_reduced_clock && i915.powersave) 6349 6352 intel_crtc->lowfreq_avail = true; 6350 6353 else 6351 6354 intel_crtc->lowfreq_avail = false; ··· 6713 6716 return; 6714 6717 6715 6718 schedule_delayed_work(&dev_priv->pc8.enable_work, 6716 - msecs_to_jiffies(i915_pc8_timeout)); 6719 + msecs_to_jiffies(i915.pc8_timeout)); 6717 6720 } 6718 6721 6719 6722 static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv) ··· 6812 6815 if (!HAS_PC8(dev_priv->dev)) 6813 6816 return; 6814 6817 6815 - if (!i915_enable_pc8) 6818 + if (!i915.enable_pc8) 6816 6819 return; 6817 6820 6818 6821 mutex_lock(&dev_priv->pc8.lock); ··· 7852 7855 to_intel_connector(connector)->new_encoder = intel_encoder; 7853 7856 7854 7857 intel_crtc = to_intel_crtc(crtc); 7858 + intel_crtc->new_enabled = true; 7859 + intel_crtc->new_config = &intel_crtc->config; 7855 7860 old->dpms_mode = connector->dpms; 7856 7861 old->load_detect_temp = true; 7857 7862 old->release_fb = NULL; ··· 7877 7878 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n"); 7878 7879 if (IS_ERR(fb)) { 7879 7880 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n"); 7880 - mutex_unlock(&crtc->mutex); 7881 - return false; 7881 + goto fail; 7882 7882 } 7883 7883 7884 7884 if (intel_set_mode(crtc, mode, 0, 0, fb)) { 7885 7885 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); 7886 7886 if (old->release_fb) 7887 7887 old->release_fb->funcs->destroy(old->release_fb); 7888 - mutex_unlock(&crtc->mutex); 7889 - return false; 7888 + goto fail; 7890 7889 } 7891 7890 7892 7891 /* let the connector get through one full cycle before testing */ 7893 7892 intel_wait_for_vblank(dev, intel_crtc->pipe); 7894 7893 return true; 7894 + 7895 + fail: 7896 + intel_crtc->new_enabled = crtc->enabled; 7897 + if (intel_crtc->new_enabled) 7898 + intel_crtc->new_config = &intel_crtc->config; 7899 + else 7900 + intel_crtc->new_config = NULL; 7901 + mutex_unlock(&crtc->mutex); 7902 + return false; 7895 7903 } 7896 7904 7897 7905 void intel_release_load_detect_pipe(struct drm_connector *connector, ··· 7908 7902 intel_attached_encoder(connector); 7909 7903 struct drm_encoder *encoder = &intel_encoder->base; 7910 7904 struct drm_crtc *crtc = encoder->crtc; 7905 + struct intel_crtc *intel_crtc = 
to_intel_crtc(crtc); 7911 7906 7912 7907 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 7913 7908 connector->base.id, drm_get_connector_name(connector), ··· 7917 7910 if (old->load_detect_temp) { 7918 7911 to_intel_connector(connector)->new_encoder = NULL; 7919 7912 intel_encoder->new_crtc = NULL; 7913 + intel_crtc->new_enabled = false; 7914 + intel_crtc->new_config = NULL; 7920 7915 intel_set_mode(crtc, NULL, 0, 0, NULL); 7921 7916 7922 7917 if (old->release_fb) { ··· 8210 8201 8211 8202 hsw_package_c8_gpu_idle(dev_priv); 8212 8203 8213 - if (!i915_powersave) 8204 + if (!i915.powersave) 8214 8205 return; 8215 8206 8216 8207 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { ··· 8230 8221 struct drm_device *dev = obj->base.dev; 8231 8222 struct drm_crtc *crtc; 8232 8223 8233 - if (!i915_powersave) 8224 + if (!i915.powersave) 8234 8225 return; 8235 8226 8236 8227 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { ··· 8775 8766 */ 8776 8767 static void intel_modeset_update_staged_output_state(struct drm_device *dev) 8777 8768 { 8769 + struct intel_crtc *crtc; 8778 8770 struct intel_encoder *encoder; 8779 8771 struct intel_connector *connector; 8780 8772 ··· 8790 8780 encoder->new_crtc = 8791 8781 to_intel_crtc(encoder->base.crtc); 8792 8782 } 8783 + 8784 + list_for_each_entry(crtc, &dev->mode_config.crtc_list, 8785 + base.head) { 8786 + crtc->new_enabled = crtc->base.enabled; 8787 + 8788 + if (crtc->new_enabled) 8789 + crtc->new_config = &crtc->config; 8790 + else 8791 + crtc->new_config = NULL; 8792 + } 8793 8793 } 8794 8794 8795 8795 /** ··· 8809 8789 */ 8810 8790 static void intel_modeset_commit_output_state(struct drm_device *dev) 8811 8791 { 8792 + struct intel_crtc *crtc; 8812 8793 struct intel_encoder *encoder; 8813 8794 struct intel_connector *connector; 8814 8795 ··· 8821 8800 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 8822 8801 base.head) { 8823 8802 encoder->base.crtc = &encoder->new_crtc->base; 8803 + } 8804 + 8805 + list_for_each_entry(crtc, &dev->mode_config.crtc_list, 8806 + base.head) { 8807 + crtc->base.enabled = crtc->new_enabled; 8824 8808 } 8825 8809 } 8826 8810 ··· 9153 9127 *prepare_pipes |= 1 << encoder->new_crtc->pipe; 9154 9128 } 9155 9129 9156 - /* Check for any pipes that will be fully disabled ... */ 9130 + /* Check for pipes that will be enabled/disabled ... */ 9157 9131 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, 9158 9132 base.head) { 9159 - bool used = false; 9160 - 9161 - /* Don't try to disable disabled crtcs. */ 9162 - if (!intel_crtc->base.enabled) 9133 + if (intel_crtc->base.enabled == intel_crtc->new_enabled) 9163 9134 continue; 9164 9135 9165 - list_for_each_entry(encoder, &dev->mode_config.encoder_list, 9166 - base.head) { 9167 - if (encoder->new_crtc == intel_crtc) 9168 - used = true; 9169 - } 9170 - 9171 - if (!used) 9136 + if (!intel_crtc->new_enabled) 9172 9137 *disable_pipes |= 1 << intel_crtc->pipe; 9138 + else 9139 + *prepare_pipes |= 1 << intel_crtc->pipe; 9173 9140 } 9174 9141 9175 9142 9176 9143 /* set_mode is also used to update properties on life display pipes. */ 9177 9144 intel_crtc = to_intel_crtc(crtc); 9178 - if (crtc->enabled) 9145 + if (intel_crtc->new_enabled) 9179 9146 *prepare_pipes |= 1 << intel_crtc->pipe; 9180 9147 9181 9148 /* ··· 9227 9208 9228 9209 intel_modeset_commit_output_state(dev); 9229 9210 9230 - /* Update computed state. */ 9211 + /* Double check state. 
*/ 9231 9212 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, 9232 9213 base.head) { 9233 - intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base); 9214 + WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base)); 9215 + WARN_ON(intel_crtc->new_config && 9216 + intel_crtc->new_config != &intel_crtc->config); 9217 + WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config); 9234 9218 } 9235 9219 9236 9220 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { ··· 9402 9380 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) 9403 9381 PIPE_CONF_CHECK_I(pipe_bpp); 9404 9382 9405 - if (!HAS_DDI(dev)) { 9406 - PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock); 9407 - PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); 9408 - } 9383 + PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock); 9384 + PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); 9409 9385 9410 9386 #undef PIPE_CONF_CHECK_X 9411 9387 #undef PIPE_CONF_CHECK_I ··· 9663 9643 } 9664 9644 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, 9665 9645 "[modeset]"); 9646 + to_intel_crtc(crtc)->new_config = pipe_config; 9666 9647 } 9667 9648 9668 9649 /* ··· 9674 9653 * adjusted_mode bits in the crtc directly. 9675 9654 */ 9676 9655 if (IS_VALLEYVIEW(dev)) { 9677 - valleyview_modeset_global_pipes(dev, &prepare_pipes, 9678 - modeset_pipes, pipe_config); 9656 + valleyview_modeset_global_pipes(dev, &prepare_pipes); 9679 9657 9680 9658 /* may have added more to prepare_pipes than we should */ 9681 9659 prepare_pipes &= ~disable_pipes; ··· 9696 9676 /* mode_set/enable/disable functions rely on a correct pipe 9697 9677 * config. */ 9698 9678 to_intel_crtc(crtc)->config = *pipe_config; 9679 + to_intel_crtc(crtc)->new_config = &to_intel_crtc(crtc)->config; 9699 9680 9700 9681 /* 9701 9682 * Calculate and store various constants which ··· 9767 9746 9768 9747 kfree(config->save_connector_encoders); 9769 9748 kfree(config->save_encoder_crtcs); 9749 + kfree(config->save_crtc_enabled); 9770 9750 kfree(config); 9771 9751 } 9772 9752 9773 9753 static int intel_set_config_save_state(struct drm_device *dev, 9774 9754 struct intel_set_config *config) 9775 9755 { 9756 + struct drm_crtc *crtc; 9776 9757 struct drm_encoder *encoder; 9777 9758 struct drm_connector *connector; 9778 9759 int count; 9760 + 9761 + config->save_crtc_enabled = 9762 + kcalloc(dev->mode_config.num_crtc, 9763 + sizeof(bool), GFP_KERNEL); 9764 + if (!config->save_crtc_enabled) 9765 + return -ENOMEM; 9779 9766 9780 9767 config->save_encoder_crtcs = 9781 9768 kcalloc(dev->mode_config.num_encoder, ··· 9802 9773 * restored, not the drivers personal bookkeeping. 
9803 9774 */ 9804 9775 count = 0; 9776 + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 9777 + config->save_crtc_enabled[count++] = crtc->enabled; 9778 + } 9779 + 9780 + count = 0; 9805 9781 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 9806 9782 config->save_encoder_crtcs[count++] = encoder->crtc; 9807 9783 } ··· 9822 9788 static void intel_set_config_restore_state(struct drm_device *dev, 9823 9789 struct intel_set_config *config) 9824 9790 { 9791 + struct intel_crtc *crtc; 9825 9792 struct intel_encoder *encoder; 9826 9793 struct intel_connector *connector; 9827 9794 int count; 9795 + 9796 + count = 0; 9797 + list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { 9798 + crtc->new_enabled = config->save_crtc_enabled[count++]; 9799 + 9800 + if (crtc->new_enabled) 9801 + crtc->new_config = &crtc->config; 9802 + else 9803 + crtc->new_config = NULL; 9804 + } 9828 9805 9829 9806 count = 0; 9830 9807 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { ··· 9885 9840 struct intel_crtc *intel_crtc = 9886 9841 to_intel_crtc(set->crtc); 9887 9842 9888 - if (intel_crtc->active && i915_fastboot) { 9843 + if (intel_crtc->active && i915.fastboot) { 9889 9844 DRM_DEBUG_KMS("crtc has no fb, will flip\n"); 9890 9845 config->fb_changed = true; 9891 9846 } else { ··· 9921 9876 struct drm_mode_set *set, 9922 9877 struct intel_set_config *config) 9923 9878 { 9924 - struct drm_crtc *new_crtc; 9925 9879 struct intel_connector *connector; 9926 9880 struct intel_encoder *encoder; 9881 + struct intel_crtc *crtc; 9927 9882 int ro; 9928 9883 9929 9884 /* The upper layers ensure that we either disable a crtc or have a list ··· 9966 9921 /* Update crtc of enabled connectors. */ 9967 9922 list_for_each_entry(connector, &dev->mode_config.connector_list, 9968 9923 base.head) { 9924 + struct drm_crtc *new_crtc; 9925 + 9969 9926 if (!connector->new_encoder) 9970 9927 continue; 9971 9928 ··· 10018 9971 } 10019 9972 /* Now we've also updated encoder->new_crtc for all encoders. */ 10020 9973 9974 + list_for_each_entry(crtc, &dev->mode_config.crtc_list, 9975 + base.head) { 9976 + crtc->new_enabled = false; 9977 + 9978 + list_for_each_entry(encoder, 9979 + &dev->mode_config.encoder_list, 9980 + base.head) { 9981 + if (encoder->new_crtc == crtc) { 9982 + crtc->new_enabled = true; 9983 + break; 9984 + } 9985 + } 9986 + 9987 + if (crtc->new_enabled != crtc->base.enabled) { 9988 + DRM_DEBUG_KMS("crtc %sabled, full mode switch\n", 9989 + crtc->new_enabled ? 
"en" : "dis"); 9990 + config->mode_changed = true; 9991 + } 9992 + 9993 + if (crtc->new_enabled) 9994 + crtc->new_config = &crtc->config; 9995 + else 9996 + crtc->new_config = NULL; 9997 + } 9998 + 10021 9999 return 0; 10000 + } 10001 + 10002 + static void disable_crtc_nofb(struct intel_crtc *crtc) 10003 + { 10004 + struct drm_device *dev = crtc->base.dev; 10005 + struct intel_encoder *encoder; 10006 + struct intel_connector *connector; 10007 + 10008 + DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n", 10009 + pipe_name(crtc->pipe)); 10010 + 10011 + list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) { 10012 + if (connector->new_encoder && 10013 + connector->new_encoder->new_crtc == crtc) 10014 + connector->new_encoder = NULL; 10015 + } 10016 + 10017 + list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { 10018 + if (encoder->new_crtc == crtc) 10019 + encoder->new_crtc = NULL; 10020 + } 10021 + 10022 + crtc->new_enabled = false; 10023 + crtc->new_config = NULL; 10022 10024 } 10023 10025 10024 10026 static int intel_crtc_set_config(struct drm_mode_set *set) ··· 10136 10040 * flipping, so increasing its cost here shouldn't be a big 10137 10041 * deal). 10138 10042 */ 10139 - if (i915_fastboot && ret == 0) 10043 + if (i915.fastboot && ret == 0) 10140 10044 intel_modeset_check_state(set->crtc->dev); 10141 10045 } 10142 10046 ··· 10145 10049 set->crtc->base.id, ret); 10146 10050 fail: 10147 10051 intel_set_config_restore_state(dev, config); 10052 + 10053 + /* 10054 + * HACK: if the pipe was on, but we didn't have a framebuffer, 10055 + * force the pipe off to avoid oopsing in the modeset code 10056 + * due to fb==NULL. This should only happen during boot since 10057 + * we don't yet reconstruct the FB from the hardware state. 10058 + */ 10059 + if (to_intel_crtc(save_set.crtc)->new_enabled && !save_set.fb) 10060 + disable_crtc_nofb(to_intel_crtc(save_set.crtc)); 10148 10061 10149 10062 /* Try to restore the config */ 10150 10063 if (config->mode_changed && ··· 10944 10839 10945 10840 /* Acer Aspire 4736Z */ 10946 10841 { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, 10842 + 10843 + /* Acer Aspire 5336 */ 10844 + { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness }, 10947 10845 }; 10948 10846 10949 10847 static void intel_init_quirks(struct drm_device *dev) ··· 10977 10869 u8 sr1; 10978 10870 u32 vga_reg = i915_vgacntrl_reg(dev); 10979 10871 10872 + /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */ 10980 10873 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); 10981 10874 outb(SR01, VGA_SR_INDEX); 10982 10875 sr1 = inb(VGA_SR_DATA); ··· 11374 11265 */ 11375 11266 list_for_each_entry(crtc, &dev->mode_config.crtc_list, 11376 11267 base.head) { 11377 - if (crtc->active && i915_fastboot) { 11268 + if (crtc->active && i915.fastboot) { 11378 11269 intel_crtc_mode_from_pipe_config(crtc, &crtc->config); 11379 11270 11380 11271 DRM_DEBUG_KMS("[CRTC:%d] found active mode: ", ··· 11438 11329 intel_setup_overlay(dev); 11439 11330 11440 11331 mutex_lock(&dev->mode_config.mutex); 11441 - drm_mode_config_reset(dev); 11442 11332 intel_modeset_setup_hw_state(dev, false); 11443 11333 mutex_unlock(&dev->mode_config.mutex); 11444 11334 }
+251 -116
drivers/gpu/drm/i915/intel_dp.c
··· 91 91 } 92 92 93 93 static void intel_dp_link_down(struct intel_dp *intel_dp); 94 + static void edp_panel_vdd_on(struct intel_dp *intel_dp); 95 + static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); 94 96 95 97 static int 96 98 intel_dp_max_link_bw(struct intel_dp *intel_dp) 97 99 { 98 100 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; 101 + struct drm_device *dev = intel_dp->attached_connector->base.dev; 99 102 100 103 switch (max_link_bw) { 101 104 case DP_LINK_BW_1_62: 102 105 case DP_LINK_BW_2_7: 103 106 break; 104 107 case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */ 105 - max_link_bw = DP_LINK_BW_2_7; 108 + if ((IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) && 109 + intel_dp->dpcd[DP_DPCD_REV] >= 0x12) 110 + max_link_bw = DP_LINK_BW_5_4; 111 + else 112 + max_link_bw = DP_LINK_BW_2_7; 106 113 break; 107 114 default: 108 115 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n", ··· 301 294 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp)); 302 295 } 303 296 304 - static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp) 297 + static bool edp_have_panel_power(struct intel_dp *intel_dp) 305 298 { 306 299 struct drm_device *dev = intel_dp_to_dev(intel_dp); 307 300 struct drm_i915_private *dev_priv = dev->dev_private; ··· 309 302 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0; 310 303 } 311 304 312 - static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp) 305 + static bool edp_have_panel_vdd(struct intel_dp *intel_dp) 313 306 { 314 307 struct drm_device *dev = intel_dp_to_dev(intel_dp); 315 308 struct drm_i915_private *dev_priv = dev->dev_private; ··· 326 319 if (!is_edp(intel_dp)) 327 320 return; 328 321 329 - if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) { 322 + if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) { 330 323 WARN(1, "eDP powered off while attempting aux channel communication.\n"); 331 324 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n", 332 325 I915_READ(_pp_stat_reg(intel_dp)), ··· 358 351 return status; 359 352 } 360 353 361 - static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp, 362 - int index) 354 + static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index) 355 + { 356 + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 357 + struct drm_device *dev = intel_dig_port->base.base.dev; 358 + 359 + /* 360 + * The clock divider is based off the hrawclk, and would like to run at 361 + * 2MHz. So, take the hrawclk value and divide by 2 and use that 362 + */ 363 + return index ? 
0 : intel_hrawclk(dev) / 2; 364 + } 365 + 366 + static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) 367 + { 368 + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 369 + struct drm_device *dev = intel_dig_port->base.base.dev; 370 + 371 + if (index) 372 + return 0; 373 + 374 + if (intel_dig_port->port == PORT_A) { 375 + if (IS_GEN6(dev) || IS_GEN7(dev)) 376 + return 200; /* SNB & IVB eDP input clock at 400Mhz */ 377 + else 378 + return 225; /* eDP input clock at 450Mhz */ 379 + } else { 380 + return DIV_ROUND_UP(intel_pch_rawclk(dev), 2); 381 + } 382 + } 383 + 384 + static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index) 363 385 { 364 386 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 365 387 struct drm_device *dev = intel_dig_port->base.base.dev; 366 388 struct drm_i915_private *dev_priv = dev->dev_private; 367 389 368 - /* The clock divider is based off the hrawclk, 369 - * and would like to run at 2MHz. So, take the 370 - * hrawclk value and divide by 2 and use that 371 - * 372 - * Note that PCH attached eDP panels should use a 125MHz input 373 - * clock divider. 374 - */ 375 - if (IS_VALLEYVIEW(dev)) { 376 - return index ? 0 : 100; 377 - } else if (intel_dig_port->port == PORT_A) { 390 + if (intel_dig_port->port == PORT_A) { 378 391 if (index) 379 392 return 0; 380 - if (HAS_DDI(dev)) 381 - return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000); 382 - else if (IS_GEN6(dev) || IS_GEN7(dev)) 383 - return 200; /* SNB & IVB eDP input clock at 400Mhz */ 384 - else 385 - return 225; /* eDP input clock at 450Mhz */ 393 + return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000); 386 394 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { 387 395 /* Workaround for non-ULT HSW */ 388 396 switch (index) { ··· 405 383 case 1: return 72; 406 384 default: return 0; 407 385 } 408 - } else if (HAS_PCH_SPLIT(dev)) { 386 + } else { 409 387 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2); 410 - } else { 411 - return index ? 0 :intel_hrawclk(dev) / 2; 412 388 } 389 + } 390 + 391 + static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index) 392 + { 393 + return index ? 0 : 100; 394 + } 395 + 396 + static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp, 397 + bool has_aux_irq, 398 + int send_bytes, 399 + uint32_t aux_clock_divider) 400 + { 401 + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 402 + struct drm_device *dev = intel_dig_port->base.base.dev; 403 + uint32_t precharge, timeout; 404 + 405 + if (IS_GEN6(dev)) 406 + precharge = 3; 407 + else 408 + precharge = 5; 409 + 410 + if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL) 411 + timeout = DP_AUX_CH_CTL_TIME_OUT_600us; 412 + else 413 + timeout = DP_AUX_CH_CTL_TIME_OUT_400us; 414 + 415 + return DP_AUX_CH_CTL_SEND_BUSY | 416 + DP_AUX_CH_CTL_DONE | 417 + (has_aux_irq ? 
DP_AUX_CH_CTL_INTERRUPT : 0) | 418 + DP_AUX_CH_CTL_TIME_OUT_ERROR | 419 + timeout | 420 + DP_AUX_CH_CTL_RECEIVE_ERROR | 421 + (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 422 + (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | 423 + (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT); 413 424 } 414 425 415 426 static int ··· 458 403 uint32_t aux_clock_divider; 459 404 int i, ret, recv_bytes; 460 405 uint32_t status; 461 - int try, precharge, clock = 0; 406 + int try, clock = 0; 462 407 bool has_aux_irq = HAS_AUX_IRQ(dev); 463 - uint32_t timeout; 464 408 465 409 /* dp aux is extremely sensitive to irq latency, hence request the 466 410 * lowest possible wakeup latency and so prevent the cpu from going into ··· 468 414 pm_qos_update_request(&dev_priv->pm_qos, 0); 469 415 470 416 intel_dp_check_edp(intel_dp); 471 - 472 - if (IS_GEN6(dev)) 473 - precharge = 3; 474 - else 475 - precharge = 5; 476 - 477 - if (IS_BROADWELL(dev) && ch_ctl == DPA_AUX_CH_CTL) 478 - timeout = DP_AUX_CH_CTL_TIME_OUT_600us; 479 - else 480 - timeout = DP_AUX_CH_CTL_TIME_OUT_400us; 481 417 482 418 intel_aux_display_runtime_get(dev_priv); 483 419 ··· 492 448 goto out; 493 449 } 494 450 495 - while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) { 451 + while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) { 452 + u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp, 453 + has_aux_irq, 454 + send_bytes, 455 + aux_clock_divider); 456 + 496 457 /* Must try at least 3 times according to DP spec */ 497 458 for (try = 0; try < 5; try++) { 498 459 /* Load the send data into the aux channel data registers */ ··· 506 457 pack_aux(send + i, send_bytes - i)); 507 458 508 459 /* Send the command and wait for it to complete */ 509 - I915_WRITE(ch_ctl, 510 - DP_AUX_CH_CTL_SEND_BUSY | 511 - (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) | 512 - timeout | 513 - (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 514 - (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | 515 - (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | 516 - DP_AUX_CH_CTL_DONE | 517 - DP_AUX_CH_CTL_TIME_OUT_ERROR | 518 - DP_AUX_CH_CTL_RECEIVE_ERROR); 460 + I915_WRITE(ch_ctl, send_ctl); 519 461 520 462 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq); 521 463 ··· 677 637 int reply_bytes; 678 638 int ret; 679 639 680 - ironlake_edp_panel_vdd_on(intel_dp); 640 + edp_panel_vdd_on(intel_dp); 681 641 intel_dp_check_edp(intel_dp); 682 642 /* Set up the command byte */ 683 643 if (mode & MODE_I2C_READ) ··· 780 740 ret = -EREMOTEIO; 781 741 782 742 out: 783 - ironlake_edp_panel_vdd_off(intel_dp, false); 743 + edp_panel_vdd_off(intel_dp, false); 784 744 return ret; 785 745 } 786 746 ··· 852 812 struct intel_connector *intel_connector = intel_dp->attached_connector; 853 813 int lane_count, clock; 854 814 int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); 855 - int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 
1 : 0; 815 + /* Conveniently, the link BW constants become indices with a shift...*/ 816 + int max_clock = intel_dp_max_link_bw(intel_dp) >> 3; 856 817 int bpp, mode_rate; 857 - static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; 818 + static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 }; 858 819 int link_avail, link_clock; 859 820 860 821 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A) ··· 1056 1015 ironlake_set_pll_cpu_edp(intel_dp); 1057 1016 } 1058 1017 1059 - #define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 1060 - #define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE) 1018 + #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 1019 + #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE) 1061 1020 1062 - #define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 1063 - #define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) 1021 + #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0) 1022 + #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0) 1064 1023 1065 - #define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) 1066 - #define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) 1024 + #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) 1025 + #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) 1067 1026 1068 - static void ironlake_wait_panel_status(struct intel_dp *intel_dp, 1027 + static void wait_panel_status(struct intel_dp *intel_dp, 1069 1028 u32 mask, 1070 1029 u32 value) 1071 1030 { ··· 1090 1049 DRM_DEBUG_KMS("Wait complete\n"); 1091 1050 } 1092 1051 1093 - static void ironlake_wait_panel_on(struct intel_dp *intel_dp) 1052 + static void wait_panel_on(struct intel_dp *intel_dp) 1094 1053 { 1095 1054 DRM_DEBUG_KMS("Wait for panel power on\n"); 1096 - ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); 1055 + wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); 1097 1056 } 1098 1057 1099 - static void ironlake_wait_panel_off(struct intel_dp *intel_dp) 1058 + static void wait_panel_off(struct intel_dp *intel_dp) 1100 1059 { 1101 1060 DRM_DEBUG_KMS("Wait for panel power off time\n"); 1102 - ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE); 1061 + wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE); 1103 1062 } 1104 1063 1105 - static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp) 1064 + static void wait_panel_power_cycle(struct intel_dp *intel_dp) 1106 1065 { 1107 1066 DRM_DEBUG_KMS("Wait for panel power cycle\n"); 1108 - ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); 1067 + 1068 + /* When we disable the VDD override bit last we have to do the manual 1069 + * wait. 
*/ 1070 + wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle, 1071 + intel_dp->panel_power_cycle_delay); 1072 + 1073 + wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); 1109 1074 } 1110 1075 1076 + static void wait_backlight_on(struct intel_dp *intel_dp) 1077 + { 1078 + wait_remaining_ms_from_jiffies(intel_dp->last_power_on, 1079 + intel_dp->backlight_on_delay); 1080 + } 1081 + 1082 + static void edp_wait_backlight_off(struct intel_dp *intel_dp) 1083 + { 1084 + wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off, 1085 + intel_dp->backlight_off_delay); 1086 + } 1111 1087 1112 1088 /* Read the current pp_control value, unlocking the register if it 1113 1089 * is locked ··· 1142 1084 return control; 1143 1085 } 1144 1086 1145 - void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) 1087 + static void edp_panel_vdd_on(struct intel_dp *intel_dp) 1146 1088 { 1147 1089 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1148 1090 struct drm_i915_private *dev_priv = dev->dev_private; ··· 1157 1099 1158 1100 intel_dp->want_panel_vdd = true; 1159 1101 1160 - if (ironlake_edp_have_panel_vdd(intel_dp)) 1102 + if (edp_have_panel_vdd(intel_dp)) 1161 1103 return; 1162 1104 1163 1105 intel_runtime_pm_get(dev_priv); 1164 1106 1165 1107 DRM_DEBUG_KMS("Turning eDP VDD on\n"); 1166 1108 1167 - if (!ironlake_edp_have_panel_power(intel_dp)) 1168 - ironlake_wait_panel_power_cycle(intel_dp); 1109 + if (!edp_have_panel_power(intel_dp)) 1110 + wait_panel_power_cycle(intel_dp); 1169 1111 1170 1112 pp = ironlake_get_pp_control(intel_dp); 1171 1113 pp |= EDP_FORCE_VDD; ··· 1180 1122 /* 1181 1123 * If the panel wasn't on, delay before accessing aux channel 1182 1124 */ 1183 - if (!ironlake_edp_have_panel_power(intel_dp)) { 1125 + if (!edp_have_panel_power(intel_dp)) { 1184 1126 DRM_DEBUG_KMS("eDP was not running\n"); 1185 1127 msleep(intel_dp->panel_power_up_delay); 1186 1128 } 1187 1129 } 1188 1130 1189 - static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp) 1131 + static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) 1190 1132 { 1191 1133 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1192 1134 struct drm_i915_private *dev_priv = dev->dev_private; ··· 1195 1137 1196 1138 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); 1197 1139 1198 - if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) { 1140 + if (!intel_dp->want_panel_vdd && edp_have_panel_vdd(intel_dp)) { 1199 1141 DRM_DEBUG_KMS("Turning eDP VDD off\n"); 1200 1142 1201 1143 pp = ironlake_get_pp_control(intel_dp); ··· 1212 1154 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); 1213 1155 1214 1156 if ((pp & POWER_TARGET_ON) == 0) 1215 - msleep(intel_dp->panel_power_cycle_delay); 1157 + intel_dp->last_power_cycle = jiffies; 1216 1158 1217 1159 intel_runtime_pm_put(dev_priv); 1218 1160 } 1219 1161 } 1220 1162 1221 - static void ironlake_panel_vdd_work(struct work_struct *__work) 1163 + static void edp_panel_vdd_work(struct work_struct *__work) 1222 1164 { 1223 1165 struct intel_dp *intel_dp = container_of(to_delayed_work(__work), 1224 1166 struct intel_dp, panel_vdd_work); 1225 1167 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1226 1168 1227 1169 mutex_lock(&dev->mode_config.mutex); 1228 - ironlake_panel_vdd_off_sync(intel_dp); 1170 + edp_panel_vdd_off_sync(intel_dp); 1229 1171 mutex_unlock(&dev->mode_config.mutex); 1230 1172 } 1231 1173 1232 - void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) 1174 + static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool 
sync) 1233 1175 { 1234 1176 if (!is_edp(intel_dp)) 1235 1177 return; ··· 1239 1181 intel_dp->want_panel_vdd = false; 1240 1182 1241 1183 if (sync) { 1242 - ironlake_panel_vdd_off_sync(intel_dp); 1184 + edp_panel_vdd_off_sync(intel_dp); 1243 1185 } else { 1244 1186 /* 1245 1187 * Queue the timer to fire a long ··· 1251 1193 } 1252 1194 } 1253 1195 1254 - void ironlake_edp_panel_on(struct intel_dp *intel_dp) 1196 + void intel_edp_panel_on(struct intel_dp *intel_dp) 1255 1197 { 1256 1198 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1257 1199 struct drm_i915_private *dev_priv = dev->dev_private; ··· 1263 1205 1264 1206 DRM_DEBUG_KMS("Turn eDP power on\n"); 1265 1207 1266 - if (ironlake_edp_have_panel_power(intel_dp)) { 1208 + if (edp_have_panel_power(intel_dp)) { 1267 1209 DRM_DEBUG_KMS("eDP power already on\n"); 1268 1210 return; 1269 1211 } 1270 1212 1271 - ironlake_wait_panel_power_cycle(intel_dp); 1213 + wait_panel_power_cycle(intel_dp); 1272 1214 1273 1215 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 1274 1216 pp = ironlake_get_pp_control(intel_dp); ··· 1286 1228 I915_WRITE(pp_ctrl_reg, pp); 1287 1229 POSTING_READ(pp_ctrl_reg); 1288 1230 1289 - ironlake_wait_panel_on(intel_dp); 1231 + wait_panel_on(intel_dp); 1232 + intel_dp->last_power_on = jiffies; 1290 1233 1291 1234 if (IS_GEN5(dev)) { 1292 1235 pp |= PANEL_POWER_RESET; /* restore panel reset bit */ ··· 1296 1237 } 1297 1238 } 1298 1239 1299 - void ironlake_edp_panel_off(struct intel_dp *intel_dp) 1240 + void intel_edp_panel_off(struct intel_dp *intel_dp) 1300 1241 { 1301 1242 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1302 1243 struct drm_i915_private *dev_priv = dev->dev_private; ··· 1308 1249 1309 1250 DRM_DEBUG_KMS("Turn eDP power off\n"); 1310 1251 1252 + edp_wait_backlight_off(intel_dp); 1253 + 1311 1254 pp = ironlake_get_pp_control(intel_dp); 1312 1255 /* We need to switch off panel power _and_ force vdd, for otherwise some 1313 1256 * panels get very unhappy and cease to work. */ ··· 1320 1259 I915_WRITE(pp_ctrl_reg, pp); 1321 1260 POSTING_READ(pp_ctrl_reg); 1322 1261 1323 - ironlake_wait_panel_off(intel_dp); 1262 + intel_dp->last_power_cycle = jiffies; 1263 + wait_panel_off(intel_dp); 1324 1264 } 1325 1265 1326 - void ironlake_edp_backlight_on(struct intel_dp *intel_dp) 1266 + void intel_edp_backlight_on(struct intel_dp *intel_dp) 1327 1267 { 1328 1268 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1329 1269 struct drm_device *dev = intel_dig_port->base.base.dev; ··· 1342 1280 * link. So delay a bit to make sure the image is solid before 1343 1281 * allowing it to appear. 
1344 1282 */ 1345 - msleep(intel_dp->backlight_on_delay); 1283 + wait_backlight_on(intel_dp); 1346 1284 pp = ironlake_get_pp_control(intel_dp); 1347 1285 pp |= EDP_BLC_ENABLE; 1348 1286 ··· 1354 1292 intel_panel_enable_backlight(intel_dp->attached_connector); 1355 1293 } 1356 1294 1357 - void ironlake_edp_backlight_off(struct intel_dp *intel_dp) 1295 + void intel_edp_backlight_off(struct intel_dp *intel_dp) 1358 1296 { 1359 1297 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1360 1298 struct drm_i915_private *dev_priv = dev->dev_private; ··· 1374 1312 1375 1313 I915_WRITE(pp_ctrl_reg, pp); 1376 1314 POSTING_READ(pp_ctrl_reg); 1377 - msleep(intel_dp->backlight_off_delay); 1315 + intel_dp->last_backlight_off = jiffies; 1378 1316 } 1379 1317 1380 1318 static void ironlake_edp_pll_on(struct intel_dp *intel_dp) ··· 1659 1597 { 1660 1598 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1661 1599 struct drm_i915_private *dev_priv = dev->dev_private; 1662 - uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp, 0); 1600 + uint32_t aux_clock_divider; 1663 1601 int precharge = 0x3; 1664 1602 int msg_size = 5; /* Header(4) + Message(1) */ 1603 + 1604 + aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); 1665 1605 1666 1606 /* Enable PSR in sink */ 1667 1607 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) ··· 1732 1668 return false; 1733 1669 } 1734 1670 1735 - if (!i915_enable_psr) { 1671 + if (!i915.enable_psr) { 1736 1672 DRM_DEBUG_KMS("PSR disable by flag\n"); 1737 1673 return false; 1738 1674 } ··· 1848 1784 1849 1785 /* Make sure the panel is off before trying to change the mode. But also 1850 1786 * ensure that we have vdd while we switch off the panel. */ 1851 - ironlake_edp_backlight_off(intel_dp); 1787 + intel_edp_backlight_off(intel_dp); 1852 1788 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); 1853 - ironlake_edp_panel_off(intel_dp); 1789 + intel_edp_panel_off(intel_dp); 1854 1790 1855 1791 /* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. 
*/ 1856 1792 if (!(port == PORT_A || IS_VALLEYVIEW(dev))) ··· 1880 1816 if (WARN_ON(dp_reg & DP_PORT_EN)) 1881 1817 return; 1882 1818 1883 - ironlake_edp_panel_vdd_on(intel_dp); 1819 + edp_panel_vdd_on(intel_dp); 1884 1820 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 1885 1821 intel_dp_start_link_train(intel_dp); 1886 - ironlake_edp_panel_on(intel_dp); 1887 - ironlake_edp_panel_vdd_off(intel_dp, true); 1822 + intel_edp_panel_on(intel_dp); 1823 + edp_panel_vdd_off(intel_dp, true); 1888 1824 intel_dp_complete_link_train(intel_dp); 1889 1825 intel_dp_stop_link_train(intel_dp); 1890 1826 } ··· 1894 1830 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1895 1831 1896 1832 intel_enable_dp(encoder); 1897 - ironlake_edp_backlight_on(intel_dp); 1833 + intel_edp_backlight_on(intel_dp); 1898 1834 } 1899 1835 1900 1836 static void vlv_enable_dp(struct intel_encoder *encoder) 1901 1837 { 1902 1838 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1903 1839 1904 - ironlake_edp_backlight_on(intel_dp); 1840 + intel_edp_backlight_on(intel_dp); 1905 1841 } 1906 1842 1907 1843 static void g4x_pre_enable_dp(struct intel_encoder *encoder) ··· 2694 2630 bool channel_eq = false; 2695 2631 int tries, cr_tries; 2696 2632 uint32_t DP = intel_dp->DP; 2633 + uint32_t training_pattern = DP_TRAINING_PATTERN_2; 2634 + 2635 + /* Training Pattern 3 for HBR2 ot 1.2 devices that support it*/ 2636 + if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3) 2637 + training_pattern = DP_TRAINING_PATTERN_3; 2697 2638 2698 2639 /* channel equalization */ 2699 2640 if (!intel_dp_set_link_train(intel_dp, &DP, 2700 - DP_TRAINING_PATTERN_2 | 2641 + training_pattern | 2701 2642 DP_LINK_SCRAMBLING_DISABLE)) { 2702 2643 DRM_ERROR("failed to start channel equalization\n"); 2703 2644 return; ··· 2729 2660 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { 2730 2661 intel_dp_start_link_train(intel_dp); 2731 2662 intel_dp_set_link_train(intel_dp, &DP, 2732 - DP_TRAINING_PATTERN_2 | 2663 + training_pattern | 2733 2664 DP_LINK_SCRAMBLING_DISABLE); 2734 2665 cr_tries++; 2735 2666 continue; ··· 2745 2676 intel_dp_link_down(intel_dp); 2746 2677 intel_dp_start_link_train(intel_dp); 2747 2678 intel_dp_set_link_train(intel_dp, &DP, 2748 - DP_TRAINING_PATTERN_2 | 2679 + training_pattern | 2749 2680 DP_LINK_SCRAMBLING_DISABLE); 2750 2681 tries = 0; 2751 2682 cr_tries++; ··· 2887 2818 } 2888 2819 } 2889 2820 2821 + /* Training Pattern 3 support */ 2822 + if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 && 2823 + intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) { 2824 + intel_dp->use_tps3 = true; 2825 + DRM_DEBUG_KMS("Displayport TPS3 supported"); 2826 + } else 2827 + intel_dp->use_tps3 = false; 2828 + 2890 2829 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & 2891 2830 DP_DWN_STRM_PORT_PRESENT)) 2892 2831 return true; /* native DP sink */ ··· 2918 2841 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) 2919 2842 return; 2920 2843 2921 - ironlake_edp_panel_vdd_on(intel_dp); 2844 + edp_panel_vdd_on(intel_dp); 2922 2845 2923 2846 if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3)) 2924 2847 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n", ··· 2928 2851 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", 2929 2852 buf[0], buf[1], buf[2]); 2930 2853 2931 - ironlake_edp_panel_vdd_off(intel_dp, false); 2854 + edp_panel_vdd_off(intel_dp, false); 2855 + } 2856 + 2857 + int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc) 2858 + { 2859 + struct intel_digital_port *intel_dig_port = 
dp_to_dig_port(intel_dp); 2860 + struct drm_device *dev = intel_dig_port->base.base.dev; 2861 + struct intel_crtc *intel_crtc = 2862 + to_intel_crtc(intel_dig_port->base.base.crtc); 2863 + u8 buf[1]; 2864 + 2865 + if (!intel_dp_aux_native_read(intel_dp, DP_TEST_SINK_MISC, buf, 1)) 2866 + return -EAGAIN; 2867 + 2868 + if (!(buf[0] & DP_TEST_CRC_SUPPORTED)) 2869 + return -ENOTTY; 2870 + 2871 + if (!intel_dp_aux_native_write_1(intel_dp, DP_TEST_SINK, 2872 + DP_TEST_SINK_START)) 2873 + return -EAGAIN; 2874 + 2875 + /* Wait 2 vblanks to be sure we will have the correct CRC value */ 2876 + intel_wait_for_vblank(dev, intel_crtc->pipe); 2877 + intel_wait_for_vblank(dev, intel_crtc->pipe); 2878 + 2879 + if (!intel_dp_aux_native_read(intel_dp, DP_TEST_CRC_R_CR, crc, 6)) 2880 + return -EAGAIN; 2881 + 2882 + intel_dp_aux_native_write_1(intel_dp, DP_TEST_SINK, 0); 2883 + return 0; 2932 2884 } 2933 2885 2934 2886 static bool ··· 3401 3295 if (is_edp(intel_dp)) { 3402 3296 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 3403 3297 mutex_lock(&dev->mode_config.mutex); 3404 - ironlake_panel_vdd_off_sync(intel_dp); 3298 + edp_panel_vdd_off_sync(intel_dp); 3405 3299 mutex_unlock(&dev->mode_config.mutex); 3406 3300 } 3407 3301 kfree(intel_dig_port); ··· 3498 3392 DRM_MODE_SCALE_ASPECT); 3499 3393 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT; 3500 3394 } 3395 + } 3396 + 3397 + static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp) 3398 + { 3399 + intel_dp->last_power_cycle = jiffies; 3400 + intel_dp->last_power_on = jiffies; 3401 + intel_dp->last_backlight_off = jiffies; 3501 3402 } 3502 3403 3503 3404 static void ··· 3629 3516 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe); 3630 3517 } 3631 3518 3632 - /* And finally store the new values in the power sequencer. */ 3519 + /* 3520 + * And finally store the new values in the power sequencer. The 3521 + * backlight delays are set to 1 because we do manual waits on them. For 3522 + * T8, even BSpec recommends doing it. For T9, if we don't do this, 3523 + * we'll end up waiting for the backlight off delay twice: once when we 3524 + * do the manual sleep, and once when we disable the panel and wait for 3525 + * the PP_STATUS bit to become zero. 3526 + */ 3633 3527 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | 3634 - (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT); 3635 - pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) | 3528 + (1 << PANEL_LIGHT_ON_DELAY_SHIFT); 3529 + pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) | 3636 3530 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT); 3637 3531 /* Compute the divisor for the pp clock, simply match the Bspec 3638 3532 * formula. 
*/ ··· 3674 3554 } 3675 3555 3676 3556 static bool intel_edp_init_connector(struct intel_dp *intel_dp, 3677 - struct intel_connector *intel_connector) 3557 + struct intel_connector *intel_connector, 3558 + struct edp_power_seq *power_seq) 3678 3559 { 3679 3560 struct drm_connector *connector = &intel_connector->base; 3680 3561 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3681 3562 struct drm_device *dev = intel_dig_port->base.base.dev; 3682 3563 struct drm_i915_private *dev_priv = dev->dev_private; 3683 3564 struct drm_display_mode *fixed_mode = NULL; 3684 - struct edp_power_seq power_seq = { 0 }; 3685 3565 bool has_dpcd; 3686 3566 struct drm_display_mode *scan; 3687 3567 struct edid *edid; ··· 3689 3569 if (!is_edp(intel_dp)) 3690 3570 return true; 3691 3571 3692 - intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); 3693 - 3694 3572 /* Cache DPCD and EDID for edp. */ 3695 - ironlake_edp_panel_vdd_on(intel_dp); 3573 + edp_panel_vdd_on(intel_dp); 3696 3574 has_dpcd = intel_dp_get_dpcd(intel_dp); 3697 - ironlake_edp_panel_vdd_off(intel_dp, false); 3575 + edp_panel_vdd_off(intel_dp, false); 3698 3576 3699 3577 if (has_dpcd) { 3700 3578 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) ··· 3706 3588 } 3707 3589 3708 3590 /* We now know it's not a ghost, init power sequence regs. */ 3709 - intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, 3710 - &power_seq); 3591 + intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq); 3711 3592 3712 3593 edid = drm_get_edid(connector, &intel_dp->adapter); 3713 3594 if (edid) { ··· 3755 3638 struct drm_device *dev = intel_encoder->base.dev; 3756 3639 struct drm_i915_private *dev_priv = dev->dev_private; 3757 3640 enum port port = intel_dig_port->port; 3641 + struct edp_power_seq power_seq = { 0 }; 3758 3642 const char *name = NULL; 3759 3643 int type, error; 3644 + 3645 + /* intel_dp vfuncs */ 3646 + if (IS_VALLEYVIEW(dev)) 3647 + intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider; 3648 + else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 3649 + intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider; 3650 + else if (HAS_PCH_SPLIT(dev)) 3651 + intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider; 3652 + else 3653 + intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider; 3654 + 3655 + intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl; 3760 3656 3761 3657 /* Preserve the current hw state. 
*/
3762 3658 intel_dp->DP = I915_READ(intel_dp->output_reg);
··· 3799 3669 connector->doublescan_allowed = 0;
3800 3670
3801 3671 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
3802 - ironlake_panel_vdd_work);
3672 + edp_panel_vdd_work);
3803 3673
3804 3674 intel_connector_attach_encoder(intel_connector, intel_encoder);
3805 3675 drm_sysfs_connector_add(connector);
··· 3851 3721 BUG();
3852 3722 }
3853 3723
3724 + if (is_edp(intel_dp)) {
3725 + intel_dp_init_panel_power_timestamps(intel_dp);
3726 + intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
3727 + }
3728 +
3854 3729 error = intel_dp_i2c_init(intel_dp, intel_connector, name);
3855 3730 WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n",
3856 3731 error, port_name(port));
3857 3732
3858 3733 intel_dp->psr_setup_done = false;
3859 3734
3860 3735 if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
3861 3736 i2c_del_adapter(&intel_dp->adapter);
3862 3737 if (is_edp(intel_dp)) {
3863 3738 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
3864 3739 mutex_lock(&dev->mode_config.mutex);
3865 - ironlake_panel_vdd_off_sync(intel_dp);
3740 + edp_panel_vdd_off_sync(intel_dp);
3866 3741 mutex_unlock(&dev->mode_config.mutex);
3867 3742 }
3868 3743 drm_sysfs_connector_remove(connector);
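
A pattern worth calling out in the intel_dp.c diff: the platform if/else ladders for the AUX clock divider become per-platform function pointers installed once at init (get_aux_clock_divider, get_aux_send_ctl). A self-contained sketch of that dispatch; the Valleyview value of 100 is the one the diff shows, the i9xx value below is invented:

#include <stdint.h>
#include <stdio.h>

struct dp {
	/* Installed once at init, as intel_dp_init_connector() now does. */
	uint32_t (*get_aux_clock_divider)(struct dp *dp, int index);
};

static uint32_t vlv_divider(struct dp *dp, int index)
{
	(void)dp;
	return index ? 0 : 100; /* matches vlv_get_aux_clock_divider() */
}

static uint32_t i9xx_divider(struct dp *dp, int index)
{
	(void)dp;
	return index ? 0 : 62; /* the driver derives this from hrawclk */
}

static void dp_init(struct dp *dp, int is_vlv)
{
	dp->get_aux_clock_divider = is_vlv ? vlv_divider : i9xx_divider;
}

int main(void)
{
	struct dp dp;
	uint32_t div;
	int clock = 0;

	dp_init(&dp, 1);
	/* The transfer loop walks dividers until the hook returns 0,
	 * like the loop in intel_dp_aux_ch(). */
	while ((div = dp.get_aux_clock_divider(&dp, clock++)) != 0)
		printf("trying divider %u\n", div);
	return 0;
}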
+22 -6
drivers/gpu/drm/i915/intel_drv.h
··· 359 359 bool cursor_visible;
360 360
361 361 struct intel_crtc_config config;
362 + struct intel_crtc_config *new_config;
363 + bool new_enabled;
362 364
363 365 uint32_t ddi_pll_sel;
364 366
··· 487 485 int backlight_off_delay;
488 486 struct delayed_work panel_vdd_work;
489 487 bool want_panel_vdd;
488 + unsigned long last_power_cycle;
489 + unsigned long last_power_on;
490 + unsigned long last_backlight_off;
490 491 bool psr_setup_done;
492 + bool use_tps3;
491 493 struct intel_connector *attached_connector;
494 +
495 + uint32_t (*get_aux_clock_divider)(struct intel_dp *dp, int index);
496 + /*
497 + * This function returns the value we have to program the AUX_CTL
498 + * register with to kick off an AUX transaction.
499 + */
500 + uint32_t (*get_aux_send_ctl)(struct intel_dp *dp,
501 + bool has_aux_irq,
502 + int send_bytes,
503 + uint32_t aux_clock_divider);
492 504 };
493 505
494 506 struct intel_digital_port {
··· 556 540 struct intel_set_config {
557 541 struct drm_encoder **save_connector_encoders;
558 542 struct drm_crtc **save_encoder_crtcs;
543 + bool *save_crtc_enabled;
559 544
560 545 bool fb_changed;
561 546 bool mode_changed;
··· 738 721 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
739 722 void intel_dp_encoder_destroy(struct drm_encoder *encoder);
740 723 void intel_dp_check_link_status(struct intel_dp *intel_dp);
724 + int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
741 725 bool intel_dp_compute_config(struct intel_encoder *encoder,
742 726 struct intel_crtc_config *pipe_config);
743 727 bool intel_dp_is_edp(struct drm_device *dev, enum port port);
744 - void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
745 - void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
746 - void ironlake_edp_panel_on(struct intel_dp *intel_dp);
747 - void ironlake_edp_panel_off(struct intel_dp *intel_dp);
748 - void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
749 - void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
728 + void intel_edp_backlight_on(struct intel_dp *intel_dp);
729 + void intel_edp_backlight_off(struct intel_dp *intel_dp);
730 + void intel_edp_panel_on(struct intel_dp *intel_dp);
731 + void intel_edp_panel_off(struct intel_dp *intel_dp);
750 732 void intel_edp_psr_enable(struct intel_dp *intel_dp);
751 733 void intel_edp_psr_disable(struct intel_dp *intel_dp);
752 734 void intel_edp_psr_update(struct drm_device *dev);
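
The new last_power_cycle/last_power_on/last_backlight_off fields let the eDP code timestamp an event and later sleep only for whatever is left of a required delay (the wait_remaining_ms_from_jiffies() calls in intel_dp.c). A userspace approximation of the idiom using CLOCK_MONOTONIC, with made-up delay numbers:

#include <stdio.h>
#include <time.h>
#include <unistd.h>

static long ms_since(const struct timespec *t0)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - t0->tv_sec) * 1000 +
	       (now.tv_nsec - t0->tv_nsec) / 1000000;
}

/* Sleep only for the part of delay_ms not already absorbed by other
 * work since *t0; the same contract as wait_remaining_ms_from_jiffies. */
static void wait_remaining_ms(const struct timespec *t0, long delay_ms)
{
	long left = delay_ms - ms_since(t0);

	if (left > 0)
		usleep((useconds_t)left * 1000);
}

int main(void)
{
	struct timespec t0;

	clock_gettime(CLOCK_MONOTONIC, &t0); /* "panel power cycle" starts */
	usleep(30 * 1000);                   /* unrelated work uses 30ms */
	wait_remaining_ms(&t0, 100);         /* so only ~70ms is slept here */
	puts("power cycle delay satisfied");
	return 0;
}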
+2 -2
drivers/gpu/drm/i915/intel_fbdev.c
··· 104 104 return 0;
105 105
106 106 out_unpin:
107 - i915_gem_object_unpin(obj);
107 + i915_gem_object_ggtt_unpin(obj);
108 108 out_unref:
109 109 drm_gem_object_unreference(&obj->base);
110 110 out:
··· 208 208 return 0;
209 209
210 210 out_unpin:
211 - i915_gem_object_unpin(obj);
211 + i915_gem_object_ggtt_unpin(obj);
212 212 drm_gem_object_unreference(&obj->base);
213 213 out_unlock:
214 214 mutex_unlock(&dev->struct_mutex);
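
The unpin rename above is mechanical, but the suffix matters: once an object can be mapped in more than one address space it carries one pin count per mapping, so the caller has to say which mapping it is releasing (an assumption about the wider full-ppgtt series, simplified below). A toy model:

#include <assert.h>
#include <stdio.h>

/* Toy model: one object, one pin count per mapping, so releasing the
 * GGTT pin cannot disturb pins held in another address space. */
struct vma { int pin_count; };
struct obj { struct vma vmas[2]; }; /* [0] = GGTT, [1] = some PPGTT */

static void ggtt_pin(struct obj *o) { o->vmas[0].pin_count++; }

static void ggtt_unpin(struct obj *o)
{
	assert(o->vmas[0].pin_count > 0);
	o->vmas[0].pin_count--;
}

static int obj_is_pinned(const struct obj *o)
{
	int i;

	for (i = 0; i < 2; i++)
		if (o->vmas[i].pin_count > 0)
			return 1;
	return 0;
}

int main(void)
{
	struct obj fb = { { { 0 }, { 0 } } };

	ggtt_pin(&fb);   /* fbdev pins its framebuffer for scanout */
	printf("pinned: %d\n", obj_is_pinned(&fb));
	ggtt_unpin(&fb); /* error path drops exactly the GGTT pin */
	printf("pinned: %d\n", obj_is_pinned(&fb));
	return 0;
}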
+4 -2
drivers/gpu/drm/i915/intel_hdmi.c
··· 113 113 }
114 114
115 115 static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type,
116 - enum transcoder cpu_transcoder)
116 + enum transcoder cpu_transcoder,
117 + struct drm_i915_private *dev_priv)
117 118 {
118 119 switch (type) {
119 120 case HDMI_INFOFRAME_TYPE_AVI:
··· 297 296 u32 val = I915_READ(ctl_reg);
298 297
299 298 data_reg = hsw_infoframe_data_reg(type,
300 - intel_crtc->config.cpu_transcoder);
299 + intel_crtc->config.cpu_transcoder,
300 + dev_priv);
301 301 if (data_reg == 0)
302 302 return;
303 303
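
hsw_infoframe_data_reg() now receives dev_priv explicitly rather than picking it up implicitly, presumably so the register offsets can be computed from per-device state; that motivation is inferred, and the offsets below are invented. The shape of the lookup:

#include <stdint.h>
#include <stdio.h>

enum frame_type { FRAME_AVI, FRAME_SPD };

struct device_priv { uint32_t mmio_base; };

/* The device is a parameter, so offsets can depend on per-device
 * state instead of file-scope globals. */
static uint32_t infoframe_data_reg(enum frame_type type,
				   const struct device_priv *dev_priv)
{
	switch (type) {
	case FRAME_AVI:
		return dev_priv->mmio_base + 0x100;
	case FRAME_SPD:
		return dev_priv->mmio_base + 0x200;
	}
	return 0; /* unknown type: the caller bails out, as the driver does */
}

int main(void)
{
	struct device_priv dev = { .mmio_base = 0x60000 };

	printf("AVI data reg: 0x%x\n", infoframe_data_reg(FRAME_AVI, &dev));
	return 0;
}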
+3 -3
drivers/gpu/drm/i915/intel_lvds.c
··· 848 848 struct drm_i915_private *dev_priv = dev->dev_private;
849 849
850 850 /* use the module option value if specified */
851 - if (i915_lvds_channel_mode > 0)
852 - return i915_lvds_channel_mode == 2;
851 + if (i915.lvds_channel_mode > 0)
852 + return i915.lvds_channel_mode == 2;
853 853
854 854 if (dmi_check_system(intel_dual_link_lvds))
855 855 return true;
··· 1036 1036 intel_find_panel_downclock(dev,
1037 1037 fixed_mode, connector);
1038 1038 if (intel_connector->panel.downclock_mode !=
1039 - NULL && i915_lvds_downclock) {
1039 + NULL && i915.lvds_downclock) {
1040 1040 /* We found the downclock for LVDS. */
1041 1041 dev_priv->lvds_downclock_avail = true;
1042 1042 dev_priv->lvds_downclock =
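
These hunks belong to the new i915_params.o: the scattered i915_* module-parameter globals become fields of one shared i915 struct. A sketch of that layout plus the "use the module option value if specified" test from intel_is_dual_link_lvds(); the 1=single/2=dual meaning is assumed from the option's conventional description:

#include <stdbool.h>
#include <stdio.h>

/* All module options gathered in one struct, like the new i915.* uses. */
struct params {
	int lvds_channel_mode; /* 0 auto, 1 single, 2 dual (assumed meaning) */
	int lvds_downclock;
};

static struct params i915 = { .lvds_channel_mode = 0, .lvds_downclock = 0 };

static bool dmi_says_dual_link(void) { return false; } /* stand-in probe */

static bool is_dual_link_lvds(void)
{
	/* use the module option value if specified */
	if (i915.lvds_channel_mode > 0)
		return i915.lvds_channel_mode == 2;

	return dmi_says_dual_link();
}

int main(void)
{
	printf("dual link: %d\n", is_dual_link_lvds());
	i915.lvds_channel_mode = 2; /* user forces dual-link on the cmdline */
	printf("dual link: %d\n", is_dual_link_lvds());
	return 0;
}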
+4 -4
drivers/gpu/drm/i915/intel_overlay.c
··· 293 293 {
294 294 struct drm_i915_gem_object *obj = overlay->old_vid_bo;
295 295
296 - i915_gem_object_unpin(obj);
296 + i915_gem_object_ggtt_unpin(obj);
297 297 drm_gem_object_unreference(&obj->base);
298 298
299 299 overlay->old_vid_bo = NULL;
··· 306 306 /* never have the overlay hw on without showing a frame */
307 307 BUG_ON(!overlay->vid_bo);
308 308
309 - i915_gem_object_unpin(obj);
309 + i915_gem_object_ggtt_unpin(obj);
310 310 drm_gem_object_unreference(&obj->base);
311 311 overlay->vid_bo = NULL;
312 312
··· 782 782 return 0;
783 783
784 784 out_unpin:
785 - i915_gem_object_unpin(new_bo);
785 + i915_gem_object_ggtt_unpin(new_bo);
786 786 return ret;
787 787
788 788
··· 1386 1386
1387 1387 out_unpin_bo:
1388 1388 if (!OVERLAY_NEEDS_PHYSICAL(dev))
1389 - i915_gem_object_unpin(reg_bo);
1389 + i915_gem_object_ggtt_unpin(reg_bo);
1390 1390 out_free_bo:
1391 1391 drm_gem_object_unreference(&reg_bo->base);
1392 1392 out_free:
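
The overlay changes thread the same rename through kernel-style goto unwinding (out_unpin, out_free_bo, out_free). For readers new to the idiom, a compact standalone illustration with malloc/free standing in for the reference and pin operations:

#include <stdio.h>
#include <stdlib.h>

/* Each label undoes only the steps that had succeeded, in reverse order. */
static int overlay_setup(int fail_at)
{
	char *bo, *map;
	int ret = 0;

	bo = malloc(64); /* take a reference on the buffer object */
	if (!bo)
		return -1;

	if (fail_at == 1) { /* simulated pin failure */
		ret = -1;
		goto out_free_bo;
	}

	map = malloc(64); /* "pin" it into the GGTT */
	if (!map) {
		ret = -1;
		goto out_free_bo;
	}

	if (fail_at == 2) { /* simulated failure in a later step */
		ret = -1;
		goto out_unpin;
	}

	/* success: a real driver keeps both; the demo releases them */
	free(map);
	free(bo);
	return 0;

out_unpin:
	free(map); /* i915_gem_object_ggtt_unpin() in the driver */
out_free_bo:
	free(bo);  /* drm_gem_object_unreference() in the driver */
	return ret;
}

int main(void)
{
	printf("ok=%d fail1=%d fail2=%d\n",
	       overlay_setup(0), overlay_setup(1), overlay_setup(2));
	return 0;
}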
+4 -13
drivers/gpu/drm/i915/intel_panel.c
··· 33 33 #include <linux/moduleparam.h>
34 34 #include "intel_drv.h"
35 35
36 - #define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
37 -
38 36 void
39 37 intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
40 38 struct drm_display_mode *adjusted_mode)
··· 323 325 pipe_config->gmch_pfit.lvds_border_bits = border;
324 326 }
325 327
326 - static int i915_panel_invert_brightness;
327 - MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
328 - "(-1 force normal, 0 machine defaults, 1 force inversion), please "
329 - "report PCI device ID, subsystem vendor and subsystem device ID "
330 - "to dri-devel@lists.freedesktop.org, if your machine needs it. "
331 - "It will then be included in an upcoming module version.");
332 - module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
333 328 static u32 intel_panel_compute_brightness(struct intel_connector *connector,
334 329 u32 val)
335 330 {
··· 332 341
333 342 WARN_ON(panel->backlight.max == 0);
334 343
335 - if (i915_panel_invert_brightness < 0)
344 + if (i915.invert_brightness < 0)
336 345 return val;
337 346
338 - if (i915_panel_invert_brightness > 0 ||
347 + if (i915.invert_brightness > 0 ||
339 348 dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
340 349 return panel->backlight.max - val;
341 350 }
··· 801 810 struct drm_i915_private *dev_priv = dev->dev_private;
802 811
803 812 /* Assume that the BIOS does not lie through the OpRegion... */
804 - if (!i915_panel_ignore_lid && dev_priv->opregion.lid_state) {
813 + if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {
805 814 return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
806 815 connector_status_connected :
807 816 connector_status_disconnected;
808 817 }
809 818
810 - switch (i915_panel_ignore_lid) {
819 + switch (i915.panel_ignore_lid) {
811 820 case -2:
812 821 return connector_status_connected;
813 822 case -1:
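
intel_panel_compute_brightness() keeps its logic but now reads invert_brightness from the shared i915 struct: a negative value forces normal operation, a positive value or a per-machine quirk inverts the level against backlight.max. The arithmetic, extracted into a sketch:

#include <stdint.h>
#include <stdio.h>

#define QUIRK_INVERT_BRIGHTNESS 0x1

/* invert_brightness: -1 force normal, 0 machine default, 1 force
 * inversion, matching the option's documented tri-state. */
static uint32_t compute_brightness(uint32_t val, uint32_t max,
				   int invert_opt, unsigned int quirks)
{
	if (invert_opt < 0)
		return val;

	if (invert_opt > 0 || (quirks & QUIRK_INVERT_BRIGHTNESS))
		return max - val;

	return val;
}

int main(void)
{
	/* quirked machine: a request for 55/255 programs 200/255 */
	printf("%u\n", compute_brightness(55, 255, 0, QUIRK_INVERT_BRIGHTNESS));
	/* a user override of -1 wins over the quirk */
	printf("%u\n", compute_brightness(55, 255, -1, QUIRK_INVERT_BRIGHTNESS));
	return 0;
}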
+173 -121
drivers/gpu/drm/i915/intel_pm.c
··· 97 97 struct drm_i915_gem_object *obj = intel_fb->obj; 98 98 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 99 99 int cfb_pitch; 100 - int plane, i; 100 + int i; 101 101 u32 fbc_ctl; 102 102 103 103 cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE; ··· 109 109 cfb_pitch = (cfb_pitch / 32) - 1; 110 110 else 111 111 cfb_pitch = (cfb_pitch / 64) - 1; 112 - plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; 113 112 114 113 /* Clear old tags */ 115 114 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) ··· 119 120 120 121 /* Set it up... */ 121 122 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE; 122 - fbc_ctl2 |= plane; 123 + fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane); 123 124 I915_WRITE(FBC_CONTROL2, fbc_ctl2); 124 125 I915_WRITE(FBC_FENCE_OFF, crtc->y); 125 126 } ··· 134 135 fbc_ctl |= obj->fence_reg; 135 136 I915_WRITE(FBC_CONTROL, fbc_ctl); 136 137 137 - DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c, ", 138 + DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n", 138 139 cfb_pitch, crtc->y, plane_name(intel_crtc->plane)); 139 140 } 140 141 ··· 153 154 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 154 155 struct drm_i915_gem_object *obj = intel_fb->obj; 155 156 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 156 - int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; 157 157 u32 dpfc_ctl; 158 158 159 - dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; 159 + dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN; 160 + if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) 161 + dpfc_ctl |= DPFC_CTL_LIMIT_2X; 162 + else 163 + dpfc_ctl |= DPFC_CTL_LIMIT_1X; 160 164 dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg; 161 - I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); 162 165 163 166 I915_WRITE(DPFC_FENCE_YOFF, crtc->y); 164 167 165 168 /* enable it... */ 166 - I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN); 169 + I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); 167 170 168 171 DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane)); 169 172 } ··· 225 224 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 226 225 struct drm_i915_gem_object *obj = intel_fb->obj; 227 226 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 228 - int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; 229 227 u32 dpfc_ctl; 230 228 231 - dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); 232 - dpfc_ctl &= DPFC_RESERVED; 233 - dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); 234 - /* Set persistent mode for front-buffer rendering, ala X. 
*/ 235 - dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE; 229 + dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane); 230 + if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) 231 + dpfc_ctl |= DPFC_CTL_LIMIT_2X; 232 + else 233 + dpfc_ctl |= DPFC_CTL_LIMIT_1X; 236 234 dpfc_ctl |= DPFC_CTL_FENCE_EN; 237 235 if (IS_GEN5(dev)) 238 236 dpfc_ctl |= obj->fence_reg; 239 - I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); 240 237 241 238 I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); 242 239 I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID); ··· 281 282 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 282 283 struct drm_i915_gem_object *obj = intel_fb->obj; 283 284 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 285 + u32 dpfc_ctl; 284 286 285 - I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj)); 287 + dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane); 288 + if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) 289 + dpfc_ctl |= DPFC_CTL_LIMIT_2X; 290 + else 291 + dpfc_ctl |= DPFC_CTL_LIMIT_1X; 292 + dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; 286 293 287 - I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X | 288 - IVB_DPFC_CTL_FENCE_EN | 289 - intel_crtc->plane << IVB_DPFC_CTL_PLANE_SHIFT); 294 + I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); 290 295 291 296 if (IS_IVYBRIDGE(dev)) { 292 297 /* WaFbcAsynchFlipDisableFbcQueue:ivb */ ··· 469 466 return; 470 467 } 471 468 472 - if (!i915_powersave) { 469 + if (!i915.powersave) { 473 470 if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM)) 474 471 DRM_DEBUG_KMS("fbc disabled per module param\n"); 475 472 return; ··· 508 505 obj = intel_fb->obj; 509 506 adjusted_mode = &intel_crtc->config.adjusted_mode; 510 507 511 - if (i915_enable_fbc < 0 && 508 + if (i915.enable_fbc < 0 && 512 509 INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) { 513 510 if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT)) 514 511 DRM_DEBUG_KMS("disabled per chip default\n"); 515 512 goto out_disable; 516 513 } 517 - if (!i915_enable_fbc) { 514 + if (!i915.enable_fbc) { 518 515 if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM)) 519 516 DRM_DEBUG_KMS("fbc disabled per module param\n"); 520 517 goto out_disable; ··· 1889 1886 } 1890 1887 1891 1888 /* Calculate the maximum FBC watermark */ 1892 - static unsigned int ilk_fbc_wm_max(struct drm_device *dev) 1889 + static unsigned int ilk_fbc_wm_max(const struct drm_device *dev) 1893 1890 { 1894 1891 /* max that registers can hold */ 1895 1892 if (INTEL_INFO(dev)->gen >= 8) ··· 1898 1895 return 15; 1899 1896 } 1900 1897 1901 - static void ilk_compute_wm_maximums(struct drm_device *dev, 1898 + static void ilk_compute_wm_maximums(const struct drm_device *dev, 1902 1899 int level, 1903 1900 const struct intel_wm_config *config, 1904 1901 enum intel_ddb_partitioning ddb_partitioning, ··· 1951 1948 return ret; 1952 1949 } 1953 1950 1954 - static void ilk_compute_wm_level(struct drm_i915_private *dev_priv, 1951 + static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, 1955 1952 int level, 1956 1953 const struct ilk_pipe_wm_parameters *p, 1957 1954 struct intel_wm_level *result) ··· 2143 2140 struct intel_pipe_wm *pipe_wm) 2144 2141 { 2145 2142 struct drm_device *dev = crtc->dev; 2146 - struct drm_i915_private *dev_priv = dev->dev_private; 2143 + const struct drm_i915_private *dev_priv = dev->dev_private; 2147 2144 int level, max_level = ilk_wm_max_level(dev); 2148 2145 /* LP0 watermark maximums depend on this pipe alone */ 2149 2146 struct intel_wm_config config = { ··· 2756 2753 return ctx; 2757 2754 
2758 2755 err_unpin: 2759 - i915_gem_object_unpin(ctx); 2756 + i915_gem_object_ggtt_unpin(ctx); 2760 2757 err_unref: 2761 2758 drm_gem_object_unreference(&ctx->base); 2762 2759 return NULL; ··· 3003 3000 dev_priv->rps.last_adj = 0; 3004 3001 } 3005 3002 3003 + /* gen6_set_rps is called to update the frequency request, but should also be 3004 + * called when the range (min_delay and max_delay) is modified so that we can 3005 + * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */ 3006 3006 void gen6_set_rps(struct drm_device *dev, u8 val) 3007 3007 { 3008 3008 struct drm_i915_private *dev_priv = dev->dev_private; ··· 3014 3008 WARN_ON(val > dev_priv->rps.max_delay); 3015 3009 WARN_ON(val < dev_priv->rps.min_delay); 3016 3010 3017 - if (val == dev_priv->rps.cur_delay) 3011 + if (val == dev_priv->rps.cur_delay) { 3012 + /* min/max delay may still have been modified so be sure to 3013 + * write the limits value */ 3014 + I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 3015 + gen6_rps_limits(dev_priv, val)); 3016 + 3018 3017 return; 3018 + } 3019 3019 3020 3020 gen6_set_rps_thresholds(dev_priv, val); 3021 3021 ··· 3047 3035 trace_intel_gpu_freq_change(val * 50); 3048 3036 } 3038 + /* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down 3039 + * 3040 + * If Gfx is Idle, then 3041 + * 1. Mask Turbo interrupts 3042 + * 2. Bring up Gfx clock 3043 + * 3. Change the freq to Rpn and wait till P-Unit updates freq 3044 + * 4. Clear the Force GFX CLK ON bit so that the Gfx clock can go down 3045 + * 5. Unmask Turbo interrupts 3046 + */ 3047 + static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) 3048 + { 3049 + /* 3050 + * When we are idle, drop to the min voltage state. 3051 + */ 3052 + 3053 + if (dev_priv->rps.cur_delay <= dev_priv->rps.min_delay) 3054 + return; 3055 + 3056 + /* Mask turbo interrupts so that they will not come in between */ 3057 + I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); 3058 + 3059 + /* Bring up the Gfx clock */ 3060 + I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, 3061 + I915_READ(VLV_GTLC_SURVIVABILITY_REG) | 3062 + VLV_GFX_CLK_FORCE_ON_BIT); 3063 + 3064 + if (wait_for(((VLV_GFX_CLK_STATUS_BIT & 3065 + I915_READ(VLV_GTLC_SURVIVABILITY_REG)) != 0), 5)) { 3066 + DRM_ERROR("GFX_CLK_ON request timed out\n"); 3067 + return; 3068 + } 3069 + 3070 + dev_priv->rps.cur_delay = dev_priv->rps.min_delay; 3071 + 3072 + vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, 3073 + dev_priv->rps.min_delay); 3074 + 3075 + if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) 3076 + & GENFREQSTATUS) == 0, 5)) 3077 + DRM_ERROR("timed out waiting for Punit\n"); 3078 + 3079 + /* Release the Gfx clock */ 3080 + I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, 3081 + I915_READ(VLV_GTLC_SURVIVABILITY_REG) & 3082 + ~VLV_GFX_CLK_FORCE_ON_BIT); 3083 + 3084 + /* Unmask Up interrupts */ 3085 + dev_priv->rps.rp_up_masked = true; 3086 + gen6_set_pm_mask(dev_priv, GEN6_PM_RP_DOWN_THRESHOLD, 3087 + dev_priv->rps.min_delay); 3088 + } 3089 + 3050 3090 void gen6_rps_idle(struct drm_i915_private *dev_priv) 3051 3091 { 3052 3092 struct drm_device *dev = dev_priv->dev; ··· 3106 3042 mutex_lock(&dev_priv->rps.hw_lock); 3107 3043 if (dev_priv->rps.enabled) { 3108 3044 if (IS_VALLEYVIEW(dev)) 3109 - valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay); 3045 + vlv_set_rps_idle(dev_priv); 3110 3046 else 3111 3047 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay); 3112 3048 dev_priv->rps.last_adj = 0; ··· 3215 3151 return 0; 3216 3152 3217 3153 /* Respect the kernel parameter if it is set */ 3218 - if
(i915_enable_rc6 >= 0) 3219 - return i915_enable_rc6; 3154 + if (i915.enable_rc6 >= 0) 3155 + return i915.enable_rc6; 3220 3156 3221 3157 /* Disable RC6 on Ironlake */ 3222 3158 if (INTEL_INFO(dev)->gen == 5) ··· 3331 3267 { 3332 3268 struct drm_i915_private *dev_priv = dev->dev_private; 3333 3269 struct intel_ring_buffer *ring; 3334 - u32 rp_state_cap; 3270 + u32 rp_state_cap, hw_max, hw_min; 3335 3271 u32 gt_perf_status; 3336 3272 u32 rc6vids, pcu_mbox, rc6_mask = 0; 3337 3273 u32 gtfifodbg; ··· 3360 3296 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 3361 3297 3362 3298 /* In units of 50MHz */ 3363 - dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff; 3364 - dev_priv->rps.min_delay = (rp_state_cap >> 16) & 0xff; 3299 + dev_priv->rps.hw_max = hw_max = rp_state_cap & 0xff; 3300 + hw_min = (rp_state_cap >> 16) & 0xff; 3365 3301 dev_priv->rps.rp1_delay = (rp_state_cap >> 8) & 0xff; 3366 3302 dev_priv->rps.rp0_delay = (rp_state_cap >> 0) & 0xff; 3367 3303 dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay; 3368 3304 dev_priv->rps.cur_delay = 0; 3305 + 3306 + /* Preserve min/max settings in case of re-init */ 3307 + if (dev_priv->rps.max_delay == 0) 3308 + dev_priv->rps.max_delay = hw_max; 3309 + 3310 + if (dev_priv->rps.min_delay == 0) 3311 + dev_priv->rps.min_delay = hw_min; 3369 3312 3370 3313 /* disable the counters and set deterministic thresholds */ 3371 3314 I915_WRITE(GEN6_RC_CONTROL, 0); ··· 3602 3531 { 3603 3532 struct drm_i915_private *dev_priv = dev->dev_private; 3604 3533 struct intel_ring_buffer *ring; 3605 - u32 gtfifodbg, val, rc6_mode = 0; 3534 + u32 gtfifodbg, val, hw_max, hw_min, rc6_mode = 0; 3606 3535 int i; 3607 3536 3608 3537 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); ··· 3664 3593 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_delay), 3665 3594 dev_priv->rps.cur_delay); 3666 3595 3667 - dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv); 3668 - dev_priv->rps.hw_max = dev_priv->rps.max_delay; 3596 + dev_priv->rps.hw_max = hw_max = valleyview_rps_max_freq(dev_priv); 3669 3597 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", 3670 - vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay), 3671 - dev_priv->rps.max_delay); 3598 + vlv_gpu_freq(dev_priv, hw_max), 3599 + hw_max); 3672 3600 3673 3601 dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv); 3674 3602 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", 3675 3603 vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay), 3676 3604 dev_priv->rps.rpe_delay); 3677 3605 3678 - dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv); 3606 + hw_min = valleyview_rps_min_freq(dev_priv); 3679 3607 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", 3680 - vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay), 3681 - dev_priv->rps.min_delay); 3608 + vlv_gpu_freq(dev_priv, hw_min), 3609 + hw_min); 3610 + 3611 + /* Preserve min/max settings in case of re-init */ 3612 + if (dev_priv->rps.max_delay == 0) 3613 + dev_priv->rps.max_delay = hw_max; 3614 + 3615 + if (dev_priv->rps.min_delay == 0) 3616 + dev_priv->rps.min_delay = hw_min; 3682 3617 3683 3618 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n", 3684 3619 vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay), 3685 3620 dev_priv->rps.rpe_delay); 3686 3621 3687 3622 valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay); 3623 + 3624 + dev_priv->rps.rp_up_masked = false; 3625 + dev_priv->rps.rp_down_masked = false; 3688 3626 3689 3627 gen6_enable_rps_interrupts(dev); 3690 3628 ··· 3705 3625 struct drm_i915_private *dev_priv = dev->dev_private; 3706 3626 3707 3627 if 
(dev_priv->ips.renderctx) { 3708 - i915_gem_object_unpin(dev_priv->ips.renderctx); 3628 + i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx); 3709 3629 drm_gem_object_unreference(&dev_priv->ips.renderctx->base); 3710 3630 dev_priv->ips.renderctx = NULL; 3711 3631 } 3712 3632 3713 3633 if (dev_priv->ips.pwrctx) { 3714 - i915_gem_object_unpin(dev_priv->ips.pwrctx); 3634 + i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx); 3715 3635 drm_gem_object_unreference(&dev_priv->ips.pwrctx->base); 3716 3636 dev_priv->ips.pwrctx = NULL; 3717 3637 } ··· 4350 4270 i915_mch_dev = NULL; 4351 4271 spin_unlock_irq(&mchdev_lock); 4352 4272 } 4273 + 4353 4274 static void intel_init_emon(struct drm_device *dev) 4354 4275 { 4355 4276 struct drm_i915_private *dev_priv = dev->dev_private; ··· 4686 4605 * According to the spec, bit 11 (RCCUNIT) must also be set, 4687 4606 * but we didn't debug actual testcases to find it out. 4688 4607 * 4689 - * Also apply WaDisableVDSUnitClockGating:snb and 4690 - * WaDisableRCPBUnitClockGating:snb. 4608 + * WaDisableRCCUnitClockGating:snb 4609 + * WaDisableRCPBUnitClockGating:snb 4691 4610 */ 4692 4611 I915_WRITE(GEN6_UCGCTL2, 4693 - GEN7_VDSUNIT_CLOCK_GATE_DISABLE | 4694 4612 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | 4695 4613 GEN6_RCCUNIT_CLOCK_GATE_DISABLE); 4696 4614 ··· 4735 4655 { 4736 4656 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE); 4737 4657 4658 + /* 4659 + * WaVSThreadDispatchOverride:ivb,vlv 4660 + * 4661 + * This actually overrides the dispatch 4662 + * mode for all thread types. 4663 + */ 4738 4664 reg &= ~GEN7_FF_SCHED_MASK; 4739 4665 reg |= GEN7_FF_TS_SCHED_HW; 4740 4666 reg |= GEN7_FF_VS_SCHED_HW; 4741 4667 reg |= GEN7_FF_DS_SCHED_HW; 4742 - 4743 - if (IS_HASWELL(dev_priv->dev)) 4744 - reg &= ~GEN7_FF_VS_REF_CNT_FFME; 4745 4668 4746 4669 I915_WRITE(GEN7_FF_THREAD_MODE, reg); 4747 4670 } ··· 4792 4709 /* FIXME(BDW): Check all the w/a, some might only apply to 4793 4710 * pre-production hw. */ 4794 4711 4795 - WARN(!i915_preliminary_hw_support, 4796 - "GEN8_CENTROID_PIXEL_OPT_DIS not be needed for production\n"); 4712 + /* 4713 + * This GEN8_CENTROID_PIXEL_OPT_DIS W/A is only needed for 4714 + * pre-production hardware 4715 + */ 4797 4716 I915_WRITE(HALF_SLICE_CHICKEN3, 4798 4717 _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS)); 4799 4718 I915_WRITE(HALF_SLICE_CHICKEN3, ··· 4846 4761 4847 4762 ilk_init_lp_watermarks(dev); 4848 4763 4849 - /* According to the spec, bit 13 (RCZUNIT) must be set on IVB. 4850 - * This implements the WaDisableRCZUnitClockGating:hsw workaround. 4851 - */ 4852 - I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE); 4853 - 4854 - /* Apply the WaDisableRHWOOptimizationForRenderHang:hsw workaround. */ 4855 - I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, 4856 - GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); 4857 - 4858 - /* WaApplyL3ControlAndL3ChickenMode:hsw */ 4859 - I915_WRITE(GEN7_L3CNTLREG1, 4860 - GEN7_WA_FOR_GEN7_L3_CONTROL); 4861 - I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, 4862 - GEN7_WA_L3_CHICKEN_MODE); 4863 - 4864 4764 /* L3 caching of data atomics doesn't work -- disable it. 
*/ 4865 4765 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE); 4866 4766 I915_WRITE(HSW_ROW_CHICKEN3, ··· 4857 4787 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 4858 4788 4859 4789 /* WaVSRefCountFullforceMissDisable:hsw */ 4860 - gen7_setup_fixed_func_scheduler(dev_priv); 4790 + I915_WRITE(GEN7_FF_THREAD_MODE, 4791 + I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME); 4792 + 4793 + /* enable HiZ Raw Stall Optimization */ 4794 + I915_WRITE(CACHE_MODE_0_GEN7, 4795 + _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE)); 4861 4796 4862 4797 /* WaDisable4x2SubspanOptimization:hsw */ 4863 4798 I915_WRITE(CACHE_MODE_1, ··· 4900 4825 if (IS_IVB_GT1(dev)) 4901 4826 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, 4902 4827 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); 4903 - else 4904 - I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2, 4905 - _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); 4906 4828 4907 4829 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */ 4908 4830 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, ··· 4913 4841 if (IS_IVB_GT1(dev)) 4914 4842 I915_WRITE(GEN7_ROW_CHICKEN2, 4915 4843 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 4916 - else 4844 + else { 4845 + /* must write both registers */ 4846 + I915_WRITE(GEN7_ROW_CHICKEN2, 4847 + _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 4917 4848 I915_WRITE(GEN7_ROW_CHICKEN2_GT2, 4918 4849 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 4919 - 4850 + } 4920 4851 4921 4852 /* WaForceL3Serialization:ivb */ 4922 4853 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & 4923 4854 ~L3SQ_URB_READ_CAM_MATCH_DISABLE); 4924 4855 4925 - /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock 4926 - * gating disable must be set. Failure to set it results in 4927 - * flickering pixels due to Z write ordering failures after 4928 - * some amount of runtime in the Mesa "fire" demo, and Unigine 4929 - * Sanctuary and Tropics, and apparently anything else with 4930 - * alpha test or pixel discard. 4931 - * 4932 - * According to the spec, bit 11 (RCCUNIT) must also be set, 4933 - * but we didn't debug actual testcases to find it out. 4934 - * 4856 + /* 4935 4857 * According to the spec, bit 13 (RCZUNIT) must be set on IVB. 4936 4858 * This implements the WaDisableRCZUnitClockGating:ivb workaround. 4937 4859 */ 4938 4860 I915_WRITE(GEN6_UCGCTL2, 4939 - GEN6_RCZUNIT_CLOCK_GATE_DISABLE | 4940 - GEN6_RCCUNIT_CLOCK_GATE_DISABLE); 4861 + GEN6_RCZUNIT_CLOCK_GATE_DISABLE); 4941 4862 4942 4863 /* This is required by WaCatErrorRejectionIssue:ivb */ 4943 4864 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, ··· 4939 4874 4940 4875 g4x_disable_trickle_feed(dev); 4941 4876 4942 - /* WaVSRefCountFullforceMissDisable:ivb */ 4943 4877 gen7_setup_fixed_func_scheduler(dev_priv); 4878 + 4879 + /* enable HiZ Raw Stall Optimization */ 4880 + I915_WRITE(CACHE_MODE_0_GEN7, 4881 + _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE)); 4944 4882 4945 4883 /* WaDisable4x2SubspanOptimization:ivb */ 4946 4884 I915_WRITE(CACHE_MODE_1, ··· 4995 4927 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | 4996 4928 CHICKEN3_DGMG_DONE_FIX_DISABLE); 4997 4929 4930 + /* WaPsdDispatchEnable:vlv */ 4998 4931 /* WaDisablePSDDualDispatchEnable:vlv */ 4999 4932 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, 5000 4933 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP | 5001 4934 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); 5002 4935 5003 - /* Apply the WaDisableRHWOOptimizationForRenderHang:vlv workaround. 
*/ 5004 - I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, 5005 - GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); 5006 - 5007 - /* WaApplyL3ControlAndL3ChickenMode:vlv */ 4936 + /* WaDisableL3CacheAging:vlv */ 5008 4937 I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS); 5009 - I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE); 5010 4938 5011 4939 /* WaForceL3Serialization:vlv */ 5012 4940 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & ··· 5017 4953 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 5018 4954 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 5019 4955 5020 - /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock 5021 - * gating disable must be set. Failure to set it results in 5022 - * flickering pixels due to Z write ordering failures after 5023 - * some amount of runtime in the Mesa "fire" demo, and Unigine 5024 - * Sanctuary and Tropics, and apparently anything else with 5025 - * alpha test or pixel discard. 5026 - * 5027 - * According to the spec, bit 11 (RCCUNIT) must also be set, 5028 - * but we didn't debug actual testcases to find it out. 5029 - * 4956 + gen7_setup_fixed_func_scheduler(dev_priv); 4957 + 4958 + /* 5030 4959 * According to the spec, bit 13 (RCZUNIT) must be set on IVB. 5031 4960 * This implements the WaDisableRCZUnitClockGating:vlv workaround. 5032 - * 5033 - * Also apply WaDisableVDSUnitClockGating:vlv and 5034 - * WaDisableRCPBUnitClockGating:vlv. 5035 4961 */ 5036 4962 I915_WRITE(GEN6_UCGCTL2, 5037 - GEN7_VDSUNIT_CLOCK_GATE_DISABLE | 5038 - GEN7_TDLUNIT_CLOCK_GATE_DISABLE | 5039 - GEN6_RCZUNIT_CLOCK_GATE_DISABLE | 5040 - GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | 5041 - GEN6_RCCUNIT_CLOCK_GATE_DISABLE); 4963 + GEN6_RCZUNIT_CLOCK_GATE_DISABLE); 5042 
 4965 + /* WaDisableL3Bank2xClockGate:vlv */ 5043 4966 I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE); 5044 4967 5045 4968 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); 5046 4969 4970 + /* 4971 + * BSpec says this must be set, even though 4972 + * WaDisable4x2SubspanOptimization isn't listed for VLV. 4973 + */ 5047 4974 I915_WRITE(CACHE_MODE_1, 5048 4975 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); 4976 + 4977 + /* 4978 + * WaIncreaseL3CreditsForVLVB0:vlv 4979 + * This is the hardware default actually. 4980 + */ 4981 + I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE); 5049 4982 5050 4983 /* 5051 4984 * WaDisableVLVClockGating_VBIIssue:vlv 5052 4985 * Disable clock gating on the GCFG unit to prevent a delay 5053 4986 * in the reporting of vblank events. 5054 4987 */ 5055 - I915_WRITE(VLV_GUNIT_CLOCK_GATE, 0xffffffff); 5056 - 5057 - /* Conservative clock gating settings for now */ 5058 - I915_WRITE(0x9400, 0xffffffff); 5059 - I915_WRITE(0x9404, 0xffffffff); 5060 - I915_WRITE(0x9408, 0xffffffff); 5061 - I915_WRITE(0x940c, 0xffffffff); 5062 - I915_WRITE(0x9410, 0xffffffff); 5063 - I915_WRITE(0x9414, 0xffffffff); 5064 - I915_WRITE(0x9418, 0xffffffff); 4988 + I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS); 5065 4989 } 5066 4990 5067 4991 static void g4x_init_clock_gating(struct drm_device *dev) ··· 5324 5272 WARN_ON(!power_well->count); 5325 5273 5326 5274 if (!--power_well->count && power_well->set && 5327 - i915_disable_power_well) { 5275 + i915.disable_power_well) { 5328 5276 power_well->set(dev, power_well, false); 5329 5277 hsw_enable_package_c8(dev_priv); 5330 5278 }
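The scattered i915_powersave / i915_enable_fbc / i915_enable_rc6 / i915_disable_power_well variables that the minus lines above still reference are consolidated by this series into a single i915 struct of module parameters. A minimal sketch of the pattern, with the field list inferred from the i915.<param> accesses in this diff; the authoritative layout and defaults live in i915_params.c:

#include <linux/module.h>

/* Sketch only: field set inferred from the i915.<param> uses above. */
struct i915_params {
	int powersave;
	int enable_rc6;
	int enable_fbc;
	int disable_power_well;
};

struct i915_params i915 = {
	.powersave = 1,
	.enable_rc6 = -1,		/* negative: use the per-chip default */
	.enable_fbc = -1,		/* negative: use the per-chip default */
	.disable_power_well = 1,
};

module_param_named(powersave, i915.powersave, int, 0600);
MODULE_PARM_DESC(powersave,
	"Enable powersave features such as FBC and downclocking (default: 1)");

module_param_named(enable_rc6, i915.enable_rc6, int, 0400);
MODULE_PARM_DESC(enable_rc6,
	"Enable render C-state 6; -1 picks the per-chip default");

Keeping every tunable behind one struct is what lets the call sites above read as i915.<name>, and gives a single place to audit defaults.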
+11 -26
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 549 549 return 0; 550 550 551 551 err_unpin: 552 - i915_gem_object_unpin(ring->scratch.obj); 552 + i915_gem_object_ggtt_unpin(ring->scratch.obj); 553 553 err_unref: 554 554 drm_gem_object_unreference(&ring->scratch.obj->base); 555 555 err: ··· 625 625 626 626 if (INTEL_INFO(dev)->gen >= 5) { 627 627 kunmap(sg_page(ring->scratch.obj->pages->sgl)); 628 - i915_gem_object_unpin(ring->scratch.obj); 628 + i915_gem_object_ggtt_unpin(ring->scratch.obj); 629 629 } 630 630 631 631 drm_gem_object_unreference(&ring->scratch.obj->base); ··· 1253 1253 return; 1254 1254 1255 1255 kunmap(sg_page(obj->pages->sgl)); 1256 - i915_gem_object_unpin(obj); 1256 + i915_gem_object_ggtt_unpin(obj); 1257 1257 drm_gem_object_unreference(&obj->base); 1258 1258 ring->status_page.obj = NULL; 1259 1259 } ··· 1293 1293 return 0; 1294 1294 1295 1295 err_unpin: 1296 - i915_gem_object_unpin(obj); 1296 + i915_gem_object_ggtt_unpin(obj); 1297 1297 err_unref: 1298 1298 drm_gem_object_unreference(&obj->base); 1299 1299 err: ··· 1390 1390 err_unmap: 1391 1391 iounmap(ring->virtual_start); 1392 1392 err_unpin: 1393 - i915_gem_object_unpin(obj); 1393 + i915_gem_object_ggtt_unpin(obj); 1394 1394 err_unref: 1395 1395 drm_gem_object_unreference(&obj->base); 1396 1396 ring->obj = NULL; ··· 1418 1418 1419 1419 iounmap(ring->virtual_start); 1420 1420 1421 - i915_gem_object_unpin(ring->obj); 1421 + i915_gem_object_ggtt_unpin(ring->obj); 1422 1422 drm_gem_object_unreference(&ring->obj->base); 1423 1423 ring->obj = NULL; 1424 1424 ring->preallocated_lazy_request = NULL; ··· 1430 1430 cleanup_status_page(ring); 1431 1431 } 1432 1432 1433 - static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno) 1434 - { 1435 - int ret; 1436 - 1437 - ret = i915_wait_seqno(ring, seqno); 1438 - if (!ret) 1439 - i915_gem_retire_requests_ring(ring); 1440 - 1441 - return ret; 1442 - } 1443 - 1444 1433 static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n) 1445 1434 { 1446 1435 struct drm_i915_gem_request *request; 1447 - u32 seqno = 0; 1436 + u32 seqno = 0, tail; 1448 1437 int ret; 1449 - 1450 - i915_gem_retire_requests_ring(ring); 1451 1438 1452 1439 if (ring->last_retired_head != -1) { 1453 1440 ring->head = ring->last_retired_head; 1454 1441 ring->last_retired_head = -1; 1442 + 1455 1443 ring->space = ring_space(ring); 1456 1444 if (ring->space >= n) 1457 1445 return 0; ··· 1456 1468 space += ring->size; 1457 1469 if (space >= n) { 1458 1470 seqno = request->seqno; 1471 + tail = request->tail; 1459 1472 break; 1460 1473 } 1461 1474 ··· 1471 1482 if (seqno == 0) 1472 1483 return -ENOSPC; 1473 1484 1474 - ret = intel_ring_wait_seqno(ring, seqno); 1485 + ret = i915_wait_seqno(ring, seqno); 1475 1486 if (ret) 1476 1487 return ret; 1477 1488 1478 - if (WARN_ON(ring->last_retired_head == -1)) 1479 - return -ENOSPC; 1480 - 1481 - ring->head = ring->last_retired_head; 1482 - ring->last_retired_head = -1; 1489 + ring->head = tail; 1483 1490 ring->space = ring_space(ring); 1484 1491 if (WARN_ON(ring->space < n)) 1485 1492 return -ENOSPC;
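The intel_ring_wait_request() rework above stops round-tripping through last_retired_head (which may legitimately be -1 again by the time the wait finishes) and instead records the chosen request's tail up front: once i915_wait_seqno() returns, the hardware head is known to have advanced past that tail, so it can become the new software head directly. The free-space test it performs is the usual circular-buffer computation; roughly, as a simplified sketch of ring_space() with a reserve so a full ring stays distinguishable from an empty one:

/* Simplified sketch of the free-space computation the wait path relies on.
 * head chases tail around the ring; the reserve keeps head == tail meaning
 * "empty" rather than "full". */
static int ring_free_space(unsigned int head, unsigned int tail,
			   unsigned int size, unsigned int reserve)
{
	int space = head - (tail + reserve);

	if (space < 0)		/* head is behind tail, i.e. wrapped */
		space += size;
	return space;
}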
+2
drivers/gpu/drm/i915/intel_ringbuffer.h
··· 41 41 HANGCHECK_HUNG, 42 42 }; 43 43 44 + #define HANGCHECK_SCORE_RING_HUNG 31 45 + 44 46 struct intel_ring_hangcheck { 45 47 bool deadlock; 46 48 u32 seqno;
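HANGCHECK_SCORE_RING_HUNG is the threshold consumed by the hangcheck-score-based guilty-batch search elsewhere in the series. The test amounts to something like the following sketch, assuming the score counter that the hangcheck timer accumulates on this struct:

/* Sketch of the consumer side: a ring whose accumulated hangcheck score
 * reaches the threshold is declared hung, and the request running on it
 * takes the blame when hang stats are assigned. */
static bool ring_hung(const struct intel_ring_hangcheck *hc)
{
	return hc->score >= HANGCHECK_SCORE_RING_HUNG;
}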
+9 -9
drivers/gpu/drm/i915/intel_sprite.c
··· 124 124 crtc_w--; 125 125 crtc_h--; 126 126 127 - I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]); 128 - I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x); 129 - 130 127 linear_offset = y * fb->pitches[0] + x * pixel_size; 131 128 sprsurf_offset = intel_gen4_compute_page_offset(&x, &y, 132 129 obj->tiling_mode, 133 130 pixel_size, 134 131 fb->pitches[0]); 135 132 linear_offset -= sprsurf_offset; 133 + 134 + I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]); 135 + I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x); 136 136 137 137 if (obj->tiling_mode != I915_TILING_NONE) 138 138 I915_WRITE(SPTILEOFF(pipe, plane), (y << 16) | x); ··· 293 293 if (crtc_w != src_w || crtc_h != src_h) 294 294 sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h; 295 295 296 - I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]); 297 - I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x); 298 - 299 296 linear_offset = y * fb->pitches[0] + x * pixel_size; 300 297 sprsurf_offset = 301 298 intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode, 302 299 pixel_size, fb->pitches[0]); 303 300 linear_offset -= sprsurf_offset; 301 + 302 + I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]); 303 + I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x); 304 304 305 305 /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET 306 306 * register */ ··· 472 472 if (crtc_w != src_w || crtc_h != src_h) 473 473 dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h; 474 474 475 - I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]); 476 - I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x); 477 - 478 475 linear_offset = y * fb->pitches[0] + x * pixel_size; 479 476 dvssurf_offset = 480 477 intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode, 481 478 pixel_size, fb->pitches[0]); 482 479 linear_offset -= dvssurf_offset; 480 + 481 + I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]); 482 + I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x); 483 483 484 484 if (obj->tiling_mode != I915_TILING_NONE) 485 485 I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
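All three hunks make the same ordering change: finish the linear_offset / surface-offset arithmetic, which rewrites x and y in place, before starting the register writes, so the MMIO updates land as one contiguous block. For reference, a simplified sketch of what the untiled branch of intel_gen4_compute_page_offset() does to x and y (illustrative only, not the driver's exact code):

/* Illustrative untiled case: split a byte offset into a page-aligned base
 * plus an x/y remainder inside that page, updating x and y in place. */
static unsigned long page_offset_linear(int *x, int *y,
					unsigned int cpp, unsigned int pitch)
{
	unsigned long offset = *y * pitch + *x * cpp;

	*y = 0;				/* the base now selects the row */
	*x = (offset & 4095) / cpp;	/* pixels into the 4 KiB page */
	return offset & ~4095UL;	/* page-aligned surface address */
}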
+5 -3
drivers/gpu/drm/i915/intel_uncore.c
··· 852 852 struct drm_i915_private *dev_priv = dev->dev_private; 853 853 struct drm_i915_reset_stats *args = data; 854 854 struct i915_ctx_hang_stats *hs; 855 + struct i915_hw_context *ctx; 855 856 int ret; 856 857 857 858 if (args->flags || args->pad) ··· 865 864 if (ret) 866 865 return ret; 867 866 868 - hs = i915_gem_context_get_hang_stats(dev, file, args->ctx_id); 869 - if (IS_ERR(hs)) { 867 + ctx = i915_gem_context_get(file->driver_priv, args->ctx_id); 868 + if (IS_ERR(ctx)) { 870 869 mutex_unlock(&dev->struct_mutex); 871 - return PTR_ERR(hs); 870 + return PTR_ERR(ctx); 872 871 } 872 + hs = &ctx->hang_stats; 873 873 874 874 if (capable(CAP_SYS_ADMIN)) 875 875 args->reset_count = i915_reset_count(&dev_priv->gpu_error);
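With the hang stats now reached through the context itself, the ioctl's userspace contract is unchanged. A hypothetical caller, using libdrm's drmIoctl() and the struct drm_i915_reset_stats definition from i915_drm.h (the include paths are an assumption about the build setup):

#include <stdio.h>
#include <string.h>
#include <xf86drm.h>	/* drmIoctl(), from libdrm */
#include <i915_drm.h>	/* struct drm_i915_reset_stats */

static int print_reset_stats(int fd, unsigned int ctx_id)
{
	struct drm_i915_reset_stats stats;

	memset(&stats, 0, sizeof(stats));
	stats.ctx_id = ctx_id;	/* 0 queries the default context */

	if (drmIoctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats))
		return -1;

	/* reset_count is only filled in for CAP_SYS_ADMIN, per the handler */
	printf("resets=%u batch_active=%u batch_pending=%u\n",
	       stats.reset_count, stats.batch_active, stats.batch_pending);
	return 0;
}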
+10
include/drm/drm_dp_helper.h
··· 279 279 280 280 #define DP_TEST_PATTERN 0x221 281 281 282 + #define DP_TEST_CRC_R_CR 0x240 283 + #define DP_TEST_CRC_G_Y 0x242 284 + #define DP_TEST_CRC_B_CB 0x244 285 + 286 + #define DP_TEST_SINK_MISC 0x246 287 + #define DP_TEST_CRC_SUPPORTED (1 << 5) 288 + 282 289 #define DP_TEST_RESPONSE 0x260 283 290 # define DP_TEST_ACK (1 << 0) 284 291 # define DP_TEST_NAK (1 << 1) 285 292 # define DP_TEST_EDID_CHECKSUM_WRITE (1 << 2) 293 + 294 + #define DP_TEST_SINK 0x270 295 + #define DP_TEST_SINK_START (1 << 0) 286 296 287 297 #define DP_SOURCE_OUI 0x300 288 298 #define DP_SINK_OUI 0x400
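These DPCD offsets are what the new sink-CRC support drives: DP_TEST_SINK_MISC advertises the capability, writing DP_TEST_SINK_START to DP_TEST_SINK kicks off the calculation in the sink, and six consecutive bytes starting at DP_TEST_CRC_R_CR return the per-component CRCs. A hedged sketch of the handshake; struct dp_aux and aux_readb()/aux_writeb() are stand-ins for whatever DPCD accessors a driver provides, not a real API:

/* Kernel-style sketch; the aux type and the two accessors below are
 * placeholders for a driver's real DPCD read/write helpers. */
struct dp_aux;
int aux_readb(struct dp_aux *aux, unsigned int offset, u8 *val);
int aux_writeb(struct dp_aux *aux, unsigned int offset, u8 val);

static int read_sink_crc(struct dp_aux *aux, u8 crc[6])
{
	u8 buf;
	int i;

	/* The sink must advertise CRC support in TEST_SINK_MISC */
	if (aux_readb(aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;
	if (!(buf & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	/* Start CRC calculation in the sink */
	if (aux_writeb(aux, DP_TEST_SINK, DP_TEST_SINK_START) < 0)
		return -EIO;

	/* R/Cr, G/Y and B/Cb CRCs are 16 bits each, at consecutive bytes */
	for (i = 0; i < 6; i++)
		if (aux_readb(aux, DP_TEST_CRC_R_CR + i, &crc[i]) < 0)
			return -EIO;

	return 0;
}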