Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-intel-next-fixes-2014-12-04' of git://anongit.freedesktop.org/drm-intel into drm-next

Fixes for 3.20. I did stick the gen3/4 reset work from Ville in because we
have an awful lot of gen4 mesa hangs, and with this reset should also work
on vintage i965g/gm (we already have reset for g4x/gen4.5). So should help
to appease users suffering from these hangs. Otherwise all over.

This is the last 3.20 pull from me, from here on Jani will take over. By Ville Syrjälä (8) and others
* tag 'drm-intel-next-fixes-2014-12-04' of git://anongit.freedesktop.org/drm-intel:
drm/i915: Reject modeset when the same digital port is used more than once
drm/i915: mask RPS IRQs properly when disabling RPS
drm/i915: Tune down spurious CRC interrupt warning
drm/i915: Fix context object leak for legacy contexts
drm/i915/skl: Update in Gen9 multi-engine forcewake range
drm/i915/eDP: When enabling panel VDD cancel pending disable worker
drm/i915: Handle runtime pm in the CRC setup code
drm/i915: Disable crtcs gracefully before GPU reset on gen3/4
drm/i915: Grab modeset locks for GPU reset on pre-ctg
drm/i915: Implement GPU reset for g33
drm/i915: Implement GPU reset for 915/945
drm/i915: Restore the display config after a GPU reset on gen4
drm/i915: Fix gen4 GPU reset
drm/i915: Stop gathering error states for CS error interrupts
drm/i915: Disallow pin ioctl completely for kms drivers
drm/i915: Only warn the first time we attempt to mmio whilst suspended
drm/i915/chv: Enable AVI, SPD and HDMI infoframes for CHV.
drm/i915: Don't clobber crtc->new_config when nothing changes

+234 -97
+5
drivers/gpu/drm/i915/i915_debugfs.c
··· 3338 3338 if (pipe_crc->source && source) 3339 3339 return -EINVAL; 3340 3340 3341 + if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) { 3342 + DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n"); 3343 + return -EIO; 3344 + } 3345 + 3341 3346 if (IS_GEN2(dev)) 3342 3347 ret = i8xx_pipe_crc_ctl_reg(&source, &val); 3343 3348 else if (INTEL_INFO(dev)->gen < 5)
+10 -5
drivers/gpu/drm/i915/i915_gem.c
··· 2574 2574 list_del(&request->list); 2575 2575 i915_gem_request_remove_from_client(request); 2576 2576 2577 - if (i915.enable_execlists && ctx) { 2578 - struct intel_engine_cs *ring = request->ring; 2577 + if (ctx) { 2578 + if (i915.enable_execlists) { 2579 + struct intel_engine_cs *ring = request->ring; 2579 2580 2580 - if (ctx != ring->default_context) 2581 - intel_lr_context_unpin(ring, ctx); 2581 + if (ctx != ring->default_context) 2582 + intel_lr_context_unpin(ring, ctx); 2583 + } 2582 2584 i915_gem_context_unreference(ctx); 2583 2585 } 2584 2586 kfree(request); ··· 4265 4263 struct drm_i915_gem_object *obj; 4266 4264 int ret; 4267 4265 4268 - if (INTEL_INFO(dev)->gen >= 6) 4266 + if (drm_core_check_feature(dev, DRIVER_MODESET)) 4269 4267 return -ENODEV; 4270 4268 4271 4269 ret = i915_mutex_lock_interruptible(dev); ··· 4320 4318 struct drm_i915_gem_pin *args = data; 4321 4319 struct drm_i915_gem_object *obj; 4322 4320 int ret; 4321 + 4322 + if (drm_core_check_feature(dev, DRIVER_MODESET)) 4323 + return -ENODEV; 4323 4324 4324 4325 ret = i915_mutex_lock_interruptible(dev); 4325 4326 if (ret)
+34 -29
drivers/gpu/drm/i915/i915_irq.c
··· 231 231 232 232 assert_spin_locked(&dev_priv->irq_lock); 233 233 234 - if (WARN_ON(!intel_irqs_enabled(dev_priv))) 235 - return; 236 - 237 234 new_val = dev_priv->pm_irq_mask; 238 235 new_val &= ~interrupt_mask; 239 236 new_val |= (~enabled_irq_mask & interrupt_mask); ··· 244 247 245 248 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 246 249 { 250 + if (WARN_ON(!intel_irqs_enabled(dev_priv))) 251 + return; 252 + 247 253 snb_update_pm_irq(dev_priv, mask, mask); 254 + } 255 + 256 + static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv, 257 + uint32_t mask) 258 + { 259 + snb_update_pm_irq(dev_priv, mask, 0); 248 260 } 249 261 250 262 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 251 263 { 252 - snb_update_pm_irq(dev_priv, mask, 0); 264 + if (WARN_ON(!intel_irqs_enabled(dev_priv))) 265 + return; 266 + 267 + __gen6_disable_pm_irq(dev_priv, mask); 253 268 } 254 269 255 270 void gen6_reset_rps_interrupts(struct drm_device *dev) ··· 298 289 299 290 cancel_work_sync(&dev_priv->rps.work); 300 291 292 + spin_lock_irq(&dev_priv->irq_lock); 293 + 301 294 I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ? 
302 295 ~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0); 296 + 297 + __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events); 303 298 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) & 304 299 ~dev_priv->pm_rps_events); 305 - 306 - spin_lock_irq(&dev_priv->irq_lock); 307 - dev_priv->rps.pm_iir = 0; 308 - spin_unlock_irq(&dev_priv->irq_lock); 309 - 310 300 I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events); 301 + I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events); 302 + 303 + dev_priv->rps.pm_iir = 0; 304 + 305 + spin_unlock_irq(&dev_priv->irq_lock); 311 306 } 312 307 313 308 /** ··· 1352 1339 1353 1340 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1354 1341 GT_BSD_CS_ERROR_INTERRUPT | 1355 - GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) { 1356 - i915_handle_error(dev, false, "GT error interrupt 0x%08x", 1357 - gt_iir); 1358 - } 1342 + GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) 1343 + DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); 1359 1344 1360 1345 if (gt_iir & GT_PARITY_ERROR(dev)) 1361 1346 ivybridge_parity_error_irq_handler(dev, gt_iir); ··· 1634 1623 1635 1624 if (!pipe_crc->entries) { 1636 1625 spin_unlock(&pipe_crc->lock); 1637 - DRM_ERROR("spurious interrupt\n"); 1626 + DRM_DEBUG_KMS("spurious interrupt\n"); 1638 1627 return; 1639 1628 } 1640 1629 ··· 1742 1731 if (pm_iir & PM_VEBOX_USER_INTERRUPT) 1743 1732 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]); 1744 1733 1745 - if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) { 1746 - i915_handle_error(dev_priv->dev, false, 1747 - "VEBOX CS error interrupt 0x%08x", 1748 - pm_iir); 1749 - } 1734 + if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) 1735 + DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); 1750 1736 } 1751 1737 } 1752 1738 ··· 2436 2428 * simulated reset via debugs, so get an RPM reference. 
2437 2429 */ 2438 2430 intel_runtime_pm_get(dev_priv); 2431 + 2432 + intel_prepare_reset(dev); 2433 + 2439 2434 /* 2440 2435 * All state reset _must_ be completed before we update the 2441 2436 * reset counter, for otherwise waiters might miss the reset ··· 2447 2436 */ 2448 2437 ret = i915_reset(dev); 2449 2438 2450 - intel_display_handle_reset(dev); 2439 + intel_finish_reset(dev); 2451 2440 2452 2441 intel_runtime_pm_put(dev_priv); 2453 2442 ··· 3757 3746 */ 3758 3747 spin_lock(&dev_priv->irq_lock); 3759 3748 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3760 - i915_handle_error(dev, false, 3761 - "Command parser error, iir 0x%08x", 3762 - iir); 3749 + DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 3763 3750 3764 3751 for_each_pipe(dev_priv, pipe) { 3765 3752 int reg = PIPESTAT(pipe); ··· 3938 3929 */ 3939 3930 spin_lock(&dev_priv->irq_lock); 3940 3931 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3941 - i915_handle_error(dev, false, 3942 - "Command parser error, iir 0x%08x", 3943 - iir); 3932 + DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 3944 3933 3945 3934 for_each_pipe(dev_priv, pipe) { 3946 3935 int reg = PIPESTAT(pipe); ··· 4161 4154 */ 4162 4155 spin_lock(&dev_priv->irq_lock); 4163 4156 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4164 - i915_handle_error(dev, false, 4165 - "Command parser error, iir 0x%08x", 4166 - iir); 4157 + DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 4167 4158 4168 4159 for_each_pipe(dev_priv, pipe) { 4169 4160 int reg = PIPESTAT(pipe);
+2 -1
drivers/gpu/drm/i915/i915_reg.h
··· 78 78 79 79 80 80 /* Graphics reset regs */ 81 - #define I965_GDRST 0xc0 /* PCI config register */ 81 + #define I915_GDRST 0xc0 /* PCI config register */ 82 82 #define GRDOM_FULL (0<<2) 83 83 #define GRDOM_RENDER (1<<2) 84 84 #define GRDOM_MEDIA (3<<2) 85 85 #define GRDOM_MASK (3<<2) 86 + #define GRDOM_RESET_STATUS (1<<1) 86 87 #define GRDOM_RESET_ENABLE (1<<0) 87 88 88 89 #define ILK_GDSR 0x2ca4 /* MCHBAR offset */
+132 -19
drivers/gpu/drm/i915/intel_display.c
··· 2765 2765 return 0; 2766 2766 } 2767 2767 2768 - void intel_display_handle_reset(struct drm_device *dev) 2768 + static void intel_complete_page_flips(struct drm_device *dev) 2769 2769 { 2770 - struct drm_i915_private *dev_priv = dev->dev_private; 2771 2770 struct drm_crtc *crtc; 2772 - 2773 - /* 2774 - * Flips in the rings have been nuked by the reset, 2775 - * so complete all pending flips so that user space 2776 - * will get its events and not get stuck. 2777 - * 2778 - * Also update the base address of all primary 2779 - * planes to the the last fb to make sure we're 2780 - * showing the correct fb after a reset. 2781 - * 2782 - * Need to make two loops over the crtcs so that we 2783 - * don't try to grab a crtc mutex before the 2784 - * pending_flip_queue really got woken up. 2785 - */ 2786 2771 2787 2772 for_each_crtc(dev, crtc) { 2788 2773 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); ··· 2776 2791 intel_prepare_page_flip(dev, plane); 2777 2792 intel_finish_page_flip_plane(dev, plane); 2778 2793 } 2794 + } 2795 + 2796 + static void intel_update_primary_planes(struct drm_device *dev) 2797 + { 2798 + struct drm_i915_private *dev_priv = dev->dev_private; 2799 + struct drm_crtc *crtc; 2779 2800 2780 2801 for_each_crtc(dev, crtc) { 2781 2802 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); ··· 2799 2808 crtc->y); 2800 2809 drm_modeset_unlock(&crtc->mutex); 2801 2810 } 2811 + } 2812 + 2813 + void intel_prepare_reset(struct drm_device *dev) 2814 + { 2815 + struct drm_i915_private *dev_priv = to_i915(dev); 2816 + struct intel_crtc *crtc; 2817 + 2818 + /* no reset support for gen2 */ 2819 + if (IS_GEN2(dev)) 2820 + return; 2821 + 2822 + /* reset doesn't touch the display */ 2823 + if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) 2824 + return; 2825 + 2826 + drm_modeset_lock_all(dev); 2827 + 2828 + /* 2829 + * Disabling the crtcs gracefully seems nicer. Also the 2830 + * g33 docs say we should at least disable all the planes. 
2831 + */ 2832 + for_each_intel_crtc(dev, crtc) { 2833 + if (crtc->active) 2834 + dev_priv->display.crtc_disable(&crtc->base); 2835 + } 2836 + } 2837 + 2838 + void intel_finish_reset(struct drm_device *dev) 2839 + { 2840 + struct drm_i915_private *dev_priv = to_i915(dev); 2841 + 2842 + /* 2843 + * Flips in the rings will be nuked by the reset, 2844 + * so complete all pending flips so that user space 2845 + * will get its events and not get stuck. 2846 + */ 2847 + intel_complete_page_flips(dev); 2848 + 2849 + /* no reset support for gen2 */ 2850 + if (IS_GEN2(dev)) 2851 + return; 2852 + 2853 + /* reset doesn't touch the display */ 2854 + if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) { 2855 + /* 2856 + * Flips in the rings have been nuked by the reset, 2857 + * so update the base address of all primary 2858 + * planes to the the last fb to make sure we're 2859 + * showing the correct fb after a reset. 2860 + */ 2861 + intel_update_primary_planes(dev); 2862 + return; 2863 + } 2864 + 2865 + /* 2866 + * The display has been reset as well, 2867 + * so need a full re-initialization. 
2868 + */ 2869 + intel_runtime_pm_disable_interrupts(dev_priv); 2870 + intel_runtime_pm_enable_interrupts(dev_priv); 2871 + 2872 + intel_modeset_init_hw(dev); 2873 + 2874 + spin_lock_irq(&dev_priv->irq_lock); 2875 + if (dev_priv->display.hpd_irq_setup) 2876 + dev_priv->display.hpd_irq_setup(dev); 2877 + spin_unlock_irq(&dev_priv->irq_lock); 2878 + 2879 + intel_modeset_setup_hw_state(dev, true); 2880 + 2881 + intel_hpd_init(dev_priv); 2882 + 2883 + drm_modeset_unlock_all(dev); 2802 2884 } 2803 2885 2804 2886 static int ··· 10153 10089 return true; 10154 10090 } 10155 10091 10092 + static bool check_digital_port_conflicts(struct drm_device *dev) 10093 + { 10094 + struct intel_connector *connector; 10095 + unsigned int used_ports = 0; 10096 + 10097 + /* 10098 + * Walk the connector list instead of the encoder 10099 + * list to detect the problem on ddi platforms 10100 + * where there's just one encoder per digital port. 10101 + */ 10102 + list_for_each_entry(connector, 10103 + &dev->mode_config.connector_list, base.head) { 10104 + struct intel_encoder *encoder = connector->new_encoder; 10105 + 10106 + if (!encoder) 10107 + continue; 10108 + 10109 + WARN_ON(!encoder->new_crtc); 10110 + 10111 + switch (encoder->type) { 10112 + unsigned int port_mask; 10113 + case INTEL_OUTPUT_UNKNOWN: 10114 + if (WARN_ON(!HAS_DDI(dev))) 10115 + break; 10116 + case INTEL_OUTPUT_DISPLAYPORT: 10117 + case INTEL_OUTPUT_HDMI: 10118 + case INTEL_OUTPUT_EDP: 10119 + port_mask = 1 << enc_to_dig_port(&encoder->base)->port; 10120 + 10121 + /* the same port mustn't appear more than once */ 10122 + if (used_ports & port_mask) 10123 + return false; 10124 + 10125 + used_ports |= port_mask; 10126 + default: 10127 + break; 10128 + } 10129 + } 10130 + 10131 + return true; 10132 + } 10133 + 10156 10134 static struct intel_crtc_config * 10157 10135 intel_modeset_pipe_config(struct drm_crtc *crtc, 10158 10136 struct drm_framebuffer *fb, ··· 10208 10102 10209 10103 if 
(!check_encoder_cloning(to_intel_crtc(crtc))) { 10210 10104 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n"); 10105 + return ERR_PTR(-EINVAL); 10106 + } 10107 + 10108 + if (!check_digital_port_conflicts(dev)) { 10109 + DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n"); 10211 10110 return ERR_PTR(-EINVAL); 10212 10111 } 10213 10112 ··· 11018 10907 } 11019 10908 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, 11020 10909 "[modeset]"); 11021 - to_intel_crtc(crtc)->new_config = pipe_config; 11022 10910 11023 10911 out: 11024 10912 return pipe_config; ··· 11042 10932 return -ENOMEM; 11043 10933 11044 10934 *saved_mode = crtc->mode; 10935 + 10936 + if (modeset_pipes) 10937 + to_intel_crtc(crtc)->new_config = pipe_config; 11045 10938 11046 10939 /* 11047 10940 * See if the config requires any additional preparation, e.g. ··· 11579 11466 ret = PTR_ERR(pipe_config); 11580 11467 goto fail; 11581 11468 } else if (pipe_config) { 11582 - if (to_intel_crtc(set->crtc)->new_config->has_audio != 11469 + if (pipe_config->has_audio != 11583 11470 to_intel_crtc(set->crtc)->config.has_audio) 11584 11471 config->mode_changed = true; 11585 11472 11586 11473 /* Force mode sets for any infoframe stuff */ 11587 - if (to_intel_crtc(set->crtc)->new_config->has_infoframe || 11474 + if (pipe_config->has_infoframe || 11588 11475 to_intel_crtc(set->crtc)->config.has_infoframe) 11589 11476 config->mode_changed = true; 11590 11477 }
+1
drivers/gpu/drm/i915/intel_dp.c
··· 1503 1503 if (!is_edp(intel_dp)) 1504 1504 return false; 1505 1505 1506 + cancel_delayed_work(&intel_dp->panel_vdd_work); 1506 1507 intel_dp->want_panel_vdd = true; 1507 1508 1508 1509 if (edp_have_panel_vdd(intel_dp))
+2 -1
drivers/gpu/drm/i915/intel_drv.h
··· 958 958 unsigned int tiling_mode, 959 959 unsigned int bpp, 960 960 unsigned int pitch); 961 - void intel_display_handle_reset(struct drm_device *dev); 961 + void intel_prepare_reset(struct drm_device *dev); 962 + void intel_finish_reset(struct drm_device *dev); 962 963 void hsw_enable_pc8(struct drm_i915_private *dev_priv); 963 964 void hsw_disable_pc8(struct drm_i915_private *dev_priv); 964 965 void intel_dp_get_m_n(struct intel_crtc *crtc,
+7
drivers/gpu/drm/i915/intel_hdmi.c
··· 1461 1461 static void chv_hdmi_pre_enable(struct intel_encoder *encoder) 1462 1462 { 1463 1463 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); 1464 + struct intel_hdmi *intel_hdmi = &dport->hdmi; 1464 1465 struct drm_device *dev = encoder->base.dev; 1465 1466 struct drm_i915_private *dev_priv = dev->dev_private; 1466 1467 struct intel_crtc *intel_crtc = 1467 1468 to_intel_crtc(encoder->base.crtc); 1469 + struct drm_display_mode *adjusted_mode = 1470 + &intel_crtc->config.adjusted_mode; 1468 1471 enum dpio_channel ch = vlv_dport_to_channel(dport); 1469 1472 int pipe = intel_crtc->pipe; 1470 1473 int data, i; ··· 1591 1588 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val); 1592 1589 1593 1590 mutex_unlock(&dev_priv->dpio_lock); 1591 + 1592 + intel_hdmi->set_infoframes(&encoder->base, 1593 + intel_crtc->config.has_hdmi_sink, 1594 + adjusted_mode); 1594 1595 1595 1596 intel_enable_hdmi(encoder); 1596 1597
+41 -42
drivers/gpu/drm/i915/intel_uncore.c
··· 43 43 static void 44 44 assert_device_not_suspended(struct drm_i915_private *dev_priv) 45 45 { 46 - WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended, 47 - "Device suspended\n"); 46 + WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended, 47 + "Device suspended\n"); 48 48 } 49 49 50 50 static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) ··· 671 671 REG_RANGE((reg), 0x22000, 0x24000)) 672 672 673 673 #define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \ 674 - REG_RANGE((reg), 0xC00, 0x2000) 674 + REG_RANGE((reg), 0xB00, 0x2000) 675 675 676 676 #define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \ 677 - (REG_RANGE((reg), 0x2000, 0x4000) || \ 677 + (REG_RANGE((reg), 0x2000, 0x2700) || \ 678 + REG_RANGE((reg), 0x3000, 0x4000) || \ 678 679 REG_RANGE((reg), 0x5200, 0x8000) || \ 680 + REG_RANGE((reg), 0x8140, 0x8160) || \ 679 681 REG_RANGE((reg), 0x8300, 0x8500) || \ 680 682 REG_RANGE((reg), 0x8C00, 0x8D00) || \ 681 683 REG_RANGE((reg), 0xB000, 0xB480) || \ 682 - REG_RANGE((reg), 0xE000, 0xE800)) 684 + REG_RANGE((reg), 0xE000, 0xE900) || \ 685 + REG_RANGE((reg), 0x24400, 0x24800)) 683 686 684 687 #define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \ 685 - (REG_RANGE((reg), 0x8800, 0x8A00) || \ 688 + (REG_RANGE((reg), 0x8130, 0x8140) || \ 689 + REG_RANGE((reg), 0x8800, 0x8A00) || \ 686 690 REG_RANGE((reg), 0xD000, 0xD800) || \ 687 691 REG_RANGE((reg), 0x12000, 0x14000) || \ 688 692 REG_RANGE((reg), 0x1A000, 0x1EA00) || \ ··· 1349 1345 return 0; 1350 1346 } 1351 1347 1352 - static int i965_reset_complete(struct drm_device *dev) 1348 + static int i915_reset_complete(struct drm_device *dev) 1353 1349 { 1354 1350 u8 gdrst; 1355 - pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); 1351 + pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst); 1352 + return (gdrst & GRDOM_RESET_STATUS) == 0; 1353 + } 1354 + 1355 + static int i915_do_reset(struct drm_device *dev) 1356 + { 1357 + /* assert reset for at least 20 usec */ 1358 + 
pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE); 1359 + udelay(20); 1360 + pci_write_config_byte(dev->pdev, I915_GDRST, 0); 1361 + 1362 + return wait_for(i915_reset_complete(dev), 500); 1363 + } 1364 + 1365 + static int g4x_reset_complete(struct drm_device *dev) 1366 + { 1367 + u8 gdrst; 1368 + pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst); 1356 1369 return (gdrst & GRDOM_RESET_ENABLE) == 0; 1357 1370 } 1358 1371 1359 - static int i965_do_reset(struct drm_device *dev) 1372 + static int g33_do_reset(struct drm_device *dev) 1360 1373 { 1361 - int ret; 1362 - 1363 - /* FIXME: i965g/gm need a display save/restore for gpu reset. */ 1364 - return -ENODEV; 1365 - 1366 - /* 1367 - * Set the domains we want to reset (GRDOM/bits 2 and 3) as 1368 - * well as the reset bit (GR/bit 0). Setting the GR bit 1369 - * triggers the reset; when done, the hardware will clear it. 1370 - */ 1371 - pci_write_config_byte(dev->pdev, I965_GDRST, 1372 - GRDOM_RENDER | GRDOM_RESET_ENABLE); 1373 - ret = wait_for(i965_reset_complete(dev), 500); 1374 - if (ret) 1375 - return ret; 1376 - 1377 - pci_write_config_byte(dev->pdev, I965_GDRST, 1378 - GRDOM_MEDIA | GRDOM_RESET_ENABLE); 1379 - 1380 - ret = wait_for(i965_reset_complete(dev), 500); 1381 - if (ret) 1382 - return ret; 1383 - 1384 - pci_write_config_byte(dev->pdev, I965_GDRST, 0); 1385 - 1386 - return 0; 1374 + pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE); 1375 + return wait_for(g4x_reset_complete(dev), 500); 1387 1376 } 1388 1377 1389 1378 static int g4x_do_reset(struct drm_device *dev) ··· 1384 1387 struct drm_i915_private *dev_priv = dev->dev_private; 1385 1388 int ret; 1386 1389 1387 - pci_write_config_byte(dev->pdev, I965_GDRST, 1390 + pci_write_config_byte(dev->pdev, I915_GDRST, 1388 1391 GRDOM_RENDER | GRDOM_RESET_ENABLE); 1389 - ret = wait_for(i965_reset_complete(dev), 500); 1392 + ret = wait_for(g4x_reset_complete(dev), 500); 1390 1393 if (ret) 1391 1394 return ret; 1392 1395 ··· 1394 1397 
I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE); 1395 1398 POSTING_READ(VDECCLK_GATE_D); 1396 1399 1397 - pci_write_config_byte(dev->pdev, I965_GDRST, 1400 + pci_write_config_byte(dev->pdev, I915_GDRST, 1398 1401 GRDOM_MEDIA | GRDOM_RESET_ENABLE); 1399 - ret = wait_for(i965_reset_complete(dev), 500); 1402 + ret = wait_for(g4x_reset_complete(dev), 500); 1400 1403 if (ret) 1401 1404 return ret; 1402 1405 ··· 1404 1407 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE); 1405 1408 POSTING_READ(VDECCLK_GATE_D); 1406 1409 1407 - pci_write_config_byte(dev->pdev, I965_GDRST, 0); 1410 + pci_write_config_byte(dev->pdev, I915_GDRST, 0); 1408 1411 1409 1412 return 0; 1410 1413 } ··· 1462 1465 return ironlake_do_reset(dev); 1463 1466 else if (IS_G4X(dev)) 1464 1467 return g4x_do_reset(dev); 1465 - else if (IS_GEN4(dev)) 1466 - return i965_do_reset(dev); 1468 + else if (IS_G33(dev)) 1469 + return g33_do_reset(dev); 1470 + else if (INTEL_INFO(dev)->gen >= 3) 1471 + return i915_do_reset(dev); 1467 1472 else 1468 1473 return -ENODEV; 1469 1474 }