Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/i915: fix HPD IRQ reenable work cancelation

Atm, the HPD IRQ reenable timer can get rearmed right after it's
canceled. Also to access the HPD IRQ mask registers we need to wake up
the HW.

Solve both issues by converting the reenable timer to a delayed work and
grabbing a runtime PM reference in the work. By this we can also forgo
canceling the timer during runtime suspend, since the only important
thing there is that the HW is awake when we write the registers and
that's ensured by the RPM ref. So do the cancelation only during driver
unload time; this is also a requirement for an upcoming patch where we
want to cancel all HPD related works only during system suspend and
driver unload time, but not during runtime suspend.

Note that there is still a race between the HPD IRQ reenable work and
drm_irq_uninstall() during driver unload, where the work can reenable
the HPD IRQs disabled by drm_irq_uninstall(). This isn't a problem since
the HPD IRQs will still be effectively masked by the first level
interrupt mask.

v2-3:
- unchanged
v4:
- use proper API for changing the expiration time for an already pending
delayed work (Jani)

Signed-off-by: Imre Deak <imre.deak@intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com> (v2)
Cc: stable@vger.kernel.org (3.16+)
Signed-off-by: Jani Nikula <jani.nikula@intel.com>

Authored by Imre Deak; committed by Jani Nikula.
Commit hashes: 6323751d 1c767b33

+14 -22
+1 -1
drivers/gpu/drm/i915/i915_drv.h
@@ -1458,7 +1458,7 @@
 	} hpd_mark;
 	} hpd_stats[HPD_NUM_PINS];
 	u32 hpd_event_bits;
-	struct timer_list hotplug_reenable_timer;
+	struct delayed_work hotplug_reenable_work;
 
 	struct i915_fbc fbc;
 	struct i915_drrs drrs;
+12 -21
drivers/gpu/drm/i915/i915_irq.c
@@ -1189,8 +1189,8 @@
 	 * some connectors */
 	if (hpd_disabled) {
 		drm_kms_helper_poll_enable(dev);
-		mod_timer(&dev_priv->hotplug_reenable_timer,
-			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
+		mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
+				 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
 	}
 
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -1211,11 +1211,6 @@
 
 	if (changed)
 		drm_kms_helper_hotplug_event(dev);
-}
-
-static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv)
-{
-	del_timer_sync(&dev_priv->hotplug_reenable_timer);
 }
 
 static void ironlake_rps_change_irq_handler(struct drm_device *dev)
@@ -3887,8 +3892,6 @@
 	if (!dev_priv)
 		return;
 
-	intel_hpd_irq_uninstall(dev_priv);
-
 	gen8_irq_reset(dev);
 }
 
@@ -3900,8 +3907,6 @@
 		return;
 
 	I915_WRITE(VLV_MASTER_IER, 0);
-
-	intel_hpd_irq_uninstall(dev_priv);
 
 	for_each_pipe(pipe)
 		I915_WRITE(PIPESTAT(pipe), 0xffff);
@@ -3978,8 +3987,6 @@
 
 	if (!dev_priv)
 		return;
-
-	intel_hpd_irq_uninstall(dev_priv);
 
 	ironlake_irq_reset(dev);
 }
@@ -4349,8 +4360,6 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int pipe;
 
-	intel_hpd_irq_uninstall(dev_priv);
-
 	if (I915_HAS_HOTPLUG(dev)) {
 		I915_WRITE(PORT_HOTPLUG_EN, 0);
 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -4585,8 +4598,6 @@
 	if (!dev_priv)
 		return;
 
-	intel_hpd_irq_uninstall(dev_priv);
-
 	I915_WRITE(PORT_HOTPLUG_EN, 0);
 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
 
@@ -4600,13 +4615,17 @@
 	I915_WRITE(IIR, I915_READ(IIR));
 }
 
-static void intel_hpd_irq_reenable(unsigned long data)
+static void intel_hpd_irq_reenable(struct work_struct *work)
 {
-	struct drm_i915_private *dev_priv = (struct drm_i915_private *)data;
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv),
+			     hotplug_reenable_work.work);
 	struct drm_device *dev = dev_priv->dev;
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	unsigned long irqflags;
 	int i;
+
+	intel_runtime_pm_get(dev_priv);
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
@@ -4637,6 +4648,8 @@
 	if (dev_priv->display.hpd_irq_setup)
 		dev_priv->display.hpd_irq_setup(dev);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+	intel_runtime_pm_put(dev_priv);
 }
 
 void intel_irq_init(struct drm_device *dev)
@@ -4661,8 +4670,8 @@
 	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
 		    i915_hangcheck_elapsed,
 		    (unsigned long) dev);
-	setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable,
-		    (unsigned long) dev_priv);
+	INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
+			  intel_hpd_irq_reenable);
 
 	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+1
drivers/gpu/drm/i915/intel_display.c
@@ -13104,6 +13104,7 @@
 	 */
 	drm_irq_uninstall(dev);
 	cancel_work_sync(&dev_priv->hotplug_work);
+	cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work);
 	dev_priv->pm._irqs_disabled = true;
 
 	/*