Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

irq: Handle spurious irq detection for threaded irqs

The detection of spurious interrupts is currently limited to the first level
handler. In force-threaded mode we never notice if the threaded irq does
not feel responsible.
This patch catches the return value of the threaded handler and forwards
it to the spurious detector. If the primary handler returns only
IRQ_WAKE_THREAD then the spurious detector ignores it because it gets
called again from the threaded handler.

[ tglx: Report the erroneous return value early and bail out ]

Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
Link: http://lkml.kernel.org/r/1306824972-27067-2-git-send-email-sebastian@breakpoint.cc
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

authored by

Sebastian Andrzej Siewior and committed by
Thomas Gleixner
3a43e05f ef26f20c

+39 -19
+3 -3
include/linux/irqreturn.h
··· 8 8 * @IRQ_WAKE_THREAD handler requests to wake the handler thread 9 9 */ 10 10 enum irqreturn { 11 - IRQ_NONE, 12 - IRQ_HANDLED, 13 - IRQ_WAKE_THREAD, 11 + IRQ_NONE = (0 << 0), 12 + IRQ_HANDLED = (1 << 0), 13 + IRQ_WAKE_THREAD = (1 << 1), 14 14 }; 15 15 16 16 typedef enum irqreturn irqreturn_t;
-6
kernel/irq/handle.c
··· 133 133 switch (res) { 134 134 case IRQ_WAKE_THREAD: 135 135 /* 136 - * Set result to handled so the spurious check 137 - * does not trigger. 138 - */ 139 - res = IRQ_HANDLED; 140 - 141 - /* 142 136 * Catch drivers which return WAKE_THREAD but 143 137 * did not set up a thread function 144 138 */
+18 -6
kernel/irq/manage.c
··· 723 723 * context. So we need to disable bh here to avoid deadlocks and other 724 724 * side effects. 725 725 */ 726 - static void 726 + static irqreturn_t 727 727 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) 728 728 { 729 + irqreturn_t ret; 730 + 729 731 local_bh_disable(); 730 - action->thread_fn(action->irq, action->dev_id); 732 + ret = action->thread_fn(action->irq, action->dev_id); 731 733 irq_finalize_oneshot(desc, action, false); 732 734 local_bh_enable(); 735 + return ret; 733 736 } 734 737 735 738 /* ··· 740 737 * preemtible - many of them need to sleep and wait for slow busses to 741 738 * complete. 742 739 */ 743 - static void irq_thread_fn(struct irq_desc *desc, struct irqaction *action) 740 + static irqreturn_t irq_thread_fn(struct irq_desc *desc, 741 + struct irqaction *action) 744 742 { 745 - action->thread_fn(action->irq, action->dev_id); 743 + irqreturn_t ret; 744 + 745 + ret = action->thread_fn(action->irq, action->dev_id); 746 746 irq_finalize_oneshot(desc, action, false); 747 + return ret; 747 748 } 748 749 749 750 /* ··· 760 753 }; 761 754 struct irqaction *action = data; 762 755 struct irq_desc *desc = irq_to_desc(action->irq); 763 - void (*handler_fn)(struct irq_desc *desc, struct irqaction *action); 756 + irqreturn_t (*handler_fn)(struct irq_desc *desc, 757 + struct irqaction *action); 764 758 int wake; 765 759 766 760 if (force_irqthreads & test_bit(IRQTF_FORCED_THREAD, ··· 791 783 desc->istate |= IRQS_PENDING; 792 784 raw_spin_unlock_irq(&desc->lock); 793 785 } else { 786 + irqreturn_t action_ret; 787 + 794 788 raw_spin_unlock_irq(&desc->lock); 795 - handler_fn(desc, action); 789 + action_ret = handler_fn(desc, action); 790 + if (!noirqdebug) 791 + note_interrupt(action->irq, desc, action_ret); 796 792 } 797 793 798 794 wake = atomic_dec_and_test(&desc->threads_active);
+18 -4
kernel/irq/spurious.c
··· 167 167 jiffies + POLL_SPURIOUS_IRQ_INTERVAL); 168 168 } 169 169 170 + static inline int bad_action_ret(irqreturn_t action_ret) 171 + { 172 + if (likely(action_ret <= (IRQ_HANDLED | IRQ_WAKE_THREAD))) 173 + return 0; 174 + return 1; 175 + } 176 + 170 177 /* 171 178 * If 99,900 of the previous 100,000 interrupts have not been handled 172 179 * then assume that the IRQ is stuck in some manner. Drop a diagnostic ··· 189 182 struct irqaction *action; 190 183 unsigned long flags; 191 184 192 - if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) { 185 + if (bad_action_ret(action_ret)) { 193 186 printk(KERN_ERR "irq event %d: bogus return value %x\n", 194 187 irq, action_ret); 195 188 } else { ··· 270 263 if (desc->istate & IRQS_POLL_INPROGRESS) 271 264 return; 272 265 273 - if (unlikely(action_ret != IRQ_HANDLED)) { 266 + /* we get here again via the threaded handler */ 267 + if (action_ret == IRQ_WAKE_THREAD) 268 + return; 269 + 270 + if (bad_action_ret(action_ret)) { 271 + report_bad_irq(irq, desc, action_ret); 272 + return; 273 + } 274 + 275 + if (unlikely(action_ret == IRQ_NONE)) { 274 276 /* 275 277 * If we are seeing only the odd spurious IRQ caused by 276 278 * bus asynchronicity then don't eventually trigger an error, ··· 291 275 else 292 276 desc->irqs_unhandled++; 293 277 desc->last_unhandled = jiffies; 294 - if (unlikely(action_ret != IRQ_NONE)) 295 - report_bad_irq(irq, desc, action_ret); 296 278 } 297 279 298 280 if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {