Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

genirq: Prevent spurious detection for unconditionally polled interrupts

On a 68k platform a couple of interrupts are demultiplexed and
"polled" from a top level interrupt. Unfortunately there is no way to
determine which of the sub interrupts raised the top level interrupt,
so all of the demultiplexed interrupt handlers need to be
invoked. Given a high enough frequency this can trigger the spurious
interrupt detection mechanism, if one of the demultiplexed interrupts
returns IRQ_NONE continuously. But this is a false positive as the
polling causes this behaviour and not buggy hardware/software.

Introduce IRQ_IS_POLLED which can be set at interrupt chip setup time via
irq_set_status_flags(). The flag excludes the interrupt from the
spurious detector and from all core polling activities.

Reported-and-tested-by: Michael Schmitz <schmitzmic@gmail.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: linux-m68k@vger.kernel.org
Link: http://lkml.kernel.org/r/alpine.DEB.2.02.1311061149250.23353@ionos.tec.linutronix.de

+22 -4
+6 -1
include/linux/irq.h
··· 70 70 * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context 71 71 * IRQ_NESTED_TRHEAD - Interrupt nests into another thread 72 72 * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable 73 + * IRQ_IS_POLLED - Always polled by another interrupt. Exclude 74 + * it from the spurious interrupt detection 75 + * mechanism and from core side polling. 73 76 */ 74 77 enum { 75 78 IRQ_TYPE_NONE = 0x00000000, ··· 97 94 IRQ_NESTED_THREAD = (1 << 15), 98 95 IRQ_NOTHREAD = (1 << 16), 99 96 IRQ_PER_CPU_DEVID = (1 << 17), 97 + IRQ_IS_POLLED = (1 << 18), 100 98 }; 101 99 102 100 #define IRQF_MODIFY_MASK \ 103 101 (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ 104 102 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ 105 - IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID) 103 + IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ 104 + IRQ_IS_POLLED) 106 105 107 106 #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) 108 107
+7
kernel/irq/settings.h
··· 14 14 _IRQ_NO_BALANCING = IRQ_NO_BALANCING, 15 15 _IRQ_NESTED_THREAD = IRQ_NESTED_THREAD, 16 16 _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID, 17 + _IRQ_IS_POLLED = IRQ_IS_POLLED, 17 18 _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK, 18 19 }; 19 20 ··· 27 26 #define IRQ_NOAUTOEN GOT_YOU_MORON 28 27 #define IRQ_NESTED_THREAD GOT_YOU_MORON 29 28 #define IRQ_PER_CPU_DEVID GOT_YOU_MORON 29 + #define IRQ_IS_POLLED GOT_YOU_MORON 30 30 #undef IRQF_MODIFY_MASK 31 31 #define IRQF_MODIFY_MASK GOT_YOU_MORON 32 32 ··· 148 146 static inline bool irq_settings_is_nested_thread(struct irq_desc *desc) 149 147 { 150 148 return desc->status_use_accessors & _IRQ_NESTED_THREAD; 149 + } 150 + 151 + static inline bool irq_settings_is_polled(struct irq_desc *desc) 152 + { 153 + return desc->status_use_accessors & _IRQ_IS_POLLED; 151 154 }
+9 -3
kernel/irq/spurious.c
··· 67 67 68 68 raw_spin_lock(&desc->lock); 69 69 70 - /* PER_CPU and nested thread interrupts are never polled */ 71 - if (irq_settings_is_per_cpu(desc) || irq_settings_is_nested_thread(desc)) 70 + /* 71 + * PER_CPU, nested thread interrupts and interrupts explicitly 72 + * marked polled are excluded from polling. 73 + */ 74 + if (irq_settings_is_per_cpu(desc) || 75 + irq_settings_is_nested_thread(desc) || 76 + irq_settings_is_polled(desc)) 72 77 goto out; 73 78 74 79 /* ··· 273 268 void note_interrupt(unsigned int irq, struct irq_desc *desc, 274 269 irqreturn_t action_ret) 275 270 { 276 - if (desc->istate & IRQS_POLL_INPROGRESS) 271 + if (desc->istate & IRQS_POLL_INPROGRESS || 272 + irq_settings_is_polled(desc)) 277 273 return; 278 274 279 275 /* we get here again via the threaded handler */