Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'x86-irq-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 interrupt updates from Thomas Gleixner:
"A small set of changes to simplify and improve the interrupt handling
in do_IRQ() by moving the common case into common code and thereby
cleaning it up"

* 'x86-irq-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/irq: Check for VECTOR_UNUSED directly
x86/irq: Move IS_ERR_OR_NULL() check into common do_IRQ() code
x86/irq: Improve definition of VECTOR_SHUTDOWN et al

+11 -21
+2 -2
arch/x86/include/asm/hw_irq.h
···
153 153   extern char spurious_entries_start[];
154 154
155 155   #define VECTOR_UNUSED		NULL
156     - #define VECTOR_SHUTDOWN		((void *)~0UL)
157     - #define VECTOR_RETRIGGERED	((void *)~1UL)
    156 + #define VECTOR_SHUTDOWN		((void *)-1L)
    157 + #define VECTOR_RETRIGGERED	((void *)-2L)
158 158
159 159   typedef struct irq_desc* vector_irq_t[NR_VECTORS];
160 160   DECLARE_PER_CPU(vector_irq_t, vector_irq);
+1 -1
arch/x86/include/asm/irq.h
···
34 34   extern void (*x86_platform_ipi_callback)(void);
35 35   extern void native_init_IRQ(void);
36 36
37    - extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs);
   37 + extern void handle_irq(struct irq_desc *desc, struct pt_regs *regs);
38 38
39 39   extern __visible unsigned int do_IRQ(struct pt_regs *regs);
40 40
+7 -3
arch/x86/kernel/irq.c
···
243 243   	RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");
244 244
245 245   	desc = __this_cpu_read(vector_irq[vector]);
246     -
247     - 	if (!handle_irq(desc, regs)) {
    246 + 	if (likely(!IS_ERR_OR_NULL(desc))) {
    247 + 		if (IS_ENABLED(CONFIG_X86_32))
    248 + 			handle_irq(desc, regs);
    249 + 		else
    250 + 			generic_handle_irq_desc(desc);
    251 + 	} else {
248 252   		ack_APIC_irq();
249 253
250     - 		if (desc != VECTOR_RETRIGGERED && desc != VECTOR_SHUTDOWN) {
    254 + 		if (desc == VECTOR_UNUSED) {
251 255   			pr_emerg_ratelimited("%s: %d.%d No irq handler for vector\n",
252 256   					     __func__, smp_processor_id(),
253 257   					     vector);
+1 -6
arch/x86/kernel/irq_32.c
···
148 148   	call_on_stack(__do_softirq, isp);
149 149   }
150 150
151     - bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
    151 + void handle_irq(struct irq_desc *desc, struct pt_regs *regs)
152 152   {
153 153   	int overflow = check_stack_overflow();
154     -
155     - 	if (IS_ERR_OR_NULL(desc))
156     - 		return false;
157 154
158 155   	if (user_mode(regs) || !execute_on_irq_stack(overflow, desc)) {
159 156   		if (unlikely(overflow))
160 157   			print_stack_overflow();
161 158   		generic_handle_irq_desc(desc);
162 159   	}
163     -
164     - 	return true;
165 160   }
-9
arch/x86/kernel/irq_64.c
···
26 26   DEFINE_PER_CPU_PAGE_ALIGNED(struct irq_stack, irq_stack_backing_store) __visible;
27 27   DECLARE_INIT_PER_CPU(irq_stack_backing_store);
28 28
29    - bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
30    - {
31    - 	if (IS_ERR_OR_NULL(desc))
32    - 		return false;
33    -
34    - 	generic_handle_irq_desc(desc);
35    - 	return true;
36    - }
37    -
38 29   #ifdef CONFIG_VMAP_STACK
39 30   /*
40 31    * VMAP the backing store with guard pages