Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: remove irq_count and do_softirq_own_stack()

sysrq_handle_reboot() re-enables interrupts while on the irq stack. The
irq_stack implementation wrongly assumed this would only ever happen
via the softirq path, allowing it to update irq_count late, in
do_softirq_own_stack().

This means if an irq occurs in sysrq_handle_reboot(), during
emergency_restart() the stack will be corrupted, as irq_count wasn't
updated.

Lose the optimisation: instead of moving the adding/subtracting of
irq_count into irq_stack_entry/irq_stack_exit, remove irq_count
entirely, and compare sp_el0 (struct thread_info) with sp & ~(THREAD_SIZE - 1).
This tells us whether we are on a task stack; if so, we can safely switch
to the irq stack. Finally, remove do_softirq_own_stack(), as we no longer need it.

Reported-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: James Morse <james.morse@arm.com>
[will: use get_thread_info macro]
Signed-off-by: Will Deacon <will.deacon@arm.com>

authored by

James Morse and committed by
Will Deacon
d224a69e 66b3923a

+11 -48
-2
arch/arm64/include/asm/irq.h
··· 11 11 #include <asm-generic/irq.h> 12 12 #include <asm/thread_info.h> 13 13 14 - #define __ARCH_HAS_DO_SOFTIRQ 15 - 16 14 struct pt_regs; 17 15 18 16 DECLARE_PER_CPU(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack);
+10 -9
arch/arm64/kernel/entry.S
··· 181 181 .macro irq_stack_entry 182 182 mov x19, sp // preserve the original sp 183 183 184 - this_cpu_ptr irq_stack, x25, x26 185 - 186 184 /* 187 - * Check the lowest address on irq_stack for the irq_count value, 188 - * incremented by do_softirq_own_stack if we have re-enabled irqs 189 - * while on the irq_stack. 185 + * Compare sp with the current thread_info, if the top 186 + * ~(THREAD_SIZE - 1) bits match, we are on a task stack, and 187 + * should switch to the irq stack. 190 188 */ 191 - ldr x26, [x25] 192 - cbnz x26, 9998f // recursive use? 189 + and x25, x19, #~(THREAD_SIZE - 1) 190 + cmp x25, tsk 191 + b.ne 9998f 193 192 194 - /* switch to the irq stack */ 193 + this_cpu_ptr irq_stack, x25, x26 195 194 mov x26, #IRQ_STACK_START_SP 196 195 add x26, x25, x26 196 + 197 + /* switch to the irq stack */ 197 198 mov sp, x26 198 199 199 200 /* ··· 406 405 bl trace_hardirqs_off 407 406 #endif 408 407 408 + get_thread_info tsk 409 409 irq_handler 410 410 411 411 #ifdef CONFIG_PREEMPT 412 - get_thread_info tsk 413 412 ldr w24, [tsk, #TI_PREEMPT] // get preempt count 414 413 cbnz w24, 1f // preempt count != 0 415 414 ldr x0, [tsk, #TI_FLAGS] // get flags
+1 -37
arch/arm64/kernel/irq.c
··· 25 25 #include <linux/irq.h> 26 26 #include <linux/smp.h> 27 27 #include <linux/init.h> 28 - #include <linux/interrupt.h> 29 28 #include <linux/irqchip.h> 30 29 #include <linux/seq_file.h> 31 30 32 31 unsigned long irq_err_count; 33 32 34 - /* 35 - * irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned. 36 - * irq_stack[0] is used as irq_count, a non-zero value indicates the stack 37 - * is in use, and el?_irq() shouldn't switch to it. This is used to detect 38 - * recursive use of the irq_stack, it is lazily updated by 39 - * do_softirq_own_stack(), which is called on the irq_stack, before 40 - * re-enabling interrupts to process softirqs. 41 - */ 33 + /* irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned. */ 42 34 DEFINE_PER_CPU(unsigned long [IRQ_STACK_SIZE/sizeof(long)], irq_stack) __aligned(16); 43 - 44 - #define IRQ_COUNT() (*per_cpu(irq_stack, smp_processor_id())) 45 35 46 36 int arch_show_interrupts(struct seq_file *p, int prec) 47 37 { ··· 55 65 irqchip_init(); 56 66 if (!handle_arch_irq) 57 67 panic("No interrupt controller found."); 58 - } 59 - 60 - /* 61 - * do_softirq_own_stack() is called from irq_exit() before __do_softirq() 62 - * re-enables interrupts, at which point we may re-enter el?_irq(). We 63 - * increase irq_count here so that el1_irq() knows that it is already on the 64 - * irq stack. 65 - * 66 - * Called with interrupts disabled, so we don't worry about moving cpu, or 67 - * being interrupted while modifying irq_count. 68 - * 69 - * This function doesn't actually switch stack. 70 - */ 71 - void do_softirq_own_stack(void) 72 - { 73 - int cpu = smp_processor_id(); 74 - 75 - WARN_ON_ONCE(!irqs_disabled()); 76 - 77 - if (on_irq_stack(current_stack_pointer, cpu)) { 78 - IRQ_COUNT()++; 79 - __do_softirq(); 80 - IRQ_COUNT()--; 81 - } else { 82 - __do_softirq(); 83 - } 84 68 }