Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

parisc: fix irq stack on UP and SMP

The logic to detect if the irq stack was already in use with
raw_spin_trylock() is wrong, because it will generate a "trylock failure
on UP" error message with CONFIG_SMP=n and CONFIG_DEBUG_SPINLOCK=y.

arch_spin_trylock() can't be used either since in the CONFIG_SMP=n case
no atomic protection is given and we are reentrant here. A mutex didn't
work either and brings more overhead by turning off interrupts.

So, let's use the fastest path for parisc which is the ldcw instruction.

Counting how often the irq stack was used is pretty useless, so just
drop this piece of code.

Signed-off-by: Helge Deller <deller@gmx.de>

+26 -41
-5
arch/parisc/include/asm/hardirq.h
··· 17 17 18 18 typedef struct { 19 19 unsigned int __softirq_pending; 20 - #ifdef CONFIG_DEBUG_STACKOVERFLOW 21 20 unsigned int kernel_stack_usage; 22 - #ifdef CONFIG_IRQSTACKS 23 21 unsigned int irq_stack_usage; 24 - unsigned int irq_stack_counter; 25 - #endif 26 - #endif 27 22 #ifdef CONFIG_SMP 28 23 unsigned int irq_resched_count; 29 24 unsigned int irq_call_count;
-21
arch/parisc/include/asm/processor.h
··· 17 17 #include <asm/ptrace.h> 18 18 #include <asm/types.h> 19 19 #include <asm/percpu.h> 20 - 21 20 #endif /* __ASSEMBLY__ */ 22 21 23 22 /* ··· 56 57 #endif 57 58 58 59 #ifndef __ASSEMBLY__ 59 - 60 - /* 61 - * IRQ STACK - used for irq handler 62 - */ 63 - #ifdef __KERNEL__ 64 - 65 - #include <linux/spinlock_types.h> 66 - 67 - #define IRQ_STACK_SIZE (4096 << 2) /* 16k irq stack size */ 68 - 69 - union irq_stack_union { 70 - unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)]; 71 - raw_spinlock_t lock; 72 - }; 73 - 74 - DECLARE_PER_CPU(union irq_stack_union, irq_stack_union); 75 - 76 - void call_on_stack(unsigned long p1, void *func, unsigned long new_stack); 77 - 78 - #endif /* __KERNEL__ */ 79 60 80 61 /* 81 62 * Data detected about CPUs at boot time which is the same for all CPU's.
+26 -15
arch/parisc/kernel/irq.c
··· 27 27 #include <linux/interrupt.h> 28 28 #include <linux/kernel_stat.h> 29 29 #include <linux/seq_file.h> 30 - #include <linux/spinlock.h> 31 30 #include <linux/types.h> 32 31 #include <asm/io.h> 33 32 34 33 #include <asm/smp.h> 34 + #include <asm/ldcw.h> 35 35 36 36 #undef PARISC_IRQ_CR16_COUNTS 37 37 ··· 172 172 for_each_online_cpu(j) 173 173 seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage); 174 174 seq_puts(p, " Interrupt stack usage\n"); 175 - seq_printf(p, "%*s: ", prec, "ISC"); 176 - for_each_online_cpu(j) 177 - seq_printf(p, "%10u ", irq_stats(j)->irq_stack_counter); 178 - seq_puts(p, " Interrupt stack usage counter\n"); 179 175 # endif 180 176 #endif 181 177 #ifdef CONFIG_SMP ··· 380 384 return (BITS_PER_LONG - bit) + TIMER_IRQ; 381 385 } 382 386 387 + #ifdef CONFIG_IRQSTACKS 388 + /* 389 + * IRQ STACK - used for irq handler 390 + */ 391 + #define IRQ_STACK_SIZE (4096 << 2) /* 16k irq stack size */ 392 + 393 + union irq_stack_union { 394 + unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)]; 395 + volatile unsigned int slock[4]; 396 + volatile unsigned int lock[1]; 397 + }; 398 + 399 + DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = { 400 + .slock = { 1,1,1,1 }, 401 + }; 402 + #endif 403 + 404 + 383 405 int sysctl_panic_on_stackoverflow = 1; 384 406 385 407 static inline void stack_overflow_check(struct pt_regs *regs) ··· 464 450 } 465 451 466 452 #ifdef CONFIG_IRQSTACKS 467 - DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = { 468 - .lock = __RAW_SPIN_LOCK_UNLOCKED((irq_stack_union).lock) 469 - }; 453 + /* in entry.S: */ 454 + void call_on_stack(unsigned long p1, void *func, unsigned long new_stack); 470 455 471 456 static void execute_on_irq_stack(void *func, unsigned long param1) 472 457 { 473 458 union irq_stack_union *union_ptr; 474 459 unsigned long irq_stack; 475 - raw_spinlock_t *irq_stack_in_use; 460 + volatile unsigned int *irq_stack_in_use; 476 461 477 462 union_ptr = &per_cpu(irq_stack_union, smp_processor_id()); 
478 463 irq_stack = (unsigned long) &union_ptr->stack; 479 - irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.lock), 464 + irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.slock), 480 465 64); /* align for stack frame usage */ 481 466 482 467 /* We may be called recursive. If we are already using the irq stack, 483 468 * just continue to use it. Use spinlocks to serialize 484 469 * the irq stack usage. 485 470 */ 486 - irq_stack_in_use = &union_ptr->lock; 487 - if (!raw_spin_trylock(irq_stack_in_use)) { 471 + irq_stack_in_use = (volatile unsigned int *)__ldcw_align(union_ptr); 472 + if (!__ldcw(irq_stack_in_use)) { 488 473 void (*direct_call)(unsigned long p1) = func; 489 474 490 475 /* We are using the IRQ stack already. ··· 495 482 /* This is where we switch to the IRQ stack. */ 496 483 call_on_stack(param1, func, irq_stack); 497 484 498 - __inc_irq_stat(irq_stack_counter); 499 - 500 485 /* free up irq stack usage. */ 501 - do_raw_spin_unlock(irq_stack_in_use); 486 + *irq_stack_in_use = 1; 502 487 } 503 488 504 489 asmlinkage void do_softirq(void)