Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6:
sparc64: Fix hardirq tracing in trap return path.
sparc64: Use correct pt_regs in decode_access_size() error paths.
sparc64: Fix PREEMPT_ACTIVE value.
sparc64: Run NMIs on the hardirq stack.
sparc64: Allocate sufficient stack space in ftrace stubs.
sparc: Fix forgotten kmemleak headers inclusion

+47 -27
+1 -1
arch/sparc/include/asm/thread_info_64.h
···
 #define THREAD_SHIFT PAGE_SHIFT
 #endif /* PAGE_SHIFT == 13 */
 
-#define PREEMPT_ACTIVE		0x4000000
+#define PREEMPT_ACTIVE		0x10000000
 
 /*
  * macros/functions for gaining access to the thread information structure
+2 -18
arch/sparc/kernel/irq_64.c
···
 #include <linux/seq_file.h>
 #include <linux/ftrace.h>
 #include <linux/irq.h>
+#include <linux/kmemleak.h>
 
 #include <asm/ptrace.h>
 #include <asm/processor.h>
···
 
 #include "entry.h"
 #include "cpumap.h"
+#include "kstack.h"
 
 #define NUM_IVECS	(IMAP_INR + 1)
 
···
 
 void *hardirq_stack[NR_CPUS];
 void *softirq_stack[NR_CPUS];
-
-static __attribute__((always_inline)) void *set_hardirq_stack(void)
-{
-	void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
-
-	__asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
-	if (orig_sp < sp ||
-	    orig_sp > (sp + THREAD_SIZE)) {
-		sp += THREAD_SIZE - 192 - STACK_BIAS;
-		__asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
-	}
-
-	return orig_sp;
-}
-static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
-{
-	__asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
-}
 
 void __irq_entry handler_irq(int irq, struct pt_regs *regs)
 {
+19
arch/sparc/kernel/kstack.h
···
 
 }
 
+static inline __attribute__((always_inline)) void *set_hardirq_stack(void)
+{
+	void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
+
+	__asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
+	if (orig_sp < sp ||
+	    orig_sp > (sp + THREAD_SIZE)) {
+		sp += THREAD_SIZE - 192 - STACK_BIAS;
+		__asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
+	}
+
+	return orig_sp;
+}
+
+static inline __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
+{
+	__asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
+}
+
 #endif /* _KSTACK_H */
+7
arch/sparc/kernel/nmi.c
···
 #include <asm/ptrace.h>
 #include <asm/pcr.h>
 
+#include "kstack.h"
+
 /* We don't have a real NMI on sparc64, but we can fake one
  * up using profiling counter overflow interrupts and interrupt
  * levels.
···
 notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
 {
 	unsigned int sum, touched = 0;
+	void *orig_sp;
 
 	clear_softint(1 << irq);
 
 	local_cpu_data().__nmi_count++;
 
 	nmi_enter();
+
+	orig_sp = set_hardirq_stack();
 
 	if (notify_die(DIE_NMI, "nmi", regs, 0,
 		       pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
···
 		write_pic(picl_value(nmi_hz));
 		pcr_ops->write(pcr_enable);
 	}
+
+	restore_hardirq_stack(orig_sp);
 
 	nmi_exit();
 }
+11 -1
arch/sparc/kernel/rtrap_64.S
···
 		 nop
 		call		trace_hardirqs_on
 		 nop
-		wrpr		%l4, %pil
+		/* Do not actually set the %pil here.  We will do that
+		 * below after we clear PSTATE_IE in the %pstate register.
+		 * If we re-enable interrupts here, we can recurse down
+		 * the hardirq stack potentially endlessly, causing a
+		 * stack overflow.
+		 *
+		 * It is tempting to put this test and trace_hardirqs_on
+		 * call at the 'rt_continue' label, but that will not work
+		 * as that path hits unconditionally and we do not want to
+		 * execute this in NMI return paths, for example.
+		 */
 #endif
 rtrap_no_irq_enable:
 		andcc		%l1, TSTATE_PRIV, %l3
+3 -3
arch/sparc/kernel/unaligned_64.c
···
 }
 
 /* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
-static inline int decode_access_size(unsigned int insn)
+static inline int decode_access_size(struct pt_regs *regs, unsigned int insn)
 {
 	unsigned int tmp;
 
···
 		return 2;
 	else {
 		printk("Impossible unaligned trap. insn=%08x\n", insn);
-		die_if_kernel("Byte sized unaligned access?!?!", current_thread_info()->kregs);
+		die_if_kernel("Byte sized unaligned access?!?!", regs);
 
 		/* GCC should never warn that control reaches the end
 		 * of this function without returning a value because
···
 asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
 {
 	enum direction dir = decode_direction(insn);
-	int size = decode_access_size(insn);
+	int size = decode_access_size(regs, insn);
 	int orig_asi, asi;
 
 	current_thread_info()->kern_una_regs = regs;
+4 -4
arch/sparc/lib/mcount.S
···
 	cmp		%g1, %g2
 	be,pn		%icc, 1f
 	 mov		%i7, %g3
-	save		%sp, -128, %sp
+	save		%sp, -176, %sp
 	mov		%g3, %o1
 	jmpl		%g1, %o7
 	 mov		%i7, %o0
···
 	 nop
 5:	mov		%i7, %g2
 	mov		%fp, %g3
-	save		%sp, -128, %sp
+	save		%sp, -176, %sp
 	mov		%g2, %l0
 	ba,pt		%xcc, ftrace_graph_caller
 	 mov		%g3, %l1
···
 	lduw		[%g1 + %lo(function_trace_stop)], %g1
 	brnz,pn		%g1, ftrace_stub
 	 mov		%fp, %g3
-	save		%sp, -128, %sp
+	save		%sp, -176, %sp
 	mov		%g2, %o1
 	mov		%g2, %l0
 	mov		%g3, %l1
···
 END(ftrace_graph_caller)
 
 ENTRY(return_to_handler)
-	save		%sp, -128, %sp
+	save		%sp, -176, %sp
 	call		ftrace_return_to_handler
 	 mov		%fp, %o0
 	jmpl		%o0 + 8, %g0