Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6:
sparc64: Add some more commentary to __raw_local_irq_save()
sparc64: Fix memory leak in pci_register_iommu_region().
sparc64: Add kmemleak annotation to sun4v_build_virq()
sparc64: Support kmemleak.
sparc64: Add function graph tracer support.
sparc64: Give a stack frame to the ftrace call sites.
      sparc64: Use a separate counter for timer interrupts and NMI checks, like x86.
sparc64: Remove profiling from some low-level bits.
sparc64: Kill unnecessary static on local var in ftrace_call_replace().
sparc64: Kill CONFIG_STACK_DEBUG code.
sparc64: Add HAVE_FUNCTION_TRACE_MCOUNT_TEST and tidy up.
sparc64: Adjust __raw_local_irq_save() to cooperate in NMIs.
sparc64: Use kstack_valid() in die_if_kernel().

+205 -134
+3
arch/sparc/Kconfig
···
 	def_bool 64BIT
 	select ARCH_SUPPORTS_MSI
 	select HAVE_FUNCTION_TRACER
+	select HAVE_FUNCTION_GRAPH_TRACER
+	select HAVE_FUNCTION_GRAPH_FP_TEST
+	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_KRETPROBES
 	select HAVE_KPROBES
 	select HAVE_LMB
+1 -4
arch/sparc/Kconfig.debug
···
 	bool "D-cache flush debugging"
 	depends on SPARC64 && DEBUG_KERNEL
 
-config STACK_DEBUG
-	bool "Stack Overflow Detection Support"
-
 config MCOUNT
 	bool
 	depends on SPARC64
-	depends on STACK_DEBUG || FUNCTION_TRACER
+	depends on FUNCTION_TRACER
 	default y
 
 config FRAME_POINTER
+1 -1
arch/sparc/include/asm/cpudata_64.h
···
 	unsigned int	__nmi_count;
 	unsigned long	clock_tick;	/* %tick's per second */
 	unsigned long	__pad;
-	unsigned int	__pad1;
+	unsigned int	irq0_irqs;
 	unsigned int	__pad2;
 
 	/* Dcache line 2, rarely used */
+19 -2
arch/sparc/include/asm/irqflags_64.h
···
  */
 static inline unsigned long __raw_local_irq_save(void)
 {
-	unsigned long flags = __raw_local_save_flags();
+	unsigned long flags, tmp;
 
-	raw_local_irq_disable();
+	/* Disable interrupts to PIL_NORMAL_MAX unless we already
+	 * are using PIL_NMI, in which case PIL_NMI is retained.
+	 *
+	 * The only values we ever program into the %pil are 0,
+	 * PIL_NORMAL_MAX and PIL_NMI.
+	 *
+	 * Since PIL_NMI is the largest %pil value and all bits are
+	 * set in it (0xf), it doesn't matter what PIL_NORMAL_MAX
+	 * actually is.
+	 */
+	__asm__ __volatile__(
+		"rdpr	%%pil, %0\n\t"
+		"or	%0, %2, %1\n\t"
+		"wrpr	%1, 0x0, %%pil"
+		: "=r" (flags), "=r" (tmp)
+		: "i" (PIL_NORMAL_MAX)
+		: "memory"
+	);
 
 	return flags;
 }
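A minimal C model of what the new or/wrpr sequence computes, for readers checking the commentary: PIL_NMI is 0xf as the comment states, and PIL_NORMAL_MAX is taken as 14 from asm/pil.h of this era (the OR trick holds for any value up to 0xf).

/* Userspace model, not kernel code: the single "or" both raises a
 * normal-context %pil to PIL_NORMAL_MAX and leaves an NMI-context
 * %pil at PIL_NMI, since 0xf | anything-below == 0xf.
 */
#include <assert.h>

#define PIL_NORMAL_MAX	14	/* value from asm/pil.h, assumed here */
#define PIL_NMI		15	/* 0xf: all four %pil bits set */

static unsigned long pil_after_irq_save(unsigned long cur_pil)
{
	return cur_pil | PIL_NORMAL_MAX;	/* mirrors "or %0, %2, %1" */
}

int main(void)
{
	assert(pil_after_irq_save(0) == PIL_NORMAL_MAX);
	assert(pil_after_irq_save(PIL_NORMAL_MAX) == PIL_NORMAL_MAX);
	assert(pil_after_irq_save(PIL_NMI) == PIL_NMI);	/* NMI level retained */
	return 0;
}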
+9 -1
arch/sparc/kernel/Makefile
···
 CPPFLAGS_vmlinux.lds := -Usparc -m$(BITS)
 extra-y     += vmlinux.lds
 
+ifdef CONFIG_FUNCTION_TRACER
+# Do not profile debug and lowlevel utilities
+CFLAGS_REMOVE_ftrace.o := -pg
+CFLAGS_REMOVE_time_$(BITS).o := -pg
+CFLAGS_REMOVE_perf_event.o := -pg
+CFLAGS_REMOVE_pcr.o := -pg
+endif
+
 obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o
 obj-$(CONFIG_SPARC32) += etrap_32.o
 obj-$(CONFIG_SPARC32) += rtrap_32.o
···
 
 
 obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
-CFLAGS_REMOVE_ftrace.o := -pg
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 
 obj-$(CONFIG_EARLYFB) += btext.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
+59 -1
arch/sparc/kernel/ftrace.c
···
 
 static u32 ftrace_call_replace(unsigned long ip, unsigned long addr)
 {
-	static u32 call;
+	u32 call;
 	s32 off;
 
 	off = ((s32)addr - (s32)ip);
···
 	return 0;
 }
 #endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_graph_call(void);
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	unsigned long ip = (unsigned long)(&ftrace_graph_call);
+	u32 old, new;
+
+	old = *(u32 *) &ftrace_graph_call;
+	new = ftrace_call_replace(ip, (unsigned long) &ftrace_graph_caller);
+	return ftrace_modify_code(ip, old, new);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	unsigned long ip = (unsigned long)(&ftrace_graph_call);
+	u32 old, new;
+
+	old = *(u32 *) &ftrace_graph_call;
+	new = ftrace_call_replace(ip, (unsigned long) &ftrace_stub);
+
+	return ftrace_modify_code(ip, old, new);
+}
+
+#endif /* !CONFIG_DYNAMIC_FTRACE */
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+unsigned long prepare_ftrace_return(unsigned long parent,
+				    unsigned long self_addr,
+				    unsigned long frame_pointer)
+{
+	unsigned long return_hooker = (unsigned long) &return_to_handler;
+	struct ftrace_graph_ent trace;
+
+	if (unlikely(atomic_read(&current->tracing_graph_pause)))
+		return parent + 8UL;
+
+	if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
+				     frame_pointer) == -EBUSY)
+		return parent + 8UL;
+
+	trace.func = self_addr;
+
+	/* Only trace if the calling function expects to */
+	if (!ftrace_graph_entry(&trace)) {
+		current->curr_ret_stack--;
+		return parent + 8UL;
+	}
+
+	return return_hooker;
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
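The body of ftrace_call_replace() is truncated in the hunk above. A hedged userspace sketch of the SPARC "call" encoding it produces (opcode 01 in the top two bits, signed 30-bit word displacement), which the enable/disable helpers rely on when patching the ftrace_graph_call site:

/* Sketch only: models the standard SPARC call instruction format,
 * not the exact kernel source, which is elided above.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t sparc_call_insn(uint32_t ip, uint32_t addr)
{
	int32_t off = (int32_t)addr - (int32_t)ip;

	/* op=01 in bits 31:30, displacement counted in 4-byte words */
	return 0x40000000u | (((uint32_t)off >> 2) & 0x3fffffffu);
}

int main(void)
{
	/* Calling the very next instruction gives displacement 1. */
	assert(sparc_call_insn(0x1000, 0x1004) == 0x40000001u);
	/* Backward calls work through the 30-bit sign wrap. */
	assert(sparc_call_insn(0x1004, 0x1000) == 0x7fffffffu);
	return 0;
}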
+10 -1
arch/sparc/kernel/irq_64.c
···
 #include <linux/delay.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/ftrace.h>
 #include <linux/irq.h>
 
 #include <asm/ptrace.h>
···
 	bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
 	if (unlikely(!bucket))
 		return 0;
+
+	/* The only reference we store to the IRQ bucket is
+	 * by physical address which kmemleak can't see, tell
+	 * it that this object explicitly is not a leak and
+	 * should be scanned.
+	 */
+	kmemleak_not_leak(bucket);
+
 	__flush_dcache_range((unsigned long) bucket,
 			     ((unsigned long) bucket +
 			      sizeof(struct ino_bucket)));
···
 	__asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
 }
 
-void handler_irq(int irq, struct pt_regs *regs)
+void __irq_entry handler_irq(int irq, struct pt_regs *regs)
 {
 	unsigned long pstate, bucket_pa;
 	struct pt_regs *old_regs;
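The kmemleak comment above describes a general pattern; a hedged kernel-style sketch (identifiers are illustrative, not from the sparc code) of why an object reachable only through its physical address needs the annotation:

/* Sketch, not from this merge: kmemleak scans for virtual-address
 * references, so an object kept only as __pa(obj) looks orphaned
 * and would be reported as a leak without the annotation.
 */
#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <asm/page.h>

static unsigned long stash_by_phys_addr(size_t len)
{
	void *obj = kzalloc(len, GFP_ATOMIC);

	if (!obj)
		return 0UL;
	kmemleak_not_leak(obj);		/* only __pa(obj) survives below */
	return __pa(obj);
}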
+2 -1
arch/sparc/kernel/kgdb_64.c
···
 
 #include <linux/kgdb.h>
 #include <linux/kdebug.h>
+#include <linux/ftrace.h>
 
 #include <asm/kdebug.h>
 #include <asm/ptrace.h>
···
 }
 
 #ifdef CONFIG_SMP
-void smp_kgdb_capture_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_kgdb_capture_client(int irq, struct pt_regs *regs)
 {
 	unsigned long flags;
 
+1 -2
arch/sparc/kernel/nmi.c
···
 notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
 {
 	unsigned int sum, touched = 0;
-	int cpu = smp_processor_id();
 
 	clear_softint(1 << irq);
 
···
 	else
 		pcr_ops->write(PCR_PIC_PRIV);
 
-	sum = kstat_irqs_cpu(0, cpu);
+	sum = local_cpu_data().irq0_irqs;
 	if (__get_cpu_var(nmi_touch)) {
 		__get_cpu_var(nmi_touch) = 0;
 		touched = 1;
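A self-contained C model (not the kernel source) of the watchdog test this counter feeds: each NMI samples the per-cpu timer-interrupt count, and only a run of NMIs with no progress trips the lockup report:

#include <assert.h>

static unsigned int last_irq_sum;	/* models per-cpu state */
static unsigned int alert_counter;

static int nmi_tick(unsigned int irq0_irqs, unsigned int threshold)
{
	if (irq0_irqs == last_irq_sum) {
		if (++alert_counter >= threshold)
			return 1;	/* timer IRQs stuck: lockup */
	} else {
		last_irq_sum = irq0_irqs;
		alert_counter = 0;
	}
	return 0;
}

int main(void)
{
	assert(nmi_tick(5, 3) == 0);	/* first sample of count 5 */
	assert(nmi_tick(5, 3) == 0);	/* no progress, strike one */
	assert(nmi_tick(5, 3) == 0);	/* strike two */
	assert(nmi_tick(5, 3) == 1);	/* strike three: report */
	assert(nmi_tick(6, 3) == 0);	/* progress resets the count */
	return 0;
}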
+8 -3
arch/sparc/kernel/pci_common.c
···
 		struct resource *rp = kzalloc(sizeof(*rp), GFP_KERNEL);
 
 		if (!rp) {
-			prom_printf("Cannot allocate IOMMU resource.\n");
-			prom_halt();
+			pr_info("%s: Cannot allocate IOMMU resource.\n",
+				pbm->name);
+			return;
 		}
 		rp->name = "IOMMU";
 		rp->start = pbm->mem_space.start + (unsigned long) vdma[0];
 		rp->end = rp->start + (unsigned long) vdma[1] - 1UL;
 		rp->flags = IORESOURCE_BUSY;
-		request_resource(&pbm->mem_space, rp);
+		if (request_resource(&pbm->mem_space, rp)) {
+			pr_info("%s: Unable to request IOMMU resource.\n",
+				pbm->name);
+			kfree(rp);
+		}
 	}
 
+2 -1
arch/sparc/kernel/pcr.c
···
 #include <linux/irq.h>
 
 #include <linux/perf_event.h>
+#include <linux/ftrace.h>
 
 #include <asm/pil.h>
 #include <asm/pcr.h>
···
  * Therefore in such situations we defer the work by signalling
  * a lower level cpu IRQ.
  */
-void deferred_pcr_work_irq(int irq, struct pt_regs *regs)
+void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
 {
 	struct pt_regs *old_regs;
 
+6 -5
arch/sparc/kernel/smp_64.c
···
 #include <linux/profile.h>
 #include <linux/bootmem.h>
 #include <linux/vmalloc.h>
+#include <linux/ftrace.h>
 #include <linux/cpu.h>
 #include <linux/slab.h>
 
···
 			      &cpumask_of_cpu(cpu));
 }
 
-void smp_call_function_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
 {
 	clear_softint(1 << irq);
 	generic_smp_call_function_interrupt();
 }
 
-void smp_call_function_single_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
 {
 	clear_softint(1 << irq);
 	generic_smp_call_function_single_interrupt();
···
 	put_cpu();
 }
 
-void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
 {
 	struct mm_struct *mm;
 	unsigned long flags;
···
  */
 extern void prom_world(int);
 
-void smp_penguin_jailcell(int irq, struct pt_regs *regs)
+void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
 {
 	clear_softint(1 << irq);
 
···
 			      &cpumask_of_cpu(cpu));
 }
 
-void smp_receive_signal_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
 {
 	clear_softint(1 << irq);
 }
+3 -1
arch/sparc/kernel/time_64.c
···
 #include <linux/clocksource.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/ftrace.h>
 
 #include <asm/oplib.h>
 #include <asm/timer.h>
···
 };
 static DEFINE_PER_CPU(struct clock_event_device, sparc64_events);
 
-void timer_interrupt(int irq, struct pt_regs *regs)
+void __irq_entry timer_interrupt(int irq, struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 	unsigned long tick_mask = tick_ops->softint_mask;
···
 
 	irq_enter();
 
+	local_cpu_data().irq0_irqs++;
 	kstat_incr_irqs_this_cpu(0, irq_to_desc(0));
 
 	if (unlikely(!evt->event_handler)) {
+3 -23
arch/sparc/kernel/traps_64.c
···
 
 EXPORT_SYMBOL(dump_stack);
 
-static inline int is_kernel_stack(struct task_struct *task,
-				  struct reg_window *rw)
-{
-	unsigned long rw_addr = (unsigned long) rw;
-	unsigned long thread_base, thread_end;
-
-	if (rw_addr < PAGE_OFFSET) {
-		if (task != &init_task)
-			return 0;
-	}
-
-	thread_base = (unsigned long) task_stack_page(task);
-	thread_end = thread_base + sizeof(union thread_union);
-	if (rw_addr >= thread_base &&
-	    rw_addr < thread_end &&
-	    !(rw_addr & 0x7UL))
-		return 1;
-
-	return 0;
-}
-
 static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
 {
 	unsigned long fp = rw->ins[6];
···
 	show_regs(regs);
 	add_taint(TAINT_DIE);
 	if (regs->tstate & TSTATE_PRIV) {
+		struct thread_info *tp = current_thread_info();
 		struct reg_window *rw = (struct reg_window *)
 			(regs->u_regs[UREG_FP] + STACK_BIAS);
···
 		 * find some badly aligned kernel stack.
 		 */
 		while (rw &&
-		       count++ < 30&&
-		       is_kernel_stack(current, rw)) {
+		       count++ < 30 &&
+		       kstack_valid(tp, (unsigned long) rw)) {
 			printk("Caller[%016lx]: %pS\n", rw->ins[7],
 			       (void *) rw->ins[7]);
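kstack_valid() itself is not part of this diff (it lives in arch/sparc/include/asm/kstack.h); a simplified userspace model of the bounds-and-alignment test it centralizes, omitting the per-cpu hard/soft IRQ stacks the real helper also accepts:

#include <assert.h>

#define THREAD_SIZE	16384UL		/* illustrative stack size */
#define WINDOW_BYTES	(16UL * 8UL)	/* one 16-register window */

static int stack_valid(unsigned long base, unsigned long addr)
{
	if (addr & 0x7UL)		/* reg_window must be 8-byte aligned */
		return 0;
	return addr >= base && addr + WINDOW_BYTES <= base + THREAD_SIZE;
}

int main(void)
{
	unsigned long base = 0x100000UL;

	assert(stack_valid(base, base + 64));
	assert(!stack_valid(base, base + 3));		/* misaligned */
	assert(!stack_valid(base, base + THREAD_SIZE));	/* past the end */
	return 0;
}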
+5
arch/sparc/kernel/vmlinux.lds.S
···
 		SCHED_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
+		IRQENTRY_TEXT
 		*(.gnu.warning)
 	} = 0
 	_etext = .;
 
 	RO_DATA(PAGE_SIZE)
+
+	/* Start of data section */
+	_sdata = .;
+
 	.data1 : {
 		*(.data1)
 	}
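IRQENTRY_TEXT collects the .irqentry.text section that the __irq_entry annotations throughout this merge emit; a hedged sketch (handler name illustrative) of how a handler opts in so the function graph tracer can recognize interrupt entry points:

/* Sketch, not from this merge; __irq_entry comes from
 * <linux/ftrace.h> and expands to a section attribute placing the
 * function between __irqentry_text_start and __irqentry_text_end.
 */
#include <linux/ftrace.h>
#include <linux/ptrace.h>

void __irq_entry example_irq_handler(int irq, struct pt_regs *regs)
{
	/* handler body elided; the section placement is the point */
}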
+72 -87
arch/sparc/lib/mcount.S
···
 
 #include <linux/linkage.h>
 
-#include <asm/ptrace.h>
-#include <asm/thread_info.h>
-
 /*
  * This is the main variant and is called by C code. GCC's -pg option
  * automatically instruments every C function with a call to this.
  */
 
-#ifdef CONFIG_STACK_DEBUG
-
-#define OVSTACKSIZE	4096		/* lets hope this is enough */
-
-	.data
-	.align		8
-panicstring:
-	.asciz		"Stack overflow\n"
-	.align		8
-ovstack:
-	.skip		OVSTACKSIZE
-#endif
 	.text
 	.align		32
 	.globl		_mcount
···
 	.type		mcount,#function
 _mcount:
 mcount:
-#ifdef CONFIG_STACK_DEBUG
-	/*
-	 * Check whether %sp is dangerously low.
-	 */
-	ldub		[%g6 + TI_FPDEPTH], %g1
-	srl		%g1, 1, %g3
-	add		%g3, 1, %g3
-	sllx		%g3, 8, %g3	! each fpregs frame is 256b
-	add		%g3, 192, %g3
-	add		%g6, %g3, %g3	! where does task_struct+frame end?
-	sub		%g3, STACK_BIAS, %g3
-	cmp		%sp, %g3
-	bg,pt		%xcc, 1f
-	nop
-	lduh		[%g6 + TI_CPU], %g1
-	sethi		%hi(hardirq_stack), %g3
-	or		%g3, %lo(hardirq_stack), %g3
-	sllx		%g1, 3, %g1
-	ldx		[%g3 + %g1], %g7
-	sub		%g7, STACK_BIAS, %g7
-	cmp		%sp, %g7
-	bleu,pt		%xcc, 2f
-	sethi		%hi(THREAD_SIZE), %g3
-	add		%g7, %g3, %g7
-	cmp		%sp, %g7
-	blu,pn		%xcc, 1f
-2:	sethi		%hi(softirq_stack), %g3
-	or		%g3, %lo(softirq_stack), %g3
-	ldx		[%g3 + %g1], %g7
-	sub		%g7, STACK_BIAS, %g7
-	cmp		%sp, %g7
-	bleu,pt		%xcc, 3f
-	sethi		%hi(THREAD_SIZE), %g3
-	add		%g7, %g3, %g7
-	cmp		%sp, %g7
-	blu,pn		%xcc, 1f
-	nop
-	/* If we are already on ovstack, don't hop onto it
-	 * again, we are already trying to output the stack overflow
-	 * message.
-	 */
-3:	sethi		%hi(ovstack), %g7	! cant move to panic stack fast enough
-	or		%g7, %lo(ovstack), %g7
-	add		%g7, OVSTACKSIZE, %g3
-	sub		%g3, STACK_BIAS + 192, %g3
-	sub		%g7, STACK_BIAS, %g7
-	cmp		%sp, %g7
-	blu,pn		%xcc, 2f
-	cmp		%sp, %g3
-	bleu,pn		%xcc, 1f
-	nop
-2:	mov		%g3, %sp
-	sethi		%hi(panicstring), %g3
-	call		prom_printf
-	or		%g3, %lo(panicstring), %o0
-	call		prom_halt
-	nop
-1:
-#endif
 #ifdef CONFIG_FUNCTION_TRACER
 #ifdef CONFIG_DYNAMIC_FTRACE
-	mov		%o7, %o0
-	.globl		mcount_call
-mcount_call:
-	call		ftrace_stub
-	mov		%o0, %o7
+	/* Do nothing, the retl/nop below is all we need.  */
 #else
-	sethi		%hi(ftrace_trace_function), %g1
+	sethi		%hi(function_trace_stop), %g1
+	lduw		[%g1 + %lo(function_trace_stop)], %g2
+	brnz,pn		%g2, 2f
+	sethi		%hi(ftrace_trace_function), %g1
 	sethi		%hi(ftrace_stub), %g2
 	ldx		[%g1 + %lo(ftrace_trace_function)], %g1
 	or		%g2, %lo(ftrace_stub), %g2
 	cmp		%g1, %g2
 	be,pn		%icc, 1f
-	mov		%i7, %o1
-	jmpl		%g1, %g0
-	mov		%o7, %o0
+	mov		%i7, %g3
+	save		%sp, -128, %sp
+	mov		%g3, %o1
+	jmpl		%g1, %o7
+	mov		%i7, %o0
+	ret
+	restore
 	/* not reached */
 1:
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	sethi		%hi(ftrace_graph_return), %g1
+	ldx		[%g1 + %lo(ftrace_graph_return)], %g3
+	cmp		%g2, %g3
+	bne,pn		%xcc, 5f
+	sethi		%hi(ftrace_graph_entry_stub), %g2
+	sethi		%hi(ftrace_graph_entry), %g1
+	or		%g2, %lo(ftrace_graph_entry_stub), %g2
+	ldx		[%g1 + %lo(ftrace_graph_entry)], %g1
+	cmp		%g1, %g2
+	be,pt		%xcc, 2f
+	nop
+5:	mov		%i7, %g2
+	mov		%fp, %g3
+	save		%sp, -128, %sp
+	mov		%g2, %l0
+	ba,pt		%xcc, ftrace_graph_caller
+	mov		%g3, %l1
+#endif
+2:
 #endif
 #endif
 	retl
···
 	.globl		ftrace_caller
 	.type		ftrace_caller,#function
 ftrace_caller:
-	mov		%i7, %o1
-	mov		%o7, %o0
+	sethi		%hi(function_trace_stop), %g1
+	mov		%i7, %g2
+	lduw		[%g1 + %lo(function_trace_stop)], %g1
+	brnz,pn		%g1, ftrace_stub
+	mov		%fp, %g3
+	save		%sp, -128, %sp
+	mov		%g2, %o1
+	mov		%g2, %l0
+	mov		%g3, %l1
 	.globl		ftrace_call
 ftrace_call:
 	call		ftrace_stub
-	mov		%o0, %o7
-	retl
+	mov		%i7, %o0
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.globl		ftrace_graph_call
+ftrace_graph_call:
+	call		ftrace_stub
 	nop
+#endif
+	ret
+	restore
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.size		ftrace_graph_call,.-ftrace_graph_call
+#endif
+	.size		ftrace_call,.-ftrace_call
 	.size		ftrace_caller,.-ftrace_caller
 #endif
+#endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+	mov		%l0, %o0
+	mov		%i7, %o1
+	call		prepare_ftrace_return
+	mov		%l1, %o2
+	ret
+	restore		%o0, -8, %i7
+END(ftrace_graph_caller)
+
+ENTRY(return_to_handler)
+	save		%sp, -128, %sp
+	call		ftrace_return_to_handler
+	mov		%fp, %o0
+	jmpl		%o0 + 8, %g0
+	restore
+END(return_to_handler)
 #endif
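A self-contained C model (not the SPARC assembly above) of the return-address hooking that ftrace_graph_caller and return_to_handler implement. On sparc64 a function resumes at %i7 + 8, which is why prepare_ftrace_return() hands back parent + 8UL when it bails out, and why ftrace_graph_caller ends with "restore %o0, -8, %i7":

#include <assert.h>
#include <stdio.h>

static unsigned long pushed_ret;	/* models the per-task ret_stack */

/* Entry side: remember the real return address, hand back the
 * trampoline address to patch into the return register.
 */
static unsigned long hook_return(unsigned long parent,
				 unsigned long trampoline)
{
	pushed_ret = parent;
	return trampoline;
}

/* Exit side: the trampoline runs the tracer's exit hook, then
 * reports where execution should really resume.
 */
static unsigned long trampoline_model(void)
{
	printf("traced function returned\n");
	return pushed_ret;
}

int main(void)
{
	unsigned long parent = 0x4000UL, tramp = 0x9000UL;

	assert(hook_return(parent, tramp) == tramp);
	assert(trampoline_model() == parent);
	return 0;
}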
+1 -1
lib/Kconfig.debug
··· 356 356 config DEBUG_KMEMLEAK 357 357 bool "Kernel memory leak detector" 358 358 depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \ 359 - (X86 || ARM || PPC || S390 || SUPERH || MICROBLAZE) 359 + (X86 || ARM || PPC || S390 || SPARC64 || SUPERH || MICROBLAZE) 360 360 361 361 select DEBUG_FS if SYSFS 362 362 select STACKTRACE if STACKTRACE_SUPPORT