Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6:
sparc64: Add some more commentary to __raw_local_irq_save()
sparc64: Fix memory leak in pci_register_iommu_region().
sparc64: Add kmemleak annotation to sun4v_build_virq()
sparc64: Support kmemleak.
sparc64: Add function graph tracer support.
sparc64: Give a stack frame to the ftrace call sites.
sparc64: Use a separate counter for timer interrupts and NMI checks, like x86.
sparc64: Remove profiling from some low-level bits.
sparc64: Kill unnecessary static on local var in ftrace_call_replace().
sparc64: Kill CONFIG_STACK_DEBUG code.
sparc64: Add HAVE_FUNCTION_TRACE_MCOUNT_TEST and tidy up.
sparc64: Adjust __raw_local_irq_save() to cooperate in NMIs.
sparc64: Use kstack_valid() in die_if_kernel().

+205 -134
+3
arch/sparc/Kconfig
···
 	def_bool 64BIT
 	select ARCH_SUPPORTS_MSI
 	select HAVE_FUNCTION_TRACER
+	select HAVE_FUNCTION_GRAPH_TRACER
+	select HAVE_FUNCTION_GRAPH_FP_TEST
+	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_KRETPROBES
 	select HAVE_KPROBES
 	select HAVE_LMB
+1 -4
arch/sparc/Kconfig.debug
···
 	bool "D-cache flush debugging"
 	depends on SPARC64 && DEBUG_KERNEL
 
-config STACK_DEBUG
-	bool "Stack Overflow Detection Support"
-
 config MCOUNT
 	bool
 	depends on SPARC64
-	depends on STACK_DEBUG || FUNCTION_TRACER
+	depends on FUNCTION_TRACER
 	default y
 
 config FRAME_POINTER
+1 -1
arch/sparc/include/asm/cpudata_64.h
···
 	unsigned int	__nmi_count;
 	unsigned long	clock_tick;	/* %tick's per second */
 	unsigned long	__pad;
-	unsigned int	__pad1;
+	unsigned int	irq0_irqs;
 	unsigned int	__pad2;
 
 	/* Dcache line 2, rarely used */
+19 -2
arch/sparc/include/asm/irqflags_64.h
···
  */
 static inline unsigned long __raw_local_irq_save(void)
 {
-	unsigned long flags = __raw_local_save_flags();
+	unsigned long flags, tmp;
 
-	raw_local_irq_disable();
+	/* Disable interrupts to PIL_NORMAL_MAX unless we already
+	 * are using PIL_NMI, in which case PIL_NMI is retained.
+	 *
+	 * The only values we ever program into the %pil are 0,
+	 * PIL_NORMAL_MAX and PIL_NMI.
+	 *
+	 * Since PIL_NMI is the largest %pil value and all bits are
+	 * set in it (0xf), it doesn't matter what PIL_NORMAL_MAX
+	 * actually is.
+	 */
+	__asm__ __volatile__(
+		"rdpr	%%pil, %0\n\t"
+		"or	%0, %2, %1\n\t"
+		"wrpr	%1, 0x0, %%pil"
+		: "=r" (flags), "=r" (tmp)
+		: "i" (PIL_NORMAL_MAX)
+		: "memory"
+	);
 
 	return flags;
 }
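
A userspace C model of the rdpr/or/wrpr sequence above may help review (a sketch only; PIL values as defined in asm/pil.h, where PIL_NMI is 0xf):

    #include <stdio.h>

    #define PIL_NORMAL_MAX	14	/* per asm/pil.h */
    #define PIL_NMI		15	/* 0xf, all bits set */

    /* Model of the inline asm: new %pil = old %pil | PIL_NORMAL_MAX,
     * so PIL_NMI is never lowered when called from NMI context. */
    static unsigned int irq_save_model(unsigned int *pil)
    {
    	unsigned int flags = *pil;	/* rdpr %pil, %0 */
    	*pil |= PIL_NORMAL_MAX;		/* or + wrpr     */
    	return flags;			/* restored later by irq_restore */
    }

    int main(void)
    {
    	unsigned int pil = 0;
    	irq_save_model(&pil);		/* normal context: 0 -> 14 */
    	printf("%u\n", pil);
    	pil = PIL_NMI;
    	irq_save_model(&pil);		/* NMI context: stays 15 */
    	printf("%u\n", pil);
    	return 0;
    }
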
+9 -1
arch/sparc/kernel/Makefile
···
 CPPFLAGS_vmlinux.lds := -Usparc -m$(BITS)
 extra-y += vmlinux.lds
 
+ifdef CONFIG_FUNCTION_TRACER
+# Do not profile debug and lowlevel utilities
+CFLAGS_REMOVE_ftrace.o := -pg
+CFLAGS_REMOVE_time_$(BITS).o := -pg
+CFLAGS_REMOVE_perf_event.o := -pg
+CFLAGS_REMOVE_pcr.o := -pg
+endif
+
 obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o
 obj-$(CONFIG_SPARC32) += etrap_32.o
 obj-$(CONFIG_SPARC32) += rtrap_32.o
···
 
 
 obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
-CFLAGS_REMOVE_ftrace.o := -pg
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 
 obj-$(CONFIG_EARLYFB) += btext.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
+59 -1
arch/sparc/kernel/ftrace.c
···
 
 static u32 ftrace_call_replace(unsigned long ip, unsigned long addr)
 {
-	static u32 call;
+	u32 call;
 	s32 off;
 
 	off = ((s32)addr - (s32)ip);
···
 	return 0;
 }
 #endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_graph_call(void);
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	unsigned long ip = (unsigned long)(&ftrace_graph_call);
+	u32 old, new;
+
+	old = *(u32 *) &ftrace_graph_call;
+	new = ftrace_call_replace(ip, (unsigned long) &ftrace_graph_caller);
+	return ftrace_modify_code(ip, old, new);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	unsigned long ip = (unsigned long)(&ftrace_graph_call);
+	u32 old, new;
+
+	old = *(u32 *) &ftrace_graph_call;
+	new = ftrace_call_replace(ip, (unsigned long) &ftrace_stub);
+
+	return ftrace_modify_code(ip, old, new);
+}
+
+#endif /* !CONFIG_DYNAMIC_FTRACE */
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+unsigned long prepare_ftrace_return(unsigned long parent,
+				    unsigned long self_addr,
+				    unsigned long frame_pointer)
+{
+	unsigned long return_hooker = (unsigned long) &return_to_handler;
+	struct ftrace_graph_ent trace;
+
+	if (unlikely(atomic_read(&current->tracing_graph_pause)))
+		return parent + 8UL;
+
+	if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
+				     frame_pointer) == -EBUSY)
+		return parent + 8UL;
+
+	trace.func = self_addr;
+
+	/* Only trace if the calling function expects to */
+	if (!ftrace_graph_entry(&trace)) {
+		current->curr_ret_stack--;
+		return parent + 8UL;
+	}
+
+	return return_hooker;
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
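
Two review notes on the plumbing above: ftrace_call_replace() builds a SPARC call instruction (the masking itself sits in lines elided by this hunk), and the parent + 8UL bail-out value reflects the SPARC convention that %o7 holds the address of the call itself, so execution resumes two instructions later, past the delay slot. A standalone sketch of the call encoding, with made-up addresses:

    #include <stdint.h>
    #include <stdio.h>

    /* A SPARC "call" is 0b01 in the top two bits plus a 30-bit
     * signed word displacement from the patched site to the target. */
    static uint32_t call_insn(uint32_t ip, uint32_t addr)
    {
    	int32_t off = (int32_t)addr - (int32_t)ip;
    	return 0x40000000u | ((uint32_t)off >> 2);
    }

    int main(void)
    {
    	/* hypothetical site/target, just to show the encoding */
    	printf("%08x\n", (unsigned)call_insn(0x1000, 0x2000)); /* 40000400 */
    	return 0;
    }
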
+10 -1
arch/sparc/kernel/irq_64.c
···
 #include <linux/delay.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <linux/ftrace.h>
 #include <linux/irq.h>
 
 #include <asm/ptrace.h>
···
 	bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
 	if (unlikely(!bucket))
 		return 0;
+
+	/* The only reference we store to the IRQ bucket is
+	 * by physical address which kmemleak can't see, tell
+	 * it that this object explicitly is not a leak and
+	 * should be scanned.
+	 */
+	kmemleak_not_leak(bucket);
+
 	__flush_dcache_range((unsigned long) bucket,
 			     ((unsigned long) bucket +
 			      sizeof(struct ino_bucket)));
···
 	__asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
 }
 
-void handler_irq(int irq, struct pt_regs *regs)
+void __irq_entry handler_irq(int irq, struct pt_regs *regs)
 {
 	unsigned long pstate, bucket_pa;
 	struct pt_regs *old_regs;
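
The annotation follows the standard kmemleak pattern for objects whose only live reference is a physical address; a minimal sketch (alloc_tracked_by_pa() is a hypothetical helper, not part of this patch):

    #include <linux/slab.h>
    #include <linux/kmemleak.h>

    static void *alloc_tracked_by_pa(size_t len)
    {
    	void *p = kzalloc(len, GFP_ATOMIC);

    	if (p)
    		kmemleak_not_leak(p);	/* only referenced via __pa(p) */
    	return p;
    }
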
+2 -1
arch/sparc/kernel/kgdb_64.c
···
 
 #include <linux/kgdb.h>
 #include <linux/kdebug.h>
+#include <linux/ftrace.h>
 
 #include <asm/kdebug.h>
 #include <asm/ptrace.h>
···
 }
 
 #ifdef CONFIG_SMP
-void smp_kgdb_capture_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_kgdb_capture_client(int irq, struct pt_regs *regs)
 {
 	unsigned long flags;
 
+1 -2
arch/sparc/kernel/nmi.c
···
 notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
 {
 	unsigned int sum, touched = 0;
-	int cpu = smp_processor_id();
 
 	clear_softint(1 << irq);
 
···
 	else
 		pcr_ops->write(PCR_PIC_PRIV);
 
-	sum = kstat_irqs_cpu(0, cpu);
+	sum = local_cpu_data().irq0_irqs;
 	if (__get_cpu_var(nmi_touch)) {
 		__get_cpu_var(nmi_touch) = 0;
 		touched = 1;
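
For context, the sampled sum feeds the lockup test a few lines below this hunk, roughly in the style of the x86 NMI watchdog (the per-cpu names here are illustrative, not quoted from the patch):

    static DEFINE_PER_CPU(unsigned int, last_irq_sum);
    static DEFINE_PER_CPU(long, alert_counter);

    /* If timer interrupts have not advanced since the last NMI and
     * nothing touched the watchdog, count toward a lockup warning. */
    static void watchdog_check(unsigned int sum, int touched)
    {
    	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
    		__get_cpu_var(alert_counter)++;
    	} else {
    		__get_cpu_var(last_irq_sum) = sum;
    		__get_cpu_var(alert_counter) = 0;
    	}
    }
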
+8 -3
arch/sparc/kernel/pci_common.c
···
 		struct resource *rp = kzalloc(sizeof(*rp), GFP_KERNEL);
 
 		if (!rp) {
-			prom_printf("Cannot allocate IOMMU resource.\n");
-			prom_halt();
+			pr_info("%s: Cannot allocate IOMMU resource.\n",
+				pbm->name);
+			return;
 		}
 		rp->name = "IOMMU";
 		rp->start = pbm->mem_space.start + (unsigned long) vdma[0];
 		rp->end = rp->start + (unsigned long) vdma[1] - 1UL;
 		rp->flags = IORESOURCE_BUSY;
-		request_resource(&pbm->mem_space, rp);
+		if (request_resource(&pbm->mem_space, rp)) {
+			pr_info("%s: Unable to request IOMMU resource.\n",
+				pbm->name);
+			kfree(rp);
+		}
 	}
 }
 
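
The underlying rule: a struct resource that fails request_resource() was never registered, so the caller still owns it and must free it. A generic sketch of the corrected pattern (claim_region() is a hypothetical helper):

    #include <linux/ioport.h>
    #include <linux/slab.h>

    static struct resource *claim_region(struct resource *parent,
    				     resource_size_t start,
    				     resource_size_t end,
    				     const char *name)
    {
    	struct resource *rp = kzalloc(sizeof(*rp), GFP_KERNEL);

    	if (!rp)
    		return NULL;
    	rp->name  = name;
    	rp->start = start;
    	rp->end   = end;
    	rp->flags = IORESOURCE_BUSY;
    	if (request_resource(parent, rp)) {
    		kfree(rp);	/* never registered; caller owns it */
    		return NULL;
    	}
    	return rp;
    }
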
+2 -1
arch/sparc/kernel/pcr.c
···
 #include <linux/irq.h>
 
 #include <linux/perf_event.h>
+#include <linux/ftrace.h>
 
 #include <asm/pil.h>
 #include <asm/pcr.h>
···
  * Therefore in such situations we defer the work by signalling
  * a lower level cpu IRQ.
  */
-void deferred_pcr_work_irq(int irq, struct pt_regs *regs)
+void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
 {
 	struct pt_regs *old_regs;
 
+6 -5
arch/sparc/kernel/smp_64.c
···
 #include <linux/profile.h>
 #include <linux/bootmem.h>
 #include <linux/vmalloc.h>
+#include <linux/ftrace.h>
 #include <linux/cpu.h>
 #include <linux/slab.h>
 
···
 			      &cpumask_of_cpu(cpu));
 }
 
-void smp_call_function_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
 {
 	clear_softint(1 << irq);
 	generic_smp_call_function_interrupt();
 }
 
-void smp_call_function_single_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
 {
 	clear_softint(1 << irq);
 	generic_smp_call_function_single_interrupt();
···
 	put_cpu();
 }
 
-void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
 {
 	struct mm_struct *mm;
 	unsigned long flags;
···
  */
 extern void prom_world(int);
 
-void smp_penguin_jailcell(int irq, struct pt_regs *regs)
+void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
 {
 	clear_softint(1 << irq);
 
···
 			      &cpumask_of_cpu(cpu));
 }
 
-void smp_receive_signal_client(int irq, struct pt_regs *regs)
+void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
 {
 	clear_softint(1 << irq);
 }
+3 -1
arch/sparc/kernel/time_64.c
···
 #include <linux/clocksource.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/ftrace.h>
 
 #include <asm/oplib.h>
 #include <asm/timer.h>
···
 };
 static DEFINE_PER_CPU(struct clock_event_device, sparc64_events);
 
-void timer_interrupt(int irq, struct pt_regs *regs)
+void __irq_entry timer_interrupt(int irq, struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 	unsigned long tick_mask = tick_ops->softint_mask;
···
 
 	irq_enter();
 
+	local_cpu_data().irq0_irqs++;
 	kstat_incr_irqs_this_cpu(0, irq_to_desc(0));
 
 	if (unlikely(!evt->event_handler)) {
+3 -23
arch/sparc/kernel/traps_64.c
···
 
 EXPORT_SYMBOL(dump_stack);
 
-static inline int is_kernel_stack(struct task_struct *task,
-				  struct reg_window *rw)
-{
-	unsigned long rw_addr = (unsigned long) rw;
-	unsigned long thread_base, thread_end;
-
-	if (rw_addr < PAGE_OFFSET) {
-		if (task != &init_task)
-			return 0;
-	}
-
-	thread_base = (unsigned long) task_stack_page(task);
-	thread_end = thread_base + sizeof(union thread_union);
-	if (rw_addr >= thread_base &&
-	    rw_addr < thread_end &&
-	    !(rw_addr & 0x7UL))
-		return 1;
-
-	return 0;
-}
-
 static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
 {
 	unsigned long fp = rw->ins[6];
···
 	show_regs(regs);
 	add_taint(TAINT_DIE);
 	if (regs->tstate & TSTATE_PRIV) {
+		struct thread_info *tp = current_thread_info();
 		struct reg_window *rw = (struct reg_window *)
 			(regs->u_regs[UREG_FP] + STACK_BIAS);
 
···
 		 * find some badly aligned kernel stack.
 		 */
 		while (rw &&
-		       count++ < 30&&
-		       is_kernel_stack(current, rw)) {
+		       count++ < 30 &&
+		       kstack_valid(tp, (unsigned long) rw)) {
 			printk("Caller[%016lx]: %pS\n", rw->ins[7],
 			       (void *) rw->ins[7]);
 
+5
arch/sparc/kernel/vmlinux.lds.S
···
 		SCHED_TEXT
 		LOCK_TEXT
 		KPROBES_TEXT
+		IRQENTRY_TEXT
 		*(.gnu.warning)
 	} = 0
 	_etext = .;
 
 	RO_DATA(PAGE_SIZE)
+
+	/* Start of data section */
+	_sdata = .;
+
 	.data1 : {
 		*(.data1)
 	}
+72 -87
arch/sparc/lib/mcount.S
···
 
 #include <linux/linkage.h>
 
-#include <asm/ptrace.h>
-#include <asm/thread_info.h>
-
 /*
  * This is the main variant and is called by C code. GCC's -pg option
  * automatically instruments every C function with a call to this.
  */
 
-#ifdef CONFIG_STACK_DEBUG
-
-#define OVSTACKSIZE	4096	/* lets hope this is enough */
-
-	.data
-	.align	8
-panicstring:
-	.asciz	"Stack overflow\n"
-	.align	8
-ovstack:
-	.skip	OVSTACKSIZE
-#endif
 	.text
 	.align	32
 	.globl	_mcount
···
 	.type	mcount,#function
 _mcount:
 mcount:
-#ifdef CONFIG_STACK_DEBUG
-	/*
-	 * Check whether %sp is dangerously low.
-	 */
-	ldub	[%g6 + TI_FPDEPTH], %g1
-	srl	%g1, 1, %g3
-	add	%g3, 1, %g3
-	sllx	%g3, 8, %g3	! each fpregs frame is 256b
-	add	%g3, 192, %g3
-	add	%g6, %g3, %g3	! where does task_struct+frame end?
-	sub	%g3, STACK_BIAS, %g3
-	cmp	%sp, %g3
-	bg,pt	%xcc, 1f
-	 nop
-	lduh	[%g6 + TI_CPU], %g1
-	sethi	%hi(hardirq_stack), %g3
-	or	%g3, %lo(hardirq_stack), %g3
-	sllx	%g1, 3, %g1
-	ldx	[%g3 + %g1], %g7
-	sub	%g7, STACK_BIAS, %g7
-	cmp	%sp, %g7
-	bleu,pt	%xcc, 2f
-	 sethi	%hi(THREAD_SIZE), %g3
-	add	%g7, %g3, %g7
-	cmp	%sp, %g7
-	blu,pn	%xcc, 1f
-2:	 sethi	%hi(softirq_stack), %g3
-	or	%g3, %lo(softirq_stack), %g3
-	ldx	[%g3 + %g1], %g7
-	sub	%g7, STACK_BIAS, %g7
-	cmp	%sp, %g7
-	bleu,pt	%xcc, 3f
-	 sethi	%hi(THREAD_SIZE), %g3
-	add	%g7, %g3, %g7
-	cmp	%sp, %g7
-	blu,pn	%xcc, 1f
-	 nop
-	/* If we are already on ovstack, don't hop onto it
-	 * again, we are already trying to output the stack overflow
-	 * message.
-	 */
-3:	sethi	%hi(ovstack), %g7	! cant move to panic stack fast enough
-	 or	%g7, %lo(ovstack), %g7
-	add	%g7, OVSTACKSIZE, %g3
-	sub	%g3, STACK_BIAS + 192, %g3
-	sub	%g7, STACK_BIAS, %g7
-	cmp	%sp, %g7
-	blu,pn	%xcc, 2f
-	 cmp	%sp, %g3
-	bleu,pn	%xcc, 1f
-	 nop
-2:	mov	%g3, %sp
-	sethi	%hi(panicstring), %g3
-	call	prom_printf
-	 or	%g3, %lo(panicstring), %o0
-	call	prom_halt
-	 nop
-1:
-#endif
 #ifdef CONFIG_FUNCTION_TRACER
 #ifdef CONFIG_DYNAMIC_FTRACE
-	mov	%o7, %o0
-	.globl	mcount_call
-mcount_call:
-	call	ftrace_stub
-	 mov	%o0, %o7
+	/* Do nothing, the retl/nop below is all we need. */
 #else
-	sethi	%hi(ftrace_trace_function), %g1
+	sethi	%hi(function_trace_stop), %g1
+	lduw	[%g1 + %lo(function_trace_stop)], %g2
+	brnz,pn	%g2, 2f
+	 sethi	%hi(ftrace_trace_function), %g1
 	sethi	%hi(ftrace_stub), %g2
 	ldx	[%g1 + %lo(ftrace_trace_function)], %g1
 	or	%g2, %lo(ftrace_stub), %g2
 	cmp	%g1, %g2
 	be,pn	%icc, 1f
-	 mov	%i7, %o1
-	jmpl	%g1, %g0
-	 mov	%o7, %o0
+	 mov	%i7, %g3
+	save	%sp, -128, %sp
+	mov	%g3, %o1
+	jmpl	%g1, %o7
+	 mov	%i7, %o0
+	ret
+	 restore
 	/* not reached */
 1:
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	sethi	%hi(ftrace_graph_return), %g1
+	ldx	[%g1 + %lo(ftrace_graph_return)], %g3
+	cmp	%g2, %g3
+	bne,pn	%xcc, 5f
+	 sethi	%hi(ftrace_graph_entry_stub), %g2
+	sethi	%hi(ftrace_graph_entry), %g1
+	or	%g2, %lo(ftrace_graph_entry_stub), %g2
+	ldx	[%g1 + %lo(ftrace_graph_entry)], %g1
+	cmp	%g1, %g2
+	be,pt	%xcc, 2f
+	 nop
+5:	mov	%i7, %g2
+	mov	%fp, %g3
+	save	%sp, -128, %sp
+	mov	%g2, %l0
+	ba,pt	%xcc, ftrace_graph_caller
+	 mov	%g3, %l1
+#endif
+2:
 #endif
 #endif
 	retl
···
 	.globl	ftrace_caller
 	.type	ftrace_caller,#function
 ftrace_caller:
-	mov	%i7, %o1
-	mov	%o7, %o0
+	sethi	%hi(function_trace_stop), %g1
+	mov	%i7, %g2
+	lduw	[%g1 + %lo(function_trace_stop)], %g1
+	brnz,pn	%g1, ftrace_stub
+	 mov	%fp, %g3
+	save	%sp, -128, %sp
+	mov	%g2, %o1
+	mov	%g2, %l0
+	mov	%g3, %l1
 	.globl	ftrace_call
 ftrace_call:
 	call	ftrace_stub
-	 mov	%o0, %o7
-	retl
+	 mov	%i7, %o0
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.globl	ftrace_graph_call
+ftrace_graph_call:
+	call	ftrace_stub
 	 nop
+#endif
+	ret
+	 restore
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.size	ftrace_graph_call,.-ftrace_graph_call
+#endif
+	.size	ftrace_call,.-ftrace_call
 	.size	ftrace_caller,.-ftrace_caller
 #endif
+#endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+	mov	%l0, %o0
+	mov	%i7, %o1
+	call	prepare_ftrace_return
+	 mov	%l1, %o2
+	ret
+	 restore %o0, -8, %i7
+END(ftrace_graph_caller)
+
+ENTRY(return_to_handler)
+	save	%sp, -128, %sp
+	call	ftrace_return_to_handler
+	 mov	%fp, %o0
+	jmpl	%o0 + 8, %g0
+	 restore
+END(return_to_handler)
 #endif
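
return_to_handler recovers the saved return address via ftrace_return_to_handler() and jumps to it plus 8, past the original call's delay slot. A quick userspace smoke test of the finished tracer might look like this (a sketch; assumes debugfs is mounted at /sys/kernel/debug):

    #include <stdio.h>

    int main(void)
    {
    	FILE *f = fopen("/sys/kernel/debug/tracing/current_tracer", "w");

    	if (!f)
    		return 1;
    	fputs("function_graph", f);	/* exercises the new mcount hooks */
    	return fclose(f) ? 1 : 0;
    }
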
+1 -1
lib/Kconfig.debug
···
 config DEBUG_KMEMLEAK
 	bool "Kernel memory leak detector"
 	depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \
-		(X86 || ARM || PPC || S390 || SUPERH || MICROBLAZE)
+		(X86 || ARM || PPC || S390 || SPARC64 || SUPERH || MICROBLAZE)
 
 	select DEBUG_FS if SYSFS
 	select STACKTRACE if STACKTRACE_SUPPORT