arch/arm/Kconfig
@@ -14,6 +14,7 @@
 	select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
 	select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)
 	select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL)
+	select HAVE_FUNCTION_GRAPH_TRACER if (!THUMB2_KERNEL)
 	select HAVE_GENERIC_DMA_COHERENT
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_LZO
arch/arm/Kconfig.debug (+1, -1)
@@ -23,7 +23,7 @@
 config FRAME_POINTER
 	bool
 	depends on !THUMB2_KERNEL
-	default y if !ARM_UNWIND
+	default y if !ARM_UNWIND || FUNCTION_GRAPH_TRACER
 	help
 	  If you say N here, the resulting kernel will be slightly smaller and
 	  faster. However, if neither FRAME_POINTER nor ARM_UNWIND are enabled,
arch/arm/kernel/ftrace.c
@@ -24,6 +24,7 @@
 #define NOP		0xe8bd4000	/* pop {lr} */
 #endif
 
+#ifdef CONFIG_DYNAMIC_FTRACE
 #ifdef CONFIG_OLD_MCOUNT
 #define OLD_MCOUNT_ADDR	((unsigned long) mcount)
 #define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)
[...]
 }
 #endif
 
-/* construct a branch (BL) instruction to addr */
 #ifdef CONFIG_THUMB2_KERNEL
-static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
+static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
+				       bool link)
 {
 	unsigned long s, j1, j2, i1, i2, imm10, imm11;
 	unsigned long first, second;
[...]
 	j2 = (!i2) ^ s;
 
 	first = 0xf000 | (s << 10) | imm10;
-	second = 0xd000 | (j1 << 13) | (j2 << 11) | imm11;
+	second = 0x9000 | (j1 << 13) | (j2 << 11) | imm11;
+	if (link)
+		second |= 1 << 14;
 
 	return (second << 16) | first;
 }
 #else
-static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
+static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
+				       bool link)
 {
+	unsigned long opcode = 0xea000000;
 	long offset;
+
+	if (link)
+		opcode |= 1 << 24;
 
 	offset = (long)addr - (long)(pc + 8);
 	if (unlikely(offset < -33554432 || offset > 33554428)) {
[...]
 
 	offset = (offset >> 2) & 0x00ffffff;
 
-	return 0xeb000000 | offset;
+	return opcode | offset;
 }
 #endif
+
+static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
+{
+	return ftrace_gen_branch(pc, addr, true);
+}
 
 static int ftrace_modify_code(unsigned long pc, unsigned long old,
 			      unsigned long new)
[...]
 
 	return 0;
 }
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+			   unsigned long frame_pointer)
+{
+	unsigned long return_hooker = (unsigned long) &return_to_handler;
+	struct ftrace_graph_ent trace;
+	unsigned long old;
+	int err;
+
+	if (unlikely(atomic_read(&current->tracing_graph_pause)))
+		return;
+
+	old = *parent;
+	*parent = return_hooker;
+
+	err = ftrace_push_return_trace(old, self_addr, &trace.depth,
+				       frame_pointer);
+	if (err == -EBUSY) {
+		*parent = old;
+		return;
+	}
+
+	trace.func = self_addr;
+
+	/* Only trace if the calling function expects to */
+	if (!ftrace_graph_entry(&trace)) {
+		current->curr_ret_stack--;
+		*parent = old;
+	}
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern unsigned long ftrace_graph_call;
+extern unsigned long ftrace_graph_call_old;
+extern void ftrace_graph_caller_old(void);
+
+static int __ftrace_modify_caller(unsigned long *callsite,
+				  void (*func) (void), bool enable)
+{
+	unsigned long caller_fn = (unsigned long) func;
+	unsigned long pc = (unsigned long) callsite;
+	unsigned long branch = ftrace_gen_branch(pc, caller_fn, false);
+	unsigned long nop = 0xe1a00000;	/* mov r0, r0 */
+	unsigned long old = enable ? nop : branch;
+	unsigned long new = enable ? branch : nop;
+
+	return ftrace_modify_code(pc, old, new);
+}
+
+static int ftrace_modify_graph_caller(bool enable)
+{
+	int ret;
+
+	ret = __ftrace_modify_caller(&ftrace_graph_call,
+				     ftrace_graph_caller,
+				     enable);
+
+#ifdef CONFIG_OLD_MCOUNT
+	if (!ret)
+		ret = __ftrace_modify_caller(&ftrace_graph_call_old,
+					     ftrace_graph_caller_old,
+					     enable);
+#endif
+
+	return ret;
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	return ftrace_modify_graph_caller(true);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	return ftrace_modify_graph_caller(false);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
arch/arm/kernel/irq.c (+3, -1)
@@ -35,6 +35,7 @@
 #include <linux/list.h>
 #include <linux/kallsyms.h>
 #include <linux/proc_fs.h>
+#include <linux/ftrace.h>
 
 #include <asm/system.h>
 #include <asm/mach/irq.h>
[...]
  * come via this function.  Instead, they should provide their
  * own 'handler'
  */
-asmlinkage void __exception asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
+asmlinkage void __exception_irq_entry
+asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
arch/arm/kernel/smp.c (+3, -2)
@@ -16,6 +16,7 @@
 #include <linux/cache.h>
 #include <linux/profile.h>
 #include <linux/errno.h>
+#include <linux/ftrace.h>
 #include <linux/mm.h>
 #include <linux/err.h>
 #include <linux/cpu.h>
[...]
 }
 
 #ifdef CONFIG_LOCAL_TIMERS
-asmlinkage void __exception do_local_timer(struct pt_regs *regs)
+asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 	int cpu = smp_processor_id();
[...]
  *
  *  Bit 0 - Inter-processor function call
  */
-asmlinkage void __exception do_IPI(struct pt_regs *regs)
+asmlinkage void __exception_irq_entry do_IPI(struct pt_regs *regs)
 {
 	unsigned int cpu = smp_processor_id();
 	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);