Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'ftrace-v6.11' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace

Pull ftrace updates from Steven Rostedt:
"Rewrite of function graph tracer to allow multiple users

Up until now, the function graph tracer could only have a single user
attached to it. If another user tried to attach to the function graph
tracer while one was already attached, it would fail. Allowing
function graph tracer to have more than one user has been asked for
since 2009, but it required a rewrite to the logic to pull it off so
it never happened. Until now!

There are three systems that trace the return of a function. That is
kretprobes, function graph tracer, and BPF. kretprobes and function
graph tracing both do it similarly. The difference is that kretprobes
uses a shadow stack per callback and function graph tracer creates a
shadow stack for all tasks. The function graph tracer method makes it
possible to trace the return of all functions. As kretprobes now needs
that feature too, allowing it to use function graph tracer was needed.
BPF also wants to trace the return of many probes and its method
doesn't scale either. Having it use function graph tracer would
improve that.

Allowing function graph tracer to have multiple users lets both
kretprobes and BPF use function graph tracer in these cases. This
will allow kretprobes code to be removed in the future as its version
will no longer be needed.

Note, function graph tracer is only limited to 16 simultaneous users,
due to shadow stack size and allocated slots"

* tag 'ftrace-v6.11' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace: (49 commits)
fgraph: Use str_plural() in test_graph_storage_single()
function_graph: Add READ_ONCE() when accessing fgraph_array[]
ftrace: Add missing kerneldoc parameters to unregister_ftrace_direct()
function_graph: Everyone uses HAVE_FUNCTION_GRAPH_RET_ADDR_PTR, remove it
function_graph: Fix up ftrace_graph_ret_addr()
function_graph: Make fgraph_update_pid_func() a stub for !DYNAMIC_FTRACE
function_graph: Rename BYTE_NUMBER to CHAR_NUMBER in selftests
fgraph: Remove some unused functions
ftrace: Hide one more entry in stack trace when ftrace_pid is enabled
function_graph: Do not update pid func if CONFIG_DYNAMIC_FTRACE not enabled
function_graph: Make fgraph_do_direct static key static
ftrace: Fix prototypes for ftrace_startup/shutdown_subops()
ftrace: Assign RCU list variable with rcu_assign_ptr()
ftrace: Assign ftrace_list_end to ftrace_ops_list type cast to RCU
ftrace: Declare function_trace_op in header to quiet sparse warning
ftrace: Add comments to ftrace_hash_move() and friends
ftrace: Convert "inc" parameter to bool in ftrace_hash_rec_update_modify()
ftrace: Add comments to ftrace_hash_rec_disable/enable()
ftrace: Remove "filter_hash" parameter from __ftrace_hash_rec_update()
ftrace: Rename dup_hash() and comment it
...

+2058 -444
-12
Documentation/trace/ftrace-design.rst
··· 217 217 218 218 Similarly, when you call ftrace_return_to_handler(), pass it the frame pointer. 219 219 220 - HAVE_FUNCTION_GRAPH_RET_ADDR_PTR 221 - -------------------------------- 222 - 223 - An arch may pass in a pointer to the return address on the stack. This 224 - prevents potential stack unwinding issues where the unwinder gets out of 225 - sync with ret_stack and the wrong addresses are reported by 226 - ftrace_graph_ret_addr(). 227 - 228 - Adding support for it is easy: just define the macro in asm/ftrace.h and 229 - pass the return address pointer as the 'retp' argument to 230 - ftrace_push_return_trace(). 231 - 232 220 HAVE_SYSCALL_TRACEPOINTS 233 221 ------------------------ 234 222
-11
arch/arm64/include/asm/ftrace.h
··· 12 12 13 13 #define HAVE_FUNCTION_GRAPH_FP_TEST 14 14 15 - /* 16 - * HAVE_FUNCTION_GRAPH_RET_ADDR_PTR means that the architecture can provide a 17 - * "return address pointer" which can be used to uniquely identify a return 18 - * address which has been overwritten. 19 - * 20 - * On arm64 we use the address of the caller's frame record, which remains the 21 - * same for the lifetime of the instrumented function, unlike the return 22 - * address in the LR. 23 - */ 24 - #define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR 25 - 26 15 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS 27 16 #define ARCH_SUPPORTS_FTRACE_OPS 1 28 17 #else
-2
arch/csky/include/asm/ftrace.h
··· 7 7 8 8 #define HAVE_FUNCTION_GRAPH_FP_TEST 9 9 10 - #define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR 11 - 12 10 #define ARCH_SUPPORTS_FTRACE_OPS 1 13 11 14 12 #define MCOUNT_ADDR ((unsigned long)_mcount)
-1
arch/loongarch/include/asm/ftrace.h
··· 28 28 struct dyn_arch_ftrace { }; 29 29 30 30 #define ARCH_SUPPORTS_FTRACE_OPS 1 31 - #define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR 32 31 33 32 #define ftrace_init_nop ftrace_init_nop 34 33 int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
-2
arch/powerpc/include/asm/ftrace.h
··· 8 8 #define MCOUNT_ADDR ((unsigned long)(_mcount)) 9 9 #define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */ 10 10 11 - #define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR 12 - 13 11 /* Ignore unused weak functions which will have larger offsets */ 14 12 #if defined(CONFIG_MPROFILE_KERNEL) || defined(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY) 15 13 #define FTRACE_MCOUNT_MAX_OFFSET 16
-1
arch/riscv/include/asm/ftrace.h
··· 11 11 #if defined(CONFIG_FUNCTION_GRAPH_TRACER) && defined(CONFIG_FRAME_POINTER) 12 12 #define HAVE_FUNCTION_GRAPH_FP_TEST 13 13 #endif 14 - #define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR 15 14 16 15 #define ARCH_SUPPORTS_FTRACE_OPS 1 17 16 #ifndef __ASSEMBLY__
-1
arch/s390/include/asm/ftrace.h
··· 2 2 #ifndef _ASM_S390_FTRACE_H 3 3 #define _ASM_S390_FTRACE_H 4 4 5 - #define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR 6 5 #define ARCH_SUPPORTS_FTRACE_OPS 1 7 6 #define MCOUNT_INSN_SIZE 6 8 7
-2
arch/x86/include/asm/ftrace.h
··· 20 20 #define ARCH_SUPPORTS_FTRACE_OPS 1 21 21 #endif 22 22 23 - #define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR 24 - 25 23 #ifndef __ASSEMBLY__ 26 24 extern void __fentry__(void); 27 25
+34 -16
include/linux/ftrace.h
··· 227 227 * ftrace_enabled. 228 228 * DIRECT - Used by the direct ftrace_ops helper for direct functions 229 229 * (internal ftrace only, should not be used by others) 230 + * SUBOP - Is controlled by another op in field managed. 230 231 */ 231 232 enum { 232 233 FTRACE_OPS_FL_ENABLED = BIT(0), ··· 248 247 FTRACE_OPS_FL_TRACE_ARRAY = BIT(15), 249 248 FTRACE_OPS_FL_PERMANENT = BIT(16), 250 249 FTRACE_OPS_FL_DIRECT = BIT(17), 250 + FTRACE_OPS_FL_SUBOP = BIT(18), 251 251 }; 252 252 253 253 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_ARGS ··· 336 334 unsigned long trampoline; 337 335 unsigned long trampoline_size; 338 336 struct list_head list; 337 + struct list_head subop_list; 339 338 ftrace_ops_func_t ops_func; 339 + struct ftrace_ops *managed; 340 340 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS 341 341 unsigned long direct_call; 342 342 #endif ··· 513 509 static inline void stack_tracer_enable(void) { } 514 510 #endif 515 511 512 + enum { 513 + FTRACE_UPDATE_CALLS = (1 << 0), 514 + FTRACE_DISABLE_CALLS = (1 << 1), 515 + FTRACE_UPDATE_TRACE_FUNC = (1 << 2), 516 + FTRACE_START_FUNC_RET = (1 << 3), 517 + FTRACE_STOP_FUNC_RET = (1 << 4), 518 + FTRACE_MAY_SLEEP = (1 << 5), 519 + }; 520 + 516 521 #ifdef CONFIG_DYNAMIC_FTRACE 517 522 518 523 void ftrace_arch_code_modify_prepare(void); ··· 615 602 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset); 616 603 void ftrace_free_filter(struct ftrace_ops *ops); 617 604 void ftrace_ops_set_global_filter(struct ftrace_ops *ops); 618 - 619 - enum { 620 - FTRACE_UPDATE_CALLS = (1 << 0), 621 - FTRACE_DISABLE_CALLS = (1 << 1), 622 - FTRACE_UPDATE_TRACE_FUNC = (1 << 2), 623 - FTRACE_START_FUNC_RET = (1 << 3), 624 - FTRACE_STOP_FUNC_RET = (1 << 4), 625 - FTRACE_MAY_SLEEP = (1 << 5), 626 - }; 627 605 628 606 /* 629 607 * The FTRACE_UPDATE_* enum is used to pass information back ··· 1031 1027 unsigned long long rettime; 1032 1028 } __packed; 1033 1029 1034 - /* Type of the callback handlers for tracing function graph*/ 1035 
- typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */ 1036 - typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */ 1030 + struct fgraph_ops; 1037 1031 1038 - extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace); 1032 + /* Type of the callback handlers for tracing function graph*/ 1033 + typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *, 1034 + struct fgraph_ops *); /* return */ 1035 + typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *, 1036 + struct fgraph_ops *); /* entry */ 1037 + 1038 + extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace, struct fgraph_ops *gops); 1039 + bool ftrace_pids_enabled(struct ftrace_ops *ops); 1039 1040 1040 1041 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 1041 1042 1042 1043 struct fgraph_ops { 1043 1044 trace_func_graph_ent_t entryfunc; 1044 1045 trace_func_graph_ret_t retfunc; 1046 + struct ftrace_ops ops; /* for the hash lists */ 1047 + void *private; 1048 + trace_func_graph_ent_t saved_func; 1049 + int idx; 1045 1050 }; 1051 + 1052 + void *fgraph_reserve_data(int idx, int size_bytes); 1053 + void *fgraph_retrieve_data(int idx, int *size_bytes); 1046 1054 1047 1055 /* 1048 1056 * Stack of return addresses for functions ··· 1071 1055 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST 1072 1056 unsigned long fp; 1073 1057 #endif 1074 - #ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR 1075 1058 unsigned long *retp; 1076 - #endif 1077 1059 }; 1078 1060 1079 1061 /* ··· 1086 1072 unsigned long frame_pointer, unsigned long *retp); 1087 1073 1088 1074 struct ftrace_ret_stack * 1089 - ftrace_graph_get_ret_stack(struct task_struct *task, int idx); 1075 + ftrace_graph_get_ret_stack(struct task_struct *task, int skip); 1090 1076 1091 1077 unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx, 1092 1078 unsigned long ret, unsigned long *retp); 1079 + unsigned long *fgraph_get_task_var(struct fgraph_ops *gops); 1093 1080 1094 1081 /* 1095 1082 * 
Sometimes we don't want to trace a function with the function ··· 1128 1113 extern void ftrace_graph_init_task(struct task_struct *t); 1129 1114 extern void ftrace_graph_exit_task(struct task_struct *t); 1130 1115 extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu); 1116 + 1117 + /* Used by assembly, but to quiet sparse warnings */ 1118 + extern struct ftrace_ops *function_trace_op; 1131 1119 1132 1120 static inline void pause_graph_tracing(void) 1133 1121 {
+1 -1
include/linux/sched.h
··· 1413 1413 int curr_ret_depth; 1414 1414 1415 1415 /* Stack of return addresses for return function tracing: */ 1416 - struct ftrace_ret_stack *ret_stack; 1416 + unsigned long *ret_stack; 1417 1417 1418 1418 /* Timestamp for last schedule: */ 1419 1419 unsigned long long ftrace_timestamp;
-39
include/linux/trace_recursion.h
··· 44 44 */ 45 45 TRACE_IRQ_BIT, 46 46 47 - /* Set if the function is in the set_graph_function file */ 48 - TRACE_GRAPH_BIT, 49 - 50 - /* 51 - * In the very unlikely case that an interrupt came in 52 - * at a start of graph tracing, and we want to trace 53 - * the function in that interrupt, the depth can be greater 54 - * than zero, because of the preempted start of a previous 55 - * trace. In an even more unlikely case, depth could be 2 56 - * if a softirq interrupted the start of graph tracing, 57 - * followed by an interrupt preempting a start of graph 58 - * tracing in the softirq, and depth can even be 3 59 - * if an NMI came in at the start of an interrupt function 60 - * that preempted a softirq start of a function that 61 - * preempted normal context!!!! Luckily, it can't be 62 - * greater than 3, so the next two bits are a mask 63 - * of what the depth is when we set TRACE_GRAPH_BIT 64 - */ 65 - 66 - TRACE_GRAPH_DEPTH_START_BIT, 67 - TRACE_GRAPH_DEPTH_END_BIT, 68 - 69 - /* 70 - * To implement set_graph_notrace, if this bit is set, we ignore 71 - * function graph tracing of called functions, until the return 72 - * function is called to clear it. 73 - */ 74 - TRACE_GRAPH_NOTRACE_BIT, 75 - 76 47 /* Used to prevent recursion recording from recursing. 
*/ 77 48 TRACE_RECORD_RECURSION_BIT, 78 49 }; ··· 51 80 #define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0) 52 81 #define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0) 53 82 #define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit))) 54 - 55 - #define trace_recursion_depth() \ 56 - (((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3) 57 - #define trace_recursion_set_depth(depth) \ 58 - do { \ 59 - current->trace_recursion &= \ 60 - ~(3 << TRACE_GRAPH_DEPTH_START_BIT); \ 61 - current->trace_recursion |= \ 62 - ((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT; \ 63 - } while (0) 64 83 65 84 #define TRACE_CONTEXT_BITS 4 66 85
+860 -198
kernel/trace/fgraph.c
··· 7 7 * 8 8 * Highly modified by Steven Rostedt (VMware). 9 9 */ 10 + #include <linux/bits.h> 10 11 #include <linux/jump_label.h> 11 12 #include <linux/suspend.h> 12 13 #include <linux/ftrace.h> 14 + #include <linux/static_call.h> 13 15 #include <linux/slab.h> 14 16 15 17 #include <trace/events/sched.h> ··· 19 17 #include "ftrace_internal.h" 20 18 #include "trace.h" 21 19 22 - #ifdef CONFIG_DYNAMIC_FTRACE 23 - #define ASSIGN_OPS_HASH(opsname, val) \ 24 - .func_hash = val, \ 25 - .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock), 26 - #else 27 - #define ASSIGN_OPS_HASH(opsname, val) 28 - #endif 20 + /* 21 + * FGRAPH_FRAME_SIZE: Size in bytes of the meta data on the shadow stack 22 + * FGRAPH_FRAME_OFFSET: Size in long words of the meta data frame 23 + */ 24 + #define FGRAPH_FRAME_SIZE sizeof(struct ftrace_ret_stack) 25 + #define FGRAPH_FRAME_OFFSET DIV_ROUND_UP(FGRAPH_FRAME_SIZE, sizeof(long)) 26 + 27 + /* 28 + * On entry to a function (via function_graph_enter()), a new fgraph frame 29 + * (ftrace_ret_stack) is pushed onto the stack as well as a word that 30 + * holds a bitmask and a type (called "bitmap"). The bitmap is defined as: 31 + * 32 + * bits: 0 - 9 offset in words from the previous ftrace_ret_stack 33 + * 34 + * bits: 10 - 11 Type of storage 35 + * 0 - reserved 36 + * 1 - bitmap of fgraph_array index 37 + * 2 - reserved data 38 + * 39 + * For type with "bitmap of fgraph_array index" (FGRAPH_TYPE_BITMAP): 40 + * bits: 12 - 27 The bitmap of fgraph_ops fgraph_array index 41 + * That is, it's a bitmask of 0-15 (16 bits) 42 + * where if a corresponding ops in the fgraph_array[] 43 + * expects a callback from the return of the function 44 + * it's corresponding bit will be set. 45 + * 46 + * 47 + * The top of the ret_stack (when not empty) will always have a reference 48 + * word that points to the last fgraph frame that was saved. 
49 + * 50 + * For reserved data: 51 + * bits: 12 - 17 The size in words that is stored 52 + * bits: 18 - 23 The index of fgraph_array, which shows who is stored 53 + * 54 + * That is, at the end of function_graph_enter, if the first and forth 55 + * fgraph_ops on the fgraph_array[] (index 0 and 3) needs their retfunc called 56 + * on the return of the function being traced, and the forth fgraph_ops 57 + * stored two words of data, this is what will be on the task's shadow 58 + * ret_stack: (the stack grows upward) 59 + * 60 + * ret_stack[SHADOW_STACK_OFFSET] 61 + * | SHADOW_STACK_TASK_VARS(ret_stack)[15] | 62 + * ... 63 + * | SHADOW_STACK_TASK_VARS(ret_stack)[0] | 64 + * ret_stack[SHADOW_STACK_MAX_OFFSET] 65 + * ... 66 + * | | <- task->curr_ret_stack 67 + * +--------------------------------------------+ 68 + * | (3 << 12) | (3 << 10) | FGRAPH_FRAME_OFFSET| 69 + * | *or put another way* | 70 + * | (3 << FGRAPH_DATA_INDEX_SHIFT)| \ | This is for fgraph_ops[3]. 71 + * | ((2 - 1) << FGRAPH_DATA_SHIFT)| \ | The data size is 2 words. 72 + * | (FGRAPH_TYPE_DATA << FGRAPH_TYPE_SHIFT)| \ | 73 + * | (offset2:FGRAPH_FRAME_OFFSET+3) | <- the offset2 is from here 74 + * +--------------------------------------------+ ( It is 4 words from the ret_stack) 75 + * | STORED DATA WORD 2 | 76 + * | STORED DATA WORD 1 | 77 + * +--------------------------------------------+ 78 + * | (9 << 12) | (1 << 10) | FGRAPH_FRAME_OFFSET| 79 + * | *or put another way* | 80 + * | (BIT(3)|BIT(0)) << FGRAPH_INDEX_SHIFT | \ | 81 + * | FGRAPH_TYPE_BITMAP << FGRAPH_TYPE_SHIFT| \ | 82 + * | (offset1:FGRAPH_FRAME_OFFSET) | <- the offset1 is from here 83 + * +--------------------------------------------+ 84 + * | struct ftrace_ret_stack | 85 + * | (stores the saved ret pointer) | <- the offset points here 86 + * +--------------------------------------------+ 87 + * | (X) | (N) | ( N words away from 88 + * | | previous ret_stack) 89 + * ... 
90 + * ret_stack[0] 91 + * 92 + * If a backtrace is required, and the real return pointer needs to be 93 + * fetched, then it looks at the task's curr_ret_stack offset, if it 94 + * is greater than zero (reserved, or right before popped), it would mask 95 + * the value by FGRAPH_FRAME_OFFSET_MASK to get the offset of the 96 + * ftrace_ret_stack structure stored on the shadow stack. 97 + */ 98 + 99 + /* 100 + * The following is for the top word on the stack: 101 + * 102 + * FGRAPH_FRAME_OFFSET (0-9) holds the offset delta to the fgraph frame 103 + * FGRAPH_TYPE (10-11) holds the type of word this is. 104 + * (RESERVED or BITMAP) 105 + */ 106 + #define FGRAPH_FRAME_OFFSET_BITS 10 107 + #define FGRAPH_FRAME_OFFSET_MASK GENMASK(FGRAPH_FRAME_OFFSET_BITS - 1, 0) 108 + 109 + #define FGRAPH_TYPE_BITS 2 110 + #define FGRAPH_TYPE_MASK GENMASK(FGRAPH_TYPE_BITS - 1, 0) 111 + #define FGRAPH_TYPE_SHIFT FGRAPH_FRAME_OFFSET_BITS 112 + 113 + enum { 114 + FGRAPH_TYPE_RESERVED = 0, 115 + FGRAPH_TYPE_BITMAP = 1, 116 + FGRAPH_TYPE_DATA = 2, 117 + }; 118 + 119 + /* 120 + * For BITMAP type: 121 + * FGRAPH_INDEX (12-27) bits holding the gops index wanting return callback called 122 + */ 123 + #define FGRAPH_INDEX_BITS 16 124 + #define FGRAPH_INDEX_MASK GENMASK(FGRAPH_INDEX_BITS - 1, 0) 125 + #define FGRAPH_INDEX_SHIFT (FGRAPH_TYPE_SHIFT + FGRAPH_TYPE_BITS) 126 + 127 + /* 128 + * For DATA type: 129 + * FGRAPH_DATA (12-17) bits hold the size of data (in words) 130 + * FGRAPH_INDEX (18-23) bits hold the index for which gops->idx the data is for 131 + * 132 + * Note: 133 + * data_size == 0 means 1 word, and 31 (=2^5 - 1) means 32 words. 
134 + */ 135 + #define FGRAPH_DATA_BITS 5 136 + #define FGRAPH_DATA_MASK GENMASK(FGRAPH_DATA_BITS - 1, 0) 137 + #define FGRAPH_DATA_SHIFT (FGRAPH_TYPE_SHIFT + FGRAPH_TYPE_BITS) 138 + #define FGRAPH_MAX_DATA_SIZE (sizeof(long) * (1 << FGRAPH_DATA_BITS)) 139 + 140 + #define FGRAPH_DATA_INDEX_BITS 4 141 + #define FGRAPH_DATA_INDEX_MASK GENMASK(FGRAPH_DATA_INDEX_BITS - 1, 0) 142 + #define FGRAPH_DATA_INDEX_SHIFT (FGRAPH_DATA_SHIFT + FGRAPH_DATA_BITS) 143 + 144 + #define FGRAPH_MAX_INDEX \ 145 + ((FGRAPH_INDEX_SIZE << FGRAPH_DATA_BITS) + FGRAPH_RET_INDEX) 146 + 147 + #define FGRAPH_ARRAY_SIZE FGRAPH_INDEX_BITS 148 + 149 + /* 150 + * SHADOW_STACK_SIZE: The size in bytes of the entire shadow stack 151 + * SHADOW_STACK_OFFSET: The size in long words of the shadow stack 152 + * SHADOW_STACK_MAX_OFFSET: The max offset of the stack for a new frame to be added 153 + */ 154 + #define SHADOW_STACK_SIZE (PAGE_SIZE) 155 + #define SHADOW_STACK_OFFSET (SHADOW_STACK_SIZE / sizeof(long)) 156 + /* Leave on a buffer at the end */ 157 + #define SHADOW_STACK_MAX_OFFSET \ 158 + (SHADOW_STACK_OFFSET - (FGRAPH_FRAME_OFFSET + 1 + FGRAPH_ARRAY_SIZE)) 159 + 160 + /* RET_STACK(): Return the frame from a given @offset from task @t */ 161 + #define RET_STACK(t, offset) ((struct ftrace_ret_stack *)(&(t)->ret_stack[offset])) 162 + 163 + /* 164 + * Each fgraph_ops has a reservered unsigned long at the end (top) of the 165 + * ret_stack to store task specific state. 
166 + */ 167 + #define SHADOW_STACK_TASK_VARS(ret_stack) \ 168 + ((unsigned long *)(&(ret_stack)[SHADOW_STACK_OFFSET - FGRAPH_ARRAY_SIZE])) 29 169 30 170 DEFINE_STATIC_KEY_FALSE(kill_ftrace_graph); 31 171 int ftrace_graph_active; 172 + 173 + static struct fgraph_ops *fgraph_array[FGRAPH_ARRAY_SIZE]; 174 + static unsigned long fgraph_array_bitmask; 175 + 176 + /* LRU index table for fgraph_array */ 177 + static int fgraph_lru_table[FGRAPH_ARRAY_SIZE]; 178 + static int fgraph_lru_next; 179 + static int fgraph_lru_last; 180 + 181 + /* Initialize fgraph_lru_table with unused index */ 182 + static void fgraph_lru_init(void) 183 + { 184 + int i; 185 + 186 + for (i = 0; i < FGRAPH_ARRAY_SIZE; i++) 187 + fgraph_lru_table[i] = i; 188 + } 189 + 190 + /* Release the used index to the LRU table */ 191 + static int fgraph_lru_release_index(int idx) 192 + { 193 + if (idx < 0 || idx >= FGRAPH_ARRAY_SIZE || 194 + WARN_ON_ONCE(fgraph_lru_table[fgraph_lru_last] != -1)) 195 + return -1; 196 + 197 + fgraph_lru_table[fgraph_lru_last] = idx; 198 + fgraph_lru_last = (fgraph_lru_last + 1) % FGRAPH_ARRAY_SIZE; 199 + 200 + clear_bit(idx, &fgraph_array_bitmask); 201 + return 0; 202 + } 203 + 204 + /* Allocate a new index from LRU table */ 205 + static int fgraph_lru_alloc_index(void) 206 + { 207 + int idx = fgraph_lru_table[fgraph_lru_next]; 208 + 209 + /* No id is available */ 210 + if (idx == -1) 211 + return -1; 212 + 213 + fgraph_lru_table[fgraph_lru_next] = -1; 214 + fgraph_lru_next = (fgraph_lru_next + 1) % FGRAPH_ARRAY_SIZE; 215 + 216 + set_bit(idx, &fgraph_array_bitmask); 217 + return idx; 218 + } 219 + 220 + /* Get the offset to the fgraph frame from a ret_stack value */ 221 + static inline int __get_offset(unsigned long val) 222 + { 223 + return val & FGRAPH_FRAME_OFFSET_MASK; 224 + } 225 + 226 + /* Get the type of word from a ret_stack value */ 227 + static inline int __get_type(unsigned long val) 228 + { 229 + return (val >> FGRAPH_TYPE_SHIFT) & FGRAPH_TYPE_MASK; 230 + } 231 + 
232 + /* Get the data_index for a DATA type ret_stack word */ 233 + static inline int __get_data_index(unsigned long val) 234 + { 235 + return (val >> FGRAPH_DATA_INDEX_SHIFT) & FGRAPH_DATA_INDEX_MASK; 236 + } 237 + 238 + /* Get the data_size for a DATA type ret_stack word */ 239 + static inline int __get_data_size(unsigned long val) 240 + { 241 + return ((val >> FGRAPH_DATA_SHIFT) & FGRAPH_DATA_MASK) + 1; 242 + } 243 + 244 + /* Get the word from the ret_stack at @offset */ 245 + static inline unsigned long get_fgraph_entry(struct task_struct *t, int offset) 246 + { 247 + return t->ret_stack[offset]; 248 + } 249 + 250 + /* Get the FRAME_OFFSET from the word from the @offset on ret_stack */ 251 + static inline int get_frame_offset(struct task_struct *t, int offset) 252 + { 253 + return __get_offset(t->ret_stack[offset]); 254 + } 255 + 256 + /* For BITMAP type: get the bitmask from the @offset at ret_stack */ 257 + static inline unsigned long 258 + get_bitmap_bits(struct task_struct *t, int offset) 259 + { 260 + return (t->ret_stack[offset] >> FGRAPH_INDEX_SHIFT) & FGRAPH_INDEX_MASK; 261 + } 262 + 263 + /* Write the bitmap to the ret_stack at @offset (does index, offset and bitmask) */ 264 + static inline void 265 + set_bitmap(struct task_struct *t, int offset, unsigned long bitmap) 266 + { 267 + t->ret_stack[offset] = (bitmap << FGRAPH_INDEX_SHIFT) | 268 + (FGRAPH_TYPE_BITMAP << FGRAPH_TYPE_SHIFT) | FGRAPH_FRAME_OFFSET; 269 + } 270 + 271 + /* For DATA type: get the data saved under the ret_stack word at @offset */ 272 + static inline void *get_data_type_data(struct task_struct *t, int offset) 273 + { 274 + unsigned long val = t->ret_stack[offset]; 275 + 276 + if (__get_type(val) != FGRAPH_TYPE_DATA) 277 + return NULL; 278 + offset -= __get_data_size(val); 279 + return (void *)&t->ret_stack[offset]; 280 + } 281 + 282 + /* Create the ret_stack word for a DATA type */ 283 + static inline unsigned long make_data_type_val(int idx, int size, int offset) 284 + { 285 + 
return (idx << FGRAPH_DATA_INDEX_SHIFT) | 286 + ((size - 1) << FGRAPH_DATA_SHIFT) | 287 + (FGRAPH_TYPE_DATA << FGRAPH_TYPE_SHIFT) | offset; 288 + } 289 + 290 + /* ftrace_graph_entry set to this to tell some archs to run function graph */ 291 + static int entry_run(struct ftrace_graph_ent *trace, struct fgraph_ops *ops) 292 + { 293 + return 0; 294 + } 295 + 296 + /* ftrace_graph_return set to this to tell some archs to run function graph */ 297 + static void return_run(struct ftrace_graph_ret *trace, struct fgraph_ops *ops) 298 + { 299 + } 300 + 301 + static void ret_stack_set_task_var(struct task_struct *t, int idx, long val) 302 + { 303 + unsigned long *gvals = SHADOW_STACK_TASK_VARS(t->ret_stack); 304 + 305 + gvals[idx] = val; 306 + } 307 + 308 + static unsigned long * 309 + ret_stack_get_task_var(struct task_struct *t, int idx) 310 + { 311 + unsigned long *gvals = SHADOW_STACK_TASK_VARS(t->ret_stack); 312 + 313 + return &gvals[idx]; 314 + } 315 + 316 + static void ret_stack_init_task_vars(unsigned long *ret_stack) 317 + { 318 + unsigned long *gvals = SHADOW_STACK_TASK_VARS(ret_stack); 319 + 320 + memset(gvals, 0, sizeof(*gvals) * FGRAPH_ARRAY_SIZE); 321 + } 322 + 323 + /** 324 + * fgraph_reserve_data - Reserve storage on the task's ret_stack 325 + * @idx: The index of fgraph_array 326 + * @size_bytes: The size in bytes to reserve 327 + * 328 + * Reserves space of up to FGRAPH_MAX_DATA_SIZE bytes on the 329 + * task's ret_stack shadow stack, for a given fgraph_ops during 330 + * the entryfunc() call. If entryfunc() returns zero, the storage 331 + * is discarded. An entryfunc() can only call this once per iteration. 332 + * The fgraph_ops retfunc() can retrieve this stored data with 333 + * fgraph_retrieve_data(). 334 + * 335 + * Returns: On success, a pointer to the data on the stack. 
336 + * Otherwise, NULL if there's not enough space left on the 337 + * ret_stack for the data, or if fgraph_reserve_data() was called 338 + * more than once for a single entryfunc() call. 339 + */ 340 + void *fgraph_reserve_data(int idx, int size_bytes) 341 + { 342 + unsigned long val; 343 + void *data; 344 + int curr_ret_stack = current->curr_ret_stack; 345 + int data_size; 346 + 347 + if (size_bytes > FGRAPH_MAX_DATA_SIZE) 348 + return NULL; 349 + 350 + /* Convert the data size to number of longs. */ 351 + data_size = (size_bytes + sizeof(long) - 1) >> (sizeof(long) == 4 ? 2 : 3); 352 + 353 + val = get_fgraph_entry(current, curr_ret_stack - 1); 354 + data = &current->ret_stack[curr_ret_stack]; 355 + 356 + curr_ret_stack += data_size + 1; 357 + if (unlikely(curr_ret_stack >= SHADOW_STACK_MAX_OFFSET)) 358 + return NULL; 359 + 360 + val = make_data_type_val(idx, data_size, __get_offset(val) + data_size + 1); 361 + 362 + /* Set the last word to be reserved */ 363 + current->ret_stack[curr_ret_stack - 1] = val; 364 + 365 + /* Make sure interrupts see this */ 366 + barrier(); 367 + current->curr_ret_stack = curr_ret_stack; 368 + /* Again sync with interrupts, and reset reserve */ 369 + current->ret_stack[curr_ret_stack - 1] = val; 370 + 371 + return data; 372 + } 373 + 374 + /** 375 + * fgraph_retrieve_data - Retrieve stored data from fgraph_reserve_data() 376 + * @idx: the index of fgraph_array (fgraph_ops::idx) 377 + * @size_bytes: pointer to retrieved data size. 378 + * 379 + * This is to be called by a fgraph_ops retfunc(), to retrieve data that 380 + * was stored by the fgraph_ops entryfunc() on the function entry. 381 + * That is, this will retrieve the data that was reserved on the 382 + * entry of the function that corresponds to the exit of the function 383 + * that the fgraph_ops retfunc() is called on. 384 + * 385 + * Returns: The stored data from fgraph_reserve_data() called by the 386 + * matching entryfunc() for the retfunc() this is called from. 
387 + * Or NULL if there was nothing stored. 388 + */ 389 + void *fgraph_retrieve_data(int idx, int *size_bytes) 390 + { 391 + int offset = current->curr_ret_stack - 1; 392 + unsigned long val; 393 + 394 + val = get_fgraph_entry(current, offset); 395 + while (__get_type(val) == FGRAPH_TYPE_DATA) { 396 + if (__get_data_index(val) == idx) 397 + goto found; 398 + offset -= __get_data_size(val) + 1; 399 + val = get_fgraph_entry(current, offset); 400 + } 401 + return NULL; 402 + found: 403 + if (size_bytes) 404 + *size_bytes = __get_data_size(val) * sizeof(long); 405 + return get_data_type_data(current, offset); 406 + } 407 + 408 + /** 409 + * fgraph_get_task_var - retrieve a task specific state variable 410 + * @gops: The ftrace_ops that owns the task specific variable 411 + * 412 + * Every registered fgraph_ops has a task state variable 413 + * reserved on the task's ret_stack. This function returns the 414 + * address to that variable. 415 + * 416 + * Returns the address to the fgraph_ops @gops tasks specific 417 + * unsigned long variable. 418 + */ 419 + unsigned long *fgraph_get_task_var(struct fgraph_ops *gops) 420 + { 421 + return ret_stack_get_task_var(current, gops->idx); 422 + } 423 + 424 + /* 425 + * @offset: The offset into @t->ret_stack to find the ret_stack entry 426 + * @frame_offset: Where to place the offset into @t->ret_stack of that entry 427 + * 428 + * Returns a pointer to the previous ret_stack below @offset or NULL 429 + * when it reaches the bottom of the stack. 430 + * 431 + * Calling this with: 432 + * 433 + * offset = task->curr_ret_stack; 434 + * do { 435 + * ret_stack = get_ret_stack(task, offset, &offset); 436 + * } while (ret_stack); 437 + * 438 + * Will iterate through all the ret_stack entries from curr_ret_stack 439 + * down to the first one. 
440 + */ 441 + static inline struct ftrace_ret_stack * 442 + get_ret_stack(struct task_struct *t, int offset, int *frame_offset) 443 + { 444 + int offs; 445 + 446 + BUILD_BUG_ON(FGRAPH_FRAME_SIZE % sizeof(long)); 447 + 448 + if (unlikely(offset <= 0)) 449 + return NULL; 450 + 451 + offs = get_frame_offset(t, --offset); 452 + if (WARN_ON_ONCE(offs <= 0 || offs > offset)) 453 + return NULL; 454 + 455 + offset -= offs; 456 + 457 + *frame_offset = offset; 458 + return RET_STACK(t, offset); 459 + } 32 460 33 461 /* Both enabled by default (can be cleared by function_graph tracer flags */ 34 462 static bool fgraph_sleep_time = true; ··· 483 51 } 484 52 #endif 485 53 54 + int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace, 55 + struct fgraph_ops *gops) 56 + { 57 + return 0; 58 + } 59 + 60 + static void ftrace_graph_ret_stub(struct ftrace_graph_ret *trace, 61 + struct fgraph_ops *gops) 62 + { 63 + } 64 + 65 + static struct fgraph_ops fgraph_stub = { 66 + .entryfunc = ftrace_graph_entry_stub, 67 + .retfunc = ftrace_graph_ret_stub, 68 + }; 69 + 70 + static struct fgraph_ops *fgraph_direct_gops = &fgraph_stub; 71 + DEFINE_STATIC_CALL(fgraph_func, ftrace_graph_entry_stub); 72 + DEFINE_STATIC_CALL(fgraph_retfunc, ftrace_graph_ret_stub); 73 + static DEFINE_STATIC_KEY_TRUE(fgraph_do_direct); 74 + 486 75 /** 487 76 * ftrace_graph_stop - set to permanently disable function graph tracing 488 77 * ··· 520 67 /* Add a function return address to the trace stack on thread info.*/ 521 68 static int 522 69 ftrace_push_return_trace(unsigned long ret, unsigned long func, 523 - unsigned long frame_pointer, unsigned long *retp) 70 + unsigned long frame_pointer, unsigned long *retp, 71 + int fgraph_idx) 524 72 { 73 + struct ftrace_ret_stack *ret_stack; 525 74 unsigned long long calltime; 526 - int index; 75 + unsigned long val; 76 + int offset; 527 77 528 78 if (unlikely(ftrace_graph_is_dead())) 529 79 return -EBUSY; ··· 534 78 if (!current->ret_stack) 535 79 return -EBUSY; 536 80 81 + 
BUILD_BUG_ON(SHADOW_STACK_SIZE % sizeof(long)); 82 + 83 + /* Set val to "reserved" with the delta to the new fgraph frame */ 84 + val = (FGRAPH_TYPE_RESERVED << FGRAPH_TYPE_SHIFT) | FGRAPH_FRAME_OFFSET; 85 + 537 86 /* 538 87 * We must make sure the ret_stack is tested before we read 539 88 * anything else. 540 89 */ 541 90 smp_rmb(); 542 91 543 - /* The return trace stack is full */ 544 - if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) { 92 + /* 93 + * Check if there's room on the shadow stack to fit a fraph frame 94 + * and a bitmap word. 95 + */ 96 + if (current->curr_ret_stack + FGRAPH_FRAME_OFFSET + 1 >= SHADOW_STACK_MAX_OFFSET) { 545 97 atomic_inc(&current->trace_overrun); 546 98 return -EBUSY; 547 99 } 548 100 549 101 calltime = trace_clock_local(); 550 102 551 - index = ++current->curr_ret_stack; 103 + offset = READ_ONCE(current->curr_ret_stack); 104 + ret_stack = RET_STACK(current, offset); 105 + offset += FGRAPH_FRAME_OFFSET; 106 + 107 + /* ret offset = FGRAPH_FRAME_OFFSET ; type = reserved */ 108 + current->ret_stack[offset] = val; 109 + ret_stack->ret = ret; 110 + /* 111 + * The unwinders expect curr_ret_stack to point to either zero 112 + * or an offset where to find the next ret_stack. Even though the 113 + * ret stack might be bogus, we want to write the ret and the 114 + * offset to find the ret_stack before we increment the stack point. 115 + * If an interrupt comes in now before we increment the curr_ret_stack 116 + * it may blow away what we wrote. But that's fine, because the 117 + * offset will still be correct (even though the 'ret' won't be). 118 + * What we worry about is the offset being correct after we increment 119 + * the curr_ret_stack and before we update that offset, as if an 120 + * interrupt comes in and does an unwind stack dump, it will need 121 + * at least a correct offset! 
122 + */ 552 123 barrier(); 553 - current->ret_stack[index].ret = ret; 554 - current->ret_stack[index].func = func; 555 - current->ret_stack[index].calltime = calltime; 124 + WRITE_ONCE(current->curr_ret_stack, offset + 1); 125 + /* 126 + * This next barrier is to ensure that an interrupt coming in 127 + * will not corrupt what we are about to write. 128 + */ 129 + barrier(); 130 + 131 + /* Still keep it reserved even if an interrupt came in */ 132 + current->ret_stack[offset] = val; 133 + 134 + ret_stack->ret = ret; 135 + ret_stack->func = func; 136 + ret_stack->calltime = calltime; 556 137 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST 557 - current->ret_stack[index].fp = frame_pointer; 138 + ret_stack->fp = frame_pointer; 558 139 #endif 559 - #ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR 560 - current->ret_stack[index].retp = retp; 561 - #endif 562 - return 0; 140 + ret_stack->retp = retp; 141 + return offset; 563 142 } 564 143 565 144 /* ··· 611 120 # define MCOUNT_INSN_SIZE 0 612 121 #endif 613 122 123 + /* If the caller does not use ftrace, call this function. 
*/ 614 124 int function_graph_enter(unsigned long ret, unsigned long func, 615 125 unsigned long frame_pointer, unsigned long *retp) 616 126 { 617 127 struct ftrace_graph_ent trace; 128 + unsigned long bitmap = 0; 129 + int offset; 130 + int i; 618 131 619 132 trace.func = func; 620 133 trace.depth = ++current->curr_ret_depth; 621 134 622 - if (ftrace_push_return_trace(ret, func, frame_pointer, retp)) 135 + offset = ftrace_push_return_trace(ret, func, frame_pointer, retp, 0); 136 + if (offset < 0) 623 137 goto out; 624 138 625 - /* Only trace if the calling function expects to */ 626 - if (!ftrace_graph_entry(&trace)) 139 + #ifdef CONFIG_HAVE_STATIC_CALL 140 + if (static_branch_likely(&fgraph_do_direct)) { 141 + int save_curr_ret_stack = current->curr_ret_stack; 142 + 143 + if (static_call(fgraph_func)(&trace, fgraph_direct_gops)) 144 + bitmap |= BIT(fgraph_direct_gops->idx); 145 + else 146 + /* Clear out any saved storage */ 147 + current->curr_ret_stack = save_curr_ret_stack; 148 + } else 149 + #endif 150 + { 151 + for_each_set_bit(i, &fgraph_array_bitmask, 152 + sizeof(fgraph_array_bitmask) * BITS_PER_BYTE) { 153 + struct fgraph_ops *gops = READ_ONCE(fgraph_array[i]); 154 + int save_curr_ret_stack; 155 + 156 + if (gops == &fgraph_stub) 157 + continue; 158 + 159 + save_curr_ret_stack = current->curr_ret_stack; 160 + if (ftrace_ops_test(&gops->ops, func, NULL) && 161 + gops->entryfunc(&trace, gops)) 162 + bitmap |= BIT(i); 163 + else 164 + /* Clear out any saved storage */ 165 + current->curr_ret_stack = save_curr_ret_stack; 166 + } 167 + } 168 + 169 + if (!bitmap) 627 170 goto out_ret; 171 + 172 + /* 173 + * Since this function uses fgraph_idx = 0 as a tail-call checking 174 + * flag, set that bit always. 
175 + */ 176 + set_bitmap(current, offset, bitmap | BIT(0)); 628 177 629 178 return 0; 630 179 out_ret: 631 - current->curr_ret_stack--; 180 + current->curr_ret_stack -= FGRAPH_FRAME_OFFSET + 1; 632 181 out: 633 182 current->curr_ret_depth--; 634 183 return -EBUSY; 635 184 } 636 185 637 186 /* Retrieve a function return address to the trace stack on thread info.*/ 638 - static void 187 + static struct ftrace_ret_stack * 639 188 ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret, 640 - unsigned long frame_pointer) 189 + unsigned long frame_pointer, int *offset) 641 190 { 642 - int index; 191 + struct ftrace_ret_stack *ret_stack; 643 192 644 - index = current->curr_ret_stack; 193 + ret_stack = get_ret_stack(current, current->curr_ret_stack, offset); 645 194 646 - if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) { 195 + if (unlikely(!ret_stack)) { 647 196 ftrace_graph_stop(); 648 - WARN_ON(1); 197 + WARN(1, "Bad function graph ret_stack pointer: %d", 198 + current->curr_ret_stack); 649 199 /* Might as well panic, otherwise we have no where to go */ 650 200 *ret = (unsigned long)panic; 651 - return; 201 + return NULL; 652 202 } 653 203 654 204 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST ··· 707 175 * Note, -mfentry does not use frame pointers, and this test 708 176 * is not needed if CC_USING_FENTRY is set. 
709 177 */ 710 - if (unlikely(current->ret_stack[index].fp != frame_pointer)) { 178 + if (unlikely(ret_stack->fp != frame_pointer)) { 711 179 ftrace_graph_stop(); 712 180 WARN(1, "Bad frame pointer: expected %lx, received %lx\n" 713 181 " from func %ps return to %lx\n", 714 - current->ret_stack[index].fp, 182 + ret_stack->fp, 715 183 frame_pointer, 716 - (void *)current->ret_stack[index].func, 717 - current->ret_stack[index].ret); 184 + (void *)ret_stack->func, 185 + ret_stack->ret); 718 186 *ret = (unsigned long)panic; 719 - return; 187 + return NULL; 720 188 } 721 189 #endif 722 190 723 - *ret = current->ret_stack[index].ret; 724 - trace->func = current->ret_stack[index].func; 725 - trace->calltime = current->ret_stack[index].calltime; 191 + *offset += FGRAPH_FRAME_OFFSET; 192 + *ret = ret_stack->ret; 193 + trace->func = ret_stack->func; 194 + trace->calltime = ret_stack->calltime; 726 195 trace->overrun = atomic_read(&current->trace_overrun); 727 - trace->depth = current->curr_ret_depth--; 196 + trace->depth = current->curr_ret_depth; 728 197 /* 729 198 * We still want to trace interrupts coming in if 730 199 * max_depth is set to 1. Make sure the decrement is 731 200 * seen before ftrace_graph_return. 732 201 */ 733 202 barrier(); 203 + 204 + return ret_stack; 734 205 } 735 206 736 207 /* ··· 771 236 static unsigned long __ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs, 772 237 unsigned long frame_pointer) 773 238 { 239 + struct ftrace_ret_stack *ret_stack; 774 240 struct ftrace_graph_ret trace; 241 + unsigned long bitmap; 775 242 unsigned long ret; 243 + int offset; 244 + int i; 776 245 777 - ftrace_pop_return_trace(&trace, &ret, frame_pointer); 246 + ret_stack = ftrace_pop_return_trace(&trace, &ret, frame_pointer, &offset); 247 + 248 + if (unlikely(!ret_stack)) { 249 + ftrace_graph_stop(); 250 + WARN_ON(1); 251 + /* Might as well panic. What else to do? 
*/ 252 + return (unsigned long)panic; 253 + } 254 + 255 + trace.rettime = trace_clock_local(); 778 256 #ifdef CONFIG_FUNCTION_GRAPH_RETVAL 779 257 trace.retval = fgraph_ret_regs_return_value(ret_regs); 780 258 #endif 781 - trace.rettime = trace_clock_local(); 782 - ftrace_graph_return(&trace); 259 + 260 + bitmap = get_bitmap_bits(current, offset); 261 + 262 + #ifdef CONFIG_HAVE_STATIC_CALL 263 + if (static_branch_likely(&fgraph_do_direct)) { 264 + if (test_bit(fgraph_direct_gops->idx, &bitmap)) 265 + static_call(fgraph_retfunc)(&trace, fgraph_direct_gops); 266 + } else 267 + #endif 268 + { 269 + for_each_set_bit(i, &bitmap, sizeof(bitmap) * BITS_PER_BYTE) { 270 + struct fgraph_ops *gops = fgraph_array[i]; 271 + 272 + if (gops == &fgraph_stub) 273 + continue; 274 + 275 + gops->retfunc(&trace, gops); 276 + } 277 + } 278 + 783 279 /* 784 280 * The ftrace_graph_return() may still access the current 785 281 * ret_stack structure, we need to make sure the update of 786 282 * curr_ret_stack is after that. 787 283 */ 788 284 barrier(); 789 - current->curr_ret_stack--; 285 + current->curr_ret_stack = offset - FGRAPH_FRAME_OFFSET; 790 286 791 - if (unlikely(!ret)) { 792 - ftrace_graph_stop(); 793 - WARN_ON(1); 794 - /* Might as well panic. What else to do? */ 795 - ret = (unsigned long)panic; 796 - } 797 - 287 + current->curr_ret_depth--; 798 288 return ret; 799 289 } 800 290 ··· 842 282 843 283 /** 844 284 * ftrace_graph_get_ret_stack - return the entry of the shadow stack 845 - * @task: The task to read the shadow stack from 285 + * @task: The task to read the shadow stack from. 
846 286 * @idx: Index down the shadow stack 847 287 * 848 288 * Return the ret_struct on the shadow stack of the @task at the ··· 854 294 struct ftrace_ret_stack * 855 295 ftrace_graph_get_ret_stack(struct task_struct *task, int idx) 856 296 { 857 - idx = task->curr_ret_stack - idx; 297 + struct ftrace_ret_stack *ret_stack = NULL; 298 + int offset = task->curr_ret_stack; 858 299 859 - if (idx >= 0 && idx <= task->curr_ret_stack) 860 - return &task->ret_stack[idx]; 300 + if (offset < 0) 301 + return NULL; 861 302 862 - return NULL; 303 + do { 304 + ret_stack = get_ret_stack(task, offset, &offset); 305 + } while (ret_stack && --idx >= 0); 306 + 307 + return ret_stack; 863 308 } 864 309 865 310 /** 866 - * ftrace_graph_ret_addr - convert a potentially modified stack return address 867 - * to its original value 311 + * ftrace_graph_ret_addr - return the original value of the return address 312 + * @task: The task the unwinder is being executed on 313 + * @idx: An initialized pointer to the next stack index to use 314 + * @ret: The current return address (likely pointing to return_handler) 315 + * @retp: The address on the stack of the current return location 868 316 * 869 317 * This function can be called by stack unwinding code to convert a found stack 870 - * return address ('ret') to its original value, in case the function graph 318 + * return address (@ret) to its original value, in case the function graph 871 319 * tracer has modified it to be 'return_to_handler'. If the address hasn't 872 - * been modified, the unchanged value of 'ret' is returned. 320 + * been modified, the unchanged value of @ret is returned. 873 321 * 874 - * 'idx' is a state variable which should be initialized by the caller to zero 875 - * before the first call. 322 + * @idx holds the last index used to know where to start from. It should be 323 + * initialized to zero for the first iteration as that will mean to start 324 + * at the top of the shadow stack. 
If the location is found, this pointer 325 + * will be assigned that location so that if called again, it will continue 326 + * where it left off. 876 327 * 877 - * 'retp' is a pointer to the return address on the stack. It's ignored if 878 - * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined. 328 + * @retp is a pointer to the return address on the stack. 879 329 */ 880 - #ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR 881 330 unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx, 882 331 unsigned long ret, unsigned long *retp) 883 332 { 884 - int index = task->curr_ret_stack; 885 - int i; 333 + struct ftrace_ret_stack *ret_stack; 334 + unsigned long return_handler = (unsigned long)dereference_kernel_function_descriptor(return_to_handler); 335 + int i = task->curr_ret_stack; 886 336 887 - if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler)) 337 + if (ret != return_handler) 888 338 return ret; 889 339 890 - if (index < 0) 340 + if (!idx) 891 341 return ret; 892 342 893 - for (i = 0; i <= index; i++) 894 - if (task->ret_stack[i].retp == retp) 895 - return task->ret_stack[i].ret; 343 + i = *idx ? : task->curr_ret_stack; 344 + while (i > 0) { 345 + ret_stack = get_ret_stack(current, i, &i); 346 + if (!ret_stack) 347 + break; 348 + /* 349 + * For the tail-call, there would be 2 or more ftrace_ret_stacks on 350 + * the ret_stack, which records "return_to_handler" as the return 351 + * address except for the last one. 352 + * But on the real stack, there should be 1 entry because tail-call 353 + * reuses the return address on the stack and jump to the next function. 354 + * Thus we will continue to find real return address. 
355 + */ 356 + if (ret_stack->retp == retp && 357 + ret_stack->ret != return_handler) { 358 + *idx = i; 359 + return ret_stack->ret; 360 + } 361 + } 896 362 897 363 return ret; 898 364 } 899 - #else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */ 900 - unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx, 901 - unsigned long ret, unsigned long *retp) 902 - { 903 - int task_idx; 904 - 905 - if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler)) 906 - return ret; 907 - 908 - task_idx = task->curr_ret_stack; 909 - 910 - if (!task->ret_stack || task_idx < *idx) 911 - return ret; 912 - 913 - task_idx -= *idx; 914 - (*idx)++; 915 - 916 - return task->ret_stack[task_idx].ret; 917 - } 918 - #endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */ 919 365 920 366 static struct ftrace_ops graph_ops = { 921 367 .func = ftrace_graph_func, 922 - .flags = FTRACE_OPS_FL_INITIALIZED | 923 - FTRACE_OPS_FL_PID | 924 - FTRACE_OPS_GRAPH_STUB, 368 + .flags = FTRACE_OPS_GRAPH_STUB, 925 369 #ifdef FTRACE_GRAPH_TRAMP_ADDR 926 370 .trampoline = FTRACE_GRAPH_TRAMP_ADDR, 927 371 /* trampoline_size is only needed for dynamically allocated tramps */ 928 372 #endif 929 - ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash) 930 373 }; 374 + 375 + void fgraph_init_ops(struct ftrace_ops *dst_ops, 376 + struct ftrace_ops *src_ops) 377 + { 378 + dst_ops->flags = FTRACE_OPS_FL_PID | FTRACE_OPS_GRAPH_STUB; 379 + 380 + #ifdef CONFIG_DYNAMIC_FTRACE 381 + if (src_ops) { 382 + dst_ops->func_hash = &src_ops->local_hash; 383 + mutex_init(&dst_ops->local_hash.regex_lock); 384 + INIT_LIST_HEAD(&dst_ops->subop_list); 385 + dst_ops->flags |= FTRACE_OPS_FL_INITIALIZED; 386 + } 387 + #endif 388 + } 931 389 932 390 void ftrace_graph_sleep_time_control(bool enable) 933 391 { 934 392 fgraph_sleep_time = enable; 935 393 } 936 394 937 - int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) 938 - { 939 - return 0; 940 - } 941 - 942 395 /* 943 396 * Simply points to ftrace_stub, but with 
the proper protocol. 944 397 * Defined by the linker script in linux/vmlinux.lds.h 945 398 */ 946 - extern void ftrace_stub_graph(struct ftrace_graph_ret *); 399 + void ftrace_stub_graph(struct ftrace_graph_ret *trace, struct fgraph_ops *gops); 947 400 948 401 /* The callbacks that hook a function */ 949 402 trace_func_graph_ret_t ftrace_graph_return = ftrace_stub_graph; 950 403 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub; 951 - static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub; 952 404 953 405 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */ 954 - static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list) 406 + static int alloc_retstack_tasklist(unsigned long **ret_stack_list) 955 407 { 956 408 int i; 957 409 int ret = 0; ··· 971 399 struct task_struct *g, *t; 972 400 973 401 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) { 974 - ret_stack_list[i] = 975 - kmalloc_array(FTRACE_RETFUNC_DEPTH, 976 - sizeof(struct ftrace_ret_stack), 977 - GFP_KERNEL); 402 + ret_stack_list[i] = kmalloc(SHADOW_STACK_SIZE, GFP_KERNEL); 978 403 if (!ret_stack_list[i]) { 979 404 start = 0; 980 405 end = i; ··· 989 420 990 421 if (t->ret_stack == NULL) { 991 422 atomic_set(&t->trace_overrun, 0); 992 - t->curr_ret_stack = -1; 423 + ret_stack_init_task_vars(ret_stack_list[start]); 424 + t->curr_ret_stack = 0; 993 425 t->curr_ret_depth = -1; 994 - /* Make sure the tasks see the -1 first: */ 426 + /* Make sure the tasks see the 0 first: */ 995 427 smp_wmb(); 996 428 t->ret_stack = ret_stack_list[start++]; 997 429 } ··· 1012 442 struct task_struct *next, 1013 443 unsigned int prev_state) 1014 444 { 445 + struct ftrace_ret_stack *ret_stack; 1015 446 unsigned long long timestamp; 1016 - int index; 447 + int offset; 1017 448 1018 449 /* 1019 450 * Does the user want to count the time a function was asleep. 
··· 1037 466 */ 1038 467 timestamp -= next->ftrace_timestamp; 1039 468 1040 - for (index = next->curr_ret_stack; index >= 0; index--) 1041 - next->ret_stack[index].calltime += timestamp; 469 + for (offset = next->curr_ret_stack; offset > 0; ) { 470 + ret_stack = get_ret_stack(next, offset, &offset); 471 + if (ret_stack) 472 + ret_stack->calltime += timestamp; 473 + } 1042 474 } 1043 475 1044 - static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace) 1045 - { 1046 - if (!ftrace_ops_test(&global_ops, trace->func, NULL)) 1047 - return 0; 1048 - return __ftrace_graph_entry(trace); 1049 - } 1050 - 1051 - /* 1052 - * The function graph tracer should only trace the functions defined 1053 - * by set_ftrace_filter and set_ftrace_notrace. If another function 1054 - * tracer ops is registered, the graph tracer requires testing the 1055 - * function against the global ops, and not just trace any function 1056 - * that any ftrace_ops registered. 1057 - */ 1058 - void update_function_graph_func(void) 1059 - { 1060 - struct ftrace_ops *op; 1061 - bool do_test = false; 1062 - 1063 - /* 1064 - * The graph and global ops share the same set of functions 1065 - * to test. If any other ops is on the list, then 1066 - * the graph tracing needs to test if its the function 1067 - * it should call. 
1068 - */ 1069 - do_for_each_ftrace_op(op, ftrace_ops_list) { 1070 - if (op != &global_ops && op != &graph_ops && 1071 - op != &ftrace_list_end) { 1072 - do_test = true; 1073 - /* in double loop, break out with goto */ 1074 - goto out; 1075 - } 1076 - } while_for_each_ftrace_op(op); 1077 - out: 1078 - if (do_test) 1079 - ftrace_graph_entry = ftrace_graph_entry_test; 1080 - else 1081 - ftrace_graph_entry = __ftrace_graph_entry; 1082 - } 1083 - 1084 - static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack); 476 + static DEFINE_PER_CPU(unsigned long *, idle_ret_stack); 1085 477 1086 478 static void 1087 - graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack) 479 + graph_init_task(struct task_struct *t, unsigned long *ret_stack) 1088 480 { 1089 481 atomic_set(&t->trace_overrun, 0); 482 + ret_stack_init_task_vars(ret_stack); 1090 483 t->ftrace_timestamp = 0; 484 + t->curr_ret_stack = 0; 485 + t->curr_ret_depth = -1; 1091 486 /* make curr_ret_stack visible before we add the ret_stack */ 1092 487 smp_wmb(); 1093 488 t->ret_stack = ret_stack; ··· 1065 528 */ 1066 529 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) 1067 530 { 1068 - t->curr_ret_stack = -1; 531 + t->curr_ret_stack = 0; 1069 532 t->curr_ret_depth = -1; 1070 533 /* 1071 534 * The idle task has no parent, it either has its own ··· 1075 538 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu)); 1076 539 1077 540 if (ftrace_graph_active) { 1078 - struct ftrace_ret_stack *ret_stack; 541 + unsigned long *ret_stack; 1079 542 1080 543 ret_stack = per_cpu(idle_ret_stack, cpu); 1081 544 if (!ret_stack) { 1082 - ret_stack = 1083 - kmalloc_array(FTRACE_RETFUNC_DEPTH, 1084 - sizeof(struct ftrace_ret_stack), 1085 - GFP_KERNEL); 545 + ret_stack = kmalloc(SHADOW_STACK_SIZE, GFP_KERNEL); 1086 546 if (!ret_stack) 1087 547 return; 1088 548 per_cpu(idle_ret_stack, cpu) = ret_stack; ··· 1093 559 { 1094 560 /* Make sure we do not use the parent ret_stack */ 1095 561 t->ret_stack = 
NULL; 1096 - t->curr_ret_stack = -1; 562 + t->curr_ret_stack = 0; 1097 563 t->curr_ret_depth = -1; 1098 564 1099 565 if (ftrace_graph_active) { 1100 - struct ftrace_ret_stack *ret_stack; 566 + unsigned long *ret_stack; 1101 567 1102 - ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH, 1103 - sizeof(struct ftrace_ret_stack), 1104 - GFP_KERNEL); 568 + ret_stack = kmalloc(SHADOW_STACK_SIZE, GFP_KERNEL); 1105 569 if (!ret_stack) 1106 570 return; 1107 571 graph_init_task(t, ret_stack); ··· 1108 576 1109 577 void ftrace_graph_exit_task(struct task_struct *t) 1110 578 { 1111 - struct ftrace_ret_stack *ret_stack = t->ret_stack; 579 + unsigned long *ret_stack = t->ret_stack; 1112 580 1113 581 t->ret_stack = NULL; 1114 582 /* NULL must become visible to IRQs before we free it: */ ··· 1117 585 kfree(ret_stack); 1118 586 } 1119 587 588 + #ifdef CONFIG_DYNAMIC_FTRACE 589 + static int fgraph_pid_func(struct ftrace_graph_ent *trace, 590 + struct fgraph_ops *gops) 591 + { 592 + struct trace_array *tr = gops->ops.private; 593 + int pid; 594 + 595 + if (tr) { 596 + pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid); 597 + if (pid == FTRACE_PID_IGNORE) 598 + return 0; 599 + if (pid != FTRACE_PID_TRACE && 600 + pid != current->pid) 601 + return 0; 602 + } 603 + 604 + return gops->saved_func(trace, gops); 605 + } 606 + 607 + void fgraph_update_pid_func(void) 608 + { 609 + struct fgraph_ops *gops; 610 + struct ftrace_ops *op; 611 + 612 + if (!(graph_ops.flags & FTRACE_OPS_FL_INITIALIZED)) 613 + return; 614 + 615 + list_for_each_entry(op, &graph_ops.subop_list, list) { 616 + if (op->flags & FTRACE_OPS_FL_PID) { 617 + gops = container_of(op, struct fgraph_ops, ops); 618 + gops->entryfunc = ftrace_pids_enabled(op) ? 
619 + fgraph_pid_func : gops->saved_func; 620 + if (ftrace_graph_active == 1) 621 + static_call_update(fgraph_func, gops->entryfunc); 622 + } 623 + } 624 + } 625 + #endif 626 + 1120 627 /* Allocate a return stack for each task */ 1121 628 static int start_graph_tracing(void) 1122 629 { 1123 - struct ftrace_ret_stack **ret_stack_list; 630 + unsigned long **ret_stack_list; 1124 631 int ret, cpu; 1125 632 1126 - ret_stack_list = kmalloc_array(FTRACE_RETSTACK_ALLOC_SIZE, 1127 - sizeof(struct ftrace_ret_stack *), 1128 - GFP_KERNEL); 633 + ret_stack_list = kmalloc(SHADOW_STACK_SIZE, GFP_KERNEL); 1129 634 1130 635 if (!ret_stack_list) 1131 636 return -ENOMEM; ··· 1188 619 return ret; 1189 620 } 1190 621 622 + static void init_task_vars(int idx) 623 + { 624 + struct task_struct *g, *t; 625 + int cpu; 626 + 627 + for_each_online_cpu(cpu) { 628 + if (idle_task(cpu)->ret_stack) 629 + ret_stack_set_task_var(idle_task(cpu), idx, 0); 630 + } 631 + 632 + read_lock(&tasklist_lock); 633 + for_each_process_thread(g, t) { 634 + if (t->ret_stack) 635 + ret_stack_set_task_var(t, idx, 0); 636 + } 637 + read_unlock(&tasklist_lock); 638 + } 639 + 640 + static void ftrace_graph_enable_direct(bool enable_branch) 641 + { 642 + trace_func_graph_ent_t func = NULL; 643 + trace_func_graph_ret_t retfunc = NULL; 644 + int i; 645 + 646 + for_each_set_bit(i, &fgraph_array_bitmask, 647 + sizeof(fgraph_array_bitmask) * BITS_PER_BYTE) { 648 + func = fgraph_array[i]->entryfunc; 649 + retfunc = fgraph_array[i]->retfunc; 650 + fgraph_direct_gops = fgraph_array[i]; 651 + } 652 + if (WARN_ON_ONCE(!func)) 653 + return; 654 + 655 + static_call_update(fgraph_func, func); 656 + static_call_update(fgraph_retfunc, retfunc); 657 + if (enable_branch) 658 + static_branch_disable(&fgraph_do_direct); 659 + } 660 + 661 + static void ftrace_graph_disable_direct(bool disable_branch) 662 + { 663 + if (disable_branch) 664 + static_branch_disable(&fgraph_do_direct); 665 + static_call_update(fgraph_func, 
ftrace_graph_entry_stub); 666 + static_call_update(fgraph_retfunc, ftrace_graph_ret_stub); 667 + fgraph_direct_gops = &fgraph_stub; 668 + } 669 + 1191 670 int register_ftrace_graph(struct fgraph_ops *gops) 1192 671 { 672 + int command = 0; 1193 673 int ret = 0; 674 + int i = -1; 1194 675 1195 676 mutex_lock(&ftrace_lock); 1196 677 1197 - /* we currently allow only one tracer registered at a time */ 1198 - if (ftrace_graph_active) { 1199 - ret = -EBUSY; 678 + if (!fgraph_array[0]) { 679 + /* The array must always have real data on it */ 680 + for (i = 0; i < FGRAPH_ARRAY_SIZE; i++) 681 + fgraph_array[i] = &fgraph_stub; 682 + fgraph_lru_init(); 683 + } 684 + 685 + i = fgraph_lru_alloc_index(); 686 + if (i < 0 || WARN_ON_ONCE(fgraph_array[i] != &fgraph_stub)) { 687 + ret = -ENOSPC; 1200 688 goto out; 1201 689 } 1202 690 1203 - register_pm_notifier(&ftrace_suspend_notifier); 691 + fgraph_array[i] = gops; 692 + gops->idx = i; 1204 693 1205 694 ftrace_graph_active++; 1206 - ret = start_graph_tracing(); 1207 - if (ret) { 1208 - ftrace_graph_active--; 1209 - goto out; 695 + 696 + if (ftrace_graph_active == 2) 697 + ftrace_graph_disable_direct(true); 698 + 699 + if (ftrace_graph_active == 1) { 700 + ftrace_graph_enable_direct(false); 701 + register_pm_notifier(&ftrace_suspend_notifier); 702 + ret = start_graph_tracing(); 703 + if (ret) 704 + goto error; 705 + /* 706 + * Some archs just test to see if these are not 707 + * the default function 708 + */ 709 + ftrace_graph_return = return_run; 710 + ftrace_graph_entry = entry_run; 711 + command = FTRACE_START_FUNC_RET; 712 + } else { 713 + init_task_vars(gops->idx); 1210 714 } 1211 715 1212 - ftrace_graph_return = gops->retfunc; 716 + /* Always save the function, and reset at unregistering */ 717 + gops->saved_func = gops->entryfunc; 1213 718 1214 - /* 1215 - * Update the indirect function to the entryfunc, and the 1216 - * function that gets called to the entry_test first. 
Then 1217 - * call the update fgraph entry function to determine if 1218 - * the entryfunc should be called directly or not. 1219 - */ 1220 - __ftrace_graph_entry = gops->entryfunc; 1221 - ftrace_graph_entry = ftrace_graph_entry_test; 1222 - update_function_graph_func(); 1223 - 1224 - ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET); 719 + ret = ftrace_startup_subops(&graph_ops, &gops->ops, command); 720 + error: 721 + if (ret) { 722 + fgraph_array[i] = &fgraph_stub; 723 + ftrace_graph_active--; 724 + gops->saved_func = NULL; 725 + fgraph_lru_release_index(i); 726 + } 1225 727 out: 1226 728 mutex_unlock(&ftrace_lock); 1227 729 return ret; ··· 1300 660 1301 661 void unregister_ftrace_graph(struct fgraph_ops *gops) 1302 662 { 663 + int command = 0; 664 + 1303 665 mutex_lock(&ftrace_lock); 1304 666 1305 667 if (unlikely(!ftrace_graph_active)) 1306 668 goto out; 1307 669 1308 - ftrace_graph_active--; 1309 - ftrace_graph_return = ftrace_stub_graph; 1310 - ftrace_graph_entry = ftrace_graph_entry_stub; 1311 - __ftrace_graph_entry = ftrace_graph_entry_stub; 1312 - ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET); 1313 - unregister_pm_notifier(&ftrace_suspend_notifier); 1314 - unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); 670 + if (unlikely(gops->idx < 0 || gops->idx >= FGRAPH_ARRAY_SIZE || 671 + fgraph_array[gops->idx] != gops)) 672 + goto out; 1315 673 674 + if (fgraph_lru_release_index(gops->idx) < 0) 675 + goto out; 676 + 677 + fgraph_array[gops->idx] = &fgraph_stub; 678 + 679 + ftrace_graph_active--; 680 + 681 + if (!ftrace_graph_active) 682 + command = FTRACE_STOP_FUNC_RET; 683 + 684 + ftrace_shutdown_subops(&graph_ops, &gops->ops, command); 685 + 686 + if (ftrace_graph_active == 1) 687 + ftrace_graph_enable_direct(true); 688 + else if (!ftrace_graph_active) 689 + ftrace_graph_disable_direct(false); 690 + 691 + if (!ftrace_graph_active) { 692 + ftrace_graph_return = ftrace_stub_graph; 693 + ftrace_graph_entry = ftrace_graph_entry_stub; 
694 + unregister_pm_notifier(&ftrace_suspend_notifier); 695 + unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); 696 + } 1316 697 out: 698 + gops->saved_func = NULL; 1317 699 mutex_unlock(&ftrace_lock); 1318 700 }
+602 -86
kernel/trace/ftrace.c
··· 74 74 #ifdef CONFIG_DYNAMIC_FTRACE 75 75 #define INIT_OPS_HASH(opsname) \ 76 76 .func_hash = &opsname.local_hash, \ 77 - .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock), 77 + .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock), \ 78 + .subop_list = LIST_HEAD_INIT(opsname.subop_list), 78 79 #else 79 80 #define INIT_OPS_HASH(opsname) 80 81 #endif ··· 100 99 /* What to set function_trace_op to */ 101 100 static struct ftrace_ops *set_function_trace_op; 102 101 103 - static bool ftrace_pids_enabled(struct ftrace_ops *ops) 102 + bool ftrace_pids_enabled(struct ftrace_ops *ops) 104 103 { 105 104 struct trace_array *tr; 106 105 ··· 122 121 123 122 DEFINE_MUTEX(ftrace_lock); 124 123 125 - struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end; 124 + struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = (struct ftrace_ops __rcu *)&ftrace_list_end; 126 125 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; 127 126 struct ftrace_ops global_ops; 128 127 ··· 162 161 #ifdef CONFIG_DYNAMIC_FTRACE 163 162 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) { 164 163 mutex_init(&ops->local_hash.regex_lock); 164 + INIT_LIST_HEAD(&ops->subop_list); 165 165 ops->func_hash = &ops->local_hash; 166 166 ops->flags |= FTRACE_OPS_FL_INITIALIZED; 167 167 } 168 168 #endif 169 169 } 170 170 171 + /* Call this function for when a callback filters on set_ftrace_pid */ 171 172 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip, 172 173 struct ftrace_ops *op, struct ftrace_regs *fregs) 173 174 { ··· 237 234 set_function_trace_op = &ftrace_list_end; 238 235 func = ftrace_ops_list_func; 239 236 } 240 - 241 - update_function_graph_func(); 242 237 243 238 /* If there's no change, then do nothing more here */ 244 239 if (ftrace_trace_function == func) ··· 311 310 lockdep_is_held(&ftrace_lock)) == ops && 312 311 rcu_dereference_protected(ops->next, 313 312 lockdep_is_held(&ftrace_lock)) == 
&ftrace_list_end) { 314 - *list = &ftrace_list_end; 313 + rcu_assign_pointer(*list, &ftrace_list_end); 315 314 return 0; 316 315 } 317 316 ··· 406 405 ftrace_update_trampoline(op); 407 406 } 408 407 } while_for_each_ftrace_op(op); 408 + 409 + fgraph_update_pid_func(); 409 410 410 411 update_ftrace_function(); 411 412 } ··· 820 817 fgraph_graph_time = enable; 821 818 } 822 819 823 - static int profile_graph_entry(struct ftrace_graph_ent *trace) 820 + static int profile_graph_entry(struct ftrace_graph_ent *trace, 821 + struct fgraph_ops *gops) 824 822 { 825 823 struct ftrace_ret_stack *ret_stack; 826 824 ··· 838 834 return 1; 839 835 } 840 836 841 - static void profile_graph_return(struct ftrace_graph_ret *trace) 837 + static void profile_graph_return(struct ftrace_graph_ret *trace, 838 + struct fgraph_ops *gops) 842 839 { 843 840 struct ftrace_ret_stack *ret_stack; 844 841 struct ftrace_profile_stat *stat; ··· 1319 1314 return hash; 1320 1315 } 1321 1316 1322 - 1317 + /* Used to save filters on functions for modules not loaded yet */ 1323 1318 static int ftrace_add_mod(struct trace_array *tr, 1324 1319 const char *func, const char *module, 1325 1320 int enable) ··· 1385 1380 return NULL; 1386 1381 } 1387 1382 1388 - static void 1389 - ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash); 1390 - static void 1391 - ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash); 1383 + static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops); 1384 + static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops); 1392 1385 1393 1386 static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops, 1394 1387 struct ftrace_hash *new_hash); 1395 1388 1396 - static struct ftrace_hash *dup_hash(struct ftrace_hash *src, int size) 1389 + /* 1390 + * Allocate a new hash and remove entries from @src and move them to the new hash. 1391 + * On success, the @src hash will be empty and should be freed. 
1392 + */ 1393 + static struct ftrace_hash *__move_hash(struct ftrace_hash *src, int size) 1397 1394 { 1398 1395 struct ftrace_func_entry *entry; 1399 1396 struct ftrace_hash *new_hash; ··· 1431 1424 return new_hash; 1432 1425 } 1433 1426 1427 + /* Move the @src entries to a newly allocated hash */ 1434 1428 static struct ftrace_hash * 1435 1429 __ftrace_hash_move(struct ftrace_hash *src) 1436 1430 { ··· 1443 1435 if (ftrace_hash_empty(src)) 1444 1436 return EMPTY_HASH; 1445 1437 1446 - return dup_hash(src, size); 1438 + return __move_hash(src, size); 1447 1439 } 1448 1440 1441 + /** 1442 + * ftrace_hash_move - move a new hash to a filter and do updates 1443 + * @ops: The ops with the hash that @dst points to 1444 + * @enable: True if for the filter hash, false for the notrace hash 1445 + * @dst: Points to the @ops hash that should be updated 1446 + * @src: The hash to update @dst with 1447 + * 1448 + * This is called when an ftrace_ops hash is being updated and the 1449 + * the kernel needs to reflect this. Note, this only updates the kernel 1450 + * function callbacks if the @ops is enabled (not to be confused with 1451 + * @enable above). If the @ops is enabled, its hash determines what 1452 + * callbacks get called. This function gets called when the @ops hash 1453 + * is updated and it requires new callbacks. 1454 + * 1455 + * On success the elements of @src is moved to @dst, and @dst is updated 1456 + * properly, as well as the functions determined by the @ops hashes 1457 + * are now calling the @ops callback function. 1458 + * 1459 + * Regardless of return type, @src should be freed with free_ftrace_hash(). 1460 + */ 1449 1461 static int 1450 1462 ftrace_hash_move(struct ftrace_ops *ops, int enable, 1451 1463 struct ftrace_hash **dst, struct ftrace_hash *src) ··· 1495 1467 * Remove the current set, update the hash and add 1496 1468 * them back. 
1497 1469 */ 1498 - ftrace_hash_rec_disable_modify(ops, enable); 1470 + ftrace_hash_rec_disable_modify(ops); 1499 1471 1500 1472 rcu_assign_pointer(*dst, new_hash); 1501 1473 1502 - ftrace_hash_rec_enable_modify(ops, enable); 1474 + ftrace_hash_rec_enable_modify(ops); 1503 1475 1504 1476 return 0; 1505 1477 } ··· 1722 1694 !(rec->flags & FTRACE_FL_ENABLED); 1723 1695 } 1724 1696 1697 + /* 1698 + * This is the main engine to the ftrace updates to the dyn_ftrace records. 1699 + * 1700 + * It will iterate through all the available ftrace functions 1701 + * (the ones that ftrace can have callbacks to) and set the flags 1702 + * in the associated dyn_ftrace records. 1703 + * 1704 + * @inc: If true, the functions associated to @ops are added to 1705 + * the dyn_ftrace records, otherwise they are removed. 1706 + */ 1725 1707 static bool __ftrace_hash_rec_update(struct ftrace_ops *ops, 1726 - int filter_hash, 1727 1708 bool inc) 1728 1709 { 1729 1710 struct ftrace_hash *hash; 1730 - struct ftrace_hash *other_hash; 1711 + struct ftrace_hash *notrace_hash; 1731 1712 struct ftrace_page *pg; 1732 1713 struct dyn_ftrace *rec; 1733 1714 bool update = false; ··· 1748 1711 return false; 1749 1712 1750 1713 /* 1751 - * In the filter_hash case: 1752 1714 * If the count is zero, we update all records. 1753 1715 * Otherwise we just update the items in the hash. 1754 - * 1755 - * In the notrace_hash case: 1756 - * We enable the update in the hash. 1757 - * As disabling notrace means enabling the tracing, 1758 - * and enabling notrace means disabling, the inc variable 1759 - * gets inversed. 
1760 1716 */ 1761 - if (filter_hash) { 1762 - hash = ops->func_hash->filter_hash; 1763 - other_hash = ops->func_hash->notrace_hash; 1764 - if (ftrace_hash_empty(hash)) 1765 - all = true; 1766 - } else { 1767 - inc = !inc; 1768 - hash = ops->func_hash->notrace_hash; 1769 - other_hash = ops->func_hash->filter_hash; 1770 - /* 1771 - * If the notrace hash has no items, 1772 - * then there's nothing to do. 1773 - */ 1774 - if (ftrace_hash_empty(hash)) 1775 - return false; 1776 - } 1717 + hash = ops->func_hash->filter_hash; 1718 + notrace_hash = ops->func_hash->notrace_hash; 1719 + if (ftrace_hash_empty(hash)) 1720 + all = true; 1777 1721 1778 1722 do_for_each_ftrace_rec(pg, rec) { 1779 - int in_other_hash = 0; 1723 + int in_notrace_hash = 0; 1780 1724 int in_hash = 0; 1781 1725 int match = 0; 1782 1726 ··· 1769 1751 * Only the filter_hash affects all records. 1770 1752 * Update if the record is not in the notrace hash. 1771 1753 */ 1772 - if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip)) 1754 + if (!notrace_hash || !ftrace_lookup_ip(notrace_hash, rec->ip)) 1773 1755 match = 1; 1774 1756 } else { 1775 1757 in_hash = !!ftrace_lookup_ip(hash, rec->ip); 1776 - in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip); 1758 + in_notrace_hash = !!ftrace_lookup_ip(notrace_hash, rec->ip); 1777 1759 1778 1760 /* 1779 - * If filter_hash is set, we want to match all functions 1780 - * that are in the hash but not in the other hash. 1781 - * 1782 - * If filter_hash is not set, then we are decrementing. 1783 - * That means we match anything that is in the hash 1784 - * and also in the other_hash. That is, we need to turn 1785 - * off functions in the other hash because they are disabled 1786 - * by this hash. 1761 + * We want to match all functions that are in the hash but 1762 + * not in the other hash. 
1787 1763 */ 1788 - if (filter_hash && in_hash && !in_other_hash) 1789 - match = 1; 1790 - else if (!filter_hash && in_hash && 1791 - (in_other_hash || ftrace_hash_empty(other_hash))) 1764 + if (in_hash && !in_notrace_hash) 1792 1765 match = 1; 1793 1766 } 1794 1767 if (!match) ··· 1885 1876 return update; 1886 1877 } 1887 1878 1888 - static bool ftrace_hash_rec_disable(struct ftrace_ops *ops, 1889 - int filter_hash) 1879 + /* 1880 + * This is called when an ops is removed from tracing. It will decrement 1881 + * the counters of the dyn_ftrace records for all the functions that 1882 + * the @ops attached to. 1883 + */ 1884 + static bool ftrace_hash_rec_disable(struct ftrace_ops *ops) 1890 1885 { 1891 - return __ftrace_hash_rec_update(ops, filter_hash, 0); 1886 + return __ftrace_hash_rec_update(ops, false); 1892 1887 } 1893 1888 1894 - static bool ftrace_hash_rec_enable(struct ftrace_ops *ops, 1895 - int filter_hash) 1889 + /* 1890 + * This is called when an ops is added to tracing. It will increment 1891 + * the counters of the dyn_ftrace records for all the functions that 1892 + * the @ops attached to. 1893 + */ 1894 + static bool ftrace_hash_rec_enable(struct ftrace_ops *ops) 1896 1895 { 1897 - return __ftrace_hash_rec_update(ops, filter_hash, 1); 1896 + return __ftrace_hash_rec_update(ops, true); 1898 1897 } 1899 1898 1900 - static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops, 1901 - int filter_hash, int inc) 1899 + /* 1900 + * This function will update what functions @ops traces when its filter 1901 + * changes. 1902 + * 1903 + * The @inc states if the @ops callbacks are going to be added or removed. 1904 + * When one of the @ops hashes are updated to a "new_hash" the dyn_ftrace 1905 + * records are update via: 1906 + * 1907 + * ftrace_hash_rec_disable_modify(ops); 1908 + * ops->hash = new_hash 1909 + * ftrace_hash_rec_enable_modify(ops); 1910 + * 1911 + * Where the @ops is removed from all the records it is tracing using 1912 + * its old hash. 
The @ops hash is updated to the new hash, and then 1913 + * the @ops is added back to the records so that it is tracing all 1914 + * the new functions. 1915 + */ 1916 + static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops, bool inc) 1902 1917 { 1903 1918 struct ftrace_ops *op; 1904 1919 1905 - __ftrace_hash_rec_update(ops, filter_hash, inc); 1920 + __ftrace_hash_rec_update(ops, inc); 1906 1921 1907 1922 if (ops->func_hash != &global_ops.local_hash) 1908 1923 return; ··· 1940 1907 if (op == ops) 1941 1908 continue; 1942 1909 if (op->func_hash == &global_ops.local_hash) 1943 - __ftrace_hash_rec_update(op, filter_hash, inc); 1910 + __ftrace_hash_rec_update(op, inc); 1944 1911 } while_for_each_ftrace_op(op); 1945 1912 } 1946 1913 1947 - static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, 1948 - int filter_hash) 1914 + static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops) 1949 1915 { 1950 - ftrace_hash_rec_update_modify(ops, filter_hash, 0); 1916 + ftrace_hash_rec_update_modify(ops, false); 1951 1917 } 1952 1918 1953 - static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, 1954 - int filter_hash) 1919 + static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops) 1955 1920 { 1956 - ftrace_hash_rec_update_modify(ops, filter_hash, 1); 1921 + ftrace_hash_rec_update_modify(ops, true); 1957 1922 } 1958 1923 1959 1924 /* ··· 3074 3043 return ret; 3075 3044 } 3076 3045 3077 - if (ftrace_hash_rec_enable(ops, 1)) 3046 + if (ftrace_hash_rec_enable(ops)) 3078 3047 command |= FTRACE_UPDATE_CALLS; 3079 3048 3080 3049 ftrace_startup_enable(command); ··· 3116 3085 /* Disabling ipmodify never fails */ 3117 3086 ftrace_hash_ipmodify_disable(ops); 3118 3087 3119 - if (ftrace_hash_rec_disable(ops, 1)) 3088 + if (ftrace_hash_rec_disable(ops)) 3120 3089 command |= FTRACE_UPDATE_CALLS; 3121 3090 3122 3091 ops->flags &= ~FTRACE_OPS_FL_ENABLED; ··· 3194 3163 3195 3164 return 0; 3196 3165 } 3166 + 3167 + /* Simply make a copy of @src and 
return it */ 3168 + static struct ftrace_hash *copy_hash(struct ftrace_hash *src) 3169 + { 3170 + if (ftrace_hash_empty(src)) 3171 + return EMPTY_HASH; 3172 + 3173 + return alloc_and_copy_ftrace_hash(src->size_bits, src); 3174 + } 3175 + 3176 + /* 3177 + * Append @new_hash entries to @hash: 3178 + * 3179 + * If @hash is the EMPTY_HASH then it traces all functions and nothing 3180 + * needs to be done. 3181 + * 3182 + * If @new_hash is the EMPTY_HASH, then make *hash the EMPTY_HASH so 3183 + * that it traces everything. 3184 + * 3185 + * Otherwise, go through all of @new_hash and add anything that @hash 3186 + * doesn't already have, to @hash. 3187 + * 3188 + * The filter_hash updates uses just the append_hash() function 3189 + * and the notrace_hash does not. 3190 + */ 3191 + static int append_hash(struct ftrace_hash **hash, struct ftrace_hash *new_hash) 3192 + { 3193 + struct ftrace_func_entry *entry; 3194 + int size; 3195 + int i; 3196 + 3197 + /* An empty hash does everything */ 3198 + if (ftrace_hash_empty(*hash)) 3199 + return 0; 3200 + 3201 + /* If new_hash has everything make hash have everything */ 3202 + if (ftrace_hash_empty(new_hash)) { 3203 + free_ftrace_hash(*hash); 3204 + *hash = EMPTY_HASH; 3205 + return 0; 3206 + } 3207 + 3208 + size = 1 << new_hash->size_bits; 3209 + for (i = 0; i < size; i++) { 3210 + hlist_for_each_entry(entry, &new_hash->buckets[i], hlist) { 3211 + /* Only add if not already in hash */ 3212 + if (!__ftrace_lookup_ip(*hash, entry->ip) && 3213 + add_hash_entry(*hash, entry->ip) == NULL) 3214 + return -ENOMEM; 3215 + } 3216 + } 3217 + return 0; 3218 + } 3219 + 3220 + /* 3221 + * Add to @hash only those that are in both @new_hash1 and @new_hash2 3222 + * 3223 + * The notrace_hash updates uses just the intersect_hash() function 3224 + * and the filter_hash does not. 
3225 + */ 3226 + static int intersect_hash(struct ftrace_hash **hash, struct ftrace_hash *new_hash1, 3227 + struct ftrace_hash *new_hash2) 3228 + { 3229 + struct ftrace_func_entry *entry; 3230 + int size; 3231 + int i; 3232 + 3233 + /* 3234 + * If new_hash1 or new_hash2 is the EMPTY_HASH then make the hash 3235 + * empty as well as empty for notrace means none are notraced. 3236 + */ 3237 + if (ftrace_hash_empty(new_hash1) || ftrace_hash_empty(new_hash2)) { 3238 + free_ftrace_hash(*hash); 3239 + *hash = EMPTY_HASH; 3240 + return 0; 3241 + } 3242 + 3243 + size = 1 << new_hash1->size_bits; 3244 + for (i = 0; i < size; i++) { 3245 + hlist_for_each_entry(entry, &new_hash1->buckets[i], hlist) { 3246 + /* Only add if in both @new_hash1 and @new_hash2 */ 3247 + if (__ftrace_lookup_ip(new_hash2, entry->ip) && 3248 + add_hash_entry(*hash, entry->ip) == NULL) 3249 + return -ENOMEM; 3250 + } 3251 + } 3252 + /* If nothing intersects, make it the empty set */ 3253 + if (ftrace_hash_empty(*hash)) { 3254 + free_ftrace_hash(*hash); 3255 + *hash = EMPTY_HASH; 3256 + } 3257 + return 0; 3258 + } 3259 + 3260 + /* Return a new hash that has a union of all @ops->filter_hash entries */ 3261 + static struct ftrace_hash *append_hashes(struct ftrace_ops *ops) 3262 + { 3263 + struct ftrace_hash *new_hash; 3264 + struct ftrace_ops *subops; 3265 + int ret; 3266 + 3267 + new_hash = alloc_ftrace_hash(ops->func_hash->filter_hash->size_bits); 3268 + if (!new_hash) 3269 + return NULL; 3270 + 3271 + list_for_each_entry(subops, &ops->subop_list, list) { 3272 + ret = append_hash(&new_hash, subops->func_hash->filter_hash); 3273 + if (ret < 0) { 3274 + free_ftrace_hash(new_hash); 3275 + return NULL; 3276 + } 3277 + /* Nothing more to do if new_hash is empty */ 3278 + if (ftrace_hash_empty(new_hash)) 3279 + break; 3280 + } 3281 + return new_hash; 3282 + } 3283 + 3284 + /* Make @ops trace evenything except what all its subops do not trace */ 3285 + static struct ftrace_hash *intersect_hashes(struct 
ftrace_ops *ops) 3286 + { 3287 + struct ftrace_hash *new_hash = NULL; 3288 + struct ftrace_ops *subops; 3289 + int size_bits; 3290 + int ret; 3291 + 3292 + list_for_each_entry(subops, &ops->subop_list, list) { 3293 + struct ftrace_hash *next_hash; 3294 + 3295 + if (!new_hash) { 3296 + size_bits = subops->func_hash->notrace_hash->size_bits; 3297 + new_hash = alloc_and_copy_ftrace_hash(size_bits, ops->func_hash->notrace_hash); 3298 + if (!new_hash) 3299 + return NULL; 3300 + continue; 3301 + } 3302 + size_bits = new_hash->size_bits; 3303 + next_hash = new_hash; 3304 + new_hash = alloc_ftrace_hash(size_bits); 3305 + ret = intersect_hash(&new_hash, next_hash, subops->func_hash->notrace_hash); 3306 + free_ftrace_hash(next_hash); 3307 + if (ret < 0) { 3308 + free_ftrace_hash(new_hash); 3309 + return NULL; 3310 + } 3311 + /* Nothing more to do if new_hash is empty */ 3312 + if (ftrace_hash_empty(new_hash)) 3313 + break; 3314 + } 3315 + return new_hash; 3316 + } 3317 + 3318 + static bool ops_equal(struct ftrace_hash *A, struct ftrace_hash *B) 3319 + { 3320 + struct ftrace_func_entry *entry; 3321 + int size; 3322 + int i; 3323 + 3324 + if (ftrace_hash_empty(A)) 3325 + return ftrace_hash_empty(B); 3326 + 3327 + if (ftrace_hash_empty(B)) 3328 + return ftrace_hash_empty(A); 3329 + 3330 + if (A->count != B->count) 3331 + return false; 3332 + 3333 + size = 1 << A->size_bits; 3334 + for (i = 0; i < size; i++) { 3335 + hlist_for_each_entry(entry, &A->buckets[i], hlist) { 3336 + if (!__ftrace_lookup_ip(B, entry->ip)) 3337 + return false; 3338 + } 3339 + } 3340 + 3341 + return true; 3342 + } 3343 + 3344 + static void ftrace_ops_update_code(struct ftrace_ops *ops, 3345 + struct ftrace_ops_hash *old_hash); 3346 + 3347 + static int __ftrace_hash_move_and_update_ops(struct ftrace_ops *ops, 3348 + struct ftrace_hash **orig_hash, 3349 + struct ftrace_hash *hash, 3350 + int enable) 3351 + { 3352 + struct ftrace_ops_hash old_hash_ops; 3353 + struct ftrace_hash *old_hash; 3354 + int ret; 
3355 + 3356 + old_hash = *orig_hash; 3357 + old_hash_ops.filter_hash = ops->func_hash->filter_hash; 3358 + old_hash_ops.notrace_hash = ops->func_hash->notrace_hash; 3359 + ret = ftrace_hash_move(ops, enable, orig_hash, hash); 3360 + if (!ret) { 3361 + ftrace_ops_update_code(ops, &old_hash_ops); 3362 + free_ftrace_hash_rcu(old_hash); 3363 + } 3364 + return ret; 3365 + } 3366 + 3367 + static int ftrace_update_ops(struct ftrace_ops *ops, struct ftrace_hash *filter_hash, 3368 + struct ftrace_hash *notrace_hash) 3369 + { 3370 + int ret; 3371 + 3372 + if (!ops_equal(filter_hash, ops->func_hash->filter_hash)) { 3373 + ret = __ftrace_hash_move_and_update_ops(ops, &ops->func_hash->filter_hash, 3374 + filter_hash, 1); 3375 + if (ret < 0) 3376 + return ret; 3377 + } 3378 + 3379 + if (!ops_equal(notrace_hash, ops->func_hash->notrace_hash)) { 3380 + ret = __ftrace_hash_move_and_update_ops(ops, &ops->func_hash->notrace_hash, 3381 + notrace_hash, 0); 3382 + if (ret < 0) 3383 + return ret; 3384 + } 3385 + 3386 + return 0; 3387 + } 3388 + 3389 + /** 3390 + * ftrace_startup_subops - enable tracing for subops of an ops 3391 + * @ops: Manager ops (used to pick all the functions of its subops) 3392 + * @subops: A new ops to add to @ops 3393 + * @command: Extra commands to use to enable tracing 3394 + * 3395 + * The @ops is a manager @ops that has the filter that includes all the functions 3396 + * that its list of subops are tracing. Adding a new @subops will add the 3397 + * functions of @subops to @ops. 
3398 + */ 3399 + int ftrace_startup_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int command) 3400 + { 3401 + struct ftrace_hash *filter_hash; 3402 + struct ftrace_hash *notrace_hash; 3403 + struct ftrace_hash *save_filter_hash; 3404 + struct ftrace_hash *save_notrace_hash; 3405 + int size_bits; 3406 + int ret; 3407 + 3408 + if (unlikely(ftrace_disabled)) 3409 + return -ENODEV; 3410 + 3411 + ftrace_ops_init(ops); 3412 + ftrace_ops_init(subops); 3413 + 3414 + if (WARN_ON_ONCE(subops->flags & FTRACE_OPS_FL_ENABLED)) 3415 + return -EBUSY; 3416 + 3417 + /* Make everything canonical (Just in case!) */ 3418 + if (!ops->func_hash->filter_hash) 3419 + ops->func_hash->filter_hash = EMPTY_HASH; 3420 + if (!ops->func_hash->notrace_hash) 3421 + ops->func_hash->notrace_hash = EMPTY_HASH; 3422 + if (!subops->func_hash->filter_hash) 3423 + subops->func_hash->filter_hash = EMPTY_HASH; 3424 + if (!subops->func_hash->notrace_hash) 3425 + subops->func_hash->notrace_hash = EMPTY_HASH; 3426 + 3427 + /* For the first subops to ops just enable it normally */ 3428 + if (list_empty(&ops->subop_list)) { 3429 + /* Just use the subops hashes */ 3430 + filter_hash = copy_hash(subops->func_hash->filter_hash); 3431 + notrace_hash = copy_hash(subops->func_hash->notrace_hash); 3432 + if (!filter_hash || !notrace_hash) { 3433 + free_ftrace_hash(filter_hash); 3434 + free_ftrace_hash(notrace_hash); 3435 + return -ENOMEM; 3436 + } 3437 + 3438 + save_filter_hash = ops->func_hash->filter_hash; 3439 + save_notrace_hash = ops->func_hash->notrace_hash; 3440 + 3441 + ops->func_hash->filter_hash = filter_hash; 3442 + ops->func_hash->notrace_hash = notrace_hash; 3443 + list_add(&subops->list, &ops->subop_list); 3444 + ret = ftrace_startup(ops, command); 3445 + if (ret < 0) { 3446 + list_del(&subops->list); 3447 + ops->func_hash->filter_hash = save_filter_hash; 3448 + ops->func_hash->notrace_hash = save_notrace_hash; 3449 + free_ftrace_hash(filter_hash); 3450 + free_ftrace_hash(notrace_hash); 3451 
+ } else { 3452 + free_ftrace_hash(save_filter_hash); 3453 + free_ftrace_hash(save_notrace_hash); 3454 + subops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP; 3455 + subops->managed = ops; 3456 + } 3457 + return ret; 3458 + } 3459 + 3460 + /* 3461 + * Here there's already something attached. Here are the rules: 3462 + * o If either filter_hash is empty then the final stays empty 3463 + * o Otherwise, the final is a superset of both hashes 3464 + * o If either notrace_hash is empty then the final stays empty 3465 + * o Otherwise, the final is an intersection between the hashes 3466 + */ 3467 + if (ftrace_hash_empty(ops->func_hash->filter_hash) || 3468 + ftrace_hash_empty(subops->func_hash->filter_hash)) { 3469 + filter_hash = EMPTY_HASH; 3470 + } else { 3471 + size_bits = max(ops->func_hash->filter_hash->size_bits, 3472 + subops->func_hash->filter_hash->size_bits); 3473 + filter_hash = alloc_and_copy_ftrace_hash(size_bits, ops->func_hash->filter_hash); 3474 + if (!filter_hash) 3475 + return -ENOMEM; 3476 + ret = append_hash(&filter_hash, subops->func_hash->filter_hash); 3477 + if (ret < 0) { 3478 + free_ftrace_hash(filter_hash); 3479 + return ret; 3480 + } 3481 + } 3482 + 3483 + if (ftrace_hash_empty(ops->func_hash->notrace_hash) || 3484 + ftrace_hash_empty(subops->func_hash->notrace_hash)) { 3485 + notrace_hash = EMPTY_HASH; 3486 + } else { 3487 + size_bits = max(ops->func_hash->filter_hash->size_bits, 3488 + subops->func_hash->filter_hash->size_bits); 3489 + notrace_hash = alloc_ftrace_hash(size_bits); 3490 + if (!notrace_hash) { 3491 + free_ftrace_hash(filter_hash); 3492 + return -ENOMEM; 3493 + } 3494 + 3495 + ret = intersect_hash(&notrace_hash, ops->func_hash->filter_hash, 3496 + subops->func_hash->filter_hash); 3497 + if (ret < 0) { 3498 + free_ftrace_hash(filter_hash); 3499 + free_ftrace_hash(notrace_hash); 3500 + return ret; 3501 + } 3502 + } 3503 + 3504 + list_add(&subops->list, &ops->subop_list); 3505 + 3506 + ret = ftrace_update_ops(ops, 
filter_hash, notrace_hash); 3507 + free_ftrace_hash(filter_hash); 3508 + free_ftrace_hash(notrace_hash); 3509 + if (ret < 0) { 3510 + list_del(&subops->list); 3511 + } else { 3512 + subops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP; 3513 + subops->managed = ops; 3514 + } 3515 + return ret; 3516 + } 3517 + 3518 + /** 3519 + * ftrace_shutdown_subops - Remove a subops from a manager ops 3520 + * @ops: A manager ops to remove @subops from 3521 + * @subops: The subops to remove from @ops 3522 + * @command: Any extra command flags to add to modifying the text 3523 + * 3524 + * Removes the functions being traced by the @subops from @ops. Note, it 3525 + * will not affect functions that are being traced by other subops that 3526 + * still exist in @ops. 3527 + * 3528 + * If the last subops is removed from @ops, then @ops is shutdown normally. 3529 + */ 3530 + int ftrace_shutdown_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int command) 3531 + { 3532 + struct ftrace_hash *filter_hash; 3533 + struct ftrace_hash *notrace_hash; 3534 + int ret; 3535 + 3536 + if (unlikely(ftrace_disabled)) 3537 + return -ENODEV; 3538 + 3539 + if (WARN_ON_ONCE(!(subops->flags & FTRACE_OPS_FL_ENABLED))) 3540 + return -EINVAL; 3541 + 3542 + list_del(&subops->list); 3543 + 3544 + if (list_empty(&ops->subop_list)) { 3545 + /* Last one, just disable the current ops */ 3546 + 3547 + ret = ftrace_shutdown(ops, command); 3548 + if (ret < 0) { 3549 + list_add(&subops->list, &ops->subop_list); 3550 + return ret; 3551 + } 3552 + 3553 + subops->flags &= ~FTRACE_OPS_FL_ENABLED; 3554 + 3555 + free_ftrace_hash(ops->func_hash->filter_hash); 3556 + free_ftrace_hash(ops->func_hash->notrace_hash); 3557 + ops->func_hash->filter_hash = EMPTY_HASH; 3558 + ops->func_hash->notrace_hash = EMPTY_HASH; 3559 + subops->flags &= ~(FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP); 3560 + subops->managed = NULL; 3561 + 3562 + return 0; 3563 + } 3564 + 3565 + /* Rebuild the hashes without subops */ 3566 + 
filter_hash = append_hashes(ops); 3567 + notrace_hash = intersect_hashes(ops); 3568 + if (!filter_hash || !notrace_hash) { 3569 + free_ftrace_hash(filter_hash); 3570 + free_ftrace_hash(notrace_hash); 3571 + list_add(&subops->list, &ops->subop_list); 3572 + return -ENOMEM; 3573 + } 3574 + 3575 + ret = ftrace_update_ops(ops, filter_hash, notrace_hash); 3576 + if (ret < 0) { 3577 + list_add(&subops->list, &ops->subop_list); 3578 + } else { 3579 + subops->flags &= ~(FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_SUBOP); 3580 + subops->managed = NULL; 3581 + } 3582 + free_ftrace_hash(filter_hash); 3583 + free_ftrace_hash(notrace_hash); 3584 + return ret; 3585 + } 3586 + 3587 + static int ftrace_hash_move_and_update_subops(struct ftrace_ops *subops, 3588 + struct ftrace_hash **orig_subhash, 3589 + struct ftrace_hash *hash, 3590 + int enable) 3591 + { 3592 + struct ftrace_ops *ops = subops->managed; 3593 + struct ftrace_hash **orig_hash; 3594 + struct ftrace_hash *save_hash; 3595 + struct ftrace_hash *new_hash; 3596 + int ret; 3597 + 3598 + /* Manager ops can not be subops (yet) */ 3599 + if (WARN_ON_ONCE(!ops || ops->flags & FTRACE_OPS_FL_SUBOP)) 3600 + return -EINVAL; 3601 + 3602 + /* Move the new hash over to the subops hash */ 3603 + save_hash = *orig_subhash; 3604 + *orig_subhash = __ftrace_hash_move(hash); 3605 + if (!*orig_subhash) { 3606 + *orig_subhash = save_hash; 3607 + return -ENOMEM; 3608 + } 3609 + 3610 + /* Create a new_hash to hold the ops new functions */ 3611 + if (enable) { 3612 + orig_hash = &ops->func_hash->filter_hash; 3613 + new_hash = append_hashes(ops); 3614 + } else { 3615 + orig_hash = &ops->func_hash->notrace_hash; 3616 + new_hash = intersect_hashes(ops); 3617 + } 3618 + 3619 + /* Move the hash over to the new hash */ 3620 + ret = __ftrace_hash_move_and_update_ops(ops, orig_hash, new_hash, enable); 3621 + 3622 + free_ftrace_hash(new_hash); 3623 + 3624 + if (ret) { 3625 + /* Put back the original hash */ 3626 + free_ftrace_hash_rcu(*orig_subhash); 3627 + 
*orig_subhash = save_hash; 3628 + } else { 3629 + free_ftrace_hash_rcu(save_hash); 3630 + } 3631 + return ret; 3632 + } 3633 + 3197 3634 3198 3635 static u64 ftrace_update_time; 3199 3636 unsigned long ftrace_update_tot_cnt; ··· 4879 4380 struct ftrace_hash *hash, 4880 4381 int enable) 4881 4382 { 4882 - struct ftrace_ops_hash old_hash_ops; 4883 - struct ftrace_hash *old_hash; 4884 - int ret; 4383 + if (ops->flags & FTRACE_OPS_FL_SUBOP) 4384 + return ftrace_hash_move_and_update_subops(ops, orig_hash, hash, enable); 4885 4385 4886 - old_hash = *orig_hash; 4887 - old_hash_ops.filter_hash = ops->func_hash->filter_hash; 4888 - old_hash_ops.notrace_hash = ops->func_hash->notrace_hash; 4889 - ret = ftrace_hash_move(ops, enable, orig_hash, hash); 4890 - if (!ret) { 4891 - ftrace_ops_update_code(ops, &old_hash_ops); 4892 - free_ftrace_hash_rcu(old_hash); 4386 + /* 4387 + * If this ops is not enabled, it could be sharing its filters 4388 + * with a subop. If that's the case, update the subop instead of 4389 + * this ops. Shared filters are only allowed to have one ops set 4390 + * at a time, and if we update the ops that is not enabled, 4391 + * it will not affect subops that share it. 
4392 + */ 4393 + if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) { 4394 + struct ftrace_ops *op; 4395 + 4396 + /* Check if any other manager subops maps to this hash */ 4397 + do_for_each_ftrace_op(op, ftrace_ops_list) { 4398 + struct ftrace_ops *subops; 4399 + 4400 + list_for_each_entry(subops, &op->subop_list, list) { 4401 + if ((subops->flags & FTRACE_OPS_FL_ENABLED) && 4402 + subops->func_hash == ops->func_hash) { 4403 + return ftrace_hash_move_and_update_subops(subops, orig_hash, hash, enable); 4404 + } 4405 + } 4406 + } while_for_each_ftrace_op(op); 4893 4407 } 4894 - return ret; 4408 + 4409 + return __ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable); 4895 4410 } 4896 4411 4897 4412 static bool module_exists(const char *module) ··· 5988 5475 * unregister_ftrace_direct - Remove calls to custom trampoline 5989 5476 * previously registered by register_ftrace_direct for @ops object. 5990 5477 * @ops: The address of the struct ftrace_ops object 5478 + * @addr: The address of the direct function that is called by the @ops functions 5479 + * @free_filters: Set to true to remove all filters for the ftrace_ops, false otherwise 5991 5480 * 5992 5481 * This is used to remove a direct calls to @addr from the nop locations 5993 5482 * of the functions registered in @ops (with by ftrace_set_filter_ip ··· 7839 7324 tr->ops = &global_ops; 7840 7325 tr->ops->private = tr; 7841 7326 ftrace_init_trace_array(tr); 7327 + init_array_fgraph_ops(tr, tr->ops); 7842 7328 } 7843 7329 7844 7330 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
+16 -2
kernel/trace/ftrace_internal.h
··· 15 15 int ftrace_startup(struct ftrace_ops *ops, int command); 16 16 int ftrace_shutdown(struct ftrace_ops *ops, int command); 17 17 int ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs); 18 + int ftrace_startup_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int command); 19 + int ftrace_shutdown_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int command); 18 20 19 21 #else /* !CONFIG_DYNAMIC_FTRACE */ 20 22 ··· 40 38 { 41 39 return 1; 42 40 } 41 + static inline int ftrace_startup_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int command) 42 + { 43 + return -EINVAL; 44 + } 45 + static inline int ftrace_shutdown_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int command) 46 + { 47 + return -EINVAL; 48 + } 43 49 #endif /* CONFIG_DYNAMIC_FTRACE */ 44 50 45 51 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 46 52 extern int ftrace_graph_active; 47 - void update_function_graph_func(void); 53 + # ifdef CONFIG_DYNAMIC_FTRACE 54 + extern void fgraph_update_pid_func(void); 55 + # else 56 + static inline void fgraph_update_pid_func(void) {} 57 + # endif 48 58 #else /* !CONFIG_FUNCTION_GRAPH_TRACER */ 49 59 # define ftrace_graph_active 0 50 - static inline void update_function_graph_func(void) { } 60 + static inline void fgraph_update_pid_func(void) {} 51 61 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 52 62 53 63 #else /* !CONFIG_FUNCTION_TRACER */
+77 -16
kernel/trace/trace.h
··· 397 397 struct ftrace_ops *ops; 398 398 struct trace_pid_list __rcu *function_pids; 399 399 struct trace_pid_list __rcu *function_no_pids; 400 + #ifdef CONFIG_FUNCTION_GRAPH_TRACER 401 + struct fgraph_ops *gops; 402 + #endif 400 403 #ifdef CONFIG_DYNAMIC_FTRACE 401 404 /* All of these are protected by the ftrace_lock */ 402 405 struct list_head func_probes; ··· 682 679 void trace_default_header(struct seq_file *m); 683 680 void print_trace_header(struct seq_file *m, struct trace_iterator *iter); 684 681 685 - void trace_graph_return(struct ftrace_graph_ret *trace); 686 - int trace_graph_entry(struct ftrace_graph_ent *trace); 687 - void set_graph_array(struct trace_array *tr); 682 + void trace_graph_return(struct ftrace_graph_ret *trace, struct fgraph_ops *gops); 683 + int trace_graph_entry(struct ftrace_graph_ent *trace, struct fgraph_ops *gops); 688 684 689 685 void tracing_start_cmdline_record(void); 690 686 void tracing_stop_cmdline_record(void); ··· 894 892 extern void __trace_graph_return(struct trace_array *tr, 895 893 struct ftrace_graph_ret *trace, 896 894 unsigned int trace_ctx); 895 + extern void init_array_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops); 896 + extern int allocate_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops); 897 + extern void free_fgraph_ops(struct trace_array *tr); 898 + 899 + enum { 900 + TRACE_GRAPH_FL = 1, 901 + 902 + /* 903 + * In the very unlikely case that an interrupt came in 904 + * at a start of graph tracing, and we want to trace 905 + * the function in that interrupt, the depth can be greater 906 + * than zero, because of the preempted start of a previous 907 + * trace. 
In an even more unlikely case, depth could be 2 908 + * if a softirq interrupted the start of graph tracing, 909 + * followed by an interrupt preempting a start of graph 910 + * tracing in the softirq, and depth can even be 3 911 + * if an NMI came in at the start of an interrupt function 912 + * that preempted a softirq start of a function that 913 + * preempted normal context!!!! Luckily, it can't be 914 + * greater than 3, so the next two bits are a mask 915 + * of what the depth is when we set TRACE_GRAPH_FL 916 + */ 917 + 918 + TRACE_GRAPH_DEPTH_START_BIT, 919 + TRACE_GRAPH_DEPTH_END_BIT, 920 + 921 + /* 922 + * To implement set_graph_notrace, if this bit is set, we ignore 923 + * function graph tracing of called functions, until the return 924 + * function is called to clear it. 925 + */ 926 + TRACE_GRAPH_NOTRACE_BIT, 927 + }; 928 + 929 + #define TRACE_GRAPH_NOTRACE (1 << TRACE_GRAPH_NOTRACE_BIT) 930 + 931 + static inline unsigned long ftrace_graph_depth(unsigned long *task_var) 932 + { 933 + return (*task_var >> TRACE_GRAPH_DEPTH_START_BIT) & 3; 934 + } 935 + 936 + static inline void ftrace_graph_set_depth(unsigned long *task_var, int depth) 937 + { 938 + *task_var &= ~(3 << TRACE_GRAPH_DEPTH_START_BIT); 939 + *task_var |= (depth & 3) << TRACE_GRAPH_DEPTH_START_BIT; 940 + } 897 941 898 942 #ifdef CONFIG_DYNAMIC_FTRACE 899 943 extern struct ftrace_hash __rcu *ftrace_graph_hash; 900 944 extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash; 901 945 902 - static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace) 946 + static inline int 947 + ftrace_graph_addr(unsigned long *task_var, struct ftrace_graph_ent *trace) 903 948 { 904 949 unsigned long addr = trace->func; 905 950 int ret = 0; ··· 968 919 } 969 920 970 921 if (ftrace_lookup_ip(hash, addr)) { 971 - 972 922 /* 973 923 * This needs to be cleared on the return functions 974 924 * when the depth is zero. 
975 925 */ 976 - trace_recursion_set(TRACE_GRAPH_BIT); 977 - trace_recursion_set_depth(trace->depth); 926 + *task_var |= TRACE_GRAPH_FL; 927 + ftrace_graph_set_depth(task_var, trace->depth); 978 928 979 929 /* 980 930 * If no irqs are to be traced, but a set_graph_function ··· 992 944 return ret; 993 945 } 994 946 995 - static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace) 947 + static inline void 948 + ftrace_graph_addr_finish(struct fgraph_ops *gops, struct ftrace_graph_ret *trace) 996 949 { 997 - if (trace_recursion_test(TRACE_GRAPH_BIT) && 998 - trace->depth == trace_recursion_depth()) 999 - trace_recursion_clear(TRACE_GRAPH_BIT); 950 + unsigned long *task_var = fgraph_get_task_var(gops); 951 + 952 + if ((*task_var & TRACE_GRAPH_FL) && 953 + trace->depth == ftrace_graph_depth(task_var)) 954 + *task_var &= ~TRACE_GRAPH_FL; 1000 955 } 1001 956 1002 957 static inline int ftrace_graph_notrace_addr(unsigned long addr) ··· 1025 974 return ret; 1026 975 } 1027 976 #else 1028 - static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace) 977 + static inline int ftrace_graph_addr(unsigned long *task_var, struct ftrace_graph_ent *trace) 1029 978 { 1030 979 return 1; 1031 980 } ··· 1034 983 { 1035 984 return 0; 1036 985 } 1037 - static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace) 986 + static inline void ftrace_graph_addr_finish(struct fgraph_ops *gops, struct ftrace_graph_ret *trace) 1038 987 { } 1039 988 #endif /* CONFIG_DYNAMIC_FTRACE */ 1040 989 1041 990 extern unsigned int fgraph_max_depth; 1042 991 1043 - static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace) 992 + static inline bool 993 + ftrace_graph_ignore_func(struct fgraph_ops *gops, struct ftrace_graph_ent *trace) 1044 994 { 995 + unsigned long *task_var = fgraph_get_task_var(gops); 996 + 1045 997 /* trace it when it is-nested-in or is a function enabled. 
*/ 1046 - return !(trace_recursion_test(TRACE_GRAPH_BIT) || 1047 - ftrace_graph_addr(trace)) || 998 + return !((*task_var & TRACE_GRAPH_FL) || 999 + ftrace_graph_addr(task_var, trace)) || 1048 1000 (trace->depth < 0) || 1049 1001 (fgraph_max_depth && trace->depth >= fgraph_max_depth); 1050 1002 } 1003 + 1004 + void fgraph_init_ops(struct ftrace_ops *dst_ops, 1005 + struct ftrace_ops *src_ops); 1051 1006 1052 1007 #else /* CONFIG_FUNCTION_GRAPH_TRACER */ 1053 1008 static inline enum print_line_t ··· 1061 1004 { 1062 1005 return TRACE_TYPE_UNHANDLED; 1063 1006 } 1007 + static inline void free_fgraph_ops(struct trace_array *tr) { } 1008 + /* ftrace_ops may not be defined */ 1009 + #define init_array_fgraph_ops(tr, ops) do { } while (0) 1010 + #define allocate_fgraph_ops(tr, ops) ({ 0; }) 1064 1011 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 1065 1012 1066 1013 extern struct list_head ftrace_pids;
+14 -1
kernel/trace/trace_functions.c
··· 80 80 int ftrace_create_function_files(struct trace_array *tr, 81 81 struct dentry *parent) 82 82 { 83 + int ret; 83 84 /* 84 85 * The top level array uses the "global_ops", and the files are 85 86 * created on boot up. ··· 91 90 if (!tr->ops) 92 91 return -EINVAL; 93 92 93 + ret = allocate_fgraph_ops(tr, tr->ops); 94 + if (ret) { 95 + kfree(tr->ops); 96 + return ret; 97 + } 98 + 94 99 ftrace_create_filter_files(tr->ops, parent); 95 100 96 101 return 0; ··· 106 99 { 107 100 ftrace_destroy_filter_files(tr->ops); 108 101 ftrace_free_ftrace_ops(tr); 102 + free_fgraph_ops(tr); 109 103 } 110 104 111 105 static ftrace_func_t select_trace_function(u32 flags_val) ··· 231 223 long disabled; 232 224 int cpu; 233 225 unsigned int trace_ctx; 226 + int skip = STACK_SKIP; 234 227 235 228 if (unlikely(!tr->function_enabled)) 236 229 return; ··· 248 239 if (likely(disabled == 1)) { 249 240 trace_ctx = tracing_gen_ctx_flags(flags); 250 241 trace_function(tr, ip, parent_ip, trace_ctx); 251 - __trace_stack(tr, trace_ctx, STACK_SKIP); 242 + #ifdef CONFIG_UNWINDER_FRAME_POINTER 243 + if (ftrace_pids_enabled(op)) 244 + skip++; 245 + #endif 246 + __trace_stack(tr, trace_ctx, skip); 252 247 } 253 248 254 249 atomic_dec(&data->disabled);
+60 -36
kernel/trace/trace_functions_graph.c
··· 83 83 .opts = trace_opts 84 84 }; 85 85 86 - static struct trace_array *graph_array; 87 - 88 86 /* 89 87 * DURATION column is being also used to display IRQ signs, 90 88 * following values are used by print_graph_irq and others ··· 127 129 return in_hardirq(); 128 130 } 129 131 130 - int trace_graph_entry(struct ftrace_graph_ent *trace) 132 + int trace_graph_entry(struct ftrace_graph_ent *trace, 133 + struct fgraph_ops *gops) 131 134 { 132 - struct trace_array *tr = graph_array; 135 + unsigned long *task_var = fgraph_get_task_var(gops); 136 + struct trace_array *tr = gops->private; 133 137 struct trace_array_cpu *data; 134 138 unsigned long flags; 135 139 unsigned int trace_ctx; ··· 139 139 int ret; 140 140 int cpu; 141 141 142 - if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) 142 + if (*task_var & TRACE_GRAPH_NOTRACE) 143 143 return 0; 144 144 145 145 /* ··· 150 150 * returning from the function. 151 151 */ 152 152 if (ftrace_graph_notrace_addr(trace->func)) { 153 - trace_recursion_set(TRACE_GRAPH_NOTRACE_BIT); 153 + *task_var |= TRACE_GRAPH_NOTRACE_BIT; 154 154 /* 155 155 * Need to return 1 to have the return called 156 156 * that will clear the NOTRACE bit. 
··· 161 161 if (!ftrace_trace_task(tr)) 162 162 return 0; 163 163 164 - if (ftrace_graph_ignore_func(trace)) 164 + if (ftrace_graph_ignore_func(gops, trace)) 165 165 return 0; 166 166 167 167 if (ftrace_graph_ignore_irqs()) ··· 238 238 trace_buffer_unlock_commit_nostack(buffer, event); 239 239 } 240 240 241 - void trace_graph_return(struct ftrace_graph_ret *trace) 241 + void trace_graph_return(struct ftrace_graph_ret *trace, 242 + struct fgraph_ops *gops) 242 243 { 243 - struct trace_array *tr = graph_array; 244 + unsigned long *task_var = fgraph_get_task_var(gops); 245 + struct trace_array *tr = gops->private; 244 246 struct trace_array_cpu *data; 245 247 unsigned long flags; 246 248 unsigned int trace_ctx; 247 249 long disabled; 248 250 int cpu; 249 251 250 - ftrace_graph_addr_finish(trace); 252 + ftrace_graph_addr_finish(gops, trace); 251 253 252 - if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) { 253 - trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT); 254 + if (*task_var & TRACE_GRAPH_NOTRACE) { 255 + *task_var &= ~TRACE_GRAPH_NOTRACE; 254 256 return; 255 257 } 256 258 ··· 268 266 local_irq_restore(flags); 269 267 } 270 268 271 - void set_graph_array(struct trace_array *tr) 269 + static void trace_graph_thresh_return(struct ftrace_graph_ret *trace, 270 + struct fgraph_ops *gops) 272 271 { 273 - graph_array = tr; 274 - 275 - /* Make graph_array visible before we start tracing */ 276 - 277 - smp_mb(); 278 - } 279 - 280 - static void trace_graph_thresh_return(struct ftrace_graph_ret *trace) 281 - { 282 - ftrace_graph_addr_finish(trace); 272 + ftrace_graph_addr_finish(gops, trace); 283 273 284 274 if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) { 285 275 trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT); ··· 282 288 (trace->rettime - trace->calltime < tracing_thresh)) 283 289 return; 284 290 else 285 - trace_graph_return(trace); 291 + trace_graph_return(trace, gops); 286 292 } 287 - 288 - static struct fgraph_ops funcgraph_thresh_ops = { 289 - .entryfunc = 
&trace_graph_entry, 290 - .retfunc = &trace_graph_thresh_return, 291 - }; 292 293 293 294 static struct fgraph_ops funcgraph_ops = { 294 295 .entryfunc = &trace_graph_entry, 295 296 .retfunc = &trace_graph_return, 296 297 }; 297 298 299 + int allocate_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops) 300 + { 301 + struct fgraph_ops *gops; 302 + 303 + gops = kzalloc(sizeof(*gops), GFP_KERNEL); 304 + if (!gops) 305 + return -ENOMEM; 306 + 307 + gops->entryfunc = &trace_graph_entry; 308 + gops->retfunc = &trace_graph_return; 309 + 310 + tr->gops = gops; 311 + gops->private = tr; 312 + 313 + fgraph_init_ops(&gops->ops, ops); 314 + 315 + return 0; 316 + } 317 + 318 + void free_fgraph_ops(struct trace_array *tr) 319 + { 320 + kfree(tr->gops); 321 + } 322 + 323 + __init void init_array_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops) 324 + { 325 + tr->gops = &funcgraph_ops; 326 + funcgraph_ops.private = tr; 327 + fgraph_init_ops(&tr->gops->ops, ops); 328 + } 329 + 298 330 static int graph_trace_init(struct trace_array *tr) 299 331 { 300 332 int ret; 301 333 302 - set_graph_array(tr); 334 + tr->gops->entryfunc = trace_graph_entry; 335 + 303 336 if (tracing_thresh) 304 - ret = register_ftrace_graph(&funcgraph_thresh_ops); 337 + tr->gops->retfunc = trace_graph_thresh_return; 305 338 else 306 - ret = register_ftrace_graph(&funcgraph_ops); 339 + tr->gops->retfunc = trace_graph_return; 340 + 341 + /* Make gops functions are visible before we start tracing */ 342 + smp_mb(); 343 + 344 + ret = register_ftrace_graph(tr->gops); 307 345 if (ret) 308 346 return ret; 309 347 tracing_start_cmdline_record(); ··· 346 320 static void graph_trace_reset(struct trace_array *tr) 347 321 { 348 322 tracing_stop_cmdline_record(); 349 - if (tracing_thresh) 350 - unregister_ftrace_graph(&funcgraph_thresh_ops); 351 - else 352 - unregister_ftrace_graph(&funcgraph_ops); 323 + unregister_ftrace_graph(tr->gops); 353 324 } 354 325 355 326 static int graph_trace_update_thresh(struct 
trace_array *tr) ··· 1385 1362 .print_header = print_graph_headers, 1386 1363 .flags = &tracer_flags, 1387 1364 .set_flag = func_graph_set_flag, 1365 + .allow_instances = true, 1388 1366 #ifdef CONFIG_FTRACE_SELFTEST 1389 1367 .selftest = trace_selftest_startup_function_graph, 1390 1368 #endif
+6 -4
kernel/trace/trace_irqsoff.c
··· 175 175 return start_irqsoff_tracer(irqsoff_trace, set); 176 176 } 177 177 178 - static int irqsoff_graph_entry(struct ftrace_graph_ent *trace) 178 + static int irqsoff_graph_entry(struct ftrace_graph_ent *trace, 179 + struct fgraph_ops *gops) 179 180 { 180 181 struct trace_array *tr = irqsoff_trace; 181 182 struct trace_array_cpu *data; ··· 184 183 unsigned int trace_ctx; 185 184 int ret; 186 185 187 - if (ftrace_graph_ignore_func(trace)) 186 + if (ftrace_graph_ignore_func(gops, trace)) 188 187 return 0; 189 188 /* 190 189 * Do not trace a function if it's filtered by set_graph_notrace. ··· 206 205 return ret; 207 206 } 208 207 209 - static void irqsoff_graph_return(struct ftrace_graph_ret *trace) 208 + static void irqsoff_graph_return(struct ftrace_graph_ret *trace, 209 + struct fgraph_ops *gops) 210 210 { 211 211 struct trace_array *tr = irqsoff_trace; 212 212 struct trace_array_cpu *data; 213 213 unsigned long flags; 214 214 unsigned int trace_ctx; 215 215 216 - ftrace_graph_addr_finish(trace); 216 + ftrace_graph_addr_finish(gops, trace); 217 217 218 218 if (!func_prolog_dec(tr, &data, &flags)) 219 219 return;
+6 -4
kernel/trace/trace_sched_wakeup.c
··· 112 112 return start_func_tracer(tr, set); 113 113 } 114 114 115 - static int wakeup_graph_entry(struct ftrace_graph_ent *trace) 115 + static int wakeup_graph_entry(struct ftrace_graph_ent *trace, 116 + struct fgraph_ops *gops) 116 117 { 117 118 struct trace_array *tr = wakeup_trace; 118 119 struct trace_array_cpu *data; 119 120 unsigned int trace_ctx; 120 121 int ret = 0; 121 122 122 - if (ftrace_graph_ignore_func(trace)) 123 + if (ftrace_graph_ignore_func(gops, trace)) 123 124 return 0; 124 125 /* 125 126 * Do not trace a function if it's filtered by set_graph_notrace. ··· 142 141 return ret; 143 142 } 144 143 145 - static void wakeup_graph_return(struct ftrace_graph_ret *trace) 144 + static void wakeup_graph_return(struct ftrace_graph_ret *trace, 145 + struct fgraph_ops *gops) 146 146 { 147 147 struct trace_array *tr = wakeup_trace; 148 148 struct trace_array_cpu *data; 149 149 unsigned int trace_ctx; 150 150 151 - ftrace_graph_addr_finish(trace); 151 + ftrace_graph_addr_finish(gops, trace); 152 152 153 153 if (!func_prolog_preempt_disable(tr, &data, &trace_ctx)) 154 154 return;
+255 -4
kernel/trace/trace_selftest.c
··· 756 756 757 757 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 758 758 759 + #ifdef CONFIG_DYNAMIC_FTRACE 760 + 761 + #define CHAR_NUMBER 123 762 + #define SHORT_NUMBER 12345 763 + #define WORD_NUMBER 1234567890 764 + #define LONG_NUMBER 1234567890123456789LL 765 + #define ERRSTR_BUFLEN 128 766 + 767 + struct fgraph_fixture { 768 + struct fgraph_ops gops; 769 + int store_size; 770 + const char *store_type_name; 771 + char error_str_buf[ERRSTR_BUFLEN]; 772 + char *error_str; 773 + }; 774 + 775 + static __init int store_entry(struct ftrace_graph_ent *trace, 776 + struct fgraph_ops *gops) 777 + { 778 + struct fgraph_fixture *fixture = container_of(gops, struct fgraph_fixture, gops); 779 + const char *type = fixture->store_type_name; 780 + int size = fixture->store_size; 781 + void *p; 782 + 783 + p = fgraph_reserve_data(gops->idx, size); 784 + if (!p) { 785 + snprintf(fixture->error_str_buf, ERRSTR_BUFLEN, 786 + "Failed to reserve %s\n", type); 787 + return 0; 788 + } 789 + 790 + switch (size) { 791 + case 1: 792 + *(char *)p = CHAR_NUMBER; 793 + break; 794 + case 2: 795 + *(short *)p = SHORT_NUMBER; 796 + break; 797 + case 4: 798 + *(int *)p = WORD_NUMBER; 799 + break; 800 + case 8: 801 + *(long long *)p = LONG_NUMBER; 802 + break; 803 + } 804 + 805 + return 1; 806 + } 807 + 808 + static __init void store_return(struct ftrace_graph_ret *trace, 809 + struct fgraph_ops *gops) 810 + { 811 + struct fgraph_fixture *fixture = container_of(gops, struct fgraph_fixture, gops); 812 + const char *type = fixture->store_type_name; 813 + long long expect = 0; 814 + long long found = -1; 815 + int size; 816 + char *p; 817 + 818 + p = fgraph_retrieve_data(gops->idx, &size); 819 + if (!p) { 820 + snprintf(fixture->error_str_buf, ERRSTR_BUFLEN, 821 + "Failed to retrieve %s\n", type); 822 + return; 823 + } 824 + if (fixture->store_size > size) { 825 + snprintf(fixture->error_str_buf, ERRSTR_BUFLEN, 826 + "Retrieved size %d is smaller than expected %d\n", 827 + size, (int)fixture->store_size); 
828 + return; 829 + } 830 + 831 + switch (fixture->store_size) { 832 + case 1: 833 + expect = CHAR_NUMBER; 834 + found = *(char *)p; 835 + break; 836 + case 2: 837 + expect = SHORT_NUMBER; 838 + found = *(short *)p; 839 + break; 840 + case 4: 841 + expect = WORD_NUMBER; 842 + found = *(int *)p; 843 + break; 844 + case 8: 845 + expect = LONG_NUMBER; 846 + found = *(long long *)p; 847 + break; 848 + } 849 + 850 + if (found != expect) { 851 + snprintf(fixture->error_str_buf, ERRSTR_BUFLEN, 852 + "%s returned not %lld but %lld\n", type, expect, found); 853 + return; 854 + } 855 + fixture->error_str = NULL; 856 + } 857 + 858 + static int __init init_fgraph_fixture(struct fgraph_fixture *fixture) 859 + { 860 + char *func_name; 861 + int len; 862 + 863 + snprintf(fixture->error_str_buf, ERRSTR_BUFLEN, 864 + "Failed to execute storage %s\n", fixture->store_type_name); 865 + fixture->error_str = fixture->error_str_buf; 866 + 867 + func_name = "*" __stringify(DYN_FTRACE_TEST_NAME); 868 + len = strlen(func_name); 869 + 870 + return ftrace_set_filter(&fixture->gops.ops, func_name, len, 1); 871 + } 872 + 873 + /* Test fgraph storage for each size */ 874 + static int __init test_graph_storage_single(struct fgraph_fixture *fixture) 875 + { 876 + int size = fixture->store_size; 877 + int ret; 878 + 879 + pr_cont("PASSED\n"); 880 + pr_info("Testing fgraph storage of %d byte%s: ", size, str_plural(size)); 881 + 882 + ret = init_fgraph_fixture(fixture); 883 + if (ret && ret != -ENODEV) { 884 + pr_cont("*Could not set filter* "); 885 + return -1; 886 + } 887 + 888 + ret = register_ftrace_graph(&fixture->gops); 889 + if (ret) { 890 + pr_warn("Failed to init store_bytes fgraph tracing\n"); 891 + return -1; 892 + } 893 + 894 + DYN_FTRACE_TEST_NAME(); 895 + 896 + unregister_ftrace_graph(&fixture->gops); 897 + 898 + if (fixture->error_str) { 899 + pr_cont("*** %s ***", fixture->error_str); 900 + return -1; 901 + } 902 + 903 + return 0; 904 + } 905 + 906 + static struct fgraph_fixture 
store_bytes[4] __initdata = { 907 + [0] = { 908 + .gops = { 909 + .entryfunc = store_entry, 910 + .retfunc = store_return, 911 + }, 912 + .store_size = 1, 913 + .store_type_name = "byte", 914 + }, 915 + [1] = { 916 + .gops = { 917 + .entryfunc = store_entry, 918 + .retfunc = store_return, 919 + }, 920 + .store_size = 2, 921 + .store_type_name = "short", 922 + }, 923 + [2] = { 924 + .gops = { 925 + .entryfunc = store_entry, 926 + .retfunc = store_return, 927 + }, 928 + .store_size = 4, 929 + .store_type_name = "word", 930 + }, 931 + [3] = { 932 + .gops = { 933 + .entryfunc = store_entry, 934 + .retfunc = store_return, 935 + }, 936 + .store_size = 8, 937 + .store_type_name = "long long", 938 + }, 939 + }; 940 + 941 + static __init int test_graph_storage_multi(void) 942 + { 943 + struct fgraph_fixture *fixture; 944 + bool printed = false; 945 + int i, ret; 946 + 947 + pr_cont("PASSED\n"); 948 + pr_info("Testing multiple fgraph storage on a function: "); 949 + 950 + for (i = 0; i < ARRAY_SIZE(store_bytes); i++) { 951 + fixture = &store_bytes[i]; 952 + ret = init_fgraph_fixture(fixture); 953 + if (ret && ret != -ENODEV) { 954 + pr_cont("*Could not set filter* "); 955 + printed = true; 956 + goto out; 957 + } 958 + 959 + ret = register_ftrace_graph(&fixture->gops); 960 + if (ret) { 961 + pr_warn("Failed to init store_bytes fgraph tracing\n"); 962 + printed = true; 963 + goto out; 964 + } 965 + } 966 + 967 + DYN_FTRACE_TEST_NAME(); 968 + out: 969 + while (--i >= 0) { 970 + fixture = &store_bytes[i]; 971 + unregister_ftrace_graph(&fixture->gops); 972 + 973 + if (fixture->error_str && !printed) { 974 + pr_cont("*** %s ***", fixture->error_str); 975 + printed = true; 976 + } 977 + } 978 + return printed ? 
-1 : 0; 979 + } 980 + 981 + /* Test the storage passed across function_graph entry and return */ 982 + static __init int test_graph_storage(void) 983 + { 984 + int ret; 985 + 986 + ret = test_graph_storage_single(&store_bytes[0]); 987 + if (ret) 988 + return ret; 989 + ret = test_graph_storage_single(&store_bytes[1]); 990 + if (ret) 991 + return ret; 992 + ret = test_graph_storage_single(&store_bytes[2]); 993 + if (ret) 994 + return ret; 995 + ret = test_graph_storage_single(&store_bytes[3]); 996 + if (ret) 997 + return ret; 998 + ret = test_graph_storage_multi(); 999 + if (ret) 1000 + return ret; 1001 + return 0; 1002 + } 1003 + #else 1004 + static inline int test_graph_storage(void) { return 0; } 1005 + #endif /* CONFIG_DYNAMIC_FTRACE */ 1006 + 759 1007 /* Maximum number of functions to trace before diagnosing a hang */ 760 1008 #define GRAPH_MAX_FUNC_TEST 100000000 761 1009 762 1010 static unsigned int graph_hang_thresh; 763 1011 764 1012 /* Wrap the real function entry probe to avoid possible hanging */ 765 - static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace) 1013 + static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace, 1014 + struct fgraph_ops *gops) 766 1015 { 767 1016 /* This is harmlessly racy, we want to approximately detect a hang */ 768 1017 if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) { ··· 1025 776 return 0; 1026 777 } 1027 778 1028 - return trace_graph_entry(trace); 779 + return trace_graph_entry(trace, gops); 1029 780 } 1030 781 1031 782 static struct fgraph_ops fgraph_ops __initdata = { ··· 1061 812 * to detect and recover from possible hangs 1062 813 */ 1063 814 tracing_reset_online_cpus(&tr->array_buffer); 1064 - set_graph_array(tr); 815 + fgraph_ops.private = tr; 1065 816 ret = register_ftrace_graph(&fgraph_ops); 1066 817 if (ret) { 1067 818 warn_failed_init_tracer(trace, ret); ··· 1104 855 cond_resched(); 1105 856 1106 857 tracing_reset_online_cpus(&tr->array_buffer); 1107 - set_graph_array(tr); 858 
+ fgraph_ops.private = tr; 1108 859 1109 860 /* 1110 861 * Some archs *cough*PowerPC*cough* add characters to the ··· 1160 911 /* Enable tracing on all functions again */ 1161 912 ftrace_set_global_filter(NULL, 0, 1); 1162 913 #endif 914 + 915 + ret = test_graph_storage(); 1163 916 1164 917 /* Don't test dynamic tracing, the function tracer already did */ 1165 918 out:
+103
tools/testing/selftests/ftrace/test.d/ftrace/fgraph-multi.tc
··· 1 + #!/bin/sh 2 + # SPDX-License-Identifier: GPL-2.0 3 + # description: ftrace - function graph filters 4 + # requires: set_ftrace_filter function_graph:tracer 5 + 6 + # Make sure that function graph filtering works 7 + 8 + INSTANCE1="instances/test1_$$" 9 + INSTANCE2="instances/test2_$$" 10 + INSTANCE3="instances/test3_$$" 11 + 12 + WD=`pwd` 13 + 14 + do_reset() { 15 + cd $WD 16 + if [ -d $INSTANCE1 ]; then 17 + echo nop > $INSTANCE1/current_tracer 18 + rmdir $INSTANCE1 19 + fi 20 + if [ -d $INSTANCE2 ]; then 21 + echo nop > $INSTANCE2/current_tracer 22 + rmdir $INSTANCE2 23 + fi 24 + if [ -d $INSTANCE3 ]; then 25 + echo nop > $INSTANCE3/current_tracer 26 + rmdir $INSTANCE3 27 + fi 28 + } 29 + 30 + mkdir $INSTANCE1 31 + if ! grep -q function_graph $INSTANCE1/available_tracers; then 32 + echo "function_graph not allowed with instances" 33 + rmdir $INSTANCE1 34 + exit_unsupported 35 + fi 36 + 37 + mkdir $INSTANCE2 38 + mkdir $INSTANCE3 39 + 40 + fail() { # msg 41 + do_reset 42 + echo $1 43 + exit_fail 44 + } 45 + 46 + disable_tracing 47 + clear_trace 48 + 49 + do_test() { 50 + REGEX=$1 51 + TEST=$2 52 + 53 + # filter something, schedule is always good 54 + if ! echo "$REGEX" > set_ftrace_filter; then 55 + fail "can not enable filter $REGEX" 56 + fi 57 + 58 + echo > trace 59 + echo function_graph > current_tracer 60 + enable_tracing 61 + sleep 1 62 + # search for functions (has "{" or ";" on the line) 63 + echo 0 > tracing_on 64 + count=`cat trace | grep -v '^#' | grep -e '{' -e ';' | grep -v "$TEST" | wc -l` 65 + echo 1 > tracing_on 66 + if [ $count -ne 0 ]; then 67 + fail "Graph filtering not working by itself against $TEST?" 68 + fi 69 + 70 + # Make sure we did find something 71 + echo 0 > tracing_on 72 + count=`cat trace | grep -v '^#' | grep -e '{' -e ';' | grep "$TEST" | wc -l` 73 + echo 1 > tracing_on 74 + if [ $count -eq 0 ]; then 75 + fail "No traces found with $TEST?" 
76 + fi 77 + } 78 + 79 + do_test '*sched*' 'sched' 80 + cd $INSTANCE1 81 + do_test '*lock*' 'lock' 82 + cd $WD 83 + cd $INSTANCE2 84 + do_test '*rcu*' 'rcu' 85 + cd $WD 86 + cd $INSTANCE3 87 + echo function_graph > current_tracer 88 + 89 + sleep 1 90 + count=`cat trace | grep -v '^#' | grep -e '{' -e ';' | grep "$TEST" | wc -l` 91 + if [ $count -eq 0 ]; then 92 + fail "No traces found with all tracing?" 93 + fi 94 + 95 + cd $WD 96 + echo nop > current_tracer 97 + echo nop > $INSTANCE1/current_tracer 98 + echo nop > $INSTANCE2/current_tracer 99 + echo nop > $INSTANCE3/current_tracer 100 + 101 + do_reset 102 + 103 + exit 0
+24 -5
tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc
··· 8 8 # Also test it on an instance directory 9 9 10 10 do_function_fork=1 11 + do_funcgraph_proc=1 11 12 12 13 if [ ! -f options/function-fork ]; then 13 14 do_function_fork=0 15 + echo "no option for function-fork found. Option will not be tested." 16 + fi 17 + 18 + if [ ! -f options/funcgraph-proc ]; then 19 + do_funcgraph_proc=0 14 20 echo "no option for function-fork found. Option will not be tested." 15 21 fi 16 22 ··· 27 21 orig_value=`grep function-fork trace_options` 28 22 fi 29 23 24 + if [ $do_funcgraph_proc -eq 1 ]; then 25 + orig_value2=`cat options/funcgraph-proc` 26 + echo 1 > options/funcgraph-proc 27 + fi 28 + 30 29 do_reset() { 31 - if [ $do_function_fork -eq 0 ]; then 32 - return 30 + if [ $do_function_fork -eq 1 ]; then 31 + echo $orig_value > trace_options 33 32 fi 34 33 35 - echo $orig_value > trace_options 34 + if [ $do_funcgraph_proc -eq 1 ]; then 35 + echo $orig_value2 > options/funcgraph-proc 36 + fi 36 37 } 37 38 38 39 fail() { # msg ··· 49 36 } 50 37 51 38 do_test() { 39 + TRACER=$1 40 + 52 41 disable_tracing 53 42 54 43 echo do_execve* > set_ftrace_filter 55 44 echo $FUNCTION_FORK >> set_ftrace_filter 56 45 57 46 echo $PID > set_ftrace_pid 58 - echo function > current_tracer 47 + echo $TRACER > current_tracer 59 48 60 49 if [ $do_function_fork -eq 1 ]; then 61 50 # don't allow children to be traced ··· 97 82 fi 98 83 } 99 84 100 - do_test 85 + do_test function 86 + if grep -s function_graph available_tracers; then 87 + do_test function_graph 88 + fi 89 + 101 90 do_reset 102 91 103 92 exit 0