···31313232typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);33333434+/*3535+ * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are3636+ * set in the flags member.3737+ *3838+ * ENABLED - set/unset when ftrace_ops is registered/unregistered3939+ * GLOBAL - set manually by ftrace_ops user to denote the ftrace_ops4040+ * is part of the global tracers sharing the same filter4141+ * via set_ftrace_* debugfs files.4242+ * DYNAMIC - set when ftrace_ops is registered to denote dynamically4343+ * allocated ftrace_ops which need special care4444+ * CONTROL - set manually by ftrace_ops user to denote the ftrace_ops4545+ * could be controlled by following calls:4646+ * ftrace_function_local_enable4747+ * ftrace_function_local_disable4848+ */3449enum {3550 FTRACE_OPS_FL_ENABLED = 1 << 0,3651 FTRACE_OPS_FL_GLOBAL = 1 << 1,3752 FTRACE_OPS_FL_DYNAMIC = 1 << 2,5353+ FTRACE_OPS_FL_CONTROL = 1 << 3,3854};39554056struct ftrace_ops {4157 ftrace_func_t func;4258 struct ftrace_ops *next;4359 unsigned long flags;6060+ int __percpu *disabled;4461#ifdef CONFIG_DYNAMIC_FTRACE4562 struct ftrace_hash *notrace_hash;4663 struct ftrace_hash *filter_hash;···11396int register_ftrace_function(struct ftrace_ops *ops);11497int unregister_ftrace_function(struct ftrace_ops *ops);11598void clear_ftrace_function(void);9999+100100+/**101101+ * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu102102+ *103103+ * This function enables tracing on current cpu by decreasing104104+ * the per cpu control variable.105105+ * It must be called with preemption disabled and only on ftrace_ops106106+ * registered with FTRACE_OPS_FL_CONTROL. 
If called without preemption107107+ * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.108108+ */109109+static inline void ftrace_function_local_enable(struct ftrace_ops *ops)110110+{111111+ if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))112112+ return;113113+114114+ (*this_cpu_ptr(ops->disabled))--;115115+}116116+117117+/**118118+ * ftrace_function_local_disable - disable controlled ftrace_ops on current cpu119119+ *120120+ * This function disables tracing on current cpu by increasing121121+ * the per cpu control variable.122122+ * It must be called with preemption disabled and only on ftrace_ops123123+ * registered with FTRACE_OPS_FL_CONTROL. If called without preemption124124+ * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.125125+ */126126+static inline void ftrace_function_local_disable(struct ftrace_ops *ops)127127+{128128+ if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))129129+ return;130130+131131+ (*this_cpu_ptr(ops->disabled))++;132132+}133133+134134+/**135135+ * ftrace_function_local_disabled - returns ftrace_ops disabled value136136+ * on current cpu137137+ *138138+ * This function returns value of ftrace_ops::disabled on current cpu.139139+ * It must be called with preemption disabled and only on ftrace_ops140140+ * registered with FTRACE_OPS_FL_CONTROL. 
If called without preemption141141+ * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.142142+ */143143+static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)144144+{145145+ WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL));146146+ return *this_cpu_ptr(ops->disabled);147147+}116148117149extern void ftrace_stub(unsigned long a0, unsigned long a1);118150···250184 int len, int reset);251185void ftrace_set_global_filter(unsigned char *buf, int len, int reset);252186void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);187187+void ftrace_free_filter(struct ftrace_ops *ops);253188254189int register_ftrace_command(struct ftrace_func_command *cmd);255190int unregister_ftrace_command(struct ftrace_func_command *cmd);···381314#else382315static inline int skip_trace(unsigned long ip) { return 0; }383316static inline int ftrace_force_update(void) { return 0; }384384-static inline void ftrace_set_filter(unsigned char *buf, int len, int reset)385385-{386386-}387317static inline void ftrace_disable_daemon(void) { }388318static inline void ftrace_enable_daemon(void) { }389319static inline void ftrace_release_mod(struct module *mod) {}···404340 */405341#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })406342#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)343343+#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })344344+#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })345345+#define ftrace_free_filter(ops) do { } while (0)407346408347static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,409348 size_t cnt, loff_t *ppos) { return -ENODEV; }
···6262#define FTRACE_HASH_DEFAULT_BITS 106363#define FTRACE_HASH_MAX_BITS 1264646565+#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)6666+6567/* ftrace_enabled is a method to turn ftrace on or off */6668int ftrace_enabled __read_mostly;6769static int last_ftrace_enabled;···9189};92909391static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;9292+static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;9493static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;9594ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;9695static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;9796ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;9897ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;9998static struct ftrace_ops global_ops;9999+static struct ftrace_ops control_ops;100100101101static void102102ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);···171167 __ftrace_trace_function(ip, parent_ip);172168}173169#endif170170+171171+static void control_ops_disable_all(struct ftrace_ops *ops)172172+{173173+ int cpu;174174+175175+ for_each_possible_cpu(cpu)176176+ *per_cpu_ptr(ops->disabled, cpu) = 1;177177+}178178+179179+static int control_ops_alloc(struct ftrace_ops *ops)180180+{181181+ int __percpu *disabled;182182+183183+ disabled = alloc_percpu(int);184184+ if (!disabled)185185+ return -ENOMEM;186186+187187+ ops->disabled = disabled;188188+ control_ops_disable_all(ops);189189+ return 0;190190+}191191+192192+static void control_ops_free(struct ftrace_ops *ops)193193+{194194+ free_percpu(ops->disabled);195195+}174196175197static void update_global_ops(void)176198{···289259 return 0;290260}291261262262+static void add_ftrace_list_ops(struct ftrace_ops **list,263263+ struct ftrace_ops *main_ops,264264+ struct ftrace_ops *ops)265265+{266266+ int first = *list == &ftrace_list_end;267267+ add_ftrace_ops(list, 
ops);268268+ if (first)269269+ add_ftrace_ops(&ftrace_ops_list, main_ops);270270+}271271+272272+static int remove_ftrace_list_ops(struct ftrace_ops **list,273273+ struct ftrace_ops *main_ops,274274+ struct ftrace_ops *ops)275275+{276276+ int ret = remove_ftrace_ops(list, ops);277277+ if (!ret && *list == &ftrace_list_end)278278+ ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);279279+ return ret;280280+}281281+292282static int __register_ftrace_function(struct ftrace_ops *ops)293283{294284 if (ftrace_disabled)···320270 if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))321271 return -EBUSY;322272273273+ /* We don't support both control and global flags set. */274274+ if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)275275+ return -EINVAL;276276+323277 if (!core_kernel_data((unsigned long)ops))324278 ops->flags |= FTRACE_OPS_FL_DYNAMIC;325279326280 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {327327- int first = ftrace_global_list == &ftrace_list_end;328328- add_ftrace_ops(&ftrace_global_list, ops);281281+ add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);329282 ops->flags |= FTRACE_OPS_FL_ENABLED;330330- if (first)331331- add_ftrace_ops(&ftrace_ops_list, &global_ops);283283+ } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {284284+ if (control_ops_alloc(ops))285285+ return -ENOMEM;286286+ add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);332287 } else333288 add_ftrace_ops(&ftrace_ops_list, ops);334289···357302 return -EINVAL;358303359304 if (ops->flags & FTRACE_OPS_FL_GLOBAL) {360360- ret = remove_ftrace_ops(&ftrace_global_list, ops);361361- if (!ret && ftrace_global_list == &ftrace_list_end)362362- ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops);305305+ ret = remove_ftrace_list_ops(&ftrace_global_list,306306+ &global_ops, ops);363307 if (!ret)364308 ops->flags &= ~FTRACE_OPS_FL_ENABLED;309309+ } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {310310+ ret = remove_ftrace_list_ops(&ftrace_control_list,311311+ &control_ops, 
ops);312312+ if (!ret) {313313+ /*314314+ * The ftrace_ops is now removed from the list,315315+ * so there'll be no new users. We must ensure316316+ * all current users are done before we free317317+ * the control data.318318+ */319319+ synchronize_sched();320320+ control_ops_free(ops);321321+ }365322 } else366323 ret = remove_ftrace_ops(&ftrace_ops_list, ops);367324···11841117 if (!hash || hash == EMPTY_HASH)11851118 return;11861119 call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);11201120+}11211121+11221122+void ftrace_free_filter(struct ftrace_ops *ops)11231123+{11241124+ free_ftrace_hash(ops->filter_hash);11251125+ free_ftrace_hash(ops->notrace_hash);11871126}1188112711891128static struct ftrace_hash *alloc_ftrace_hash(int size_bits)···39453872}3946387339473874#endif /* CONFIG_DYNAMIC_FTRACE */38753875+38763876+static void38773877+ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)38783878+{38793879+ struct ftrace_ops *op;38803880+38813881+ if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))38823882+ return;38833883+38843884+ /*38853885+ * Some of the ops may be dynamically allocated,38863886+ * they must be freed after a synchronize_sched().38873887+ */38883888+ preempt_disable_notrace();38893889+ trace_recursion_set(TRACE_CONTROL_BIT);38903890+ op = rcu_dereference_raw(ftrace_control_list);38913891+ while (op != &ftrace_list_end) {38923892+ if (!ftrace_function_local_disabled(op) &&38933893+ ftrace_ops_test(op, ip))38943894+ op->func(ip, parent_ip);38953895+38963896+ op = rcu_dereference_raw(op->next);38973897+ };38983898+ trace_recursion_clear(TRACE_CONTROL_BIT);38993899+ preempt_enable_notrace();39003900+}39013901+39023902+static struct ftrace_ops control_ops = {39033903+ .func = ftrace_ops_control_func,39043904+};3948390539493906static void39503907ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
+28-10
kernel/trace/trace.h
···5656#define F_STRUCT(args...) args57575858#undef FTRACE_ENTRY5959-#define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \6060- struct struct_name { \6161- struct trace_entry ent; \6262- tstruct \5959+#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter) \6060+ struct struct_name { \6161+ struct trace_entry ent; \6262+ tstruct \6363 }64646565#undef TP_ARGS6666#define TP_ARGS(args...) args67676868#undef FTRACE_ENTRY_DUP6969-#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)6969+#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)7070+7171+#undef FTRACE_ENTRY_REG7272+#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, \7373+ filter, regfn) \7474+ FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \7575+ filter)70767177#include "trace_entries.h"7278···294288/* for function tracing recursion */295289#define TRACE_INTERNAL_BIT (1<<11)296290#define TRACE_GLOBAL_BIT (1<<12)291291+#define TRACE_CONTROL_BIT (1<<13)292292+297293/*298294 * Abuse of the trace_recursion.299295 * As we need a way to maintain state if we are tracing the function···597589static inline int ftrace_is_dead(void) { return 0; }598590#endif599591592592+int ftrace_event_is_function(struct ftrace_event_call *call);593593+600594/*601595 * struct trace_parser - servers for reading the user input separated by spaces602596 * @cont: set if the input is not complete - no final space char was found···776766 u64 val;777767 struct regex regex;778768 unsigned short *ops;779779-#ifdef CONFIG_FTRACE_STARTUP_TEST780769 struct ftrace_event_field *field;781781-#endif782770 int offset;783771 int not;784772 int op;···826818extern const char *__stop___trace_bprintk_fmt[];827819828820#undef FTRACE_ENTRY829829-#define FTRACE_ENTRY(call, struct_name, id, tstruct, print) \821821+#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \830822 extern struct ftrace_event_call \831823 __attribute__((__aligned__(4))) 
event_##call;832824#undef FTRACE_ENTRY_DUP833833-#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print) \834834- FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))825825+#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \826826+ FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \827827+ filter)835828#include "trace_entries.h"829829+830830+#ifdef CONFIG_PERF_EVENTS831831+#ifdef CONFIG_FUNCTION_TRACER832832+int perf_ftrace_event_register(struct ftrace_event_call *call,833833+ enum trace_reg type, void *data);834834+#else835835+#define perf_ftrace_event_register NULL836836+#endif /* CONFIG_FUNCTION_TRACER */837837+#endif /* CONFIG_PERF_EVENTS */836838837839#endif /* _LINUX_KERNEL_TRACE_H */