include/linux/ftrace.h at v2.6.39

/*
 * Ftrace header.  For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.txt
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos);

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);

struct ftrace_ops {
	ftrace_func_t		func;
	struct ftrace_ops	*next;
};

extern int function_trace_stop;

/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0,	/* Hook the call of the function */
	FTRACE_TYPE_RETURN,	/* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/**
 * ftrace_stop - stop function tracer.
 *
 * A quick way to stop the function tracer.  Note this is an on/off switch;
 * it is not something that is recursive like preempt_disable.
 * This does not disable the calling of mcount, it only stops the
 * calling of functions from mcount.
 */
static inline void ftrace_stop(void)
{
	function_trace_stop = 1;
}

/**
 * ftrace_start - start the function tracer.
 *
 * This function is the inverse of ftrace_stop.  It does not enable
 * function tracing if the function tracer is disabled.  It only
 * sets the function tracer flag to continue calling the functions
 * from mcount.
 */
static inline void ftrace_start(void)
{
	function_trace_stop = 0;
}

/*
 * The ftrace_ops must be static and should also
 * be read_mostly.  These functions do modify read_mostly variables
 * so use them sparingly.  Never free an ftrace_ops or modify the
 * next pointer after it has been registered.  Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void);

extern void ftrace_stub(unsigned long a0, unsigned long a1);
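
/*
 * Illustrative sketch, not part of this header: registering a callback to
 * be run from mcount for every traced function.  The names trace_all_calls
 * and trace_all_ops are hypothetical.  As the note above says, the ops must
 * be static, should be read_mostly, and must never be freed once it has
 * been registered.
 *
 *	static void trace_all_calls(unsigned long ip, unsigned long parent_ip)
 *	{
 *		trace_printk("%pS called from %pS\n",
 *			     (void *)ip, (void *)parent_ip);
 *	}
 *
 *	static struct ftrace_ops trace_all_ops __read_mostly = {
 *		.func	= trace_all_calls,
 *	};
 *
 *	register_ftrace_function(&trace_all_ops);
 *	unregister_ftrace_function(&trace_all_ops);
 */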

#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { }
static inline void ftrace_stop(void) { }
static inline void ftrace_start(void) { }
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_STACK_TRACER
extern int stack_tracer_enabled;
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos);
#endif

struct ftrace_func_command {
	struct list_head	list;
	char			*name;
	int			(*func)(char *func, char *cmd,
					char *params, int enable);
};

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

struct seq_file;

struct ftrace_probe_ops {
	void			(*func)(unsigned long ip,
					unsigned long parent_ip,
					void **data);
	int			(*callback)(unsigned long ip, void **data);
	void			(*free)(void **data);
	int			(*print)(struct seq_file *m,
					unsigned long ip,
					struct ftrace_probe_ops *ops,
					void *data);
};

extern int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			       void *data);
extern void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				 void *data);
extern void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
extern void unregister_ftrace_function_probe_all(char *glob);
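
/*
 * Illustrative sketch, not part of this header: arming a probe on every
 * function whose name matches a glob.  The names my_probe and my_probe_ops
 * are hypothetical, and "vfs_*" is just an example pattern; ->func runs each
 * time one of the matched functions is hit until the probe is unregistered.
 *
 *	static void my_probe(unsigned long ip, unsigned long parent_ip,
 *			     void **data)
 *	{
 *		trace_printk("probe hit at %pS\n", (void *)ip);
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func	= my_probe,
 *	};
 *
 *	register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
 *	unregister_ftrace_function_probe_all("vfs_*");
 */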

extern int ftrace_text_reserved(void *start, void *end);

enum {
	FTRACE_FL_FREE		= (1 << 0),
	FTRACE_FL_FAILED	= (1 << 1),
	FTRACE_FL_FILTER	= (1 << 2),
	FTRACE_FL_ENABLED	= (1 << 3),
	FTRACE_FL_NOTRACE	= (1 << 4),
	FTRACE_FL_CONVERTED	= (1 << 5),
};

struct dyn_ftrace {
	union {
		unsigned long		ip; /* address of mcount call-site */
		struct dyn_ftrace	*freelist;
	};
	union {
		unsigned long		flags;
		struct dyn_ftrace	*newlist;
	};
	struct dyn_arch_ftrace		arch;
};

int ftrace_force_update(void);
void ftrace_set_filter(unsigned char *buf, int len, int reset);

int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void *data);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_call(void);
extern void mcount_call(void);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the mcount call site record
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
			   struct dyn_ftrace *rec, unsigned long addr);

/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the mcount call site record
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch.  The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
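
/*
 * Illustrative sketch, not any real architecture's implementation, of the
 * read/compare/write pattern that ftrace_make_nop() and ftrace_make_call()
 * are expected to follow.  MCOUNT_INSN_SIZE and the @old/@new byte sequences
 * are arch-specific, and real architectures add their own constraints on how
 * the final write may be performed.
 *
 *	static int ftrace_modify_code(unsigned long ip, unsigned char *old,
 *				      unsigned char *new)
 *	{
 *		unsigned char cur[MCOUNT_INSN_SIZE];
 *
 *		if (probe_kernel_read(cur, (void *)ip, MCOUNT_INSN_SIZE))
 *			return -EFAULT;
 *		if (memcmp(cur, old, MCOUNT_INSN_SIZE) != 0)
 *			return -EINVAL;
 *		if (probe_kernel_write((void *)ip, new, MCOUNT_INSN_SIZE))
 *			return -EPERM;
 *		return 0;
 *	}
 *
 * ftrace_make_nop() would pass the expected call sequence as @old and the
 * arch nop as @new; ftrace_make_call() does the reverse.
 */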

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
}
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_release_mod(struct module *mod) {}
static inline int register_ftrace_command(struct ftrace_func_command *cmd)
{
	return -EINVAL;
}
static inline int unregister_ftrace_command(char *cmd_name)
{
	return -EINVAL;
}
static inline int ftrace_text_reserved(void *start, void *end)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/* totally disable ftrace - can not re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock.  Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}
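
/*
 * Illustrative sketch of a hypothetical caller: the save/restore pair above
 * brackets a region that must run with function tracing disabled.  As the
 * comment above notes, the caller must provide its own serialization so that
 * ftrace_enabled cannot change between the two calls.
 *
 *	int saved;
 *
 *	saved = __ftrace_enabled_save();
 *	do_fragile_work();		// hypothetical untraced region
 *	__ftrace_enabled_restore(saved);
 */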

#ifndef HAVE_ARCH_CALLER_ADDR
# ifdef CONFIG_FRAME_POINTER
# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
# else
# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
# define CALLER_ADDR1 0UL
# define CALLER_ADDR2 0UL
# define CALLER_ADDR3 0UL
# define CALLER_ADDR4 0UL
# define CALLER_ADDR5 0UL
# define CALLER_ADDR6 0UL
# endif
#endif /* ifndef HAVE_ARCH_CALLER_ADDR */

#ifdef CONFIG_IRQSOFF_TRACER
 extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
 extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
#else
 static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
 static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif

#ifdef CONFIG_PREEMPT_TRACER
 extern void trace_preempt_on(unsigned long a0, unsigned long a1);
 extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
 static inline void trace_preempt_on(unsigned long a0, unsigned long a1) { }
 static inline void trace_preempt_off(unsigned long a0, unsigned long a1) { }
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;
};

/*
 * Structure that defines a return function trace.
 */
struct ftrace_graph_ret {
	unsigned long func; /* Current function */
	unsigned long long calltime;
	unsigned long long rettime;
	/* Number of functions that overran the depth limit for current task */
	unsigned long overrun;
	int depth;
};

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* for init task */
#define INIT_FTRACE_GRAPH		.ret_stack = NULL,

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
	unsigned long ret;
	unsigned long func;
	unsigned long long calltime;
	unsigned long long subtime;
	unsigned long fp;
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer);

/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer, but we still want it to be traced by the normal function
 * tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph		notrace

/*
 * We want to know which function is an entry point of a hardirq.
 * That will help us to put a signal on the output.
 */
#define __irq_entry		 __attribute__((__section__(".irqentry.text")))

/* Limits of hardirq entrypoints */
extern char __irqentry_text_start[];
extern char __irqentry_text_end[];

#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
				trace_func_graph_ent_t entryfunc);

extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void unregister_ftrace_graph(void);
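
/*
 * Illustrative sketch, not part of this header: registering function graph
 * entry and return handlers.  The names my_graph_entry and my_graph_return
 * are hypothetical.  A nonzero return from the entry handler asks the core
 * to also trace the corresponding function return.
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *ent)
 *	{
 *		return 1;	// trace this function and its return
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *ret)
 *	{
 *		trace_printk("%pS took %llu ns\n", (void *)ret->func,
 *			     ret->rettime - ret->calltime);
 *	}
 *
 *	register_ftrace_graph(my_graph_return, my_graph_entry);
 *	unregister_ftrace_graph();
 */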

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

static inline int task_curr_ret_stack(struct task_struct *t)
{
	return t->curr_ret_stack;
}

static inline void pause_graph_tracing(void)
{
	atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
	atomic_dec(&current->tracing_graph_pause);
}
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph
#define __irq_entry
#define INIT_FTRACE_GRAPH

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
					trace_func_graph_ent_t entryfunc)
{
	return -1;
}
static inline void unregister_ftrace_graph(void) { }

static inline int task_curr_ret_stack(struct task_struct *tsk)
{
	return -1;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_TRACING

/* flags for current->trace */
enum {
	TSK_TRACE_FL_TRACE_BIT	= 0,
	TSK_TRACE_FL_GRAPH_BIT	= 1,
};
enum {
	TSK_TRACE_FL_TRACE	= 1 << TSK_TRACE_FL_TRACE_BIT,
	TSK_TRACE_FL_GRAPH	= 1 << TSK_TRACE_FL_GRAPH_BIT,
};

static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_GRAPH;
}

enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;

#ifdef CONFIG_PREEMPT
#define INIT_TRACE_RECURSION		.trace_recursion = 0,
#endif

#endif /* CONFIG_TRACING */

#ifndef INIT_TRACE_RECURSION
#define INIT_TRACE_RECURSION
#endif

#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */