/*
 * Ftrace header. For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.txt
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

/*
 * If the arch supports passing the variable contents of
 * function_trace_op as the third parameter back from the
 * mcount call, then the arch should define this as 1.
 */
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif

/*
 * If the arch's mcount caller does not support all of ftrace's
 * features, then it must call an indirect function that
 * does. Or at least does enough to prevent any unwelcome side effects.
 */
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
#else
# define FTRACE_FORCE_LIST_FUNC 0
#endif

/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
void trace_init(void);
#else
static inline void trace_init(void) { }
#endif

struct module;
struct ftrace_hash;

#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
                     void __user *buffer, size_t *lenp,
                     loff_t *ppos);

struct ftrace_ops;

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
                              struct ftrace_ops *op, struct pt_regs *regs);

ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
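/*
 * For illustration only (not part of this header's API): a callback
 * matching the ftrace_func_t typedef above might look like the sketch
 * below. The name my_trace_func is hypothetical; @regs is only non-NULL
 * when the ops was registered with a SAVE_REGS flag (see the flag
 * descriptions that follow).
 *
 *      static void notrace my_trace_func(unsigned long ip,
 *                                        unsigned long parent_ip,
 *                                        struct ftrace_ops *op,
 *                                        struct pt_regs *regs)
 *      {
 *              trace_printk("func %ps called by %ps\n",
 *                           (void *)ip, (void *)parent_ip);
 *      }
 */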
/*
 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
 * set in the flags member.
 * PER_CPU, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
 * IPMODIFY are a kind of attribute flags which can be set only before
 * registering the ftrace_ops, and can not be modified while registered.
 * Changing those attribute flags after registering ftrace_ops will
 * cause unexpected results.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 *           allocated ftrace_ops which need special care
 * PER_CPU - set manually by the ftrace_ops user to denote that the
 *           ftrace_ops can be controlled by the following calls:
 *             ftrace_function_local_enable
 *             ftrace_function_local_disable
 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
 *           and passed to the callback. If this flag is set, but the
 *           architecture does not support passing regs
 *           (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 *           ftrace_ops will fail to register, unless the next flag
 *           is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *           handler can handle an arch that does not save regs
 *           (the handler tests if regs == NULL), then it can set
 *           this flag instead. It will not fail registering the ftrace_ops,
 *           but the regs field will be NULL if the arch does not support
 *           passing regs to the handler.
 *           Note, if this flag is set, the SAVE_REGS flag will automatically
 *           get set upon registering the ftrace_ops, if the arch supports it.
 * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace
 *           infrastructure that the callback has its own recursion
 *           protection. If it does not set this, then the ftrace
 *           infrastructure will add recursion protection for the caller.
 * STUB - The ftrace_ops is just a placeholder.
 * INITIALIZED - The ftrace_ops has already been initialized (the first time
 *           register_ftrace_function() is called, it will initialize the ops)
 * DELETED - The ops are being deleted, do not let them be registered again.
 * ADDING - The ops is in the process of being added.
 * REMOVING - The ops is in the process of being removed.
 * MODIFYING - The ops is in the process of changing its filter functions.
 * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
 *           The arch specific code sets this flag when it allocates a
 *           trampoline. This lets the arch know that it can update the
 *           trampoline in case the callback function changes.
 *           The ftrace_ops trampoline can be set by the ftrace users, and
 *           in such cases the arch must not modify it. Only the arch ftrace
 *           core code should set this flag.
 * IPMODIFY - The ops can modify the IP register. This can only be set with
 *           SAVE_REGS. If another ops with this flag set is already
 *           registered for any of the functions that this ops will be
 *           registered for, then this ops will fail to register or
 *           set_filter_ip.
 * PID - Is affected by set_ftrace_pid (allows filtering on those pids)
 * RCU - Set when the ops can only be modified when RCU is watching.
 */
enum {
        FTRACE_OPS_FL_ENABLED                   = 1 << 0,
        FTRACE_OPS_FL_DYNAMIC                   = 1 << 1,
        FTRACE_OPS_FL_PER_CPU                   = 1 << 2,
        FTRACE_OPS_FL_SAVE_REGS                 = 1 << 3,
        FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED    = 1 << 4,
        FTRACE_OPS_FL_RECURSION_SAFE            = 1 << 5,
        FTRACE_OPS_FL_STUB                      = 1 << 6,
        FTRACE_OPS_FL_INITIALIZED               = 1 << 7,
        FTRACE_OPS_FL_DELETED                   = 1 << 8,
        FTRACE_OPS_FL_ADDING                    = 1 << 9,
        FTRACE_OPS_FL_REMOVING                  = 1 << 10,
        FTRACE_OPS_FL_MODIFYING                 = 1 << 11,
        FTRACE_OPS_FL_ALLOC_TRAMP               = 1 << 12,
        FTRACE_OPS_FL_IPMODIFY                  = 1 << 13,
        FTRACE_OPS_FL_PID                       = 1 << 14,
        FTRACE_OPS_FL_RCU                       = 1 << 15,
};

#ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know which functions the callbacks trace */
struct ftrace_ops_hash {
        struct ftrace_hash      *notrace_hash;
        struct ftrace_hash      *filter_hash;
        struct mutex            regex_lock;
};
#endif

/*
 * Note, ftrace_ops can be referenced outside of RCU protection, unless
 * the RCU flag is set. If ftrace_ops is allocated and not part of kernel
 * core data, unregistering it will perform a scheduling on all CPUs
 * to make sure that there are no more users. Depending on the load of the
 * system that may take a bit of time.
 *
 * Any private data added must also take care not to be freed, and if
 * private data is added to an ftrace_ops that is in core code, the user of
 * the ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
 */
struct ftrace_ops {
        ftrace_func_t                   func;
        struct ftrace_ops               *next;
        unsigned long                   flags;
        void                            *private;
        ftrace_func_t                   saved_func;
        int __percpu                    *disabled;
#ifdef CONFIG_DYNAMIC_FTRACE
        struct ftrace_ops_hash          local_hash;
        struct ftrace_ops_hash          *func_hash;
        struct ftrace_ops_hash          old_hash;
        unsigned long                   trampoline;
        unsigned long                   trampoline_size;
#endif
};

/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
        FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
        FTRACE_TYPE_RETURN,    /* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/*
 * The ftrace_ops must be static and should also
 * be read_mostly. These functions do modify read_mostly variables
 * so use them sparingly. Never free an ftrace_ops or modify the
 * next pointer after it has been registered. Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void);
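/*
 * Illustrative sketch (hypothetical names, not a definitive recipe):
 * registering the callback sketched near the top of this header. Per the
 * note above, the ops must be static and must never be freed.
 *
 *      static struct ftrace_ops my_ops __read_mostly = {
 *              .func   = my_trace_func,
 *              .flags  = FTRACE_OPS_FL_RECURSION_SAFE,
 *      };
 *
 *      ret = register_ftrace_function(&my_ops);
 *      ...
 *      unregister_ftrace_function(&my_ops);
 *
 * With no filter set, the callback sees every traced function.
 */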
/**
 * ftrace_function_local_enable - enable ftrace_ops on current cpu
 *
 * This function enables tracing on current cpu by decreasing
 * the per cpu control variable.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
{
        if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
                return;

        (*this_cpu_ptr(ops->disabled))--;
}

/**
 * ftrace_function_local_disable - disable ftrace_ops on current cpu
 *
 * This function disables tracing on current cpu by increasing
 * the per cpu control variable.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
{
        if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
                return;

        (*this_cpu_ptr(ops->disabled))++;
}

/**
 * ftrace_function_local_disabled - returns ftrace_ops disabled value
 *                                  on current cpu
 *
 * This function returns value of ftrace_ops::disabled on current cpu.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
{
        WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU));
        return *this_cpu_ptr(ops->disabled);
}
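/*
 * Illustrative sketch (my_per_cpu_ops is a hypothetical ops registered
 * with FTRACE_OPS_FL_PER_CPU): the helpers above must be paired and run
 * with preemption disabled, e.g.:
 *
 *      preempt_disable();
 *      ftrace_function_local_disable(&my_per_cpu_ops);
 *      ...  section not traced on this CPU  ...
 *      ftrace_function_local_enable(&my_per_cpu_ops);
 *      preempt_enable();
 */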
extern void ftrace_stub(unsigned long a0, unsigned long a1,
                        struct ftrace_ops *op, struct pt_regs *regs);

#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline int ftrace_nr_registered_ops(void)
{
        return 0;
}
static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { }
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_STACK_TRACER

#define STACK_TRACE_ENTRIES 500

struct stack_trace;

extern unsigned stack_trace_index[];
extern struct stack_trace stack_trace_max;
extern unsigned long stack_trace_max_size;
extern arch_spinlock_t stack_trace_max_lock;

extern int stack_tracer_enabled;
void stack_trace_print(void);
int
stack_trace_sysctl(struct ctl_table *table, int write,
                   void __user *buffer, size_t *lenp,
                   loff_t *ppos);
#endif

struct ftrace_func_command {
        struct list_head        list;
        char                    *name;
        int                     (*func)(struct ftrace_hash *hash,
                                        char *func, char *cmd,
                                        char *params, int enable);
};

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

struct dyn_ftrace;

enum ftrace_bug_type {
        FTRACE_BUG_UNKNOWN,
        FTRACE_BUG_INIT,
        FTRACE_BUG_NOP,
        FTRACE_BUG_CALL,
        FTRACE_BUG_UPDATE,
};
extern enum ftrace_bug_type ftrace_bug_type;

/*
 * Archs can set this to point to a variable that holds the value that was
 * expected at the call site before calling ftrace_bug().
 */
extern const void *ftrace_expected;

void ftrace_bug(int err, struct dyn_ftrace *rec);

struct seq_file;

struct ftrace_probe_ops {
        void                    (*func)(unsigned long ip,
                                        unsigned long parent_ip,
                                        void **data);
        int                     (*init)(struct ftrace_probe_ops *ops,
                                        unsigned long ip, void **data);
        void                    (*free)(struct ftrace_probe_ops *ops,
                                        unsigned long ip, void **data);
        int                     (*print)(struct seq_file *m,
                                         unsigned long ip,
                                         struct ftrace_probe_ops *ops,
                                         void *data);
};

extern int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                               void *data);
extern void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                                 void *data);
extern void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
extern void unregister_ftrace_function_probe_all(char *glob);

extern int ftrace_text_reserved(const void *start, const void *end);

extern int ftrace_nr_registered_ops(void);

bool is_ftrace_trampoline(unsigned long addr);

/*
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, '0-FTRACE_REF_MAX', is a counter of
 * the number of callbacks that have registered the function that
 * the dyn_ftrace descriptor represents.
 *
 * The second part is a mask:
 *  ENABLED  - the function is being traced
 *  REGS     - the record wants the function to save regs
 *  REGS_EN  - the function is set up to save regs.
 *  IPMODIFY - the record allows for the IP address to be changed.
 *  DISABLED - the record is not ready to be touched yet
 *
 * When a new ftrace_ops is registered and wants a function to save
 * pt_regs, the REGS bit is set in rec->flags. When the function has been
 * set up to save regs, the REGS_EN flag is set. Once a function
 * starts saving regs it will do so until all ftrace_ops are removed
 * from tracing that function.
 */
enum {
        FTRACE_FL_ENABLED       = (1UL << 31),
        FTRACE_FL_REGS          = (1UL << 30),
        FTRACE_FL_REGS_EN       = (1UL << 29),
        FTRACE_FL_TRAMP         = (1UL << 28),
        FTRACE_FL_TRAMP_EN      = (1UL << 27),
        FTRACE_FL_IPMODIFY      = (1UL << 26),
        FTRACE_FL_DISABLED      = (1UL << 25),
};

#define FTRACE_REF_MAX_SHIFT    25
#define FTRACE_FL_BITS          7
#define FTRACE_FL_MASKED_BITS   ((1UL << FTRACE_FL_BITS) - 1)
#define FTRACE_FL_MASK          (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
#define FTRACE_REF_MAX          ((1UL << FTRACE_REF_MAX_SHIFT) - 1)

#define ftrace_rec_count(rec)   ((rec)->flags & ~FTRACE_FL_MASK)

struct dyn_ftrace {
        unsigned long           ip; /* address of mcount call-site */
        unsigned long           flags;
        struct dyn_arch_ftrace  arch;
};

int ftrace_force_update(void);
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
                         int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
                      int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
                       int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
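/*
 * Illustrative sketch (hypothetical names): filters are set on an ops
 * before it is registered. Reusing the my_ops example from above:
 *
 *      unsigned char buf[] = "vfs_read";
 *
 *      ret = ftrace_set_filter(&my_ops, buf, sizeof(buf) - 1, 1);
 *      if (!ret)
 *              ret = register_ftrace_function(&my_ops);
 *
 * The callback then only fires for the matched functions. A single
 * address can be targeted with ftrace_set_filter_ip() instead.
 */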
int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);

enum {
        FTRACE_UPDATE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_START_FUNC_RET           = (1 << 3),
        FTRACE_STOP_FUNC_RET            = (1 << 4),
};

/*
 * The FTRACE_UPDATE_* enum is used to pass information back
 * from the ftrace_update_record() and ftrace_test_record()
 * functions. These are called by the code update routines
 * to find out what is to be done for a given function.
 *
 *  IGNORE      - The function is already what we want it to be
 *  MAKE_CALL   - Start tracing the function
 *  MODIFY_CALL - Change the call to a different trampoline (e.g. to
 *                start or stop saving regs)
 *  MAKE_NOP    - Stop tracing the function
 */
enum {
        FTRACE_UPDATE_IGNORE,
        FTRACE_UPDATE_MAKE_CALL,
        FTRACE_UPDATE_MODIFY_CALL,
        FTRACE_UPDATE_MAKE_NOP,
};

enum {
        FTRACE_ITER_FILTER      = (1 << 0),
        FTRACE_ITER_NOTRACE     = (1 << 1),
        FTRACE_ITER_PRINTALL    = (1 << 2),
        FTRACE_ITER_DO_HASH     = (1 << 3),
        FTRACE_ITER_HASH        = (1 << 4),
        FTRACE_ITER_ENABLED     = (1 << 5),
};

void arch_ftrace_update_code(int command);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

#define for_ftrace_rec_iter(iter)               \
        for (iter = ftrace_rec_iter_start();    \
             iter;                              \
             iter = ftrace_rec_iter_next(iter))
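/*
 * Illustrative sketch (arch code only): walking all mcount records with
 * the iterator above, e.g. from within arch_ftrace_update_code():
 *
 *      struct ftrace_rec_iter *iter;
 *      struct dyn_ftrace *rec;
 *
 *      for_ftrace_rec_iter(iter) {
 *              rec = ftrace_rec_iter_record(iter);
 *              ...  rec->ip is the mcount call-site to patch  ...
 *      }
 */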
int ftrace_update_record(struct dyn_ftrace *rec, int enable);
int ftrace_test_record(struct dyn_ftrace *rec, int enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_location_range(unsigned long start, unsigned long end);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);

extern ftrace_func_t ftrace_trace_function;

int ftrace_regex_open(struct ftrace_ops *ops, int flag,
                      struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
                            size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
                             size_t cnt, loff_t *ppos);
int ftrace_regex_release(struct inode *inode, struct file *file);

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif

#ifndef FTRACE_GRAPH_ADDR
#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
#endif

#ifndef FTRACE_REGS_ADDR
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif

/*
 * If an arch would like functions that are only traced
 * by the function graph tracer to jump directly to its own
 * trampoline, then they can define FTRACE_GRAPH_TRAMP_ADDR
 * to be that address to jump to.
 */
#ifndef FTRACE_GRAPH_TRAMP_ADDR
#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the mcount call site record
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
                           struct dyn_ftrace *rec, unsigned long addr);

/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the mcount call site record
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
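/*
 * Illustrative sketch of the read/compare/write pattern the two kerneldoc
 * comments above describe (pseudo-arch code; expected_insn, new_insn,
 * INSN_SIZE and arch_patch_text are hypothetical):
 *
 *      unsigned char cur[INSN_SIZE];
 *
 *      if (probe_kernel_read(cur, (void *)rec->ip, INSN_SIZE))
 *              return -EFAULT;
 *      if (memcmp(cur, expected_insn, INSN_SIZE))
 *              return -EINVAL;
 *      if (arch_patch_text((void *)rec->ip, new_insn, INSN_SIZE))
 *              return -EPERM;
 *      return 0;
 */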
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the mcount call site record
 * @old_addr: the address expected to be currently called to
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @old_addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                              unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                                     unsigned long addr)
{
        return -EINVAL;
}
#endif

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
extern void ftrace_module_enable(struct module *mod);
extern void ftrace_release_mod(struct module *mod);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
        return -EINVAL;
}
static inline __init int unregister_ftrace_command(char *cmd_name)
{
        return -EINVAL;
}
static inline int ftrace_text_reserved(const void *start, const void *end)
{
        return 0;
}
static inline unsigned long ftrace_location(unsigned long ip)
{
        return 0;
}

/*
 * Again, users of functions that take ftrace_ops may not
 * have them defined when ftrace is not enabled, but these
 * functions may still be called. Use a macro instead of inline.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)

static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
                            size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
                             size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }

static inline bool is_ftrace_trampoline(unsigned long addr)
{
        return false;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/* totally disable ftrace - cannot re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
        ftrace_enabled = 0;
#endif
}
/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
        int saved_ftrace_enabled = ftrace_enabled;
        ftrace_enabled = 0;
        return saved_ftrace_enabled;
#else
        return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
        ftrace_enabled = enabled;
#endif
}
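/*
 * Illustrative sketch: the save/restore pair above is meant to bracket a
 * region, with the caller providing its own serialization:
 *
 *      int saved = __ftrace_enabled_save();
 *      ...  do work with ftrace disabled  ...
 *      __ftrace_enabled_restore(saved);
 */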
/* All archs should have this, but we define it for consistency */
#ifndef ftrace_return_address0
# define ftrace_return_address0 __builtin_return_address(0)
#endif

/* Archs may use other ways for ADDR1 and beyond */
#ifndef ftrace_return_address
# ifdef CONFIG_FRAME_POINTER
#  define ftrace_return_address(n) __builtin_return_address(n)
# else
#  define ftrace_return_address(n) 0UL
# endif
#endif

#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))

static inline unsigned long get_lock_parent_ip(void)
{
        unsigned long addr = CALLER_ADDR0;

        if (!in_lock_functions(addr))
                return addr;
        addr = CALLER_ADDR1;
        if (!in_lock_functions(addr))
                return addr;
        return CALLER_ADDR2;
}

#ifdef CONFIG_IRQSOFF_TRACER
  extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
  extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
#else
  static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
  static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif

#ifdef CONFIG_PREEMPT_TRACER
  extern void trace_preempt_on(unsigned long a0, unsigned long a1);
  extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some arches will make code out
 * of the CALLER_ADDR, when we really want these to be a real nop.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ent {
        unsigned long func; /* Current function */
        int depth;
} __packed;

/*
 * Structure that defines a return function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ret {
        unsigned long func; /* Current function */
        /* Number of functions that overran the depth limit for current task */
        unsigned long overrun;
        unsigned long long calltime;
        unsigned long long rettime;
        int depth;
} __packed;

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
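/*
 * Illustrative sketch (hypothetical names): a pair of handlers matching
 * the typedefs above, registered with register_ftrace_graph() (declared
 * further below). The entry handler returns nonzero to trace the
 * function, 0 to skip it.
 *
 *      static int my_graph_entry(struct ftrace_graph_ent *ent)
 *      {
 *              return ent->depth < 3;
 *      }
 *
 *      static void my_graph_return(struct ftrace_graph_ret *ret)
 *      {
 *              u64 delta = ret->rettime - ret->calltime;
 *              ...
 *      }
 *
 *      register_ftrace_graph(my_graph_return, my_graph_entry);
 */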
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* for init task */
#define INIT_FTRACE_GRAPH              .ret_stack = NULL,

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
        unsigned long ret;
        unsigned long func;
        unsigned long long calltime;
#ifdef CONFIG_FUNCTION_PROFILER
        unsigned long long subtime;
#endif
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
        unsigned long fp;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
        unsigned long *retp;
#endif
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
                         unsigned long frame_pointer, unsigned long *retp);

unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
                                    unsigned long ret, unsigned long *retp);

/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer but we still want it to be traced by the usual function
 * tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph            notrace

#define FTRACE_NOTRACE_DEPTH 65536
#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
                                 trace_func_graph_ent_t entryfunc);

extern bool ftrace_graph_is_dead(void);
extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void unregister_ftrace_graph(void);

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

static inline int task_curr_ret_stack(struct task_struct *t)
{
        return t->curr_ret_stack;
}

static inline void pause_graph_tracing(void)
{
        atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
        atomic_dec(&current->tracing_graph_pause);
}
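/*
 * Illustrative sketch: pause_graph_tracing()/unpause_graph_tracing() nest
 * via an atomic counter on current, so a section can be hidden from the
 * graph tracer like this:
 *
 *      pause_graph_tracing();
 *      ...  calls made here are not graph-traced  ...
 *      unpause_graph_tracing();
 */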
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph
#define INIT_FTRACE_GRAPH

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
                                        trace_func_graph_ent_t entryfunc)
{
        return -1;
}
static inline void unregister_ftrace_graph(void) { }

static inline int task_curr_ret_stack(struct task_struct *tsk)
{
        return -1;
}

static inline unsigned long
ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
                      unsigned long *retp)
{
        return ret;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_TRACING

/* flags for current->trace */
enum {
        TSK_TRACE_FL_TRACE_BIT  = 0,
        TSK_TRACE_FL_GRAPH_BIT  = 1,
};
enum {
        TSK_TRACE_FL_TRACE      = 1 << TSK_TRACE_FL_TRACE_BIT,
        TSK_TRACE_FL_GRAPH      = 1 << TSK_TRACE_FL_GRAPH_BIT,
};

static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
        set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
        clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
        return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
        set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
        clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
        return tsk->trace & TSK_TRACE_FL_GRAPH;
}

enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;
extern int tracepoint_printk;

extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;

#ifdef CONFIG_PREEMPT
#define INIT_TRACE_RECURSION           .trace_recursion = 0,
#endif

#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */

#ifndef INIT_TRACE_RECURSION
#define INIT_TRACE_RECURSION
#endif

#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */