/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Ftrace header. For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.rst
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

/*
 * If the arch supports passing the variable contents of
 * function_trace_op as the third parameter back from the
 * mcount call, then the arch should define this as 1.
 */
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif

/*
 * If the arch's mcount caller does not support all of ftrace's
 * features, then it must call an indirect function that
 * does. Or at least does enough to prevent any unwelcome side effects.
 */
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
#else
# define FTRACE_FORCE_LIST_FUNC 0
#endif

/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
void trace_init(void);
void early_trace_init(void);
#else
static inline void trace_init(void) { }
static inline void early_trace_init(void) { }
#endif

struct module;
struct ftrace_hash;
struct ftrace_direct_func;

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
	defined(CONFIG_DYNAMIC_FTRACE)
const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
			  unsigned long *off, char **modname, char *sym);
#else
static inline const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
			  unsigned long *off, char **modname, char *sym)
{
	return NULL;
}
#endif

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
			   char *type, char *name,
			   char *module_name, int *exported);
#else
static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
					 char *type, char *name,
					 char *module_name, int *exported)
{
	return -1;
}
#endif

#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void *buffer, size_t *lenp, loff_t *ppos);

struct ftrace_ops;

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct pt_regs *regs);

ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
/*
 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
 * set in the flags member.
 * SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
 * IPMODIFY are a kind of attribute flags which can be set only before
 * registering the ftrace_ops, and cannot be modified while registered.
 * Changing those attribute flags after registering ftrace_ops will
 * cause unexpected results.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 *           allocated ftrace_ops which need special care
 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
 *           and passed to the callback. If this flag is set, but the
 *           architecture does not support passing regs
 *           (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 *           ftrace_ops will fail to register, unless the next flag
 *           is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *           handler can handle an arch that does not save regs
 *           (the handler tests if regs == NULL), then it can set
 *           this flag instead. It will not fail registering the ftrace_ops
 *           but, the regs field will be NULL if the arch does not support
 *           passing regs to the handler.
 *           Note, if this flag is set, the SAVE_REGS flag will automatically
 *           get set upon registering the ftrace_ops, if the arch supports it.
 * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
 *           that the callback has its own recursion protection. If it does
 *           not set this, then the ftrace infrastructure will add recursion
 *           protection for the caller.
 * STUB   - The ftrace_ops is just a placeholder.
 * INITIALIZED - The ftrace_ops has already been initialized (the first time
 *           register_ftrace_function() is called, it will initialize the ops)
 * DELETED - The ops are being deleted, do not let them be registered again.
 * ADDING  - The ops is in the process of being added.
 * REMOVING - The ops is in the process of being removed.
 * MODIFYING - The ops is in the process of changing its filter functions.
 * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
 *           The arch specific code sets this flag when it allocated a
 *           trampoline. This lets the arch know that it can update the
 *           trampoline in case the callback function changes.
 *           The ftrace_ops trampoline can be set by the ftrace users, and
 *           in such cases the arch must not modify it. Only the arch ftrace
 *           core code should set this flag.
 * IPMODIFY - The ops can modify the IP register. This can only be set with
 *           SAVE_REGS. If another ops with this flag set is already registered
 *           for any of the functions that this ops will be registered for, then
 *           this ops will fail to register or set_filter_ip.
 * PID     - Is affected by set_ftrace_pid (allows filtering on those pids)
 * RCU     - Set when the ops can only be called when RCU is watching.
 * TRACE_ARRAY - The ops->private points to a trace_array descriptor.
 * PERMANENT - Set when the ops is permanent and should not be affected by
 *           ftrace_enabled.
 * DIRECT - Used by the direct ftrace_ops helper for direct functions
 *           (internal ftrace only, should not be used by others)
 */
enum {
	FTRACE_OPS_FL_ENABLED			= BIT(0),
	FTRACE_OPS_FL_DYNAMIC			= BIT(1),
	FTRACE_OPS_FL_SAVE_REGS			= BIT(2),
	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= BIT(3),
	FTRACE_OPS_FL_RECURSION_SAFE		= BIT(4),
	FTRACE_OPS_FL_STUB			= BIT(5),
	FTRACE_OPS_FL_INITIALIZED		= BIT(6),
	FTRACE_OPS_FL_DELETED			= BIT(7),
	FTRACE_OPS_FL_ADDING			= BIT(8),
	FTRACE_OPS_FL_REMOVING			= BIT(9),
	FTRACE_OPS_FL_MODIFYING			= BIT(10),
	FTRACE_OPS_FL_ALLOC_TRAMP		= BIT(11),
	FTRACE_OPS_FL_IPMODIFY			= BIT(12),
	FTRACE_OPS_FL_PID			= BIT(13),
	FTRACE_OPS_FL_RCU			= BIT(14),
	FTRACE_OPS_FL_TRACE_ARRAY		= BIT(15),
	FTRACE_OPS_FL_PERMANENT			= BIT(16),
	FTRACE_OPS_FL_DIRECT			= BIT(17),
};

#ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
	struct ftrace_hash __rcu	*notrace_hash;
	struct ftrace_hash __rcu	*filter_hash;
	struct mutex			regex_lock;
};

void ftrace_free_init_mem(void);
void ftrace_free_mem(struct module *mod, void *start, void *end);
#else
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif

/*
 * Note, ftrace_ops can be referenced outside of RCU protection, unless
 * the RCU flag is set. If ftrace_ops is allocated and not part of kernel
 * core data, the unregistering of it will perform a scheduling on all CPUs
 * to make sure that there are no more users. Depending on the load of the
 * system that may take a bit of time.
 *
 * Any private data added must also take care not to be freed and if private
 * data is added to a ftrace_ops that is in core code, the user of the
 * ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
 */
struct ftrace_ops {
	ftrace_func_t			func;
	struct ftrace_ops __rcu	*next;
	unsigned long			flags;
	void				*private;
	ftrace_func_t			saved_func;
#ifdef CONFIG_DYNAMIC_FTRACE
	struct ftrace_ops_hash		local_hash;
	struct ftrace_ops_hash		*func_hash;
	struct ftrace_ops_hash		old_hash;
	unsigned long			trampoline;
	unsigned long			trampoline_size;
	struct list_head		list;
#endif
};

extern struct ftrace_ops __rcu *ftrace_ops_list;
extern struct ftrace_ops ftrace_list_end;

/*
 * Traverse the ftrace_global_list, invoking all entries. The reason that we
 * can use rcu_dereference_raw_check() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism. The rcu_dereference_raw_check() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)			\
	op = rcu_dereference_raw_check(list);		\
	do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)				\
	while (likely(op = rcu_dereference_raw_check((op)->next)) && \
	       unlikely((op) != &ftrace_list_end))
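
/*
 * Illustrative sketch only (not part of the API): the core code walks
 * all registered ops with this macro pair, roughly as follows:
 *
 *	struct ftrace_ops *op;
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		op->func(ip, parent_ip, op, regs);
 *	} while_for_each_ftrace_op(op);
 */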

/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
	FTRACE_TYPE_RETURN,	/* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/*
 * The ftrace_ops must be static and should also
 * be read_mostly. These functions do modify read_mostly variables
 * so use them sparingly. Never free an ftrace_ops or modify the
 * next pointer after it has been registered. Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
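
/*
 * Minimal registration sketch (illustrative only; my_callback and my_ops
 * are hypothetical names, see Documentation/trace/ftrace-uses.rst):
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		...
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */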

extern void ftrace_stub(unsigned long a0, unsigned long a1,
			struct ftrace_ops *op, struct pt_regs *regs);

#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif /* CONFIG_FUNCTION_TRACER */

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
	unsigned long direct; /* for direct lookup only */
};

struct dyn_ftrace;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
extern int ftrace_direct_func_count;
int register_ftrace_direct(unsigned long ip, unsigned long addr);
int unregister_ftrace_direct(unsigned long ip, unsigned long addr);
int modify_ftrace_direct(unsigned long ip, unsigned long old_addr, unsigned long new_addr);
struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr);
int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
				struct dyn_ftrace *rec,
				unsigned long old_addr,
				unsigned long new_addr);
unsigned long ftrace_find_rec_direct(unsigned long ip);
#else
# define ftrace_direct_func_count 0
static inline int register_ftrace_direct(unsigned long ip, unsigned long addr)
{
	return -ENOTSUPP;
}
static inline int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
{
	return -ENOTSUPP;
}
static inline int modify_ftrace_direct(unsigned long ip,
				       unsigned long old_addr, unsigned long new_addr)
{
	return -ENOTSUPP;
}
static inline struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
{
	return NULL;
}
static inline int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
					      struct dyn_ftrace *rec,
					      unsigned long old_addr,
					      unsigned long new_addr)
{
	return -ENODEV;
}
static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
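
/*
 * Usage sketch (illustrative; my_func and my_tramp are hypothetical,
 * my_tramp would be an arch-level assembly trampoline):
 *
 *	ret = register_ftrace_direct((unsigned long)my_func,
 *				     (unsigned long)my_tramp);
 *	...
 *	unregister_ftrace_direct((unsigned long)my_func,
 *				 (unsigned long)my_tramp);
 */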

#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/*
 * This must be implemented by the architecture.
 * It is the way the ftrace direct_ops helper, when called
 * via ftrace (because there are other callbacks besides the
 * direct call), can inform the architecture's trampoline that this
 * routine has a direct caller, and what the caller is.
 *
 * For example, in x86, it returns the direct caller
 * callback function via the regs->orig_ax parameter.
 * Then in the ftrace trampoline, if this is set, it makes
 * the return from the trampoline jump to the direct caller
 * instead of going back to the function it just traced.
 */
static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs,
						 unsigned long addr) { }
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

#ifdef CONFIG_STACK_TRACER

extern int stack_tracer_enabled;

int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
		       size_t *lenp, loff_t *ppos);

/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
DECLARE_PER_CPU(int, disable_stack_tracer);

/**
 * stack_tracer_disable - temporarily disable the stack tracer
 *
 * There are a few locations (namely in RCU) where stack tracing
 * cannot be executed. This function is used to disable stack
 * tracing during those critical sections.
 *
 * This function must be called with preemption or interrupts
 * disabled and stack_tracer_enable() must be called shortly after
 * while preemption or interrupts are still disabled.
 */
static inline void stack_tracer_disable(void)
{
	/* Preemption or interrupts must be disabled */
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_inc(disable_stack_tracer);
}

/**
 * stack_tracer_enable - re-enable the stack tracer
 *
 * After stack_tracer_disable() is called, stack_tracer_enable()
 * must be called shortly afterward.
 */
static inline void stack_tracer_enable(void)
{
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_dec(disable_stack_tracer);
}
#else
static inline void stack_tracer_disable(void) { }
static inline void stack_tracer_enable(void) { }
#endif
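
/*
 * Typical usage sketch for the pair above (preemption must already be
 * disabled, as the kernel-doc requires):
 *
 *	preempt_disable_notrace();
 *	stack_tracer_disable();
 *	...	(section that must not be stack traced)
 *	stack_tracer_enable();
 *	preempt_enable_notrace();
 */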

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

enum ftrace_bug_type {
	FTRACE_BUG_UNKNOWN,
	FTRACE_BUG_INIT,
	FTRACE_BUG_NOP,
	FTRACE_BUG_CALL,
	FTRACE_BUG_UPDATE,
};
extern enum ftrace_bug_type ftrace_bug_type;

/*
 * Archs can set this to point to a variable that holds the value that was
 * expected at the call site before calling ftrace_bug().
 */
extern const void *ftrace_expected;

void ftrace_bug(int err, struct dyn_ftrace *rec);

struct seq_file;

extern int ftrace_text_reserved(const void *start, const void *end);

struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);

bool is_ftrace_trampoline(unsigned long addr);

/*
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, 0 to FTRACE_REF_MAX, is a counter of the number
 * of callbacks that have registered the function that the dyn_ftrace
 * descriptor represents.
 *
 * The second part is a mask:
 *  ENABLED - the function is being traced
 *  REGS    - the record wants the function to save regs
 *  REGS_EN - the function is set up to save regs.
 *  IPMODIFY - the record allows for the IP address to be changed.
 *  DISABLED - the record is not ready to be touched yet
 *  DIRECT   - there is a direct function to call
 *
 * When a new ftrace_ops is registered and wants a function to save
 * pt_regs, the record's REGS flag is set. When the function has been
 * set up to save regs, the REGS_EN flag is set. Once a function
 * starts saving regs it will do so until all ftrace_ops are removed
 * from tracing that function.
 */
enum {
	FTRACE_FL_ENABLED	= (1UL << 31),
	FTRACE_FL_REGS		= (1UL << 30),
	FTRACE_FL_REGS_EN	= (1UL << 29),
	FTRACE_FL_TRAMP		= (1UL << 28),
	FTRACE_FL_TRAMP_EN	= (1UL << 27),
	FTRACE_FL_IPMODIFY	= (1UL << 26),
	FTRACE_FL_DISABLED	= (1UL << 25),
	FTRACE_FL_DIRECT	= (1UL << 24),
	FTRACE_FL_DIRECT_EN	= (1UL << 23),
};

#define FTRACE_REF_MAX_SHIFT	23
#define FTRACE_FL_BITS		9
#define FTRACE_FL_MASKED_BITS	((1UL << FTRACE_FL_BITS) - 1)
#define FTRACE_FL_MASK		(FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
#define FTRACE_REF_MAX		((1UL << FTRACE_REF_MAX_SHIFT) - 1)

#define ftrace_rec_count(rec)	((rec)->flags & ~FTRACE_FL_MASK)
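
/*
 * Worked example of the flags split: with FTRACE_REF_MAX_SHIFT == 23,
 * a record with rec->flags == (FTRACE_FL_ENABLED | 2) is currently
 * being traced and is referenced by two registered callbacks, so
 * ftrace_rec_count(rec) evaluates to 2.
 */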

struct dyn_ftrace {
	unsigned long		ip; /* address of mcount call-site */
	unsigned long		flags;
	struct dyn_arch_ftrace	arch;
};

int ftrace_force_update(void);
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		      int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
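
/*
 * Sketch: restrict a (hypothetical) my_ops to a single function before
 * registering it; a non-zero return means the filter was not set:
 *
 *	ret = ftrace_set_filter_ip(&my_ops, (unsigned long)my_func, 0, 0);
 *	if (!ret)
 *		ret = register_ftrace_function(&my_ops);
 */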

enum {
	FTRACE_UPDATE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_START_FUNC_RET		= (1 << 3),
	FTRACE_STOP_FUNC_RET		= (1 << 4),
	FTRACE_MAY_SLEEP		= (1 << 5),
};

/*
 * The FTRACE_UPDATE_* enum is used to pass information back
 * from the ftrace_update_record() and ftrace_test_record()
 * functions. These are called by the code update routines
 * to find out what is to be done for a given function.
 *
 *  IGNORE      - The function is already what we want it to be
 *  MAKE_CALL   - Start tracing the function
 *  MODIFY_CALL - Change which trampoline the function calls
 *                (e.g. to start or stop saving regs)
 *  MAKE_NOP    - Stop tracing the function
 */
enum {
	FTRACE_UPDATE_IGNORE,
	FTRACE_UPDATE_MAKE_CALL,
	FTRACE_UPDATE_MODIFY_CALL,
	FTRACE_UPDATE_MAKE_NOP,
};

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_NOTRACE	= (1 << 1),
	FTRACE_ITER_PRINTALL	= (1 << 2),
	FTRACE_ITER_DO_PROBES	= (1 << 3),
	FTRACE_ITER_PROBE	= (1 << 4),
	FTRACE_ITER_MOD		= (1 << 5),
	FTRACE_ITER_ENABLED	= (1 << 6),
};

void arch_ftrace_update_code(int command);
void arch_ftrace_update_trampoline(struct ftrace_ops *ops);
void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec);
void arch_ftrace_trampoline_free(struct ftrace_ops *ops);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

#define for_ftrace_rec_iter(iter)		\
	for (iter = ftrace_rec_iter_start();	\
	     iter;				\
	     iter = ftrace_rec_iter_next(iter))
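
/*
 * Illustrative sketch of walking all records from arch code
 * (what is done with each rec is arch specific):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for_ftrace_rec_iter(iter) {
 *		rec = ftrace_rec_iter_record(iter);
 *		...	(e.g. patch the call site at rec->ip)
 *	}
 */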

int ftrace_update_record(struct dyn_ftrace *rec, bool enable);
int ftrace_test_record(struct dyn_ftrace *rec, bool enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_location_range(unsigned long start, unsigned long end);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);

extern ftrace_func_t ftrace_trace_function;

int ftrace_regex_open(struct ftrace_ops *ops, int flag,
		      struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos);
int ftrace_regex_release(struct inode *inode, struct file *file);

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif

#ifndef FTRACE_GRAPH_ADDR
#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
#endif

#ifndef FTRACE_REGS_ADDR
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif

/*
 * If an arch would like functions that are only traced
 * by the function graph tracer to jump directly to its own
 * trampoline, then they can define FTRACE_GRAPH_TRAMP_ADDR
 * to be that address to jump to.
 */
#ifndef FTRACE_GRAPH_TRAMP_ADDR
#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *   0 on success
 *   -EFAULT on error reading the location
 *   -EINVAL on a failed compare of the contents
 *   -EPERM on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
			   struct dyn_ftrace *rec, unsigned long addr);
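
/*
 * An arch implementation typically follows a read/compare/write
 * pattern. Pseudocode sketch only (expected_call_insn() and
 * patch_nop_insn() are hypothetical arch helpers):
 *
 *	unsigned char old[MCOUNT_INSN_SIZE];
 *
 *	if (probe_kernel_read(old, (void *)rec->ip, MCOUNT_INSN_SIZE))
 *		return -EFAULT;
 *	if (memcmp(old, expected_call_insn(rec->ip, addr), MCOUNT_INSN_SIZE))
 *		return -EINVAL;
 *	return patch_nop_insn(rec->ip) ? -EPERM : 0;
 */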


/**
 * ftrace_init_nop - initialize a nop call site
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should contain the contents created by
 * the compiler
 *
 * Return must be:
 *   0 on success
 *   -EFAULT on error reading the location
 *   -EINVAL on a failed compare of the contents
 *   -EPERM on error writing to the location
 * Any other value will be considered a failure.
 */
#ifndef ftrace_init_nop
static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	return ftrace_make_nop(mod, rec, MCOUNT_ADDR);
}
#endif

/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *   0 on success
 *   -EFAULT on error reading the location
 *   -EINVAL on a failed compare of the contents
 *   -EPERM on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the call site record (e.g. mcount/fentry)
 * @old_addr: the address expected to be currently called to
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @old_addr
 *
 * Return must be:
 *   0 on success
 *   -EFAULT on error reading the location
 *   -EINVAL on a failed compare of the contents
 *   -EPERM on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			      unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				     unsigned long addr)
{
	return -EINVAL;
}
#endif

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
extern void ftrace_module_enable(struct module *mod);
extern void ftrace_release_mod(struct module *mod);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
static inline int ftrace_text_reserved(const void *start, const void *end)
{
	return 0;
}
static inline unsigned long ftrace_location(unsigned long ip)
{
	return 0;
}

/*
 * Again users of functions that have ftrace_ops may not
 * have them defined when ftrace is not enabled, but these
 * functions may still be called. Use a macro instead of inline.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
#define ftrace_ops_set_global_filter(ops) do { } while (0)

static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
					  size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
					   size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }

static inline bool is_ftrace_trampoline(unsigned long addr)
{
	return false;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/* Totally disable ftrace - cannot be re-enabled after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}
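
/*
 * Usage sketch: the caller provides its own serialization around the
 * pair, e.g.:
 *
 *	int saved = __ftrace_enabled_save();
 *	...	(run with function tracing disabled)
 *	__ftrace_enabled_restore(saved);
 */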

/* All archs should have this, but we define it for consistency */
#ifndef ftrace_return_address0
# define ftrace_return_address0 __builtin_return_address(0)
#endif

/* Archs may use other ways for ADDR1 and beyond */
#ifndef ftrace_return_address
# ifdef CONFIG_FRAME_POINTER
#  define ftrace_return_address(n) __builtin_return_address(n)
# else
#  define ftrace_return_address(n) 0UL
# endif
#endif

#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))

static inline unsigned long get_lock_parent_ip(void)
{
	unsigned long addr = CALLER_ADDR0;

	if (!in_lock_functions(addr))
		return addr;
	addr = CALLER_ADDR1;
	if (!in_lock_functions(addr))
		return addr;
	return CALLER_ADDR2;
}

#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some archs will generate
 * code for the CALLER_ADDR macros, when we really want these to be real nops.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
#define FTRACE_CALLSITE_SECTION	"__patchable_function_entries"
#else
#define FTRACE_CALLSITE_SECTION	"__mcount_loc"
#endif
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;
} __packed;

/*
 * Structure that defines a return function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ret {
	unsigned long func; /* Current function */
	/* Number of functions that overran the depth limit for current task */
	unsigned long overrun;
	unsigned long long calltime;
	unsigned long long rettime;
	int depth;
} __packed;

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */

extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

struct fgraph_ops {
	trace_func_graph_ent_t	entryfunc;
	trace_func_graph_ret_t	retfunc;
};

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
	unsigned long ret;
	unsigned long func;
	unsigned long long calltime;
#ifdef CONFIG_FUNCTION_PROFILER
	unsigned long long subtime;
#endif
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	unsigned long fp;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	unsigned long *retp;
#endif
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
function_graph_enter(unsigned long ret, unsigned long func,
		     unsigned long frame_pointer, unsigned long *retp);

struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct *task, int idx);

unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp);

/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer, but we still want it traced by the usual function
 * tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph		notrace

#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32

extern int register_ftrace_graph(struct fgraph_ops *ops);
extern void unregister_ftrace_graph(struct fgraph_ops *ops);
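
/*
 * Registration sketch (illustrative; my_entry, my_return and my_gops
 * are hypothetical names; the entry handler returns non-zero to trace
 * the function and zero to skip it):
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	static struct fgraph_ops my_gops = {
 *		.entryfunc	= my_entry,
 *		.retfunc	= my_return,
 *	};
 *
 *	register_ftrace_graph(&my_gops);
 */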

extern bool ftrace_graph_is_dead(void);
extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

static inline void pause_graph_tracing(void)
{
	atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
	atomic_dec(&current->tracing_graph_pause);
}
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

/* Define as macros as fgraph_ops may not be defined */
#define register_ftrace_graph(ops) ({ -1; })
#define unregister_ftrace_graph(ops) do { } while (0)

static inline unsigned long
ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
		      unsigned long *retp)
{
	return ret;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_TRACING

/* flags for current->trace */
enum {
	TSK_TRACE_FL_TRACE_BIT	= 0,
	TSK_TRACE_FL_GRAPH_BIT	= 1,
};
enum {
	TSK_TRACE_FL_TRACE	= 1 << TSK_TRACE_FL_TRACE_BIT,
	TSK_TRACE_FL_GRAPH	= 1 << TSK_TRACE_FL_GRAPH_BIT,
};

static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_GRAPH;
}

enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;
extern int tracepoint_printk;

extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;

int tracepoint_printk_sysctl(struct ctl_table *table, int write,
			     void *buffer, size_t *lenp, loff_t *ppos);

#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */

#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */