Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

File shown at tag v2.6.31-rc3 — 172 lines, 4.9 kB (view raw)
1#ifndef _LINUX_FTRACE_EVENT_H 2#define _LINUX_FTRACE_EVENT_H 3 4#include <linux/trace_seq.h> 5#include <linux/ring_buffer.h> 6#include <linux/percpu.h> 7 8struct trace_array; 9struct tracer; 10struct dentry; 11 12DECLARE_PER_CPU(struct trace_seq, ftrace_event_seq); 13 14struct trace_print_flags { 15 unsigned long mask; 16 const char *name; 17}; 18 19const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim, 20 unsigned long flags, 21 const struct trace_print_flags *flag_array); 22 23const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val, 24 const struct trace_print_flags *symbol_array); 25 26/* 27 * The trace entry - the most basic unit of tracing. This is what 28 * is printed in the end as a single line in the trace output, such as: 29 * 30 * bash-15816 [01] 235.197585: idle_cpu <- irq_enter 31 */ 32struct trace_entry { 33 unsigned short type; 34 unsigned char flags; 35 unsigned char preempt_count; 36 int pid; 37 int tgid; 38}; 39 40#define FTRACE_MAX_EVENT \ 41 ((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1) 42 43/* 44 * Trace iterator - used by printout routines who present trace 45 * results to users and which routines might sleep, etc: 46 */ 47struct trace_iterator { 48 struct trace_array *tr; 49 struct tracer *trace; 50 void *private; 51 int cpu_file; 52 struct mutex mutex; 53 struct ring_buffer_iter *buffer_iter[NR_CPUS]; 54 unsigned long iter_flags; 55 56 /* The below is zeroed out in pipe_read */ 57 struct trace_seq seq; 58 struct trace_entry *ent; 59 int cpu; 60 u64 ts; 61 62 loff_t pos; 63 long idx; 64 65 cpumask_var_t started; 66}; 67 68 69typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter, 70 int flags); 71struct trace_event { 72 struct hlist_node node; 73 struct list_head list; 74 int type; 75 trace_print_func trace; 76 trace_print_func raw; 77 trace_print_func hex; 78 trace_print_func binary; 79}; 80 81extern int register_ftrace_event(struct trace_event *event); 82extern int 
unregister_ftrace_event(struct trace_event *event); 83 84/* Return values for print_line callback */ 85enum print_line_t { 86 TRACE_TYPE_PARTIAL_LINE = 0, /* Retry after flushing the seq */ 87 TRACE_TYPE_HANDLED = 1, 88 TRACE_TYPE_UNHANDLED = 2, /* Relay to other output functions */ 89 TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */ 90}; 91 92 93struct ring_buffer_event * 94trace_current_buffer_lock_reserve(int type, unsigned long len, 95 unsigned long flags, int pc); 96void trace_current_buffer_unlock_commit(struct ring_buffer_event *event, 97 unsigned long flags, int pc); 98void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event, 99 unsigned long flags, int pc); 100void trace_current_buffer_discard_commit(struct ring_buffer_event *event); 101 102void tracing_record_cmdline(struct task_struct *tsk); 103 104struct ftrace_event_call { 105 struct list_head list; 106 char *name; 107 char *system; 108 struct dentry *dir; 109 struct trace_event *event; 110 int enabled; 111 int (*regfunc)(void); 112 void (*unregfunc)(void); 113 int id; 114 int (*raw_init)(void); 115 int (*show_format)(struct trace_seq *s); 116 int (*define_fields)(void); 117 struct list_head fields; 118 int filter_active; 119 void *filter; 120 void *mod; 121 122#ifdef CONFIG_EVENT_PROFILE 123 atomic_t profile_count; 124 int (*profile_enable)(struct ftrace_event_call *); 125 void (*profile_disable)(struct ftrace_event_call *); 126#endif 127}; 128 129#define MAX_FILTER_PRED 32 130#define MAX_FILTER_STR_VAL 128 131 132extern int init_preds(struct ftrace_event_call *call); 133extern void destroy_preds(struct ftrace_event_call *call); 134extern int filter_match_preds(struct ftrace_event_call *call, void *rec); 135extern int filter_current_check_discard(struct ftrace_event_call *call, 136 void *rec, 137 struct ring_buffer_event *event); 138 139extern int trace_define_field(struct ftrace_event_call *call, char *type, 140 char *name, int offset, int size, int is_signed); 141 
142#define is_signed_type(type) (((type)(-1)) < 0) 143 144int trace_set_clr_event(const char *system, const char *event, int set); 145 146/* 147 * The double __builtin_constant_p is because gcc will give us an error 148 * if we try to allocate the static variable to fmt if it is not a 149 * constant. Even with the outer if statement optimizing out. 150 */ 151#define event_trace_printk(ip, fmt, args...) \ 152do { \ 153 __trace_printk_check_format(fmt, ##args); \ 154 tracing_record_cmdline(current); \ 155 if (__builtin_constant_p(fmt)) { \ 156 static const char *trace_printk_fmt \ 157 __attribute__((section("__trace_printk_fmt"))) = \ 158 __builtin_constant_p(fmt) ? fmt : NULL; \ 159 \ 160 __trace_bprintk(ip, trace_printk_fmt, ##args); \ 161 } else \ 162 __trace_printk(ip, fmt, ##args); \ 163} while (0) 164 165#define __common_field(type, item, is_signed) \ 166 ret = trace_define_field(event_call, #type, "common_" #item, \ 167 offsetof(typeof(field.ent), item), \ 168 sizeof(field.ent.item), is_signed); \ 169 if (ret) \ 170 return ret; 171 172#endif /* _LINUX_FTRACE_EVENT_H */