/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _LINUX_TRACE_EVENT_H
#define _LINUX_TRACE_EVENT_H

#include <linux/ring_buffer.h>
#include <linux/trace_seq.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/tracepoint.h>

struct trace_array;
struct array_buffer;
struct tracer;
struct dentry;
struct bpf_prog;
union bpf_attr;

/* Used for event string fields when they are NULL */
#define EVENT_NULL_STR "(null)"

const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
				  unsigned long flags,
				  const struct trace_print_flags *flag_array);

const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
				    const struct trace_print_flags *symbol_array);

#if BITS_PER_LONG == 32
const char *trace_print_flags_seq_u64(struct trace_seq *p, const char *delim,
				      unsigned long long flags,
				      const struct trace_print_flags_u64 *flag_array);

const char *trace_print_symbols_seq_u64(struct trace_seq *p,
					unsigned long long val,
					const struct trace_print_flags_u64 *symbol_array);
#endif

const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
				    unsigned int bitmask_size);

const char *trace_print_hex_seq(struct trace_seq *p,
				const unsigned char *buf, int len,
				bool concatenate);

const char *trace_print_array_seq(struct trace_seq *p,
				  const void *buf, int count,
				  size_t el_size);

const char *
trace_print_hex_dump_seq(struct trace_seq *p, const char *prefix_str,
			 int prefix_type, int rowsize, int groupsize,
			 const void *buf, size_t len, bool ascii);
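/*
 * These helpers are rarely called directly; they back the
 * __print_flags()/__print_symbolic() macros used in TRACE_EVENT()
 * print formats.  A rough sketch of the expansion (assumption: this
 * mirrors include/trace/trace_events.h; the flag values and names
 * are illustrative):
 *
 *	__print_flags(__entry->flags, "|",
 *		      { 1, "FLAG_A" }, { 2, "FLAG_B" })
 *
 * builds a static const struct trace_print_flags array terminated by
 * { -1, NULL } and passes it to trace_print_flags_seq() above.
 */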
struct trace_iterator;
struct trace_event;

int trace_raw_output_prep(struct trace_iterator *iter,
			  struct trace_event *event);
extern __printf(2, 3)
void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...);

/* Used to find the offset and length of dynamic fields in trace events */
struct trace_dynamic_info {
#ifdef CONFIG_CPU_BIG_ENDIAN
	u16	len;
	u16	offset;
#else
	u16	offset;
	u16	len;
#endif
} __packed;

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned short		type;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
};

#define TRACE_EVENT_TYPE_MAX						\
	((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)

/*
 * Trace iterator - used by printout routines that present trace
 * results to users and that might sleep, etc.:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	struct array_buffer	*array_buffer;
	void			*private;
	int			cpu_file;
	struct mutex		mutex;
	struct ring_buffer_iter	**buffer_iter;
	unsigned long		iter_flags;
	void			*temp;	/* temp holder */
	unsigned int		temp_size;
	char			*fmt;	/* modified format holder */
	unsigned int		fmt_size;
	atomic_t		wait_index;

	/* trace_seq for __print_flags() and __print_symbolic() etc. */
	struct trace_seq	tmp_seq;

	cpumask_var_t		started;

	/* Set when the file is closed to prevent new waiters */
	bool			closed;

	/* it's true when current open file is snapshot */
	bool			snapshot;

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	unsigned long		lost_events;
	int			leftover;
	int			ent_size;
	int			cpu;
	u64			ts;

	loff_t			pos;
	long			idx;

	/* All new fields here will be zeroed out in pipe_read */
};

enum trace_iter_flags {
	TRACE_FILE_LAT_FMT	= 1,
	TRACE_FILE_ANNOTATE	= 2,
	TRACE_FILE_TIME_IN_NS	= 4,
};


typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
					      int flags, struct trace_event *event);

struct trace_event_functions {
	trace_print_func	trace;
	trace_print_func	raw;
	trace_print_func	hex;
	trace_print_func	binary;
};

struct trace_event {
	struct hlist_node		node;
	int				type;
	struct trace_event_functions	*funcs;
};

extern int register_trace_event(struct trace_event *event);
extern int unregister_trace_event(struct trace_event *event);

/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2,	/* Relay to other output functions */
	TRACE_TYPE_NO_CONSUME	= 3	/* Handled but ask to not consume */
};

enum print_line_t trace_handle_return(struct trace_seq *s);

static inline void tracing_generic_entry_update(struct trace_entry *entry,
						unsigned short type,
						unsigned int trace_ctx)
{
	entry->preempt_count	= trace_ctx & 0xff;
	entry->pid		= current->pid;
	entry->type		= type;
	entry->flags		= trace_ctx >> 16;
}

unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status);

enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_NEED_RESCHED_LAZY	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
	TRACE_FLAG_NMI			= 0x40,
	TRACE_FLAG_BH_OFF		= 0x80,
};

static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
{
	unsigned int irq_status = irqs_disabled_flags(irqflags) ?
		TRACE_FLAG_IRQS_OFF : 0;
	return tracing_gen_ctx_irq_test(irq_status);
}

static inline unsigned int tracing_gen_ctx(void)
{
	unsigned long irqflags;

	local_save_flags(irqflags);
	return tracing_gen_ctx_flags(irqflags);
}
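/*
 * Illustrative sketch (not an exported pattern): a caller that has
 * reserved its own ring-buffer event pairs tracing_gen_ctx() with
 * tracing_generic_entry_update() to stamp the entry header.  The low
 * byte of trace_ctx carries the preemption count and bits 16+ carry
 * the trace_flag_type bits, matching the helper above:
 *
 *	unsigned int trace_ctx = tracing_gen_ctx();
 *	struct trace_entry *ent = ring_buffer_event_data(event);
 *
 *	tracing_generic_entry_update(ent, type, trace_ctx);
 */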
static inline unsigned int tracing_gen_ctx_dec(void)
{
	unsigned int trace_ctx;

	trace_ctx = tracing_gen_ctx();
	/*
	 * Subtract one from the preemption counter if preemption is enabled,
	 * see trace_event_buffer_reserve() for details.
	 */
	if (IS_ENABLED(CONFIG_PREEMPTION))
		trace_ctx--;
	return trace_ctx;
}

struct trace_event_file;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct trace_buffer **current_buffer,
				struct trace_event_file *trace_file,
				int type, unsigned long len,
				unsigned int trace_ctx);

#define TRACE_RECORD_CMDLINE	BIT(0)
#define TRACE_RECORD_TGID	BIT(1)

void tracing_record_taskinfo(struct task_struct *task, int flags);
void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
					  struct task_struct *next, int flags);

void tracing_record_cmdline(struct task_struct *task);
void tracing_record_tgid(struct task_struct *task);

int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
	__printf(3, 4);

struct event_filter;

enum trace_reg {
	TRACE_REG_REGISTER,
	TRACE_REG_UNREGISTER,
#ifdef CONFIG_PERF_EVENTS
	TRACE_REG_PERF_REGISTER,
	TRACE_REG_PERF_UNREGISTER,
	TRACE_REG_PERF_OPEN,
	TRACE_REG_PERF_CLOSE,
	/*
	 * These (ADD/DEL) use a 'boolean' return value, where 1 (true) means a
	 * custom action was taken and the default action is not to be
	 * performed.
	 */
	TRACE_REG_PERF_ADD,
	TRACE_REG_PERF_DEL,
#endif
};

struct trace_event_call;

#define TRACE_FUNCTION_TYPE ((const char *)~0UL)

struct trace_event_fields {
	const char *type;
	union {
		struct {
			const char	*name;
			const int	size;
			const int	align;
			const unsigned int is_signed:1;
			unsigned int	needs_test:1;
			const int	filter_type;
			const int	len;
		};
		int (*define_fields)(struct trace_event_call *);
	};
};

struct trace_event_class {
	const char		*system;
	void			*probe;
#ifdef CONFIG_PERF_EVENTS
	void			*perf_probe;
#endif
	int			(*reg)(struct trace_event_call *event,
				       enum trace_reg type, void *data);
	struct trace_event_fields *fields_array;
	struct list_head	*(*get_fields)(struct trace_event_call *);
	struct list_head	fields;
	int			(*raw_init)(struct trace_event_call *);
};

extern int trace_event_reg(struct trace_event_call *event,
			   enum trace_reg type, void *data);

struct trace_event_buffer {
	struct trace_buffer		*buffer;
	struct ring_buffer_event	*event;
	struct trace_event_file		*trace_file;
	void				*entry;
	unsigned int			trace_ctx;
	struct pt_regs			*regs;
};

void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
				 struct trace_event_file *trace_file,
				 unsigned long len);

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);
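/*
 * Sketch of the reserve/commit pattern, assuming the shape of the
 * probe functions that TRACE_EVENT() generates (the entry layout and
 * the "my_entry" name here are hypothetical):
 *
 *	struct trace_event_buffer fbuffer;
 *	struct my_entry *entry;
 *
 *	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *					   sizeof(*entry));
 *	if (!entry)
 *		return;
 *	entry->field = value;
 *	trace_event_buffer_commit(&fbuffer);
 */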
enum {
	TRACE_EVENT_FL_CAP_ANY_BIT,
	TRACE_EVENT_FL_NO_SET_FILTER_BIT,
	TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
	TRACE_EVENT_FL_TRACEPOINT_BIT,
	TRACE_EVENT_FL_DYNAMIC_BIT,
	TRACE_EVENT_FL_KPROBE_BIT,
	TRACE_EVENT_FL_UPROBE_BIT,
	TRACE_EVENT_FL_EPROBE_BIT,
	TRACE_EVENT_FL_FPROBE_BIT,
	TRACE_EVENT_FL_CUSTOM_BIT,
	TRACE_EVENT_FL_TEST_STR_BIT,
};

/*
 * Event flags:
 *  CAP_ANY	  - Any user can enable for perf
 *  NO_SET_FILTER - Set when filter has error and is to be ignored
 *  IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
 *  TRACEPOINT    - Event is a tracepoint
 *  DYNAMIC       - Event is a dynamic event (created at run time)
 *  KPROBE        - Event is a kprobe
 *  UPROBE        - Event is a uprobe
 *  EPROBE        - Event is an event probe
 *  FPROBE        - Event is a function probe
 *  CUSTOM        - Event is a custom event (to be attached to an existing tracepoint)
 *                  This is set when the custom event has not been attached
 *                  to a tracepoint yet, then it is cleared when it is.
 *  TEST_STR      - The event has a "%s" that points to a string outside the event
 */
enum {
	TRACE_EVENT_FL_CAP_ANY		= (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
	TRACE_EVENT_FL_NO_SET_FILTER	= (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
	TRACE_EVENT_FL_IGNORE_ENABLE	= (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
	TRACE_EVENT_FL_TRACEPOINT	= (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
	TRACE_EVENT_FL_DYNAMIC		= (1 << TRACE_EVENT_FL_DYNAMIC_BIT),
	TRACE_EVENT_FL_KPROBE		= (1 << TRACE_EVENT_FL_KPROBE_BIT),
	TRACE_EVENT_FL_UPROBE		= (1 << TRACE_EVENT_FL_UPROBE_BIT),
	TRACE_EVENT_FL_EPROBE		= (1 << TRACE_EVENT_FL_EPROBE_BIT),
	TRACE_EVENT_FL_FPROBE		= (1 << TRACE_EVENT_FL_FPROBE_BIT),
	TRACE_EVENT_FL_CUSTOM		= (1 << TRACE_EVENT_FL_CUSTOM_BIT),
	TRACE_EVENT_FL_TEST_STR		= (1 << TRACE_EVENT_FL_TEST_STR_BIT),
};

#define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)

struct trace_event_call {
	struct list_head	list;
	struct trace_event_class *class;
	union {
		const char	*name;
		/* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
		struct tracepoint	*tp;
	};
	struct trace_event	event;
	char			*print_fmt;
	/*
	 * Static events can disappear with modules,
	 * whereas dynamic ones need their own ref count.
	 */
	union {
		void		*module;
		atomic_t	refcnt;
	};
	void			*data;

	/* See the TRACE_EVENT_FL_* flags above */
	int			flags; /* static flags of different events */

#ifdef CONFIG_PERF_EVENTS
	int				perf_refcount;
	struct hlist_head __percpu	*perf_events;
	struct bpf_prog_array __rcu	*prog_array;

	int	(*perf_perm)(struct trace_event_call *,
			     struct perf_event *);
#endif
};

#ifdef CONFIG_DYNAMIC_EVENTS
bool trace_event_dyn_try_get_ref(struct trace_event_call *call);
void trace_event_dyn_put_ref(struct trace_event_call *call);
bool trace_event_dyn_busy(struct trace_event_call *call);
#else
static inline bool trace_event_dyn_try_get_ref(struct trace_event_call *call)
{
	/* Without DYNAMIC_EVENTS configured, nothing should be calling this */
	return false;
}
static inline void trace_event_dyn_put_ref(struct trace_event_call *call)
{
}
static inline bool trace_event_dyn_busy(struct trace_event_call *call)
{
	/* Nothing should call this without DYNAMIC_EVENTS configured. */
	return true;
}
#endif

static inline bool trace_event_try_get_ref(struct trace_event_call *call)
{
	if (call->flags & TRACE_EVENT_FL_DYNAMIC)
		return trace_event_dyn_try_get_ref(call);
	else
		return try_module_get(call->module);
}

static inline void trace_event_put_ref(struct trace_event_call *call)
{
	if (call->flags & TRACE_EVENT_FL_DYNAMIC)
		trace_event_dyn_put_ref(call);
	else
		module_put(call->module);
}

#ifdef CONFIG_PERF_EVENTS
static inline bool bpf_prog_array_valid(struct trace_event_call *call)
{
	/*
	 * This inline function checks whether call->prog_array
	 * is valid or not. The function is called in various places,
	 * outside rcu_read_lock/unlock, as a heuristic to speed up execution.
	 *
	 * If this function returns true, and later call->prog_array
	 * becomes false inside rcu_read_lock/unlock region,
	 * we bail out then. If this function returns false,
	 * there is a risk that we might miss a few events if the checking
	 * were delayed until inside rcu_read_lock/unlock region and
	 * call->prog_array happened to become non-NULL then.
	 *
	 * Here, READ_ONCE() is used instead of rcu_access_pointer().
	 * rcu_access_pointer() requires the actual definition of
	 * "struct bpf_prog_array" while READ_ONCE() only needs
	 * a declaration of the same type.
	 */
	return !!READ_ONCE(call->prog_array);
}
#endif
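/*
 * Typical call pattern (a sketch; the real glue lives in
 * kernel/trace/trace_event_perf.c and kernel/trace/bpf_trace.c): do
 * the cheap, lock-free check first and let trace_call_bpf() (declared
 * further below) re-validate the array under rcu_read_lock():
 *
 *	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, ctx))
 *		return;		// event filtered out by a BPF program
 */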
static inline const char *
trace_event_name(struct trace_event_call *call)
{
	if (call->flags & TRACE_EVENT_FL_CUSTOM)
		return call->name;
	else if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
		return call->tp ? call->tp->name : NULL;
	else
		return call->name;
}

static inline struct list_head *
trace_get_fields(struct trace_event_call *event_call)
{
	if (!event_call->class->get_fields)
		return &event_call->class->fields;
	return event_call->class->get_fields(event_call);
}

struct trace_subsystem_dir;

enum {
	EVENT_FILE_FL_ENABLED_BIT,
	EVENT_FILE_FL_RECORDED_CMD_BIT,
	EVENT_FILE_FL_RECORDED_TGID_BIT,
	EVENT_FILE_FL_FILTERED_BIT,
	EVENT_FILE_FL_NO_SET_FILTER_BIT,
	EVENT_FILE_FL_SOFT_MODE_BIT,
	EVENT_FILE_FL_SOFT_DISABLED_BIT,
	EVENT_FILE_FL_TRIGGER_MODE_BIT,
	EVENT_FILE_FL_TRIGGER_COND_BIT,
	EVENT_FILE_FL_PID_FILTER_BIT,
	EVENT_FILE_FL_WAS_ENABLED_BIT,
	EVENT_FILE_FL_FREED_BIT,
};

extern struct trace_event_file *trace_get_event_file(const char *instance,
						     const char *system,
						     const char *event);
extern void trace_put_event_file(struct trace_event_file *file);
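/*
 * Example (a sketch; error handling trimmed): look up an event file
 * in the top-level trace instance (instance == NULL) and drop the
 * reference when done.  On failure trace_get_event_file() returns an
 * ERR_PTR() value:
 *
 *	struct trace_event_file *file;
 *
 *	file = trace_get_event_file(NULL, "sched", "sched_switch");
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	...
 *	trace_put_event_file(file);
 */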
#define MAX_DYNEVENT_CMD_LEN	(2048)

enum dynevent_type {
	DYNEVENT_TYPE_SYNTH = 1,
	DYNEVENT_TYPE_KPROBE,
	DYNEVENT_TYPE_NONE,
};

struct dynevent_cmd;

typedef int (*dynevent_create_fn_t)(struct dynevent_cmd *cmd);

struct dynevent_cmd {
	struct seq_buf		seq;
	const char		*event_name;
	unsigned int		n_fields;
	enum dynevent_type	type;
	dynevent_create_fn_t	run_command;
	void			*private_data;
};

extern int dynevent_create(struct dynevent_cmd *cmd);

extern int synth_event_delete(const char *name);

extern void synth_event_cmd_init(struct dynevent_cmd *cmd,
				 char *buf, int maxlen);

extern int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd,
				       const char *name,
				       struct module *mod, ...);

#define synth_event_gen_cmd_start(cmd, name, mod, ...)			\
	__synth_event_gen_cmd_start(cmd, name, mod, ## __VA_ARGS__, NULL)

struct synth_field_desc {
	const char *type;
	const char *name;
};

extern int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd,
					   const char *name,
					   struct module *mod,
					   struct synth_field_desc *fields,
					   unsigned int n_fields);
extern int synth_event_create(const char *name,
			      struct synth_field_desc *fields,
			      unsigned int n_fields, struct module *mod);

extern int synth_event_add_field(struct dynevent_cmd *cmd,
				 const char *type,
				 const char *name);
extern int synth_event_add_field_str(struct dynevent_cmd *cmd,
				     const char *type_name);
extern int synth_event_add_fields(struct dynevent_cmd *cmd,
				  struct synth_field_desc *fields,
				  unsigned int n_fields);

#define synth_event_gen_cmd_end(cmd)	\
	dynevent_create(cmd)
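/*
 * Sketch of generating a synthetic event with this API, loosely
 * following Documentation/trace/events.rst (the "schedtest" event
 * and its fields are made up for illustration; error handling
 * trimmed):
 *
 *	struct dynevent_cmd cmd;
 *	char *buf;
 *	int ret;
 *
 *	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *
 *	ret = synth_event_gen_cmd_start(&cmd, "schedtest", THIS_MODULE,
 *					"pid_t", "next_pid_field");
 *	ret = synth_event_add_field(&cmd, "u64", "ts_ns");
 *	ret = synth_event_gen_cmd_end(&cmd);
 */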
struct synth_event;

struct synth_event_trace_state {
	struct trace_event_buffer fbuffer;
	struct synth_trace_event *entry;
	struct trace_buffer *buffer;
	struct synth_event *event;
	unsigned int cur_field;
	unsigned int n_u64;
	bool disabled;
	bool add_next;
	bool add_name;
};

extern int synth_event_trace(struct trace_event_file *file,
			     unsigned int n_vals, ...);
extern int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
				   unsigned int n_vals);
extern int synth_event_trace_start(struct trace_event_file *file,
				   struct synth_event_trace_state *trace_state);
extern int synth_event_add_next_val(u64 val,
				    struct synth_event_trace_state *trace_state);
extern int synth_event_add_val(const char *field_name, u64 val,
			       struct synth_event_trace_state *trace_state);
extern int synth_event_trace_end(struct synth_event_trace_state *trace_state);

extern int kprobe_event_delete(const char *name);

extern void kprobe_event_cmd_init(struct dynevent_cmd *cmd,
				  char *buf, int maxlen);

#define kprobe_event_gen_cmd_start(cmd, name, loc, ...)			\
	__kprobe_event_gen_cmd_start(cmd, false, name, loc, ## __VA_ARGS__, NULL)

#define kretprobe_event_gen_cmd_start(cmd, name, loc, ...)		\
	__kprobe_event_gen_cmd_start(cmd, true, name, loc, ## __VA_ARGS__, NULL)

extern int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd,
					bool kretprobe,
					const char *name,
					const char *loc, ...);

#define kprobe_event_add_fields(cmd, ...)	\
	__kprobe_event_add_fields(cmd, ## __VA_ARGS__, NULL)

#define kprobe_event_add_field(cmd, field)	\
	__kprobe_event_add_fields(cmd, field, NULL)

extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...);

#define kprobe_event_gen_cmd_end(cmd)		\
	dynevent_create(cmd)

#define kretprobe_event_gen_cmd_end(cmd)	\
	dynevent_create(cmd)
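/*
 * Sketch of generating a kprobe event, in the spirit of the
 * kprobe_event_gen_test module (the probe name, location and fetch
 * args below are illustrative; error handling trimmed):
 *
 *	struct dynevent_cmd cmd;
 *	char *buf;
 *	int ret;
 *
 *	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *	kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *
 *	ret = kprobe_event_gen_cmd_start(&cmd, "gen_kprobe_test",
 *					 "do_sys_open",
 *					 "dfd=%ax", "filename=%dx");
 *	ret = kprobe_event_add_fields(&cmd, "flags=%cx", "mode=+4($stack)");
 *	ret = kprobe_event_gen_cmd_end(&cmd);
 */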
/*
 * Event file flags:
 *  ENABLED	  - The event is enabled
 *  RECORDED_CMD  - The comms should be recorded at sched_switch
 *  RECORDED_TGID - The tgids should be recorded at sched_switch
 *  FILTERED	  - The event has a filter attached
 *  NO_SET_FILTER - Set when filter has error and is to be ignored
 *  SOFT_MODE     - The event is enabled/disabled by SOFT_DISABLED
 *  SOFT_DISABLED - When set, do not trace the event (even though its
 *                  tracepoint may be enabled)
 *  TRIGGER_MODE  - When set, invoke the triggers associated with the event
 *  TRIGGER_COND  - When set, one or more triggers have an associated filter
 *  PID_FILTER    - When set, the event is filtered based on pid
 *  WAS_ENABLED   - Set when enabled to know to clear trace on module removal
 *  FREED         - File descriptor is freed, all fields should be considered invalid
 */
enum {
	EVENT_FILE_FL_ENABLED		= (1 << EVENT_FILE_FL_ENABLED_BIT),
	EVENT_FILE_FL_RECORDED_CMD	= (1 << EVENT_FILE_FL_RECORDED_CMD_BIT),
	EVENT_FILE_FL_RECORDED_TGID	= (1 << EVENT_FILE_FL_RECORDED_TGID_BIT),
	EVENT_FILE_FL_FILTERED		= (1 << EVENT_FILE_FL_FILTERED_BIT),
	EVENT_FILE_FL_NO_SET_FILTER	= (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
	EVENT_FILE_FL_SOFT_MODE		= (1 << EVENT_FILE_FL_SOFT_MODE_BIT),
	EVENT_FILE_FL_SOFT_DISABLED	= (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
	EVENT_FILE_FL_TRIGGER_MODE	= (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
	EVENT_FILE_FL_TRIGGER_COND	= (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
	EVENT_FILE_FL_PID_FILTER	= (1 << EVENT_FILE_FL_PID_FILTER_BIT),
	EVENT_FILE_FL_WAS_ENABLED	= (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
	EVENT_FILE_FL_FREED		= (1 << EVENT_FILE_FL_FREED_BIT),
};

struct trace_event_file {
	struct list_head		list;
	struct trace_event_call		*event_call;
	struct event_filter __rcu	*filter;
	struct eventfs_inode		*ei;
	struct trace_array		*tr;
	struct trace_subsystem_dir	*system;
	struct list_head		triggers;

	/*
	 * 32 bit flags:
	 *   bit 0:		enabled
	 *   bit 1:		enabled cmd record
	 *   bit 2:		enable/disable with the soft disable bit
	 *   bit 3:		soft disabled
	 *   bit 4:		trigger enabled
	 *
	 * Note: The bits must be set atomically to prevent races
	 * from other writers. Reads of flags do not need to be in
	 * sync as they occur in critical sections. But the way flags
	 * is currently used, these changes do not affect the code
	 * except that when a change is made, it may have a slight
	 * delay in propagating the changes to other CPUs due to
	 * caching and such. Which is mostly OK ;-)
	 */
	unsigned long		flags;
	refcount_t		ref;	/* ref count for opened files */
	atomic_t		sm_ref;	/* soft-mode reference counter */
	atomic_t		tm_ref;	/* trigger-mode reference counter */
};

#define __TRACE_EVENT_FLAGS(name, value)				\
	static int __init trace_init_flags_##name(void)		\
	{								\
		event_##name.flags |= value;				\
		return 0;						\
	}								\
	early_initcall(trace_init_flags_##name);

#define __TRACE_EVENT_PERF_PERM(name, expr...)				\
	static int perf_perm_##name(struct trace_event_call *tp_event,	\
				    struct perf_event *p_event)		\
	{								\
		return ({ expr; });					\
	}								\
	static int __init trace_init_perf_perm_##name(void)		\
	{								\
		event_##name.perf_perm = &perf_perm_##name;		\
		return 0;						\
	}								\
	early_initcall(trace_init_perf_perm_##name);

#define PERF_MAX_TRACE_SIZE	8192

#define MAX_FILTER_STR_VAL	256U	/* Should handle KSYM_SYMBOL_LEN */

enum event_trigger_type {
	ETT_NONE		= (0),
	ETT_TRACE_ONOFF		= (1 << 0),
	ETT_SNAPSHOT		= (1 << 1),
	ETT_STACKTRACE		= (1 << 2),
	ETT_EVENT_ENABLE	= (1 << 3),
	ETT_EVENT_HIST		= (1 << 4),
	ETT_HIST_ENABLE		= (1 << 5),
	ETT_EVENT_EPROBE	= (1 << 6),
};

extern int filter_match_preds(struct event_filter *filter, void *rec);

extern enum event_trigger_type
event_triggers_call(struct trace_event_file *file,
		    struct trace_buffer *buffer, void *rec,
		    struct ring_buffer_event *event);
extern void
event_triggers_post_call(struct trace_event_file *file,
			 enum event_trigger_type tt);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);

bool __trace_trigger_soft_disabled(struct trace_event_file *file);

/**
 * trace_trigger_soft_disabled - do triggers and test if soft disabled
 * @file: The file pointer of the event to test
 *
 * If any triggers without filters are attached to this event, they
 * will be called here. If the event is soft disabled and has no
 * triggers that require testing the fields, it will return true,
 * otherwise false.
 */
static __always_inline bool
trace_trigger_soft_disabled(struct trace_event_file *file)
{
	unsigned long eflags = file->flags;

	if (likely(!(eflags & (EVENT_FILE_FL_TRIGGER_MODE |
			       EVENT_FILE_FL_SOFT_DISABLED |
			       EVENT_FILE_FL_PID_FILTER))))
		return false;

	if (likely(eflags & EVENT_FILE_FL_TRIGGER_COND))
		return false;

	return __trace_trigger_soft_disabled(file);
}
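/*
 * A sketch of how generated probe code is expected to use this: bail
 * out before reserving any ring-buffer space (assumption: this
 * mirrors the expansion in include/trace/trace_events.h):
 *
 *	if (trace_trigger_soft_disabled(trace_file))
 *		return;
 */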
#ifdef CONFIG_BPF_EVENTS
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx);
int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie);
void perf_event_detach_bpf_prog(struct perf_event *event);
int perf_event_query_prog_array(struct perf_event *event, void __user *info);

struct bpf_raw_tp_link;
int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link);
int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link);

struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name);
void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp);
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr,
			    unsigned long *missed);
int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
#else
static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	return 1;
}

static inline int
perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie)
{
	return -EOPNOTSUPP;
}

static inline void perf_event_detach_bpf_prog(struct perf_event *event) { }

static inline int
perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	return -EOPNOTSUPP;
}
struct bpf_raw_tp_link;
static inline int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
{
	return -EOPNOTSUPP;
}
static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link)
{
	return -EOPNOTSUPP;
}
static inline struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
	return NULL;
}
static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
}
static inline int bpf_get_perf_event_info(const struct perf_event *event,
					  u32 *prog_id, u32 *fd_type,
					  const char **buf, u64 *probe_offset,
					  u64 *probe_addr, unsigned long *missed)
{
	return -EOPNOTSUPP;
}
static inline int
bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}
static inline int
bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}
#endif

enum {
	FILTER_OTHER = 0,
	FILTER_STATIC_STRING,
	FILTER_DYN_STRING,
	FILTER_RDYN_STRING,
	FILTER_PTR_STRING,
	FILTER_TRACE_FN,
	FILTER_CPUMASK,
	FILTER_COMM,
	FILTER_CPU,
	FILTER_STACKTRACE,
};

extern int trace_event_raw_init(struct trace_event_call *call);
extern int trace_define_field(struct trace_event_call *call, const char *type,
			      const char *name, int offset, int size,
			      int is_signed, int filter_type);
extern int trace_add_event_call(struct trace_event_call *call);
extern int trace_remove_event_call(struct trace_event_call *call);
extern int trace_event_get_offsets(struct trace_event_call *call);

int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
int trace_set_clr_event(const char *system, const char *event, int set);
int trace_array_set_clr_event(struct trace_array *tr, const char *system,
			      const char *event, bool enable);
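/*
 * Example (a sketch): enable and later disable sched_switch in the
 * top-level trace instance from kernel code:
 *
 *	ret = trace_set_clr_event("sched", "sched_switch", 1);
 *	...
 *	ret = trace_set_clr_event("sched", "sched_switch", 0);
 */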
/*
 * The double __builtin_constant_p is because gcc will give us an error
 * if we try to allocate the static variable to fmt if it is not a
 * constant. Even with the outer if statement optimizing out.
 */
#define event_trace_printk(ip, fmt, args...)				\
do {									\
	__trace_printk_check_format(fmt, ##args);			\
	tracing_record_cmdline(current);				\
	if (__builtin_constant_p(fmt)) {				\
		static const char *trace_printk_fmt			\
		__section("__trace_printk_fmt") =			\
			__builtin_constant_p(fmt) ? fmt : NULL;		\
									\
		__trace_bprintk(ip, trace_printk_fmt, ##args);		\
	} else								\
		__trace_printk(ip, fmt, ##args);			\
} while (0)

#ifdef CONFIG_PERF_EVENTS
struct perf_event;

DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);

extern int perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
extern int perf_trace_add(struct perf_event *event, int flags);
extern void perf_trace_del(struct perf_event *event, int flags);
#ifdef CONFIG_KPROBE_EVENTS
extern int perf_kprobe_init(struct perf_event *event, bool is_retprobe);
extern void perf_kprobe_destroy(struct perf_event *event);
extern int bpf_get_kprobe_info(const struct perf_event *event,
			       u32 *fd_type, const char **symbol,
			       u64 *probe_offset, u64 *probe_addr,
			       unsigned long *missed,
			       bool perf_type_tracepoint);
#endif
#ifdef CONFIG_UPROBE_EVENTS
extern int perf_uprobe_init(struct perf_event *event,
			    unsigned long ref_ctr_offset, bool is_retprobe);
extern void perf_uprobe_destroy(struct perf_event *event);
extern int bpf_get_uprobe_info(const struct perf_event *event,
			       u32 *fd_type, const char **filename,
			       u64 *probe_offset, u64 *probe_addr,
			       bool perf_type_tracepoint);
#endif
extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
				     char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
void perf_trace_buf_update(void *record, u16 type);
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);

int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie);
void perf_event_free_bpf_prog(struct perf_event *event);

void bpf_trace_run1(struct bpf_raw_tp_link *link, u64 arg1);
void bpf_trace_run2(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2);
void bpf_trace_run3(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		    u64 arg3);
void bpf_trace_run4(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4);
void bpf_trace_run5(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5);
void bpf_trace_run6(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5, u64 arg6);
void bpf_trace_run7(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7);
void bpf_trace_run8(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		    u64 arg8);
void bpf_trace_run9(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		    u64 arg8, u64 arg9);
void bpf_trace_run10(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		     u64 arg8, u64 arg9, u64 arg10);
void bpf_trace_run11(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		     u64 arg8, u64 arg9, u64 arg10, u64 arg11);
void bpf_trace_run12(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2,
		     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
		     u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12);
void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
			       struct trace_event_call *call, u64 count,
			       struct pt_regs *regs, struct hlist_head *head,
			       struct task_struct *task);
static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
		      u64 count, struct pt_regs *regs, void *head,
		      struct task_struct *task)
{
	perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
}

#endif

#define TRACE_EVENT_STR_MAX	512

/*
 * gcc warns that you can not use a va_list in an inlined
 * function. But it lets me make it into a macro :-/
 */
#define __trace_event_vstr_len(fmt, va)			\
({							\
	va_list __ap;					\
	int __ret;					\
							\
	va_copy(__ap, *(va));				\
	__ret = vsnprintf(NULL, 0, fmt, __ap) + 1;	\
	va_end(__ap);					\
							\
	min(__ret, TRACE_EVENT_STR_MAX);		\
})

#endif /* _LINUX_TRACE_EVENT_H */

/*
 * Note: we keep the TRACE_CUSTOM_EVENT outside the include file ifdef protection.
 * This is due to the way trace custom events work. If a file includes two
 * trace event headers under one "CREATE_CUSTOM_TRACE_EVENTS" the first include
 * will override the TRACE_CUSTOM_EVENT and break the second include.
 */

#ifndef TRACE_CUSTOM_EVENT

#define DECLARE_CUSTOM_EVENT_CLASS(name, proto, args, tstruct, assign, print)
#define DEFINE_CUSTOM_EVENT(template, name, proto, args)
#define TRACE_CUSTOM_EVENT(name, proto, args, struct, assign, print)

#endif /* ifdef TRACE_CUSTOM_EVENT (see note above) */