/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licencing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <uapi/linux/perf_event.h>
#include <uapi/linux/bpf_perf_event.h>

/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
	int		(*is_in_guest)(void);
	int		(*is_user_mode)(void);
	unsigned long	(*get_guest_ip)(void);
	void		(*handle_intel_pt_intr)(void);
};

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <linux/cgroup.h>
#include <linux/refcount.h>
#include <linux/security.h>
#include <asm/local.h>

struct perf_callchain_entry {
	__u64				nr;
	__u64				ip[]; /* /proc/sys/kernel/perf_event_max_stack */
};

struct perf_callchain_entry_ctx {
	struct perf_callchain_entry	*entry;
	u32				max_stack;
	u32				nr;
	short				contexts;
	bool				contexts_maxed;
};

typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
				     unsigned long off, unsigned long len);

struct perf_raw_frag {
	union {
		struct perf_raw_frag	*next;
		unsigned long		pad;
	};
	perf_copy_f			copy;
	void				*data;
	u32				size;
} __packed;

struct perf_raw_record {
	struct perf_raw_frag		frag;
	u32				size;
};

/*
 * branch stack layout:
 *  nr: number of taken branches stored in entries[]
 *  hw_idx: The low level index of raw branch records
 *          for the most recent branch.
 *          -1ULL means invalid/unknown.
 *
 * Note that nr can vary from sample to sample
 * branches (to, from) are stored from most recent
 * to least recent, i.e., entries[0] contains the most
 * recent branch.
 * The entries[] is an abstraction of raw branch records,
 * which may not be stored in age order in HW, e.g. Intel LBR.
 * The hw_idx is to expose the low level index of raw
 * branch record for the most recent branch aka entries[0].
 * The hw_idx index is between -1 (unknown) and max depth,
 * which can be retrieved in /sys/devices/cpu/caps/branches.
 * For the architectures whose raw branch records are
 * already stored in age order, the hw_idx should be 0.
 */
struct perf_branch_stack {
	__u64				nr;
	__u64				hw_idx;
	struct perf_branch_entry	entries[];
};
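
/*
 * Illustrative sketch (not part of the original header): walking a branch
 * stack as described above -- entries[0] is the most recent branch, so a
 * simple forward loop visits branches from newest to oldest.  The helper
 * name and the pr_debug() output are made up for illustration only.
 *
 *	static void example_dump_branches(const struct perf_branch_stack *bs)
 *	{
 *		u64 i;
 *
 *		for (i = 0; i < bs->nr; i++)
 *			pr_debug("branch[%llu]: %016llx -> %016llx\n",
 *				 i, bs->entries[i].from, bs->entries[i].to);
 *	}
 */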

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
	u64		config;	/* register value */
	unsigned int	reg;	/* register address or index */
	int		alloc;	/* extra register already allocated */
	int		idx;	/* index in shared_regs->regs[] */
};

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64		config;
			u64		last_tag;
			unsigned long	config_base;
			unsigned long	event_base;
			int		event_base_rdpmc;
			int		idx;
			int		last_cpu;
			int		flags;

			struct hw_perf_event_extra extra_reg;
			struct hw_perf_event_extra branch_reg;
		};
		struct { /* software */
			struct hrtimer	hrtimer;
		};
		struct { /* tracepoint */
			/* for tp_event->class */
			struct list_head	tp_list;
		};
		struct { /* amd_power */
			u64	pwr_acc;
			u64	ptsc;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* breakpoint */
			/*
			 * Crufty hack to avoid the chicken and egg
			 * problem hw_breakpoint has with context
			 * creation and event initialization.
			 */
			struct arch_hw_breakpoint	info;
			struct list_head		bp_list;
		};
#endif
		struct { /* amd_iommu */
			u8	iommu_bank;
			u8	iommu_cntr;
			u16	padding;
			u64	conf;
			u64	conf1;
		};
	};
	/*
	 * If the event is a per task event, this will point to the task in
	 * question. See the comment in perf_event_alloc().
	 */
	struct task_struct		*target;

	/*
	 * PMU would store hardware filter configuration
	 * here.
	 */
	void				*addr_filters;

	/* Last sync'ed generation of filters */
	unsigned long			addr_filters_gen;

/*
 * hw_perf_event::state flags; used to track the PERF_EF_* state.
 */
#define PERF_HES_STOPPED	0x01	/* the counter is stopped */
#define PERF_HES_UPTODATE	0x02	/* event->count up-to-date */
#define PERF_HES_ARCH		0x04

	int				state;

	/*
	 * The last observed hardware counter value, updated with a
	 * local64_cmpxchg() such that pmu::read() can be called nested.
	 */
	local64_t			prev_count;

	/*
	 * The period to start the next sample with.
	 */
	u64				sample_period;

	union {
		struct { /* Sampling */
			/*
			 * The period we started this sample with.
			 */
			u64				last_period;

			/*
			 * However much is left of the current period;
			 * note that this is a full 64bit value and
			 * allows for generation of periods longer
			 * than hardware might allow.
			 */
			local64_t			period_left;
		};
		struct { /* Topdown events counting for context switch */
			u64				saved_metric;
			u64				saved_slots;
		};
	};

	/*
	 * State for throttling the event, see __perf_event_overflow() and
	 * perf_adjust_freq_unthr_context().
	 */
	u64				interrupts_seq;
	u64				interrupts;

	/*
	 * State for freq target events, see __perf_event_overflow() and
	 * perf_adjust_freq_unthr_context().
	 */
	u64				freq_time_stamp;
	u64				freq_count_stamp;
#endif
};
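
/*
 * Illustrative sketch (not part of the original header): the usual way a
 * PMU driver's ->read() path folds a fresh raw counter value into
 * event->count, as described for hw_perf_event::prev_count above.  The
 * function name is made up and "new_raw" stands in for whatever the driver
 * reads from its hardware counter; the cmpxchg loop is what makes the
 * update safe against a nested pmu::read() from NMI context.
 *
 *	static void example_event_update(struct perf_event *event, u64 new_raw)
 *	{
 *		struct hw_perf_event *hwc = &event->hw;
 *		u64 prev_raw;
 *
 *		do {
 *			prev_raw = local64_read(&hwc->prev_count);
 *		} while (local64_cmpxchg(&hwc->prev_count, prev_raw, new_raw) != prev_raw);
 *
 *		local64_add(new_raw - prev_raw, &event->count);
 *	}
 */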

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_PMU_TXN_ADD	0x1	/* txn to add/schedule event on PMU */
#define PERF_PMU_TXN_READ	0x2	/* txn to read event group from PMU */

/**
 * pmu::capabilities flags
 */
#define PERF_PMU_CAP_NO_INTERRUPT		0x0001
#define PERF_PMU_CAP_NO_NMI			0x0002
#define PERF_PMU_CAP_AUX_NO_SG			0x0004
#define PERF_PMU_CAP_EXTENDED_REGS		0x0008
#define PERF_PMU_CAP_EXCLUSIVE			0x0010
#define PERF_PMU_CAP_ITRACE			0x0020
#define PERF_PMU_CAP_HETEROGENEOUS_CPUS		0x0040
#define PERF_PMU_CAP_NO_EXCLUDE			0x0080
#define PERF_PMU_CAP_AUX_OUTPUT			0x0100
#define PERF_PMU_CAP_EXTENDED_HW_TYPE		0x0200

struct perf_output_handle;

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	struct list_head		entry;

	struct module			*module;
	struct device			*dev;
	const struct attribute_group	**attr_groups;
	const struct attribute_group	**attr_update;
	const char			*name;
	int				type;

	/*
	 * various common per-pmu feature flags
	 */
	int				capabilities;

	int __percpu			*pmu_disable_count;
	struct perf_cpu_context __percpu *pmu_cpu_context;
	atomic_t			exclusive_cnt; /* < 0: cpu; > 0: tsk */
	int				task_ctx_nr;
	int				hrtimer_interval_ms;

	/* number of address filters this PMU can do */
	unsigned int			nr_addr_filters;

	/*
	 * Fully disable/enable this PMU, can be used to protect from the PMI
	 * as well as for lazy/batch writing of the MSRs.
	 */
	void (*pmu_enable)		(struct pmu *pmu); /* optional */
	void (*pmu_disable)		(struct pmu *pmu); /* optional */

	/*
	 * Try and initialize the event for this PMU.
	 *
	 * Returns:
	 *  -ENOENT	-- @event is not for this PMU
	 *
	 *  -ENODEV	-- @event is for this PMU but PMU not present
	 *  -EBUSY	-- @event is for this PMU but PMU temporarily unavailable
	 *  -EINVAL	-- @event is for this PMU but @event is not valid
	 *  -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
	 *  -EACCES	-- @event is for this PMU, @event is valid, but no privileges
	 *
	 *  0		-- @event is for this PMU and valid
	 *
	 * Other error return values are allowed.
	 */
	int (*event_init)		(struct perf_event *event);

	/*
	 * Notification that the event was mapped or unmapped.  Called
	 * in the context of the mapping task.
	 */
	void (*event_mapped)		(struct perf_event *event, struct mm_struct *mm); /* optional */
	void (*event_unmapped)		(struct perf_event *event, struct mm_struct *mm); /* optional */

	/*
	 * Flags for ->add()/->del()/->start()/->stop(). There are
	 * matching hw_perf_event::state flags.
	 */
#define PERF_EF_START	0x01		/* start the counter when adding    */
#define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
#define PERF_EF_UPDATE	0x04		/* update the counter when stopping */

	/*
	 * Adds/Removes a counter to/from the PMU, can be done inside a
	 * transaction, see the ->*_txn() methods.
	 *
	 * The add/del callbacks will reserve all hardware resources required
	 * to service the event, this includes any counter constraint
	 * scheduling etc.
	 *
	 * Called with IRQs disabled and the PMU disabled on the CPU the event
	 * is on.
	 *
	 * ->add() called without PERF_EF_START should result in the same state
	 * as ->add() followed by ->stop().
	 *
	 * ->del() must always PERF_EF_UPDATE stop an event. If it calls
	 * ->stop() that must deal with already being stopped without
	 * PERF_EF_UPDATE.
	 */
	int  (*add)			(struct perf_event *event, int flags);
	void (*del)			(struct perf_event *event, int flags);

	/*
	 * Starts/Stops a counter present on the PMU.
	 *
	 * The PMI handler should stop the counter when perf_event_overflow()
	 * returns !0. ->start() will be used to continue.
	 *
	 * Also used to change the sample period.
	 *
	 * Called with IRQs disabled and the PMU disabled on the CPU the event
	 * is on -- will be called from NMI context when the PMU generates
	 * NMIs.
	 *
	 * ->stop() with PERF_EF_UPDATE will read the counter and update
	 *  period/count values like ->read() would.
	 *
	 * ->start() with PERF_EF_RELOAD will reprogram the counter
	 *  value, must be preceded by a ->stop() with PERF_EF_UPDATE.
	 */
	void (*start)			(struct perf_event *event, int flags);
	void (*stop)			(struct perf_event *event, int flags);

	/*
	 * Updates the counter value of the event.
	 *
	 * For sampling capable PMUs this will also update the software period
	 * hw_perf_event::period_left field.
	 */
	void (*read)			(struct perf_event *event);

	/*
	 * Group events scheduling is treated as a transaction, add
	 * group events as a whole and perform one schedulability test.
	 * If the test fails, roll back the whole group
	 *
	 * Start the transaction, after this ->add() doesn't need to
	 * do schedulability tests.
	 *
	 * Optional.
	 */
	void (*start_txn)		(struct pmu *pmu, unsigned int txn_flags);
	/*
	 * If ->start_txn() disabled the ->add() schedulability test
	 * then ->commit_txn() is required to perform one. On success
	 * the transaction is closed. On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 *
	 * Optional.
	 */
	int  (*commit_txn)		(struct pmu *pmu);
	/*
	 * Will cancel the transaction, assumes ->del() is called
	 * for each successful ->add() during the transaction.
	 *
	 * Optional.
	 */
	void (*cancel_txn)		(struct pmu *pmu);

	/*
	 * Will return the value for perf_event_mmap_page::index for this event,
	 * if no implementation is provided it will default to: event->hw.idx + 1.
	 */
	int (*event_idx)		(struct perf_event *event); /* optional */

	/*
	 * context-switches callback
	 */
	void (*sched_task)		(struct perf_event_context *ctx,
					 bool sched_in);

	/*
	 * Kmem cache of PMU specific data
	 */
	struct kmem_cache		*task_ctx_cache;

	/*
	 * PMU specific parts of task perf event context (i.e. ctx->task_ctx_data)
	 * can be synchronized using this function. See Intel LBR callstack support
	 * implementation and Perf core context switch handling callbacks for usage
	 * examples.
	 */
	void (*swap_task_ctx)		(struct perf_event_context *prev,
					 struct perf_event_context *next);
					/* optional */

	/*
	 * Set up pmu-private data structures for an AUX area
	 */
	void *(*setup_aux)		(struct perf_event *event, void **pages,
					 int nr_pages, bool overwrite);
					/* optional */

	/*
	 * Free pmu-private AUX data structures
	 */
	void (*free_aux)		(void *aux); /* optional */

	/*
	 * Take a snapshot of the AUX buffer without touching the event
	 * state, so that preempting ->start()/->stop() callbacks does
	 * not interfere with their logic. Called in PMI context.
	 *
	 * Returns the size of AUX data copied to the output handle.
	 *
	 * Optional.
	 */
	long (*snapshot_aux)		(struct perf_event *event,
					 struct perf_output_handle *handle,
					 unsigned long size);

	/*
	 * Validate address range filters: make sure the HW supports the
	 * requested configuration and number of filters; return 0 if the
	 * supplied filters are valid, -errno otherwise.
	 *
	 * Runs in the context of the ioctl()ing process and is not serialized
	 * with the rest of the PMU callbacks.
	 */
	int (*addr_filters_validate)	(struct list_head *filters);
					/* optional */

	/*
	 * Synchronize address range filter configuration:
	 * translate hw-agnostic filters into hardware configuration in
	 * event::hw::addr_filters.
	 *
	 * Runs as a part of filter sync sequence that is done in ->start()
	 * callback by calling perf_event_addr_filters_sync().
	 *
	 * May (and should) traverse event::addr_filters::list, for which its
	 * caller provides necessary serialization.
	 */
	void (*addr_filters_sync)	(struct perf_event *event);
					/* optional */

	/*
	 * Check if event can be used for aux_output purposes for
	 * events of this PMU.
	 *
	 * Runs from perf_event_open(). Should return 0 for "no match"
	 * or non-zero for "match".
	 */
	int (*aux_output_match)		(struct perf_event *event);
					/* optional */

	/*
	 * Filter events for PMU-specific reasons.
	 */
	int (*filter_match)		(struct perf_event *event); /* optional */

	/*
	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
	 */
	int (*check_period)		(struct perf_event *event, u64 value); /* optional */
};
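
/*
 * Illustrative sketch (not part of the original header): the typical shape
 * of a small counting PMU built on the callbacks above.  The "example_hw_*"
 * helpers stand in for whatever the driver does to its hardware, and
 * example_event_update() is the sketch shown after struct hw_perf_event;
 * the PERF_HES_* / PERF_EF_* handshake and perf_pmu_register() are the part
 * this header actually defines.
 *
 *	static int example_event_init(struct perf_event *event)
 *	{
 *		if (event->attr.type != example_pmu.type)
 *			return -ENOENT;
 *		return 0;
 *	}
 *
 *	static void example_start(struct perf_event *event, int flags)
 *	{
 *		if (flags & PERF_EF_RELOAD)
 *			WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
 *
 *		event->hw.state = 0;
 *		local64_set(&event->hw.prev_count, example_hw_read_raw());
 *		example_hw_enable();
 *	}
 *
 *	static void example_stop(struct perf_event *event, int flags)
 *	{
 *		example_hw_disable();
 *		event->hw.state |= PERF_HES_STOPPED;
 *
 *		if ((flags & PERF_EF_UPDATE) &&
 *		    !(event->hw.state & PERF_HES_UPTODATE)) {
 *			example_event_update(event, example_hw_read_raw());
 *			event->hw.state |= PERF_HES_UPTODATE;
 *		}
 *	}
 *
 *	static int example_add(struct perf_event *event, int flags)
 *	{
 *		event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
 *		if (flags & PERF_EF_START)
 *			example_start(event, PERF_EF_RELOAD);
 *		return 0;
 *	}
 *
 *	static void example_del(struct perf_event *event, int flags)
 *	{
 *		example_stop(event, PERF_EF_UPDATE);
 *	}
 *
 *	static void example_read(struct perf_event *event)
 *	{
 *		example_event_update(event, example_hw_read_raw());
 *	}
 *
 *	static struct pmu example_pmu = {
 *		.task_ctx_nr	= perf_invalid_context,
 *		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
 *		.event_init	= example_event_init,
 *		.add		= example_add,
 *		.del		= example_del,
 *		.start		= example_start,
 *		.stop		= example_stop,
 *		.read		= example_read,
 *	};
 *
 *	ret = perf_pmu_register(&example_pmu, "example", -1);
 */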

enum perf_addr_filter_action_t {
	PERF_ADDR_FILTER_ACTION_STOP = 0,
	PERF_ADDR_FILTER_ACTION_START,
	PERF_ADDR_FILTER_ACTION_FILTER,
};

/**
 * struct perf_addr_filter - address range filter definition
 * @entry:	event's filter list linkage
 * @path:	object file's path for file-based filters
 * @offset:	filter range offset
 * @size:	filter range size (size==0 means single address trigger)
 * @action:	filter/start/stop
 *
 * This is a hardware-agnostic filter configuration as specified by the user.
 */
struct perf_addr_filter {
	struct list_head	entry;
	struct path		path;
	unsigned long		offset;
	unsigned long		size;
	enum perf_addr_filter_action_t	action;
};

/**
 * struct perf_addr_filters_head - container for address range filters
 * @list:	list of filters for this event
 * @lock:	spinlock that serializes accesses to the @list and event's
 *		(and its children's) filter generations.
 * @nr_file_filters:	number of file-based filters
 *
 * A child event will use parent's @list (and therefore @lock), so they are
 * bundled together; see perf_event_addr_filters().
 */
struct perf_addr_filters_head {
	struct list_head	list;
	raw_spinlock_t		lock;
	unsigned int		nr_file_filters;
};

struct perf_addr_filter_range {
	unsigned long		start;
	unsigned long		size;
};

/**
 * enum perf_event_state - the states of an event:
 */
enum perf_event_state {
	PERF_EVENT_STATE_DEAD		= -4,
	PERF_EVENT_STATE_EXIT		= -3,
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	=  0,
	PERF_EVENT_STATE_ACTIVE		=  1,
};

struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
					struct perf_sample_data *,
					struct pt_regs *regs);

/*
 * Event capabilities. For event_caps and groups caps.
 *
 * PERF_EV_CAP_SOFTWARE: Is a software event.
 * PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read
 * from any CPU in the package where it is active.
 * PERF_EV_CAP_SIBLING: An event with this flag must be a group sibling and
 * cannot be a group leader. If an event with this flag is detached from the
 * group it is scheduled out and moved into an unrecoverable ERROR state.
 */
#define PERF_EV_CAP_SOFTWARE		BIT(0)
#define PERF_EV_CAP_READ_ACTIVE_PKG	BIT(1)
#define PERF_EV_CAP_SIBLING		BIT(2)

#define SWEVENT_HLIST_BITS		8
#define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
	struct rcu_head			rcu_head;
};

#define PERF_ATTACH_CONTEXT	0x01
#define PERF_ATTACH_GROUP	0x02
#define PERF_ATTACH_TASK	0x04
#define PERF_ATTACH_TASK_DATA	0x08
#define PERF_ATTACH_ITRACE	0x10
#define PERF_ATTACH_SCHED_CB	0x20
#define PERF_ATTACH_CHILD	0x40

struct perf_cgroup;
struct perf_buffer;

struct pmu_event_list {
	raw_spinlock_t		lock;
	struct list_head	list;
};

#define for_each_sibling_event(sibling, event)			\
	if ((event)->group_leader == (event))			\
		list_for_each_entry((sibling), &(event)->sibling_list, sibling_list)

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	/*
	 * entry onto perf_event_context::event_list;
	 *   modifications require ctx->lock
	 *   RCU safe iterations.
	 */
	struct list_head		event_entry;

	/*
	 * Locked for modification by both ctx->mutex and ctx->lock; holding
	 * either suffices for read.
	 */
	struct list_head		sibling_list;
	struct list_head		active_list;
	/*
	 * Node on the pinned or flexible tree located at the event context;
	 */
	struct rb_node			group_node;
	u64				group_index;
	/*
	 * We need storage to track the entries in perf_pmu_migrate_context; we
	 * cannot use the event_entry because of RCU and we want to keep the
	 * group intact which avoids us using the other two entries.
	 */
	struct list_head		migrate_entry;

	struct hlist_node		hlist_entry;
	struct list_head		active_entry;
	int				nr_siblings;

	/* Not serialized. Only written during event initialization. */
	int				event_caps;
	/* The cumulative AND of all event_caps for events in this group. */
	int				group_caps;

	struct perf_event		*group_leader;
	struct pmu			*pmu;
	void				*pmu_private;

	enum perf_event_state		state;
	unsigned int			attach_state;
	local64_t			count;
	atomic64_t			child_count;

	/*
	 * These are the total time in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 */
	u64				total_time_enabled;
	u64				total_time_running;
	u64				tstamp;

	/*
	 * timestamp shadows the actual context timing but it can
	 * be safely used in NMI interrupt context. It reflects the
	 * context time as it was when the event was last scheduled in,
	 * or when ctx_sched_in failed to schedule the event because we
	 * ran out of PMCs.
	 *
	 * ctx_time already accounts for ctx->timestamp. Therefore to
	 * compute ctx_time for a sample, simply add perf_clock().
	 */
	u64				shadow_ctx_time;

	struct perf_event_attr		attr;
	u16				header_size;
	u16				id_header_size;
	u16				read_size;
	struct hw_perf_event		hw;

	struct perf_event_context	*ctx;
	atomic_long_t			refcount;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * events have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_event		*parent;

	int				oncpu;
	int				cpu;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;

	struct perf_buffer		*rb;
	struct list_head		rb_entry;
	unsigned long			rcu_batches;
	int				rcu_pending;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	unsigned long			pending_addr;	/* SIGTRAP */
	struct irq_work			pending;

	atomic_t			event_limit;

	/* address range filters */
	struct perf_addr_filters_head	addr_filters;
	/* vma address array for file-based filters */
	struct perf_addr_filter_range	*addr_filter_ranges;
	unsigned long			addr_filters_gen;

	/* for aux_output events */
	struct perf_event		*aux_event;

	void (*destroy)(struct perf_event *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;

	u64				(*clock)(void);
	perf_overflow_handler_t		overflow_handler;
	void				*overflow_handler_context;
#ifdef CONFIG_BPF_SYSCALL
	perf_overflow_handler_t		orig_overflow_handler;
	struct bpf_prog			*prog;
	u64				bpf_cookie;
#endif

#ifdef CONFIG_EVENT_TRACING
	struct trace_event_call		*tp_event;
	struct event_filter		*filter;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops		ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp; /* cgroup event is attached to */
#endif

#ifdef CONFIG_SECURITY
	void				*security;
#endif
	struct list_head		sb_list;
#endif /* CONFIG_PERF_EVENTS */
};


struct perf_event_groups {
	struct rb_root	tree;
	u64		index;
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	struct pmu			*pmu;
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t			lock;
	/*
	 * Protect the list of events.  Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex			mutex;

	struct list_head		active_ctx_list;
	struct perf_event_groups	pinned_groups;
	struct perf_event_groups	flexible_groups;
	struct list_head		event_list;

	struct list_head		pinned_active;
	struct list_head		flexible_active;

	int				nr_events;
	int				nr_active;
	int				is_active;
	int				nr_stat;
	int				nr_freq;
	int				rotate_disable;
	/*
	 * Set when nr_events != nr_active, except tolerant to events not
	 * necessary to be active due to scheduling constraints, such as cgroups.
	 */
	int				rotate_necessary;
	refcount_t			refcount;
	struct task_struct		*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64				time;
	u64				timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context	*parent_ctx;
	u64				parent_gen;
	u64				generation;
	int				pin_count;
#ifdef CONFIG_CGROUP_PERF
	int				nr_cgroups;	 /* cgroup evts */
#endif
	void				*task_ctx_data; /* pmu specific data */
	struct rcu_head			rcu_head;
};

/*
 * Number of contexts where an event can trigger:
 *	task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS	4

/**
 * struct perf_event_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
	struct perf_event_context	ctx;
	struct perf_event_context	*task_ctx;
	int				active_oncpu;
	int				exclusive;

	raw_spinlock_t			hrtimer_lock;
	struct hrtimer			hrtimer;
	ktime_t				hrtimer_interval;
	unsigned int			hrtimer_active;

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp;
	struct list_head		cgrp_cpuctx_entry;
#endif

	struct list_head		sched_cb_entry;
	int				sched_cb_usage;

	int				online;
	/*
	 * Per-CPU storage for iterators used in visit_groups_merge. The default
	 * storage is of size 2 to hold the CPU and any CPU event iterators.
	 */
	int				heap_size;
	struct perf_event		**heap;
	struct perf_event		*heap_default[2];
};

struct perf_output_handle {
	struct perf_event		*event;
	struct perf_buffer		*rb;
	unsigned long			wakeup;
	unsigned long			size;
	u64				aux_flags;
	union {
		void			*addr;
		unsigned long		head;
	};
	int				page;
};

struct bpf_perf_event_data_kern {
	bpf_user_pt_regs_t *regs;
	struct perf_sample_data *data;
	struct perf_event *event;
};

#ifdef CONFIG_CGROUP_PERF

/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
	u64				time;
	u64				timestamp;
};

struct perf_cgroup {
	struct cgroup_subsys_state	css;
	struct perf_cgroup_info	__percpu *info;
};

/*
 * Must ensure cgroup is pinned (css_get) before calling
 * this function. In other words, we cannot call this function
 * if there is no cgroup event for the current CPU context.
 */
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
{
	return container_of(task_css_check(task, perf_event_cgrp_id,
					   ctx ? lockdep_is_held(&ctx->lock)
					       : true),
			    struct perf_cgroup, css);
}
#endif /* CONFIG_CGROUP_PERF */

#ifdef CONFIG_PERF_EVENTS

extern void *perf_aux_output_begin(struct perf_output_handle *handle,
				   struct perf_event *event);
extern void perf_aux_output_end(struct perf_output_handle *handle,
				unsigned long size);
extern int perf_aux_output_skip(struct perf_output_handle *handle,
				unsigned long size);
extern void *perf_get_aux(struct perf_output_handle *handle);
extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags);
extern void perf_event_itrace_started(struct perf_event *event);

extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern void __perf_event_task_sched_in(struct task_struct *prev,
				       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
					struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child, u64 clone_flags);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern struct file *perf_event_get(unsigned int fd);
extern const struct perf_event *perf_get_event(struct file *file);
extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern void perf_sched_cb_dec(struct pmu *pmu);
extern void perf_sched_cb_inc(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);

extern void perf_pmu_resched(struct pmu *pmu);

extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				 int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t callback,
				 void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
				     int src_cpu, int dst_cpu);
int perf_event_read_local(struct perf_event *event, u64 *value,
			  u64 *enabled, u64 *running);
extern u64 perf_event_read_value(struct perf_event *event,
				 u64 *enabled, u64 *running);

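/*
 * Illustrative sketch (not part of the original header): creating and
 * reading an in-kernel counter with the interfaces declared above.  Error
 * handling is trimmed and the choice of PERF_COUNT_HW_CPU_CYCLES is
 * arbitrary.
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.size		= sizeof(attr),
 *		.pinned		= 1,
 *	};
 *	struct perf_event *event;
 *	u64 value, enabled, running;
 *
 *	event = perf_event_create_kernel_counter(&attr, raw_smp_processor_id(),
 *						 NULL, NULL, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 *
 *	...
 *	value = perf_event_read_value(event, &enabled, &running);
 *	...
 *	perf_event_release_kernel(event);
 */
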
struct perf_sample_data {
	/*
	 * Fields set by perf_sample_data_init(), group so as to
	 * minimize the cachelines touched.
	 */
	u64				addr;
	struct perf_raw_record		*raw;
	struct perf_branch_stack	*br_stack;
	u64				period;
	union perf_sample_weight	weight;
	u64				txn;
	union perf_mem_data_src		data_src;

	/*
	 * The other fields, optionally {set,used} by
	 * perf_{prepare,output}_sample().
	 */
	u64				type;
	u64				ip;
	struct {
		u32	pid;
		u32	tid;
	}				tid_entry;
	u64				time;
	u64				id;
	u64				stream_id;
	struct {
		u32	cpu;
		u32	reserved;
	}				cpu_entry;
	struct perf_callchain_entry	*callchain;
	u64				aux_size;

	struct perf_regs		regs_user;
	struct perf_regs		regs_intr;
	u64				stack_user_size;

	u64				phys_addr;
	u64				cgroup;
	u64				data_page_size;
	u64				code_page_size;
} ____cacheline_aligned;

/* default value for data source */
#define PERF_MEM_NA (PERF_MEM_S(OP, NA)   |\
		     PERF_MEM_S(LVL, NA)   |\
		     PERF_MEM_S(SNOOP, NA) |\
		     PERF_MEM_S(LOCK, NA)  |\
		     PERF_MEM_S(TLB, NA))

static inline void perf_sample_data_init(struct perf_sample_data *data,
					 u64 addr, u64 period)
{
	/* remaining struct members initialized in perf_prepare_sample() */
	data->addr = addr;
	data->raw  = NULL;
	data->br_stack = NULL;
	data->period = period;
	data->weight.full = 0;
	data->data_src.val = PERF_MEM_NA;
	data->txn = 0;
}

extern void perf_output_sample(struct perf_output_handle *handle,
			       struct perf_event_header *header,
			       struct perf_sample_data *data,
			       struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event,
				struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
			       struct perf_sample_data *data,
			       struct pt_regs *regs);

extern void perf_event_output_forward(struct perf_event *event,
				      struct perf_sample_data *data,
				      struct pt_regs *regs);
extern void perf_event_output_backward(struct perf_event *event,
				       struct perf_sample_data *data,
				       struct pt_regs *regs);
extern int perf_event_output(struct perf_event *event,
			     struct perf_sample_data *data,
			     struct pt_regs *regs);

static inline bool
is_default_overflow_handler(struct perf_event *event)
{
	if (likely(event->overflow_handler == perf_event_output_forward))
		return true;
	if (unlikely(event->overflow_handler == perf_event_output_backward))
		return true;
	return false;
}

extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

extern void
perf_log_lost_samples(struct perf_event *event, u64 lost);

static inline bool event_has_any_exclude_flag(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;

	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv ||
	       attr->exclude_guest || attr->exclude_host;
}

static inline bool is_sampling_event(struct perf_event *event)
{
	return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
	return event->event_caps & PERF_EV_CAP_SOFTWARE;
}

/*
 * Return 1 for event in sw context, 0 for event in hw context
 */
static inline int in_software_context(struct perf_event *event)
{
	return event->ctx->pmu->task_ctx_nr == perf_sw_context;
}

static inline int is_exclusive_pmu(struct pmu *pmu)
{
	return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * When generating a perf sample in-line, instead of from an interrupt /
 * exception, we lack a pt_regs. This is typically used from software events
 * like: SW_CONTEXT_SWITCHES, SW_MIGRATIONS and the tie-in with tracepoints.
 *
 * We typically don't need a full set, but (for x86) do require:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - sp for PERF_SAMPLE_CALLCHAIN
 * - eflags for MISC bits and CALLCHAIN (see: perf_hw_regs())
 *
 * NOTE: assumes @regs is otherwise already 0 filled; this is important for
 * things like PERF_SAMPLE_REGS_INTR.
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	if (static_key_false(&perf_swevent_enabled[event_id]))
		__perf_sw_event(event_id, nr, regs, addr);
}

DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);

/*
 * 'Special' version for the scheduler, it hard assumes no recursion,
 * which is guaranteed by us not actually scheduling inside other swevents
 * because those disable preemption.
 */
static __always_inline void __perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
{
	struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);

	perf_fetch_caller_regs(regs);
	___perf_sw_event(event_id, nr, regs, addr);
}

extern struct static_key_false perf_sched_events;

static __always_inline bool __perf_sw_enabled(int swevt)
{
	return static_key_false(&perf_swevent_enabled[swevt]);
}

static inline void perf_event_task_migrate(struct task_struct *task)
{
	if (__perf_sw_enabled(PERF_COUNT_SW_CPU_MIGRATIONS))
		task->sched_migrated = 1;
}

static inline void perf_event_task_sched_in(struct task_struct *prev,
					    struct task_struct *task)
{
	if (static_branch_unlikely(&perf_sched_events))
		__perf_event_task_sched_in(prev, task);

	if (__perf_sw_enabled(PERF_COUNT_SW_CPU_MIGRATIONS) &&
	    task->sched_migrated) {
		__perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
		task->sched_migrated = 0;
	}
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
					     struct task_struct *next)
{
	if (__perf_sw_enabled(PERF_COUNT_SW_CONTEXT_SWITCHES))
		__perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);

#ifdef CONFIG_CGROUP_PERF
	if (__perf_sw_enabled(PERF_COUNT_SW_CGROUP_SWITCHES) &&
	    perf_cgroup_from_task(prev, NULL) !=
	    perf_cgroup_from_task(next, NULL))
		__perf_sw_event_sched(PERF_COUNT_SW_CGROUP_SWITCHES, 1, 0);
#endif

	if (static_branch_unlikely(&perf_sched_events))
		__perf_event_task_sched_out(prev, next);
}

extern void perf_event_mmap(struct vm_area_struct *vma);

extern void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
			       bool unregister, const char *sym);
extern void perf_event_bpf_event(struct bpf_prog *prog,
				 enum perf_bpf_event_type type,
				 u16 flags);

extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
extern void perf_event_namespaces(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);
extern void perf_event_text_poke(const void *addr,
				 const void *old_bytes, size_t old_len,
				 const void *new_bytes, size_t new_len);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
		   u32 max_stack, bool crosstask, bool add_mark);
extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(int max_stack);
extern void put_callchain_buffers(void);
extern struct perf_callchain_entry *get_callchain_entry(int *rctx);
extern void put_callchain_entry(int rctx);

extern int sysctl_perf_event_max_stack;
extern int sysctl_perf_event_max_contexts_per_stack;

static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip)
{
	if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
		struct perf_callchain_entry *entry = ctx->entry;

		entry->ip[entry->nr++] = ip;
		++ctx->contexts;
		return 0;
	} else {
		ctx->contexts_maxed = true;
		return -1; /* no more room, stop walking the stack */
	}
}

static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
{
	if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
		struct perf_callchain_entry *entry = ctx->entry;

		entry->ip[entry->nr++] = ip;
		++ctx->nr;
		return 0;
	} else {
		return -1; /* no more room, stop walking the stack */
	}
}
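
/*
 * Illustrative sketch (not part of the original header): the usual shape of
 * an architecture's perf_callchain_kernel() implementation.  The unwinder
 * helper ("example_next_frame") is hypothetical; the relevant part is that
 * every recovered return address is handed to perf_callchain_store(), and
 * the walk stops once it reports the entry is full (max stack reached).
 *
 *	void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 *				   struct pt_regs *regs)
 *	{
 *		unsigned long addr = instruction_pointer(regs);
 *
 *		do {
 *			if (perf_callchain_store(entry, addr))
 *				return;
 *		} while (example_next_frame(regs, &addr) == 0);
 *	}
 */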

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;
extern int sysctl_perf_cpu_time_max_percent;

extern void perf_sample_event_took(u64 sample_len_ns);

int perf_proc_update_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos);
int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos);
int perf_event_max_stack_handler(struct ctl_table *table, int write,
				 void *buffer, size_t *lenp, loff_t *ppos);

/* Access to perf_event_open(2) syscall. */
#define PERF_SECURITY_OPEN		0

/* Finer grained perf_event_open(2) access control. */
#define PERF_SECURITY_CPU		1
#define PERF_SECURITY_KERNEL		2
#define PERF_SECURITY_TRACEPOINT	3

static inline int perf_is_paranoid(void)
{
	return sysctl_perf_event_paranoid > -1;
}

static inline int perf_allow_kernel(struct perf_event_attr *attr)
{
	if (sysctl_perf_event_paranoid > 1 && !perfmon_capable())
		return -EACCES;

	return security_perf_event_open(attr, PERF_SECURITY_KERNEL);
}

static inline int perf_allow_cpu(struct perf_event_attr *attr)
{
	if (sysctl_perf_event_paranoid > 0 && !perfmon_capable())
		return -EACCES;

	return security_perf_event_open(attr, PERF_SECURITY_CPU);
}

static inline int perf_allow_tracepoint(struct perf_event_attr *attr)
{
	if (sysctl_perf_event_paranoid > -1 && !perfmon_capable())
		return -EPERM;

	return security_perf_event_open(attr, PERF_SECURITY_TRACEPOINT);
}

extern void perf_event_init(void);
extern void perf_tp_event(u16 event_type, u64 count, void *record,
			  int entry_size, struct pt_regs *regs,
			  struct hlist_head *head, int rctx,
			  struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
		(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif
#ifndef perf_arch_bpf_user_pt_regs
# define perf_arch_bpf_user_pt_regs(regs) regs
#endif

static inline bool has_branch_stack(struct perf_event *event)
{
	return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}

static inline bool needs_branch_stack(struct perf_event *event)
{
	return event->attr.branch_sample_type != 0;
}

static inline bool has_aux(struct perf_event *event)
{
	return event->pmu->setup_aux;
}

static inline bool is_write_backward(struct perf_event *event)
{
	return !!event->attr.write_backward;
}

static inline bool has_addr_filter(struct perf_event *event)
{
	return event->pmu->nr_addr_filters;
}

/*
 * An inherited event uses parent's filters
 */
static inline struct perf_addr_filters_head *
perf_event_addr_filters(struct perf_event *event)
{
	struct perf_addr_filters_head *ifh = &event->addr_filters;

	if (event->parent)
		ifh = &event->parent->addr_filters;

	return ifh;
}

extern void perf_event_addr_filters_sync(struct perf_event *event);

extern int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_sample_data *data,
			     struct perf_event *event, unsigned int size);
extern int perf_output_begin_forward(struct perf_output_handle *handle,
				     struct perf_sample_data *data,
				     struct perf_event *event,
				     unsigned int size);
extern int perf_output_begin_backward(struct perf_output_handle *handle,
				      struct perf_sample_data *data,
				      struct perf_event *event,
				      unsigned int size);

extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
				     const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
				     unsigned int len);
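
/*
 * Illustrative sketch (not part of the original header): how a record is
 * typically emitted through the output-handle API declared above (and the
 * perf_output_put() helper defined later in this file).  "example_record"
 * and PERF_RECORD_EXAMPLE are stand-ins; the begin/put/end sequence and the
 * id-sample helpers are the real interface.
 *
 *	struct perf_output_handle handle;
 *	struct perf_sample_data sample;
 *	struct {
 *		struct perf_event_header	header;
 *		u64				payload;
 *	} example_record = {
 *		.header = {
 *			.type = PERF_RECORD_EXAMPLE,
 *			.misc = 0,
 *			.size = sizeof(example_record),
 *		},
 *		.payload = 0,
 *	};
 *	int ret;
 *
 *	perf_event_header__init_id(&example_record.header, &sample, event);
 *
 *	ret = perf_output_begin(&handle, &sample, event,
 *				example_record.header.size);
 *	if (ret)
 *		return;
 *
 *	perf_output_put(&handle, example_record);
 *	perf_event__output_id_sample(event, &handle, &sample);
 *	perf_output_end(&handle);
 */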
extern long perf_output_copy_aux(struct perf_output_handle *aux_handle,
				 struct perf_output_handle *handle,
				 unsigned long from, unsigned long to);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern u64 perf_swevent_set_period(struct perf_event *event);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_disable_local(struct perf_event *event);
extern void perf_event_disable_inatomic(struct perf_event *event);
extern void perf_event_task_tick(void);
extern int perf_event_account_interrupt(struct perf_event *event);
extern int perf_event_period(struct perf_event *event, u64 value);
extern u64 perf_event_pause(struct perf_event *event, bool reset);
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
perf_aux_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event)				{ return NULL; }
static inline void
perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
									{ }
static inline int
perf_aux_output_skip(struct perf_output_handle *handle,
		     unsigned long size)				{ return -EINVAL; }
static inline void *
perf_get_aux(struct perf_output_handle *handle)				{ return NULL; }
static inline void
perf_event_task_migrate(struct task_struct *task)			{ }
static inline void
perf_event_task_sched_in(struct task_struct *prev,
			 struct task_struct *task)			{ }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
			  struct task_struct *next)			{ }
static inline int perf_event_init_task(struct task_struct *child,
				       u64 clone_flags)			{ return 0; }
static inline void perf_event_exit_task(struct task_struct *child)	{ }
static inline void perf_event_free_task(struct task_struct *task)	{ }
static inline void perf_event_delayed_put(struct task_struct *task)	{ }
static inline struct file *perf_event_get(unsigned int fd)	{ return ERR_PTR(-EINVAL); }
static inline const struct perf_event *perf_get_event(struct file *file)
{
	return ERR_PTR(-EINVAL);
}
static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
{
	return ERR_PTR(-EINVAL);
}
static inline int perf_event_read_local(struct perf_event *event, u64 *value,
					u64 *enabled, u64 *running)
{
	return -EINVAL;
}
static inline void perf_event_print_debug(void)				{ }
static inline int perf_event_task_disable(void)				{ return -EINVAL; }
static inline int perf_event_task_enable(void)				{ return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
	return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ }
static inline void
perf_bp_event(struct perf_event *event, void *data)			{ }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)				{ return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)		{ }

typedef int (perf_ksymbol_get_name_f)(char *name, int name_len, void *data);
static inline void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len,
				      bool unregister, const char *sym)	{ }
static inline void perf_event_bpf_event(struct bpf_prog *prog,
					enum perf_bpf_event_type type,
					u16 flags)			{ }
static inline void perf_event_exec(void)				{ }
static inline void perf_event_comm(struct task_struct *tsk, bool exec)	{ }
static inline void perf_event_namespaces(struct task_struct *tsk)	{ }
static inline void perf_event_fork(struct task_struct *tsk)		{ }
static inline void perf_event_text_poke(const void *addr,
					const void *old_bytes,
					size_t old_len,
					const void *new_bytes,
					size_t new_len)			{ }
static inline void perf_event_init(void)				{ }
static inline int  perf_swevent_get_recursion_context(void)		{ return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)		{ }
static inline u64 perf_swevent_set_period(struct perf_event *event)	{ return 0; }
static inline void perf_event_enable(struct perf_event *event)		{ }
static inline void perf_event_disable(struct perf_event *event)		{ }
static inline int __perf_event_disable(void *info)			{ return -1; }
static inline void perf_event_task_tick(void)				{ }
static inline int perf_event_release_kernel(struct perf_event *event)	{ return 0; }
static inline int perf_event_period(struct perf_event *event, u64 value)
{
	return -EINVAL;
}
static inline u64 perf_event_pause(struct perf_event *event, bool reset)
{
	return 0;
}
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
#else
static inline void perf_restore_debug_store(void)			{ }
#endif

static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
{
	return frag->pad < sizeof(u64);
}

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))

struct perf_pmu_events_attr {
	struct device_attribute attr;
	u64 id;
	const char *event_str;
};

struct perf_pmu_events_ht_attr {
	struct device_attribute			attr;
	u64					id;
	const char				*event_str_ht;
	const char				*event_str_noht;
};

struct perf_pmu_events_hybrid_attr {
	struct device_attribute			attr;
	u64					id;
	const char				*event_str;
	u64					pmu_type;
};

struct perf_pmu_format_hybrid_attr {
	struct device_attribute			attr;
	u64					pmu_type;
};

ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
			      char *page);

#define PMU_EVENT_ATTR(_name, _var, _id, _show)				\
static struct perf_pmu_events_attr _var = {				\
	.attr = __ATTR(_name, 0444, _show, NULL),			\
	.id   =  _id,							\
};

#define PMU_EVENT_ATTR_STRING(_name, _var, _str)			\
static struct perf_pmu_events_attr _var = {				\
	.attr		= __ATTR(_name, 0444, perf_event_sysfs_show, NULL),	\
	.id		= 0,						\
	.event_str	= _str,						\
};

#define PMU_EVENT_ATTR_ID(_name, _show, _id)				\
	(&((struct perf_pmu_events_attr[]) {				\
		{ .attr = __ATTR(_name, 0444, _show, NULL),		\
		  .id = _id, }						\
	})[0].attr.attr)

#define PMU_FORMAT_ATTR(_name, _format)					\
static ssize_t								\
_name##_show(struct device *dev,					\
	     struct device_attribute *attr,				\
	     char *page)						\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
									\
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
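
/*
 * Illustrative sketch (not part of the original header): how PMU drivers
 * commonly use the attribute helpers above to populate pmu::attr_groups.
 * The names ("example_*", the "config:0-7" bit range, the 0x11 event code)
 * are made up; the macros and the "format"/"events" sysfs directory layout
 * are the established convention.
 *
 *	PMU_FORMAT_ATTR(event, "config:0-7");
 *	PMU_EVENT_ATTR_STRING(cycles, example_attr_cycles, "event=0x11");
 *
 *	static struct attribute *example_format_attrs[] = {
 *		&format_attr_event.attr,
 *		NULL,
 *	};
 *
 *	static const struct attribute_group example_format_group = {
 *		.name  = "format",
 *		.attrs = example_format_attrs,
 *	};
 *
 *	static struct attribute *example_event_attrs[] = {
 *		&example_attr_cycles.attr.attr,
 *		NULL,
 *	};
 *
 *	static const struct attribute_group example_events_group = {
 *		.name  = "events",
 *		.attrs = example_event_attrs,
 *	};
 *
 *	static const struct attribute_group *example_attr_groups[] = {
 *		&example_format_group,
 *		&example_events_group,
 *		NULL,
 *	};
 */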

/* Performance counter hotplug functions */
#ifdef CONFIG_PERF_EVENTS
int perf_event_init_cpu(unsigned int cpu);
int perf_event_exit_cpu(unsigned int cpu);
#else
#define perf_event_init_cpu	NULL
#define perf_event_exit_cpu	NULL
#endif

extern void __weak arch_perf_update_userpage(struct perf_event *event,
					     struct perf_event_mmap_page *userpg,
					     u64 now);

#ifdef CONFIG_MMU
extern __weak u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr);
#endif

#endif /* _LINUX_PERF_EVENT_H */