/*
 * Performance counters:
 *
 *  Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licencing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_COUNTER_H
#define _LINUX_PERF_COUNTER_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
	PERF_TYPE_HARDWARE			= 0,
	PERF_TYPE_SOFTWARE			= 1,
	PERF_TYPE_TRACEPOINT			= 2,
	PERF_TYPE_HW_CACHE			= 3,
	PERF_TYPE_RAW				= 4,

	PERF_TYPE_MAX,				/* non-ABI */
};

/*
 * Generalized performance counter event types, used by the
 * attr.config parameter of the sys_perf_counter_open()
 * syscall:
 */
enum perf_hw_id {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_HW_CPU_CYCLES		= 0,
	PERF_COUNT_HW_INSTRUCTIONS		= 1,
	PERF_COUNT_HW_CACHE_REFERENCES		= 2,
	PERF_COUNT_HW_CACHE_MISSES		= 3,
	PERF_COUNT_HW_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_HW_BRANCH_MISSES		= 5,
	PERF_COUNT_HW_BUS_CYCLES		= 6,

	PERF_COUNT_HW_MAX,			/* non-ABI */
};

/*
 * Generalized hardware cache counters:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
	PERF_COUNT_HW_CACHE_L1D			= 0,
	PERF_COUNT_HW_CACHE_L1I			= 1,
	PERF_COUNT_HW_CACHE_LL			= 2,
	PERF_COUNT_HW_CACHE_DTLB		= 3,
	PERF_COUNT_HW_CACHE_ITLB		= 4,
	PERF_COUNT_HW_CACHE_BPU			= 5,

	PERF_COUNT_HW_CACHE_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_id {
	PERF_COUNT_HW_CACHE_OP_READ		= 0,
	PERF_COUNT_HW_CACHE_OP_WRITE		= 1,
	PERF_COUNT_HW_CACHE_OP_PREFETCH		= 2,

	PERF_COUNT_HW_CACHE_OP_MAX,		/* non-ABI */
};

enum perf_hw_cache_op_result_id {
	PERF_COUNT_HW_CACHE_RESULT_ACCESS	= 0,
	PERF_COUNT_HW_CACHE_RESULT_MISS		= 1,

	PERF_COUNT_HW_CACHE_RESULT_MAX,		/* non-ABI */
};
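
/*
 * The three cache enums above are combined into a single config value for
 * PERF_TYPE_HW_CACHE counters.  The (id) | (op << 8) | (result << 16)
 * packing shown below is how the architecture code is expected to decode
 * the generic cache events; it is only a sketch -- check the arch
 * implementation before relying on it.  Selecting L1-D read misses:
 *
 *	__u64 config = (PERF_COUNT_HW_CACHE_L1D) |
 *		       (PERF_COUNT_HW_CACHE_OP_READ     <<  8) |
 *		       (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 *
 * The value then goes into attr.config with attr.type set to
 * PERF_TYPE_HW_CACHE (see struct perf_counter_attr below).
 */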

/*
 * Special "software" counters provided by the kernel, even if the hardware
 * does not support performance counters. These counters measure various
 * physical and software events of the kernel (and allow them to be
 * profiled as well):
 */
enum perf_sw_ids {
	PERF_COUNT_SW_CPU_CLOCK		= 0,
	PERF_COUNT_SW_TASK_CLOCK	= 1,
	PERF_COUNT_SW_PAGE_FAULTS	= 2,
	PERF_COUNT_SW_CONTEXT_SWITCHES	= 3,
	PERF_COUNT_SW_CPU_MIGRATIONS	= 4,
	PERF_COUNT_SW_PAGE_FAULTS_MIN	= 5,
	PERF_COUNT_SW_PAGE_FAULTS_MAJ	= 6,

	PERF_COUNT_SW_MAX,		/* non-ABI */
};

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_counter_sample_format {
	PERF_SAMPLE_IP				= 1U << 0,
	PERF_SAMPLE_TID				= 1U << 1,
	PERF_SAMPLE_TIME			= 1U << 2,
	PERF_SAMPLE_ADDR			= 1U << 3,
	PERF_SAMPLE_GROUP			= 1U << 4,
	PERF_SAMPLE_CALLCHAIN			= 1U << 5,
	PERF_SAMPLE_ID				= 1U << 6,
	PERF_SAMPLE_CPU				= 1U << 7,
	PERF_SAMPLE_PERIOD			= 1U << 8,
	PERF_SAMPLE_STREAM_ID			= 1U << 9,
	PERF_SAMPLE_TP_RECORD			= 1U << 10,

	PERF_SAMPLE_MAX				= 1U << 11,	/* non-ABI */
};

/*
 * Bits that can be set in attr.read_format to request that
 * reads on the counter should return the indicated quantities,
 * in increasing order of bit value, after the counter value.
 */
enum perf_counter_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED		= 1U << 0,
	PERF_FORMAT_TOTAL_TIME_RUNNING		= 1U << 1,
	PERF_FORMAT_ID				= 1U << 2,

	PERF_FORMAT_MAX				= 1U << 3,	/* non-ABI */
};

#define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */

/*
 * Hardware event to monitor via a performance monitoring counter:
 */
struct perf_counter_attr {

	/*
	 * Major type: hardware/software/tracepoint/etc.
	 */
	__u32			type;

	/*
	 * Size of the attr structure, for fwd/bwd compat.
	 */
	__u32			size;

	/*
	 * Type specific configuration information.
	 */
	__u64			config;

	union {
		__u64		sample_period;
		__u64		sample_freq;
	};

	__u64			sample_type;
	__u64			read_format;

	__u64			disabled       :  1, /* off by default        */
				inherit	       :  1, /* children inherit it   */
				pinned	       :  1, /* must always be on PMU */
				exclusive      :  1, /* only group on PMU     */
				exclude_user   :  1, /* don't count user      */
				exclude_kernel :  1, /* ditto kernel          */
				exclude_hv     :  1, /* ditto hypervisor      */
				exclude_idle   :  1, /* don't count when idle */
				mmap           :  1, /* include mmap data     */
				comm	       :  1, /* include comm data     */
				freq           :  1, /* use freq, not period  */
				inherit_stat   :  1, /* per task counts       */
				enable_on_exec :  1, /* next exec enables     */
				task           :  1, /* trace fork/exit       */

				__reserved_1   : 50;

	__u32			wakeup_events;	/* wakeup every n events */
	__u32			__reserved_2;

	__u64			__reserved_3;
};

/*
 * Ioctls that can be done on a perf counter fd:
 */
#define PERF_COUNTER_IOC_ENABLE		_IO ('$', 0)
#define PERF_COUNTER_IOC_DISABLE	_IO ('$', 1)
#define PERF_COUNTER_IOC_REFRESH	_IO ('$', 2)
#define PERF_COUNTER_IOC_RESET		_IO ('$', 3)
#define PERF_COUNTER_IOC_PERIOD		_IOW('$', 4, __u64)

enum perf_counter_ioc_flags {
	PERF_IOC_FLAG_GROUP		= 1U << 0,
};
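
/*
 * Putting the above together -- a sketch of how user space might open,
 * enable and read a counter.  The __NR_perf_counter_open syscall number
 * name and the raw syscall() invocation are assumptions about the
 * user-space environment; error handling and the usual unistd.h /
 * sys/ioctl.h / sys/syscall.h includes are omitted.  The read() layout
 * follows the read_format rule above: counter value first, then the
 * requested quantities in increasing bit order.
 *
 *	struct {
 *		__u64 value;
 *		__u64 time_enabled;	(PERF_FORMAT_TOTAL_TIME_ENABLED)
 *		__u64 time_running;	(PERF_FORMAT_TOTAL_TIME_RUNNING)
 *	} rv;
 *	struct perf_counter_attr attr;
 *	int fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.size	 = sizeof(attr);
 *	attr.type	 = PERF_TYPE_HARDWARE;
 *	attr.config	 = PERF_COUNT_HW_INSTRUCTIONS;
 *	attr.disabled	 = 1;
 *	attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
 *			   PERF_FORMAT_TOTAL_TIME_RUNNING;
 *
 *	(pid 0, cpu -1, group_fd -1, flags 0: monitor the calling task
 *	 on any CPU, not part of a counter group)
 *	fd = syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0);
 *
 *	ioctl(fd, PERF_COUNTER_IOC_RESET, 0);
 *	ioctl(fd, PERF_COUNTER_IOC_ENABLE, 0);
 *	... workload ...
 *	ioctl(fd, PERF_COUNTER_IOC_DISABLE, 0);
 *	read(fd, &rv, sizeof(rv));
 */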

/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_counter_mmap_page {
	__u32	version;		/* version number of this structure */
	__u32	compat_version;		/* lowest version this is compat with */

	/*
	 * Bits needed to read the hw counters in user-space.
	 *
	 *   u32 seq;
	 *   s64 count;
	 *
	 *   do {
	 *     seq = pc->lock;
	 *
	 *     barrier()
	 *     if (pc->index) {
	 *       count = pmc_read(pc->index - 1);
	 *       count += pc->offset;
	 *     } else
	 *       goto regular_read;
	 *
	 *     barrier();
	 *   } while (pc->lock != seq);
	 *
	 * NOTE: for obvious reasons this only works on self-monitoring
	 *       processes.
	 */
	__u32	lock;			/* seqlock for synchronization */
	__u32	index;			/* hardware counter identifier */
	__s64	offset;			/* add to hardware counter value */
	__u64	time_enabled;		/* time counter active */
	__u64	time_running;		/* time counter on cpu */

	/*
	 * Hole for extension of the self monitor capabilities
	 */

	__u64	__reserved[123];	/* align to 1k */

	/*
	 * Control data for the mmap() data buffer.
	 *
	 * User-space reading the @data_head value should issue an rmb()
	 * on SMP-capable platforms after reading this value -- see
	 * perf_counter_wakeup().
	 *
	 * When the mapping is PROT_WRITE the @data_tail value should be
	 * written by userspace to reflect the last read data. In this case
	 * the kernel will not over-write unread data.
	 */
	__u64	data_head;		/* head in the data section */
	__u64	data_tail;		/* user-space written tail */
};

#define PERF_EVENT_MISC_CPUMODE_MASK		(3 << 0)
#define PERF_EVENT_MISC_CPUMODE_UNKNOWN		(0 << 0)
#define PERF_EVENT_MISC_KERNEL			(1 << 0)
#define PERF_EVENT_MISC_USER			(2 << 0)
#define PERF_EVENT_MISC_HYPERVISOR		(3 << 0)

struct perf_event_header {
	__u32	type;
	__u16	misc;
	__u16	size;
};

enum perf_event_type {

	/*
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate userspace IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	char				filename[];
	 * };
	 */
	PERF_EVENT_MMAP			= 1,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				id;
	 *	u64				lost;
	 * };
	 */
	PERF_EVENT_LOST			= 2,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	char				comm[];
	 * };
	 */
	PERF_EVENT_COMM			= 3,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 * };
	 */
	PERF_EVENT_EXIT			= 4,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				time;
	 *	u64				id;
	 *	u64				stream_id;
	 * };
	 */
	PERF_EVENT_THROTTLE		= 5,
	PERF_EVENT_UNTHROTTLE		= 6,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 * };
	 */
	PERF_EVENT_FORK			= 7,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, tid;
	 *	u64				value;
	 *	{ u64	time_enabled;	} && PERF_FORMAT_TOTAL_TIME_ENABLED
	 *	{ u64	time_running;	} && PERF_FORMAT_TOTAL_TIME_RUNNING
	 *	{ u64	parent_id;	} && PERF_FORMAT_ID
	 * };
	 */
	PERF_EVENT_READ			= 8,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	{ u64			ip;	  } && PERF_SAMPLE_IP
	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
	 *	{ u64			time;     } && PERF_SAMPLE_TIME
	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
	 *	{ u64			id;	  } && PERF_SAMPLE_ID
	 *	{ u64			stream_id;} && PERF_SAMPLE_STREAM_ID
	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
	 *
	 *	{ u64			nr;
	 *	  { u64 id, val; }	cnt[nr];  } && PERF_SAMPLE_GROUP
	 *
	 *	{ u64			nr,
	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
	 * };
	 */
	PERF_EVENT_SAMPLE		= 9,

	PERF_EVENT_MAX,			/* non-ABI */
};

enum perf_callchain_context {
	PERF_CONTEXT_HV			= (__u64)-32,
	PERF_CONTEXT_KERNEL		= (__u64)-128,
	PERF_CONTEXT_USER		= (__u64)-512,

	PERF_CONTEXT_GUEST		= (__u64)-2048,
	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
	PERF_CONTEXT_GUEST_USER		= (__u64)-2560,

	PERF_CONTEXT_MAX		= (__u64)-4095,
};
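
/*
 * Sketch of a user-space reader for the mmap()ed data area described
 * above.  It assumes the conventional layout of one perf_counter_mmap_page
 * followed by a power-of-two number of data pages, with data_head and
 * data_tail counting bytes produced/consumed; "base", "page_size",
 * "data_size" and rmb() stand in for whatever the surrounding program
 * provides, and a real reader must also cope with records that wrap
 * around the end of the buffer.
 *
 *	struct perf_counter_mmap_page *pc = base;
 *	void *data = base + page_size;
 *	__u64 head, tail;
 *
 *	head = pc->data_head;
 *	rmb();			(see the note on @data_head above)
 *	tail = pc->data_tail;
 *
 *	while (tail < head) {
 *		struct perf_event_header *hdr;
 *
 *		hdr = data + (tail & (data_size - 1));
 *		(hdr->type selects one of the perf_event_type layouts,
 *		 hdr->size covers the whole record including the header)
 *		tail += hdr->size;
 *	}
 *
 *	pc->data_tail = tail;	(only meaningful for PROT_WRITE mappings)
 */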

#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_COUNTERS
# include <asm/perf_counter.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <asm/atomic.h>

#define PERF_MAX_STACK_DEPTH		255

struct perf_callchain_entry {
	__u64				nr;
	__u64				ip[PERF_MAX_STACK_DEPTH];
};

struct perf_tracepoint_record {
	int				size;
	char				*record;
};

struct task_struct;

/**
 * struct hw_perf_counter - performance counter hardware details:
 */
struct hw_perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	union {
		struct { /* hardware */
			u64		config;
			unsigned long	config_base;
			unsigned long	counter_base;
			int		idx;
		};
		union { /* software */
			atomic64_t	count;
			struct hrtimer	hrtimer;
		};
	};
	atomic64_t			prev_count;
	u64				sample_period;
	u64				last_period;
	atomic64_t			period_left;
	u64				interrupts;

	u64				freq_count;
	u64				freq_interrupts;
	u64				freq_stamp;
#endif
};

struct perf_counter;

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	int (*enable)			(struct perf_counter *counter);
	void (*disable)			(struct perf_counter *counter);
	void (*read)			(struct perf_counter *counter);
	void (*unthrottle)		(struct perf_counter *counter);
};

/**
 * enum perf_counter_active_state - the states of a counter
 */
enum perf_counter_active_state {
	PERF_COUNTER_STATE_ERROR	= -2,
	PERF_COUNTER_STATE_OFF		= -1,
	PERF_COUNTER_STATE_INACTIVE	=  0,
	PERF_COUNTER_STATE_ACTIVE	=  1,
};
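
/*
 * A minimal, hypothetical pmu implementation, only to illustrate how the
 * struct pmu callbacks relate to a counter: hw_perf_counter_init() would
 * hand back a pointer like &dummy_pmu and the core then drives the counter
 * through enable/disable/read.  The names below are made up for this
 * sketch, and .unthrottle would also be needed for counters that can be
 * interrupt-throttled.
 *
 *	static int dummy_pmu_enable(struct perf_counter *counter)
 *	{
 *		(start the underlying count source;
 *		 return 0 on success, nonzero if the counter could not
 *		 be put on the PMU)
 *		return 0;
 *	}
 *
 *	static void dummy_pmu_disable(struct perf_counter *counter)
 *	{
 *		(stop the source and fold the final delta into
 *		 counter->count)
 *	}
 *
 *	static void dummy_pmu_read(struct perf_counter *counter)
 *	{
 *		(update counter->count from the current raw value)
 *	}
 *
 *	static const struct pmu dummy_pmu = {
 *		.enable		= dummy_pmu_enable,
 *		.disable	= dummy_pmu_disable,
 *		.read		= dummy_pmu_read,
 *	};
 */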

struct file;

struct perf_mmap_data {
	struct rcu_head			rcu_head;
	int				nr_pages;	/* nr of data pages  */
	int				writable;	/* are we writable   */
	int				nr_locked;	/* nr pages mlocked  */

	atomic_t			poll;		/* POLL_ for wakeups */
	atomic_t			events;		/* event limit       */

	atomic_long_t			head;		/* write position    */
	atomic_long_t			done_head;	/* completed head    */

	atomic_t			lock;		/* concurrent writes */
	atomic_t			wakeup;		/* needs a wakeup    */
	atomic_t			lost;		/* nr records lost   */

	struct perf_counter_mmap_page	*user_page;
	void				*data_pages[0];
};

struct perf_pending_entry {
	struct perf_pending_entry *next;
	void (*func)(struct perf_pending_entry *);
};

/**
 * struct perf_counter - performance counter kernel representation:
 */
struct perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	struct list_head		list_entry;
	struct list_head		event_entry;
	struct list_head		sibling_list;
	int				nr_siblings;
	struct perf_counter		*group_leader;
	const struct pmu		*pmu;

	enum perf_counter_active_state	state;
	atomic64_t			count;

	/*
	 * These are the total time in nanoseconds that the counter
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task counter)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the counter is in INACTIVE or ACTIVE state.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the counter is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the counter was enabled
	 * tstamp_running: the notional time when the counter was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	counter was scheduled off.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;

	struct perf_counter_attr	attr;
	struct hw_perf_counter		hw;

	struct perf_counter_context	*ctx;
	struct file			*filp;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * counters have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_counter		*parent;

	int				oncpu;
	int				cpu;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;
	struct perf_mmap_data		*data;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct perf_pending_entry	pending;

	atomic_t			event_limit;

	void (*destroy)(struct perf_counter *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;
#endif
};
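
/*
 * total_time_enabled and total_time_running above back the
 * PERF_FORMAT_TOTAL_TIME_* read_format fields.  When running < enabled
 * (the counter was multiplexed off the PMU for part of the time), a
 * consumer conventionally estimates the full-period value by linear
 * extrapolation; a sketch of that user-side scaling, assuming the linear
 * estimate is acceptable:
 *
 *	__u64 estimate(__u64 count, __u64 enabled, __u64 running)
 *	{
 *		if (!running)
 *			return 0;
 *		return (__u64)(count * (double)enabled / running);
 *	}
 */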

/**
 * struct perf_counter_context - counter context structure
 *
 * Used as a container for task counters and CPU counters as well:
 */
struct perf_counter_context {
	/*
	 * Protect the states of the counters in the list,
	 * nr_active, and the list:
	 */
	spinlock_t		lock;
	/*
	 * Protect the list of counters.  Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex		mutex;

	struct list_head	counter_list;
	struct list_head	event_list;
	int			nr_counters;
	int			nr_active;
	int			is_active;
	int			nr_stat;
	atomic_t		refcount;
	struct task_struct	*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64			time;
	u64			timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_counter_context *parent_ctx;
	u64			parent_gen;
	u64			generation;
	int			pin_count;
	struct rcu_head		rcu_head;
};

/**
 * struct perf_cpu_context - per cpu counter context structure
 */
struct perf_cpu_context {
	struct perf_counter_context	ctx;
	struct perf_counter_context	*task_ctx;
	int				active_oncpu;
	int				max_pertask;
	int				exclusive;

	/*
	 * Recursion avoidance:
	 *
	 * task, softirq, irq, nmi context
	 */
	int				recursion[4];
};

#ifdef CONFIG_PERF_COUNTERS

/*
 * Set by architecture code:
 */
extern int perf_max_counters;

extern const struct pmu *hw_perf_counter_init(struct perf_counter *counter);

extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
extern void perf_counter_task_sched_out(struct task_struct *task,
					struct task_struct *next, int cpu);
extern void perf_counter_task_tick(struct task_struct *task, int cpu);
extern int perf_counter_init_task(struct task_struct *child);
extern void perf_counter_exit_task(struct task_struct *child);
extern void perf_counter_free_task(struct task_struct *task);
extern void set_perf_counter_pending(void);
extern void perf_counter_do_pending(void);
extern void perf_counter_print_debug(void);
extern void __perf_disable(void);
extern bool __perf_enable(void);
extern void perf_disable(void);
extern void perf_enable(void);
extern int perf_counter_task_disable(void);
extern int perf_counter_task_enable(void);
extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
				  struct perf_cpu_context *cpuctx,
				  struct perf_counter_context *ctx, int cpu);
extern void perf_counter_update_userpage(struct perf_counter *counter);

struct perf_sample_data {
	struct pt_regs			*regs;
	u64				addr;
	u64				period;
	void				*private;
};

extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
				 struct perf_sample_data *data);

/*
 * Return 1 for a software counter, 0 for a hardware counter
 */
static inline int is_software_counter(struct perf_counter *counter)
{
	return (counter->attr.type != PERF_TYPE_RAW) &&
		(counter->attr.type != PERF_TYPE_HARDWARE) &&
		(counter->attr.type != PERF_TYPE_HW_CACHE);
}

extern atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];

extern void __perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);

static inline void
perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
{
	if (atomic_read(&perf_swcounter_enabled[event]))
		__perf_swcounter_event(event, nr, nmi, regs, addr);
}
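
/*
 * Typical call site for the helper above: kernel code reports a software
 * event at the place where it happens.  As a sketch, a page fault handler
 * (with regs and the fault address in scope) might do:
 *
 *	perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 *
 * The count of 1 adds one occurrence, nmi=0 says we are not in NMI
 * context, and regs/addr feed PERF_SAMPLE_IP / PERF_SAMPLE_ADDR for
 * counters that sample this event.
 */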

extern void __perf_counter_mmap(struct vm_area_struct *vma);

static inline void perf_counter_mmap(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_EXEC)
		__perf_counter_mmap(vma);
}

extern void perf_counter_comm(struct task_struct *tsk);
extern void perf_counter_fork(struct task_struct *tsk);

extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);

extern int sysctl_perf_counter_paranoid;
extern int sysctl_perf_counter_mlock;
extern int sysctl_perf_counter_sample_rate;

extern void perf_counter_init(void);

#ifndef perf_misc_flags
#define perf_misc_flags(regs)	(user_mode(regs) ? PERF_EVENT_MISC_USER : \
				 PERF_EVENT_MISC_KERNEL)
#define perf_instruction_pointer(regs)	instruction_pointer(regs)
#endif

#else
static inline void
perf_counter_task_sched_in(struct task_struct *task, int cpu)		{ }
static inline void
perf_counter_task_sched_out(struct task_struct *task,
			    struct task_struct *next, int cpu)		{ }
static inline void
perf_counter_task_tick(struct task_struct *task, int cpu)		{ }
static inline int perf_counter_init_task(struct task_struct *child)	{ return 0; }
static inline void perf_counter_exit_task(struct task_struct *child)	{ }
static inline void perf_counter_free_task(struct task_struct *task)	{ }
static inline void perf_counter_do_pending(void)			{ }
static inline void perf_counter_print_debug(void)			{ }
static inline void perf_disable(void)					{ }
static inline void perf_enable(void)					{ }
static inline int perf_counter_task_disable(void)	{ return -EINVAL; }
static inline int perf_counter_task_enable(void)	{ return -EINVAL; }

static inline void
perf_swcounter_event(u32 event, u64 nr, int nmi,
		     struct pt_regs *regs, u64 addr)			{ }

static inline void perf_counter_mmap(struct vm_area_struct *vma)	{ }
static inline void perf_counter_comm(struct task_struct *tsk)		{ }
static inline void perf_counter_fork(struct task_struct *tsk)		{ }
static inline void perf_counter_init(void)				{ }
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_COUNTER_H */