
Merge branch 'perfcounters-rename-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'perfcounters-rename-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
perf: Tidy up after the big rename
perf: Do the big rename: Performance Counters -> Performance Events
perf_counter: Rename 'event' to event_id/hw_event
perf_counter: Rename list_entry -> group_entry, counter_list -> group_list

Manually resolved some fairly trivial conflicts with the tracing tree in
include/trace/ftrace.h and kernel/trace/trace_syscalls.c.
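
The rename is visible to userspace: the syscall table entries now point at sys_perf_event_open() and the header moves to <linux/perf_event.h>, as the per-file diffs below show. As a quick illustration (not part of this commit), here is a minimal sketch of opening a CPU-cycle counter through the renamed interface; there is no glibc wrapper, so it goes through syscall(2), and __NR_perf_event_open plus the PERF_* constants are assumed to come from the post-rename headers:

	#include <linux/perf_event.h>
	#include <sys/ioctl.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <string.h>
	#include <stdio.h>

	int main(void)
	{
		struct perf_event_attr attr;
		long long count;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_CPU_CYCLES;
		attr.disabled = 1;
		attr.exclude_kernel = 1;

		/* pid == 0, cpu == -1: count this task on any CPU. */
		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0) {
			perror("perf_event_open");
			return 1;
		}

		ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
		/* ... workload under measurement ... */
		ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

		if (read(fd, &count, sizeof(count)) == sizeof(count))
			printf("cycles: %lld\n", count);
		close(fd);
		return 0;
	}

The same program built against pre-rename headers would use __NR_perf_counter_open and the PERF_COUNTER_IOC_* ioctls; the semantics of the interface are unchanged by this series.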

+3257 -2800
+1 -1
MAINTAINERS
···
 F:	include/linux/delayacct.h
 F:	kernel/delayacct.c
 
-PERFORMANCE COUNTER SUBSYSTEM
+PERFORMANCE EVENTS SUBSYSTEM
 M:	Peter Zijlstra <a.p.zijlstra@chello.nl>
 M:	Paul Mackerras <paulus@samba.org>
 M:	Ingo Molnar <mingo@elte.hu>
+1 -1
arch/arm/include/asm/unistd.h
···
 #define __NR_preadv			(__NR_SYSCALL_BASE+361)
 #define __NR_pwritev			(__NR_SYSCALL_BASE+362)
 #define __NR_rt_tgsigqueueinfo		(__NR_SYSCALL_BASE+363)
-#define __NR_perf_counter_open		(__NR_SYSCALL_BASE+364)
+#define __NR_perf_event_open		(__NR_SYSCALL_BASE+364)
 
 /*
  * The following SWIs are ARM private.
+1 -1
arch/arm/kernel/calls.S
···
 		CALL(sys_preadv)
 		CALL(sys_pwritev)
 		CALL(sys_rt_tgsigqueueinfo)
-		CALL(sys_perf_counter_open)
+		CALL(sys_perf_event_open)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
+1 -1
arch/blackfin/include/asm/unistd.h
···
 #define __NR_preadv		366
 #define __NR_pwritev		367
 #define __NR_rt_tgsigqueueinfo	368
-#define __NR_perf_counter_open	369
+#define __NR_perf_event_open	369
 
 #define __NR_syscall		370
 #define NR_syscalls		__NR_syscall
+1 -1
arch/blackfin/mach-common/entry.S
···
 	.long _sys_preadv
 	.long _sys_pwritev
 	.long _sys_rt_tgsigqueueinfo
-	.long _sys_perf_counter_open
+	.long _sys_perf_event_open
 
 	.rept NR_syscalls-(.-_sys_call_table)/4
 	.long _sys_ni_syscall
+1 -1
arch/frv/Kconfig
···
 	default y
 	select HAVE_IDE
 	select HAVE_ARCH_TRACEHOOK
-	select HAVE_PERF_COUNTERS
+	select HAVE_PERF_EVENTS
 
 config ZONE_DMA
 	bool
+5 -5
arch/frv/include/asm/perf_counter.h → arch/frv/include/asm/perf_event.h
···
-/* FRV performance counter support
+/* FRV performance event support
  *
  * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
···
  * 2 of the Licence, or (at your option) any later version.
  */
 
-#ifndef _ASM_PERF_COUNTER_H
-#define _ASM_PERF_COUNTER_H
+#ifndef _ASM_PERF_EVENT_H
+#define _ASM_PERF_EVENT_H
 
-#define PERF_COUNTER_INDEX_OFFSET	0
+#define PERF_EVENT_INDEX_OFFSET	0
 
-#endif /* _ASM_PERF_COUNTER_H */
+#endif /* _ASM_PERF_EVENT_H */
+1 -1
arch/frv/include/asm/unistd.h
···
 #define __NR_preadv		333
 #define __NR_pwritev		334
 #define __NR_rt_tgsigqueueinfo	335
-#define __NR_perf_counter_open	336
+#define __NR_perf_event_open	336
 
 #ifdef __KERNEL__
 
+1 -1
arch/frv/kernel/entry.S
···
 	.long sys_preadv
 	.long sys_pwritev
 	.long sys_rt_tgsigqueueinfo	/* 335 */
-	.long sys_perf_counter_open
+	.long sys_perf_event_open
 
 syscall_table_size = (. - sys_call_table)
+1 -1
arch/frv/lib/Makefile
···
 lib-y := \
 	__ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \
 	checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \
-	outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o perf_counter.o
+	outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o perf_event.o
+4 -4
arch/frv/lib/perf_counter.c → arch/frv/lib/perf_event.c
···
-/* Performance counter handling
+/* Performance event handling
  *
  * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
···
  * 2 of the Licence, or (at your option) any later version.
  */
 
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 
 /*
- * mark the performance counter as pending
+ * mark the performance event as pending
  */
-void set_perf_counter_pending(void)
+void set_perf_event_pending(void)
 {
 }
+1 -1
arch/m68k/include/asm/unistd.h
···
 #define __NR_preadv		329
 #define __NR_pwritev		330
 #define __NR_rt_tgsigqueueinfo	331
-#define __NR_perf_counter_open	332
+#define __NR_perf_event_open	332
 
 #ifdef __KERNEL__
 
+1 -1
arch/m68k/kernel/entry.S
···
 	.long sys_preadv
 	.long sys_pwritev		/* 330 */
 	.long sys_rt_tgsigqueueinfo
-	.long sys_perf_counter_open
+	.long sys_perf_event_open
 
+1 -1
arch/m68knommu/kernel/syscalltable.S
···
 	.long sys_preadv
 	.long sys_pwritev		/* 330 */
 	.long sys_rt_tgsigqueueinfo
-	.long sys_perf_counter_open
+	.long sys_perf_event_open
 
 	.rept NR_syscalls-(.-sys_call_table)/4
 	.long sys_ni_syscall
+1 -1
arch/microblaze/include/asm/unistd.h
···
 #define __NR_preadv		363 /* new */
 #define __NR_pwritev		364 /* new */
 #define __NR_rt_tgsigqueueinfo	365 /* new */
-#define __NR_perf_counter_open	366 /* new */
+#define __NR_perf_event_open	366 /* new */
 
 #define __NR_syscalls		367
 
+1 -1
arch/microblaze/kernel/syscall_table.S
···
 	.long sys_ni_syscall
 	.long sys_ni_syscall
 	.long sys_rt_tgsigqueueinfo	/* 365 */
-	.long sys_perf_counter_open
+	.long sys_perf_event_open
+3 -3
arch/mips/include/asm/unistd.h
···
 #define __NR_preadv			(__NR_Linux + 330)
 #define __NR_pwritev			(__NR_Linux + 331)
 #define __NR_rt_tgsigqueueinfo		(__NR_Linux + 332)
-#define __NR_perf_counter_open		(__NR_Linux + 333)
+#define __NR_perf_event_open		(__NR_Linux + 333)
 #define __NR_accept4			(__NR_Linux + 334)
 
 /*
···
 #define __NR_preadv			(__NR_Linux + 289)
 #define __NR_pwritev			(__NR_Linux + 290)
 #define __NR_rt_tgsigqueueinfo		(__NR_Linux + 291)
-#define __NR_perf_counter_open		(__NR_Linux + 292)
+#define __NR_perf_event_open		(__NR_Linux + 292)
 #define __NR_accept4			(__NR_Linux + 293)
 
 /*
···
 #define __NR_preadv			(__NR_Linux + 293)
 #define __NR_pwritev			(__NR_Linux + 294)
 #define __NR_rt_tgsigqueueinfo		(__NR_Linux + 295)
-#define __NR_perf_counter_open		(__NR_Linux + 296)
+#define __NR_perf_event_open		(__NR_Linux + 296)
 #define __NR_accept4			(__NR_Linux + 297)
 
 /*
+1 -1
arch/mips/kernel/scall32-o32.S
···
 	sys	sys_preadv		6	/* 4330 */
 	sys	sys_pwritev		6
 	sys	sys_rt_tgsigqueueinfo	4
-	sys	sys_perf_counter_open	5
+	sys	sys_perf_event_open	5
 	sys	sys_accept4		4
 	.endm
 
+1 -1
arch/mips/kernel/scall64-64.S
···
 	PTR	sys_preadv
 	PTR	sys_pwritev			/* 5390 */
 	PTR	sys_rt_tgsigqueueinfo
-	PTR	sys_perf_counter_open
+	PTR	sys_perf_event_open
 	PTR	sys_accept4
 	.size	sys_call_table,.-sys_call_table
+1 -1
arch/mips/kernel/scall64-n32.S
···
 	PTR	sys_preadv
 	PTR	sys_pwritev
 	PTR	compat_sys_rt_tgsigqueueinfo	/* 5295 */
-	PTR	sys_perf_counter_open
+	PTR	sys_perf_event_open
 	PTR	sys_accept4
 	.size	sysn32_call_table,.-sysn32_call_table
+1 -1
arch/mips/kernel/scall64-o32.S
···
 	PTR	compat_sys_preadv		/* 4330 */
 	PTR	compat_sys_pwritev
 	PTR	compat_sys_rt_tgsigqueueinfo
-	PTR	sys_perf_counter_open
+	PTR	sys_perf_event_open
 	PTR	sys_accept4
 	.size	sys_call_table,.-sys_call_table
+1 -1
arch/mn10300/include/asm/unistd.h
···
 #define __NR_preadv		334
 #define __NR_pwritev		335
 #define __NR_rt_tgsigqueueinfo	336
-#define __NR_perf_counter_open	337
+#define __NR_perf_event_open	337
 
 #ifdef __KERNEL__
 
+1 -1
arch/mn10300/kernel/entry.S
···
 	.long sys_preadv
 	.long sys_pwritev		/* 335 */
 	.long sys_rt_tgsigqueueinfo
-	.long sys_perf_counter_open
+	.long sys_perf_event_open
 
 
 nr_syscalls=(.-sys_call_table)/4
+1 -1
arch/parisc/Kconfig
···
 	select RTC_DRV_GENERIC
 	select INIT_ALL_POSSIBLE
 	select BUG
-	select HAVE_PERF_COUNTERS
+	select HAVE_PERF_EVENTS
 	select GENERIC_ATOMIC64 if !64BIT
 	help
 	  The PA-RISC microprocessor is designed by Hewlett-Packard and used
-7
arch/parisc/include/asm/perf_counter.h
···
-#ifndef __ASM_PARISC_PERF_COUNTER_H
-#define __ASM_PARISC_PERF_COUNTER_H
-
-/* parisc only supports software counters through this interface. */
-static inline void set_perf_counter_pending(void) { }
-
-#endif /* __ASM_PARISC_PERF_COUNTER_H */
+7
arch/parisc/include/asm/perf_event.h
···
+#ifndef __ASM_PARISC_PERF_EVENT_H
+#define __ASM_PARISC_PERF_EVENT_H
+
+/* parisc only supports software events through this interface. */
+static inline void set_perf_event_pending(void) { }
+
+#endif /* __ASM_PARISC_PERF_EVENT_H */
+2 -2
arch/parisc/include/asm/unistd.h
···
 #define __NR_preadv		(__NR_Linux + 315)
 #define __NR_pwritev		(__NR_Linux + 316)
 #define __NR_rt_tgsigqueueinfo	(__NR_Linux + 317)
-#define __NR_perf_counter_open	(__NR_Linux + 318)
+#define __NR_perf_event_open	(__NR_Linux + 318)
 
-#define __NR_Linux_syscalls	(__NR_perf_counter_open + 1)
+#define __NR_Linux_syscalls	(__NR_perf_event_open + 1)
 
 
 #define __IGNORE_select		/* newselect */
+1 -1
arch/parisc/kernel/syscall_table.S
···
 	ENTRY_COMP(preadv)		/* 315 */
 	ENTRY_COMP(pwritev)
 	ENTRY_COMP(rt_tgsigqueueinfo)
-	ENTRY_SAME(perf_counter_open)
+	ENTRY_SAME(perf_event_open)
 
 	/* Nothing yet */
 
+1 -1
arch/powerpc/Kconfig
···
 	select HAVE_OPROFILE
 	select HAVE_SYSCALL_WRAPPERS if PPC64
 	select GENERIC_ATOMIC64 if PPC32
-	select HAVE_PERF_COUNTERS
+	select HAVE_PERF_EVENTS
 
 config EARLY_PRINTK
 	bool
+11 -11
arch/powerpc/include/asm/hw_irq.h
···
  */
 struct irq_chip;
 
-#ifdef CONFIG_PERF_COUNTERS
+#ifdef CONFIG_PERF_EVENTS
 
 #ifdef CONFIG_PPC64
-static inline unsigned long test_perf_counter_pending(void)
+static inline unsigned long test_perf_event_pending(void)
 {
 	unsigned long x;
 
 	asm volatile("lbz %0,%1(13)"
 	: "=r" (x)
-	: "i" (offsetof(struct paca_struct, perf_counter_pending)));
+	: "i" (offsetof(struct paca_struct, perf_event_pending)));
 	return x;
 }
 
-static inline void set_perf_counter_pending(void)
+static inline void set_perf_event_pending(void)
 {
 	asm volatile("stb %0,%1(13)" : :
 		"r" (1),
-		"i" (offsetof(struct paca_struct, perf_counter_pending)));
+		"i" (offsetof(struct paca_struct, perf_event_pending)));
 }
 
-static inline void clear_perf_counter_pending(void)
+static inline void clear_perf_event_pending(void)
 {
 	asm volatile("stb %0,%1(13)" : :
 		"r" (0),
-		"i" (offsetof(struct paca_struct, perf_counter_pending)));
+		"i" (offsetof(struct paca_struct, perf_event_pending)));
 }
 #endif /* CONFIG_PPC64 */
 
-#else  /* CONFIG_PERF_COUNTERS */
+#else  /* CONFIG_PERF_EVENTS */
 
-static inline unsigned long test_perf_counter_pending(void)
+static inline unsigned long test_perf_event_pending(void)
 {
 	return 0;
 }
 
-static inline void clear_perf_counter_pending(void) {}
-#endif /* CONFIG_PERF_COUNTERS */
+static inline void clear_perf_event_pending(void) {}
+#endif /* CONFIG_PERF_EVENTS */
 
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_HW_IRQ_H */
+1 -1
arch/powerpc/include/asm/paca.h
···
 	u8 soft_enabled;		/* irq soft-enable flag */
 	u8 hard_enabled;		/* set if irqs are enabled in MSR */
 	u8 io_sync;			/* writel() needs spin_unlock sync */
-	u8 perf_counter_pending;	/* PM interrupt while soft-disabled */
+	u8 perf_event_pending;		/* PM interrupt while soft-disabled */
 
 	/* Stuff for accurate time accounting */
 	u64 user_time;			/* accumulated usermode TB ticks */
+13 -13
arch/powerpc/include/asm/perf_counter.h → arch/powerpc/include/asm/perf_event.h
···
 /*
- * Performance counter support - PowerPC-specific definitions.
+ * Performance event support - PowerPC-specific definitions.
  *
  * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
  *
···
 
 #include <asm/hw_irq.h>
 
-#define MAX_HWCOUNTERS		8
+#define MAX_HWEVENTS		8
 #define MAX_EVENT_ALTERNATIVES	8
-#define MAX_LIMITED_HWCOUNTERS	2
+#define MAX_LIMITED_HWEVENTS	2
 
 /*
  * This struct provides the constants and functions needed to
···
  */
 struct power_pmu {
 	const char	*name;
-	int		n_counter;
+	int		n_event;
 	int		max_alternatives;
 	unsigned long	add_fields;
 	unsigned long	test_adder;
 	int		(*compute_mmcr)(u64 events[], int n_ev,
 				unsigned int hwc[], unsigned long mmcr[]);
-	int		(*get_constraint)(u64 event, unsigned long *mskp,
+	int		(*get_constraint)(u64 event_id, unsigned long *mskp,
 				unsigned long *valp);
-	int		(*get_alternatives)(u64 event, unsigned int flags,
+	int		(*get_alternatives)(u64 event_id, unsigned int flags,
 				u64 alt[]);
 	void		(*disable_pmc)(unsigned int pmc, unsigned long mmcr[]);
-	int		(*limited_pmc_event)(u64 event);
+	int		(*limited_pmc_event)(u64 event_id);
 	u32		flags;
 	int		n_generic;
 	int		*generic_events;
···
 extern unsigned long perf_misc_flags(struct pt_regs *regs);
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 
-#define PERF_COUNTER_INDEX_OFFSET	1
+#define PERF_EVENT_INDEX_OFFSET	1
 
 /*
- * Only override the default definitions in include/linux/perf_counter.h
+ * Only override the default definitions in include/linux/perf_event.h
  * if we have hardware PMU support.
  */
 #ifdef CONFIG_PPC_PERF_CTRS
···
 
 /*
  * The power_pmu.get_constraint function returns a 32/64-bit value and
- * a 32/64-bit mask that express the constraints between this event and
+ * a 32/64-bit mask that express the constraints between this event_id and
  * other events.
  *
  * The value and mask are divided up into (non-overlapping) bitfields
  * of three different types:
  *
  * Select field: this expresses the constraint that some set of bits
- * in MMCR* needs to be set to a specific value for this event.  For a
+ * in MMCR* needs to be set to a specific value for this event_id.  For a
  * select field, the mask contains 1s in every bit of the field, and
  * the value contains a unique value for each possible setting of the
  * MMCR* bits.  The constraint checking code will ensure that two events
···
  * possible.)  For N classes, the field is N+1 bits wide, and each class
  * is assigned one bit from the least-significant N bits.  The mask has
  * only the most-significant bit set, and the value has only the bit
- * for the event's class set.  The test_adder has the least significant
+ * for the event_id's class set.  The test_adder has the least significant
  * bit set in the field.
  *
- * If an event is not subject to the constraint expressed by a particular
+ * If an event_id is not subject to the constraint expressed by a particular
  * field, then it will have 0 in both the mask and value for that field.
  */
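
As an aside (not part of this commit): for the select fields described in the comment above, the pairwise rule reduces to a simple bit test — two event_ids conflict when their masks overlap and their required values disagree in the overlap. A minimal illustrative sketch in C; the kernel's real check, power_check_constraints() in the file below, is more general and also folds in add fields and NAND fields via add_fields/test_adder:

	static int select_fields_compatible(unsigned long maska, unsigned long vala,
					    unsigned long maskb, unsigned long valb)
	{
		/*
		 * Bits set in both masks are MMCR* fields that both
		 * event_ids constrain; their required values must agree
		 * there, or the two event_ids cannot be scheduled on the
		 * PMU at the same time.
		 */
		return ((vala ^ valb) & (maska & maskb)) == 0;
	}
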
+1 -1
arch/powerpc/include/asm/systbl.h
···
 SYSCALL_SPU(dup3)
 SYSCALL_SPU(pipe2)
 SYSCALL(inotify_init1)
-SYSCALL_SPU(perf_counter_open)
+SYSCALL_SPU(perf_event_open)
 COMPAT_SYS_SPU(preadv)
 COMPAT_SYS_SPU(pwritev)
 COMPAT_SYS(rt_tgsigqueueinfo)
+1 -1
arch/powerpc/include/asm/unistd.h
···
 #define __NR_dup3		316
 #define __NR_pipe2		317
 #define __NR_inotify_init1	318
-#define __NR_perf_counter_open	319
+#define __NR_perf_event_open	319
 #define __NR_preadv		320
 #define __NR_pwritev		321
 #define __NR_rt_tgsigqueueinfo	322
+1 -1
arch/powerpc/kernel/Makefile
···
 
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER)	+= ftrace.o
-obj-$(CONFIG_PPC_PERF_CTRS)	+= perf_counter.o perf_callchain.o
+obj-$(CONFIG_PPC_PERF_CTRS)	+= perf_event.o perf_callchain.o
 obj64-$(CONFIG_PPC_PERF_CTRS)	+= power4-pmu.o ppc970-pmu.o power5-pmu.o \
 				   power5+-pmu.o power6-pmu.o power7-pmu.o
 obj32-$(CONFIG_PPC_PERF_CTRS)	+= mpc7450-pmu.o
+1 -1
arch/powerpc/kernel/asm-offsets.c
···
 	DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
 	DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
 	DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
-	DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_counter_pending));
+	DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_event_pending));
 	DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
 #ifdef CONFIG_PPC_MM_SLICES
 	DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
+4 -4
arch/powerpc/kernel/entry_64.S
···
 2:
 	TRACE_AND_RESTORE_IRQ(r5);
 
-#ifdef CONFIG_PERF_COUNTERS
-	/* check paca->perf_counter_pending if we're enabling ints */
+#ifdef CONFIG_PERF_EVENTS
+	/* check paca->perf_event_pending if we're enabling ints */
 	lbz	r3,PACAPERFPEND(r13)
 	and.	r3,r3,r5
 	beq	27f
-	bl	.perf_counter_do_pending
+	bl	.perf_event_do_pending
 27:
-#endif /* CONFIG_PERF_COUNTERS */
+#endif /* CONFIG_PERF_EVENTS */
 
 	/* extract EE bit and use it to restore paca->hard_enabled */
 	ld	r3,_MSR(r1)
+4 -4
arch/powerpc/kernel/irq.c
···
 #include <linux/bootmem.h>
 #include <linux/pci.h>
 #include <linux/debugfs.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
···
 }
 #endif /* CONFIG_PPC_STD_MMU_64 */
 
-	if (test_perf_counter_pending()) {
-		clear_perf_counter_pending();
-		perf_counter_do_pending();
+	if (test_perf_event_pending()) {
+		clear_perf_event_pending();
+		perf_event_do_pending();
 	}
 
 	/*
+1 -1
arch/powerpc/kernel/mpc7450-pmu.c
···
  * 2 of the License, or (at your option) any later version.
  */
 #include <linux/string.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <asm/reg.h>
 #include <asm/cputable.h>
 
+1 -1
arch/powerpc/kernel/perf_callchain.c
···
  */
 #include <linux/kernel.h>
 #include <linux/sched.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/percpu.h>
 #include <linux/uaccess.h>
 #include <linux/mm.h>
+290 -290
arch/powerpc/kernel/perf_counter.c → arch/powerpc/kernel/perf_event.c
··· 1 1 /* 2 - * Performance counter support - powerpc architecture code 2 + * Performance event support - powerpc architecture code 3 3 * 4 4 * Copyright 2008-2009 Paul Mackerras, IBM Corporation. 5 5 * ··· 10 10 */ 11 11 #include <linux/kernel.h> 12 12 #include <linux/sched.h> 13 - #include <linux/perf_counter.h> 13 + #include <linux/perf_event.h> 14 14 #include <linux/percpu.h> 15 15 #include <linux/hardirq.h> 16 16 #include <asm/reg.h> ··· 19 19 #include <asm/firmware.h> 20 20 #include <asm/ptrace.h> 21 21 22 - struct cpu_hw_counters { 23 - int n_counters; 22 + struct cpu_hw_events { 23 + int n_events; 24 24 int n_percpu; 25 25 int disabled; 26 26 int n_added; 27 27 int n_limited; 28 28 u8 pmcs_enabled; 29 - struct perf_counter *counter[MAX_HWCOUNTERS]; 30 - u64 events[MAX_HWCOUNTERS]; 31 - unsigned int flags[MAX_HWCOUNTERS]; 29 + struct perf_event *event[MAX_HWEVENTS]; 30 + u64 events[MAX_HWEVENTS]; 31 + unsigned int flags[MAX_HWEVENTS]; 32 32 unsigned long mmcr[3]; 33 - struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS]; 34 - u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS]; 35 - u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES]; 36 - unsigned long amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES]; 37 - unsigned long avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES]; 33 + struct perf_event *limited_event[MAX_LIMITED_HWEVENTS]; 34 + u8 limited_hwidx[MAX_LIMITED_HWEVENTS]; 35 + u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; 36 + unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; 37 + unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES]; 38 38 }; 39 - DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters); 39 + DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); 40 40 41 41 struct power_pmu *ppmu; 42 42 ··· 47 47 * where the hypervisor bit is forced to 1 (as on Apple G5 processors), 48 48 * then we need to use the FCHV bit to ignore kernel events. 49 49 */ 50 - static unsigned int freeze_counters_kernel = MMCR0_FCS; 50 + static unsigned int freeze_events_kernel = MMCR0_FCS; 51 51 52 52 /* 53 53 * 32-bit doesn't have MMCRA but does have an MMCR2, ··· 122 122 123 123 if (ppmu->flags & PPMU_ALT_SIPR) { 124 124 if (mmcra & POWER6_MMCRA_SIHV) 125 - return PERF_EVENT_MISC_HYPERVISOR; 125 + return PERF_RECORD_MISC_HYPERVISOR; 126 126 return (mmcra & POWER6_MMCRA_SIPR) ? 127 - PERF_EVENT_MISC_USER : PERF_EVENT_MISC_KERNEL; 127 + PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL; 128 128 } 129 129 if (mmcra & MMCRA_SIHV) 130 - return PERF_EVENT_MISC_HYPERVISOR; 131 - return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER : 132 - PERF_EVENT_MISC_KERNEL; 130 + return PERF_RECORD_MISC_HYPERVISOR; 131 + return (mmcra & MMCRA_SIPR) ? PERF_RECORD_MISC_USER : 132 + PERF_RECORD_MISC_KERNEL; 133 133 } 134 134 135 135 /* ··· 152 152 153 153 #endif /* CONFIG_PPC64 */ 154 154 155 - static void perf_counter_interrupt(struct pt_regs *regs); 155 + static void perf_event_interrupt(struct pt_regs *regs); 156 156 157 - void perf_counter_print_debug(void) 157 + void perf_event_print_debug(void) 158 158 { 159 159 } 160 160 ··· 240 240 * Check if a set of events can all go on the PMU at once. 241 241 * If they can't, this will look at alternative codes for the events 242 242 * and see if any combination of alternative codes is feasible. 243 - * The feasible set is returned in event[]. 243 + * The feasible set is returned in event_id[]. 
244 244 */ 245 - static int power_check_constraints(struct cpu_hw_counters *cpuhw, 246 - u64 event[], unsigned int cflags[], 245 + static int power_check_constraints(struct cpu_hw_events *cpuhw, 246 + u64 event_id[], unsigned int cflags[], 247 247 int n_ev) 248 248 { 249 249 unsigned long mask, value, nv; 250 - unsigned long smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS]; 251 - int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS]; 250 + unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS]; 251 + int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS]; 252 252 int i, j; 253 253 unsigned long addf = ppmu->add_fields; 254 254 unsigned long tadd = ppmu->test_adder; 255 255 256 - if (n_ev > ppmu->n_counter) 256 + if (n_ev > ppmu->n_event) 257 257 return -1; 258 258 259 259 /* First see if the events will go on as-is */ 260 260 for (i = 0; i < n_ev; ++i) { 261 261 if ((cflags[i] & PPMU_LIMITED_PMC_REQD) 262 - && !ppmu->limited_pmc_event(event[i])) { 263 - ppmu->get_alternatives(event[i], cflags[i], 262 + && !ppmu->limited_pmc_event(event_id[i])) { 263 + ppmu->get_alternatives(event_id[i], cflags[i], 264 264 cpuhw->alternatives[i]); 265 - event[i] = cpuhw->alternatives[i][0]; 265 + event_id[i] = cpuhw->alternatives[i][0]; 266 266 } 267 - if (ppmu->get_constraint(event[i], &cpuhw->amasks[i][0], 267 + if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0], 268 268 &cpuhw->avalues[i][0])) 269 269 return -1; 270 270 } ··· 287 287 return -1; 288 288 for (i = 0; i < n_ev; ++i) { 289 289 choice[i] = 0; 290 - n_alt[i] = ppmu->get_alternatives(event[i], cflags[i], 290 + n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i], 291 291 cpuhw->alternatives[i]); 292 292 for (j = 1; j < n_alt[i]; ++j) 293 293 ppmu->get_constraint(cpuhw->alternatives[i][j], ··· 307 307 j = choice[i]; 308 308 } 309 309 /* 310 - * See if any alternative k for event i, 310 + * See if any alternative k for event_id i, 311 311 * where k > j, will satisfy the constraints. 312 312 */ 313 313 while (++j < n_alt[i]) { ··· 321 321 if (j >= n_alt[i]) { 322 322 /* 323 323 * No feasible alternative, backtrack 324 - * to event i-1 and continue enumerating its 324 + * to event_id i-1 and continue enumerating its 325 325 * alternatives from where we got up to. 326 326 */ 327 327 if (--i < 0) 328 328 return -1; 329 329 } else { 330 330 /* 331 - * Found a feasible alternative for event i, 332 - * remember where we got up to with this event, 333 - * go on to the next event, and start with 331 + * Found a feasible alternative for event_id i, 332 + * remember where we got up to with this event_id, 333 + * go on to the next event_id, and start with 334 334 * the first alternative for it. 335 335 */ 336 336 choice[i] = j; ··· 345 345 346 346 /* OK, we have a feasible combination, tell the caller the solution */ 347 347 for (i = 0; i < n_ev; ++i) 348 - event[i] = cpuhw->alternatives[i][choice[i]]; 348 + event_id[i] = cpuhw->alternatives[i][choice[i]]; 349 349 return 0; 350 350 } 351 351 352 352 /* 353 - * Check if newly-added counters have consistent settings for 353 + * Check if newly-added events have consistent settings for 354 354 * exclude_{user,kernel,hv} with each other and any previously 355 - * added counters. 355 + * added events. 
356 356 */ 357 - static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[], 357 + static int check_excludes(struct perf_event **ctrs, unsigned int cflags[], 358 358 int n_prev, int n_new) 359 359 { 360 360 int eu = 0, ek = 0, eh = 0; 361 361 int i, n, first; 362 - struct perf_counter *counter; 362 + struct perf_event *event; 363 363 364 364 n = n_prev + n_new; 365 365 if (n <= 1) ··· 371 371 cflags[i] &= ~PPMU_LIMITED_PMC_REQD; 372 372 continue; 373 373 } 374 - counter = ctrs[i]; 374 + event = ctrs[i]; 375 375 if (first) { 376 - eu = counter->attr.exclude_user; 377 - ek = counter->attr.exclude_kernel; 378 - eh = counter->attr.exclude_hv; 376 + eu = event->attr.exclude_user; 377 + ek = event->attr.exclude_kernel; 378 + eh = event->attr.exclude_hv; 379 379 first = 0; 380 - } else if (counter->attr.exclude_user != eu || 381 - counter->attr.exclude_kernel != ek || 382 - counter->attr.exclude_hv != eh) { 380 + } else if (event->attr.exclude_user != eu || 381 + event->attr.exclude_kernel != ek || 382 + event->attr.exclude_hv != eh) { 383 383 return -EAGAIN; 384 384 } 385 385 } ··· 392 392 return 0; 393 393 } 394 394 395 - static void power_pmu_read(struct perf_counter *counter) 395 + static void power_pmu_read(struct perf_event *event) 396 396 { 397 397 s64 val, delta, prev; 398 398 399 - if (!counter->hw.idx) 399 + if (!event->hw.idx) 400 400 return; 401 401 /* 402 402 * Performance monitor interrupts come even when interrupts ··· 404 404 * Therefore we treat them like NMIs. 405 405 */ 406 406 do { 407 - prev = atomic64_read(&counter->hw.prev_count); 407 + prev = atomic64_read(&event->hw.prev_count); 408 408 barrier(); 409 - val = read_pmc(counter->hw.idx); 410 - } while (atomic64_cmpxchg(&counter->hw.prev_count, prev, val) != prev); 409 + val = read_pmc(event->hw.idx); 410 + } while (atomic64_cmpxchg(&event->hw.prev_count, prev, val) != prev); 411 411 412 412 /* The counters are only 32 bits wide */ 413 413 delta = (val - prev) & 0xfffffffful; 414 - atomic64_add(delta, &counter->count); 415 - atomic64_sub(delta, &counter->hw.period_left); 414 + atomic64_add(delta, &event->count); 415 + atomic64_sub(delta, &event->hw.period_left); 416 416 } 417 417 418 418 /* 419 419 * On some machines, PMC5 and PMC6 can't be written, don't respect 420 420 * the freeze conditions, and don't generate interrupts. This tells 421 - * us if `counter' is using such a PMC. 421 + * us if `event' is using such a PMC. 422 422 */ 423 423 static int is_limited_pmc(int pmcnum) 424 424 { ··· 426 426 && (pmcnum == 5 || pmcnum == 6); 427 427 } 428 428 429 - static void freeze_limited_counters(struct cpu_hw_counters *cpuhw, 429 + static void freeze_limited_events(struct cpu_hw_events *cpuhw, 430 430 unsigned long pmc5, unsigned long pmc6) 431 431 { 432 - struct perf_counter *counter; 432 + struct perf_event *event; 433 433 u64 val, prev, delta; 434 434 int i; 435 435 436 436 for (i = 0; i < cpuhw->n_limited; ++i) { 437 - counter = cpuhw->limited_counter[i]; 438 - if (!counter->hw.idx) 437 + event = cpuhw->limited_event[i]; 438 + if (!event->hw.idx) 439 439 continue; 440 - val = (counter->hw.idx == 5) ? pmc5 : pmc6; 441 - prev = atomic64_read(&counter->hw.prev_count); 442 - counter->hw.idx = 0; 440 + val = (event->hw.idx == 5) ? 
pmc5 : pmc6; 441 + prev = atomic64_read(&event->hw.prev_count); 442 + event->hw.idx = 0; 443 443 delta = (val - prev) & 0xfffffffful; 444 - atomic64_add(delta, &counter->count); 444 + atomic64_add(delta, &event->count); 445 445 } 446 446 } 447 447 448 - static void thaw_limited_counters(struct cpu_hw_counters *cpuhw, 448 + static void thaw_limited_events(struct cpu_hw_events *cpuhw, 449 449 unsigned long pmc5, unsigned long pmc6) 450 450 { 451 - struct perf_counter *counter; 451 + struct perf_event *event; 452 452 u64 val; 453 453 int i; 454 454 455 455 for (i = 0; i < cpuhw->n_limited; ++i) { 456 - counter = cpuhw->limited_counter[i]; 457 - counter->hw.idx = cpuhw->limited_hwidx[i]; 458 - val = (counter->hw.idx == 5) ? pmc5 : pmc6; 459 - atomic64_set(&counter->hw.prev_count, val); 460 - perf_counter_update_userpage(counter); 456 + event = cpuhw->limited_event[i]; 457 + event->hw.idx = cpuhw->limited_hwidx[i]; 458 + val = (event->hw.idx == 5) ? pmc5 : pmc6; 459 + atomic64_set(&event->hw.prev_count, val); 460 + perf_event_update_userpage(event); 461 461 } 462 462 } 463 463 464 464 /* 465 - * Since limited counters don't respect the freeze conditions, we 465 + * Since limited events don't respect the freeze conditions, we 466 466 * have to read them immediately after freezing or unfreezing the 467 - * other counters. We try to keep the values from the limited 468 - * counters as consistent as possible by keeping the delay (in 467 + * other events. We try to keep the values from the limited 468 + * events as consistent as possible by keeping the delay (in 469 469 * cycles and instructions) between freezing/unfreezing and reading 470 - * the limited counters as small and consistent as possible. 471 - * Therefore, if any limited counters are in use, we read them 470 + * the limited events as small and consistent as possible. 471 + * Therefore, if any limited events are in use, we read them 472 472 * both, and always in the same order, to minimize variability, 473 473 * and do it inside the same asm that writes MMCR0. 474 474 */ 475 - static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0) 475 + static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0) 476 476 { 477 477 unsigned long pmc5, pmc6; 478 478 ··· 485 485 * Write MMCR0, then read PMC5 and PMC6 immediately. 486 486 * To ensure we don't get a performance monitor interrupt 487 487 * between writing MMCR0 and freezing/thawing the limited 488 - * counters, we first write MMCR0 with the counter overflow 488 + * events, we first write MMCR0 with the event overflow 489 489 * interrupt enable bits turned off. 490 490 */ 491 491 asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5" ··· 495 495 "i" (SPRN_PMC5), "i" (SPRN_PMC6)); 496 496 497 497 if (mmcr0 & MMCR0_FC) 498 - freeze_limited_counters(cpuhw, pmc5, pmc6); 498 + freeze_limited_events(cpuhw, pmc5, pmc6); 499 499 else 500 - thaw_limited_counters(cpuhw, pmc5, pmc6); 500 + thaw_limited_events(cpuhw, pmc5, pmc6); 501 501 502 502 /* 503 - * Write the full MMCR0 including the counter overflow interrupt 503 + * Write the full MMCR0 including the event overflow interrupt 504 504 * enable bits, if necessary. 505 505 */ 506 506 if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE)) ··· 508 508 } 509 509 510 510 /* 511 - * Disable all counters to prevent PMU interrupts and to allow 512 - * counters to be added or removed. 511 + * Disable all events to prevent PMU interrupts and to allow 512 + * events to be added or removed. 
513 513 */ 514 514 void hw_perf_disable(void) 515 515 { 516 - struct cpu_hw_counters *cpuhw; 516 + struct cpu_hw_events *cpuhw; 517 517 unsigned long flags; 518 518 519 519 if (!ppmu) 520 520 return; 521 521 local_irq_save(flags); 522 - cpuhw = &__get_cpu_var(cpu_hw_counters); 522 + cpuhw = &__get_cpu_var(cpu_hw_events); 523 523 524 524 if (!cpuhw->disabled) { 525 525 cpuhw->disabled = 1; ··· 545 545 /* 546 546 * Set the 'freeze counters' bit. 547 547 * The barrier is to make sure the mtspr has been 548 - * executed and the PMU has frozen the counters 548 + * executed and the PMU has frozen the events 549 549 * before we return. 550 550 */ 551 551 write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC); ··· 555 555 } 556 556 557 557 /* 558 - * Re-enable all counters if disable == 0. 559 - * If we were previously disabled and counters were added, then 558 + * Re-enable all events if disable == 0. 559 + * If we were previously disabled and events were added, then 560 560 * put the new config on the PMU. 561 561 */ 562 562 void hw_perf_enable(void) 563 563 { 564 - struct perf_counter *counter; 565 - struct cpu_hw_counters *cpuhw; 564 + struct perf_event *event; 565 + struct cpu_hw_events *cpuhw; 566 566 unsigned long flags; 567 567 long i; 568 568 unsigned long val; 569 569 s64 left; 570 - unsigned int hwc_index[MAX_HWCOUNTERS]; 570 + unsigned int hwc_index[MAX_HWEVENTS]; 571 571 int n_lim; 572 572 int idx; 573 573 574 574 if (!ppmu) 575 575 return; 576 576 local_irq_save(flags); 577 - cpuhw = &__get_cpu_var(cpu_hw_counters); 577 + cpuhw = &__get_cpu_var(cpu_hw_events); 578 578 if (!cpuhw->disabled) { 579 579 local_irq_restore(flags); 580 580 return; ··· 582 582 cpuhw->disabled = 0; 583 583 584 584 /* 585 - * If we didn't change anything, or only removed counters, 585 + * If we didn't change anything, or only removed events, 586 586 * no need to recalculate MMCR* settings and reset the PMCs. 587 587 * Just reenable the PMU with the current MMCR* settings 588 - * (possibly updated for removal of counters). 588 + * (possibly updated for removal of events). 589 589 */ 590 590 if (!cpuhw->n_added) { 591 591 mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); 592 592 mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); 593 - if (cpuhw->n_counters == 0) 593 + if (cpuhw->n_events == 0) 594 594 ppc_set_pmu_inuse(0); 595 595 goto out_enable; 596 596 } 597 597 598 598 /* 599 - * Compute MMCR* values for the new set of counters 599 + * Compute MMCR* values for the new set of events 600 600 */ 601 - if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_counters, hwc_index, 601 + if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index, 602 602 cpuhw->mmcr)) { 603 603 /* shouldn't ever get here */ 604 604 printk(KERN_ERR "oops compute_mmcr failed\n"); ··· 607 607 608 608 /* 609 609 * Add in MMCR0 freeze bits corresponding to the 610 - * attr.exclude_* bits for the first counter. 611 - * We have already checked that all counters have the 612 - * same values for these bits as the first counter. 610 + * attr.exclude_* bits for the first event. 611 + * We have already checked that all events have the 612 + * same values for these bits as the first event. 
613 613 */ 614 - counter = cpuhw->counter[0]; 615 - if (counter->attr.exclude_user) 614 + event = cpuhw->event[0]; 615 + if (event->attr.exclude_user) 616 616 cpuhw->mmcr[0] |= MMCR0_FCP; 617 - if (counter->attr.exclude_kernel) 618 - cpuhw->mmcr[0] |= freeze_counters_kernel; 619 - if (counter->attr.exclude_hv) 617 + if (event->attr.exclude_kernel) 618 + cpuhw->mmcr[0] |= freeze_events_kernel; 619 + if (event->attr.exclude_hv) 620 620 cpuhw->mmcr[0] |= MMCR0_FCHV; 621 621 622 622 /* 623 623 * Write the new configuration to MMCR* with the freeze 624 - * bit set and set the hardware counters to their initial values. 625 - * Then unfreeze the counters. 624 + * bit set and set the hardware events to their initial values. 625 + * Then unfreeze the events. 626 626 */ 627 627 ppc_set_pmu_inuse(1); 628 628 mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); ··· 631 631 | MMCR0_FC); 632 632 633 633 /* 634 - * Read off any pre-existing counters that need to move 634 + * Read off any pre-existing events that need to move 635 635 * to another PMC. 636 636 */ 637 - for (i = 0; i < cpuhw->n_counters; ++i) { 638 - counter = cpuhw->counter[i]; 639 - if (counter->hw.idx && counter->hw.idx != hwc_index[i] + 1) { 640 - power_pmu_read(counter); 641 - write_pmc(counter->hw.idx, 0); 642 - counter->hw.idx = 0; 637 + for (i = 0; i < cpuhw->n_events; ++i) { 638 + event = cpuhw->event[i]; 639 + if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) { 640 + power_pmu_read(event); 641 + write_pmc(event->hw.idx, 0); 642 + event->hw.idx = 0; 643 643 } 644 644 } 645 645 646 646 /* 647 - * Initialize the PMCs for all the new and moved counters. 647 + * Initialize the PMCs for all the new and moved events. 648 648 */ 649 649 cpuhw->n_limited = n_lim = 0; 650 - for (i = 0; i < cpuhw->n_counters; ++i) { 651 - counter = cpuhw->counter[i]; 652 - if (counter->hw.idx) 650 + for (i = 0; i < cpuhw->n_events; ++i) { 651 + event = cpuhw->event[i]; 652 + if (event->hw.idx) 653 653 continue; 654 654 idx = hwc_index[i] + 1; 655 655 if (is_limited_pmc(idx)) { 656 - cpuhw->limited_counter[n_lim] = counter; 656 + cpuhw->limited_event[n_lim] = event; 657 657 cpuhw->limited_hwidx[n_lim] = idx; 658 658 ++n_lim; 659 659 continue; 660 660 } 661 661 val = 0; 662 - if (counter->hw.sample_period) { 663 - left = atomic64_read(&counter->hw.period_left); 662 + if (event->hw.sample_period) { 663 + left = atomic64_read(&event->hw.period_left); 664 664 if (left < 0x80000000L) 665 665 val = 0x80000000L - left; 666 666 } 667 - atomic64_set(&counter->hw.prev_count, val); 668 - counter->hw.idx = idx; 667 + atomic64_set(&event->hw.prev_count, val); 668 + event->hw.idx = idx; 669 669 write_pmc(idx, val); 670 - perf_counter_update_userpage(counter); 670 + perf_event_update_userpage(event); 671 671 } 672 672 cpuhw->n_limited = n_lim; 673 673 cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE; ··· 688 688 local_irq_restore(flags); 689 689 } 690 690 691 - static int collect_events(struct perf_counter *group, int max_count, 692 - struct perf_counter *ctrs[], u64 *events, 691 + static int collect_events(struct perf_event *group, int max_count, 692 + struct perf_event *ctrs[], u64 *events, 693 693 unsigned int *flags) 694 694 { 695 695 int n = 0; 696 - struct perf_counter *counter; 696 + struct perf_event *event; 697 697 698 - if (!is_software_counter(group)) { 698 + if (!is_software_event(group)) { 699 699 if (n >= max_count) 700 700 return -1; 701 701 ctrs[n] = group; 702 - flags[n] = group->hw.counter_base; 702 + flags[n] = group->hw.event_base; 703 703 
events[n++] = group->hw.config; 704 704 } 705 - list_for_each_entry(counter, &group->sibling_list, list_entry) { 706 - if (!is_software_counter(counter) && 707 - counter->state != PERF_COUNTER_STATE_OFF) { 705 + list_for_each_entry(event, &group->sibling_list, list_entry) { 706 + if (!is_software_event(event) && 707 + event->state != PERF_EVENT_STATE_OFF) { 708 708 if (n >= max_count) 709 709 return -1; 710 - ctrs[n] = counter; 711 - flags[n] = counter->hw.counter_base; 712 - events[n++] = counter->hw.config; 710 + ctrs[n] = event; 711 + flags[n] = event->hw.event_base; 712 + events[n++] = event->hw.config; 713 713 } 714 714 } 715 715 return n; 716 716 } 717 717 718 - static void counter_sched_in(struct perf_counter *counter, int cpu) 718 + static void event_sched_in(struct perf_event *event, int cpu) 719 719 { 720 - counter->state = PERF_COUNTER_STATE_ACTIVE; 721 - counter->oncpu = cpu; 722 - counter->tstamp_running += counter->ctx->time - counter->tstamp_stopped; 723 - if (is_software_counter(counter)) 724 - counter->pmu->enable(counter); 720 + event->state = PERF_EVENT_STATE_ACTIVE; 721 + event->oncpu = cpu; 722 + event->tstamp_running += event->ctx->time - event->tstamp_stopped; 723 + if (is_software_event(event)) 724 + event->pmu->enable(event); 725 725 } 726 726 727 727 /* 728 - * Called to enable a whole group of counters. 728 + * Called to enable a whole group of events. 729 729 * Returns 1 if the group was enabled, or -EAGAIN if it could not be. 730 730 * Assumes the caller has disabled interrupts and has 731 731 * frozen the PMU with hw_perf_save_disable. 732 732 */ 733 - int hw_perf_group_sched_in(struct perf_counter *group_leader, 733 + int hw_perf_group_sched_in(struct perf_event *group_leader, 734 734 struct perf_cpu_context *cpuctx, 735 - struct perf_counter_context *ctx, int cpu) 735 + struct perf_event_context *ctx, int cpu) 736 736 { 737 - struct cpu_hw_counters *cpuhw; 737 + struct cpu_hw_events *cpuhw; 738 738 long i, n, n0; 739 - struct perf_counter *sub; 739 + struct perf_event *sub; 740 740 741 741 if (!ppmu) 742 742 return 0; 743 - cpuhw = &__get_cpu_var(cpu_hw_counters); 744 - n0 = cpuhw->n_counters; 745 - n = collect_events(group_leader, ppmu->n_counter - n0, 746 - &cpuhw->counter[n0], &cpuhw->events[n0], 743 + cpuhw = &__get_cpu_var(cpu_hw_events); 744 + n0 = cpuhw->n_events; 745 + n = collect_events(group_leader, ppmu->n_event - n0, 746 + &cpuhw->event[n0], &cpuhw->events[n0], 747 747 &cpuhw->flags[n0]); 748 748 if (n < 0) 749 749 return -EAGAIN; 750 - if (check_excludes(cpuhw->counter, cpuhw->flags, n0, n)) 750 + if (check_excludes(cpuhw->event, cpuhw->flags, n0, n)) 751 751 return -EAGAIN; 752 752 i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n + n0); 753 753 if (i < 0) 754 754 return -EAGAIN; 755 - cpuhw->n_counters = n0 + n; 755 + cpuhw->n_events = n0 + n; 756 756 cpuhw->n_added += n; 757 757 758 758 /* 759 - * OK, this group can go on; update counter states etc., 760 - * and enable any software counters 759 + * OK, this group can go on; update event states etc., 760 + * and enable any software events 761 761 */ 762 762 for (i = n0; i < n0 + n; ++i) 763 - cpuhw->counter[i]->hw.config = cpuhw->events[i]; 763 + cpuhw->event[i]->hw.config = cpuhw->events[i]; 764 764 cpuctx->active_oncpu += n; 765 765 n = 1; 766 - counter_sched_in(group_leader, cpu); 766 + event_sched_in(group_leader, cpu); 767 767 list_for_each_entry(sub, &group_leader->sibling_list, list_entry) { 768 - if (sub->state != PERF_COUNTER_STATE_OFF) { 769 - counter_sched_in(sub, 
cpu); 768 + if (sub->state != PERF_EVENT_STATE_OFF) { 769 + event_sched_in(sub, cpu); 770 770 ++n; 771 771 } 772 772 } ··· 776 776 } 777 777 778 778 /* 779 - * Add a counter to the PMU. 780 - * If all counters are not already frozen, then we disable and 779 + * Add a event to the PMU. 780 + * If all events are not already frozen, then we disable and 781 781 * re-enable the PMU in order to get hw_perf_enable to do the 782 782 * actual work of reconfiguring the PMU. 783 783 */ 784 - static int power_pmu_enable(struct perf_counter *counter) 784 + static int power_pmu_enable(struct perf_event *event) 785 785 { 786 - struct cpu_hw_counters *cpuhw; 786 + struct cpu_hw_events *cpuhw; 787 787 unsigned long flags; 788 788 int n0; 789 789 int ret = -EAGAIN; ··· 792 792 perf_disable(); 793 793 794 794 /* 795 - * Add the counter to the list (if there is room) 795 + * Add the event to the list (if there is room) 796 796 * and check whether the total set is still feasible. 797 797 */ 798 - cpuhw = &__get_cpu_var(cpu_hw_counters); 799 - n0 = cpuhw->n_counters; 800 - if (n0 >= ppmu->n_counter) 798 + cpuhw = &__get_cpu_var(cpu_hw_events); 799 + n0 = cpuhw->n_events; 800 + if (n0 >= ppmu->n_event) 801 801 goto out; 802 - cpuhw->counter[n0] = counter; 803 - cpuhw->events[n0] = counter->hw.config; 804 - cpuhw->flags[n0] = counter->hw.counter_base; 805 - if (check_excludes(cpuhw->counter, cpuhw->flags, n0, 1)) 802 + cpuhw->event[n0] = event; 803 + cpuhw->events[n0] = event->hw.config; 804 + cpuhw->flags[n0] = event->hw.event_base; 805 + if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1)) 806 806 goto out; 807 807 if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1)) 808 808 goto out; 809 809 810 - counter->hw.config = cpuhw->events[n0]; 811 - ++cpuhw->n_counters; 810 + event->hw.config = cpuhw->events[n0]; 811 + ++cpuhw->n_events; 812 812 ++cpuhw->n_added; 813 813 814 814 ret = 0; ··· 819 819 } 820 820 821 821 /* 822 - * Remove a counter from the PMU. 822 + * Remove a event from the PMU. 
823 823 */ 824 - static void power_pmu_disable(struct perf_counter *counter) 824 + static void power_pmu_disable(struct perf_event *event) 825 825 { 826 - struct cpu_hw_counters *cpuhw; 826 + struct cpu_hw_events *cpuhw; 827 827 long i; 828 828 unsigned long flags; 829 829 830 830 local_irq_save(flags); 831 831 perf_disable(); 832 832 833 - power_pmu_read(counter); 833 + power_pmu_read(event); 834 834 835 - cpuhw = &__get_cpu_var(cpu_hw_counters); 836 - for (i = 0; i < cpuhw->n_counters; ++i) { 837 - if (counter == cpuhw->counter[i]) { 838 - while (++i < cpuhw->n_counters) 839 - cpuhw->counter[i-1] = cpuhw->counter[i]; 840 - --cpuhw->n_counters; 841 - ppmu->disable_pmc(counter->hw.idx - 1, cpuhw->mmcr); 842 - if (counter->hw.idx) { 843 - write_pmc(counter->hw.idx, 0); 844 - counter->hw.idx = 0; 835 + cpuhw = &__get_cpu_var(cpu_hw_events); 836 + for (i = 0; i < cpuhw->n_events; ++i) { 837 + if (event == cpuhw->event[i]) { 838 + while (++i < cpuhw->n_events) 839 + cpuhw->event[i-1] = cpuhw->event[i]; 840 + --cpuhw->n_events; 841 + ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr); 842 + if (event->hw.idx) { 843 + write_pmc(event->hw.idx, 0); 844 + event->hw.idx = 0; 845 845 } 846 - perf_counter_update_userpage(counter); 846 + perf_event_update_userpage(event); 847 847 break; 848 848 } 849 849 } 850 850 for (i = 0; i < cpuhw->n_limited; ++i) 851 - if (counter == cpuhw->limited_counter[i]) 851 + if (event == cpuhw->limited_event[i]) 852 852 break; 853 853 if (i < cpuhw->n_limited) { 854 854 while (++i < cpuhw->n_limited) { 855 - cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i]; 855 + cpuhw->limited_event[i-1] = cpuhw->limited_event[i]; 856 856 cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i]; 857 857 } 858 858 --cpuhw->n_limited; 859 859 } 860 - if (cpuhw->n_counters == 0) { 861 - /* disable exceptions if no counters are running */ 860 + if (cpuhw->n_events == 0) { 861 + /* disable exceptions if no events are running */ 862 862 cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE); 863 863 } 864 864 ··· 867 867 } 868 868 869 869 /* 870 - * Re-enable interrupts on a counter after they were throttled 870 + * Re-enable interrupts on a event after they were throttled 871 871 * because they were coming too fast. 
872 872 */ 873 - static void power_pmu_unthrottle(struct perf_counter *counter) 873 + static void power_pmu_unthrottle(struct perf_event *event) 874 874 { 875 875 s64 val, left; 876 876 unsigned long flags; 877 877 878 - if (!counter->hw.idx || !counter->hw.sample_period) 878 + if (!event->hw.idx || !event->hw.sample_period) 879 879 return; 880 880 local_irq_save(flags); 881 881 perf_disable(); 882 - power_pmu_read(counter); 883 - left = counter->hw.sample_period; 884 - counter->hw.last_period = left; 882 + power_pmu_read(event); 883 + left = event->hw.sample_period; 884 + event->hw.last_period = left; 885 885 val = 0; 886 886 if (left < 0x80000000L) 887 887 val = 0x80000000L - left; 888 - write_pmc(counter->hw.idx, val); 889 - atomic64_set(&counter->hw.prev_count, val); 890 - atomic64_set(&counter->hw.period_left, left); 891 - perf_counter_update_userpage(counter); 888 + write_pmc(event->hw.idx, val); 889 + atomic64_set(&event->hw.prev_count, val); 890 + atomic64_set(&event->hw.period_left, left); 891 + perf_event_update_userpage(event); 892 892 perf_enable(); 893 893 local_irq_restore(flags); 894 894 } ··· 901 901 }; 902 902 903 903 /* 904 - * Return 1 if we might be able to put counter on a limited PMC, 904 + * Return 1 if we might be able to put event on a limited PMC, 905 905 * or 0 if not. 906 - * A counter can only go on a limited PMC if it counts something 906 + * A event can only go on a limited PMC if it counts something 907 907 * that a limited PMC can count, doesn't require interrupts, and 908 908 * doesn't exclude any processor mode. 909 909 */ 910 - static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev, 910 + static int can_go_on_limited_pmc(struct perf_event *event, u64 ev, 911 911 unsigned int flags) 912 912 { 913 913 int n; 914 914 u64 alt[MAX_EVENT_ALTERNATIVES]; 915 915 916 - if (counter->attr.exclude_user 917 - || counter->attr.exclude_kernel 918 - || counter->attr.exclude_hv 919 - || counter->attr.sample_period) 916 + if (event->attr.exclude_user 917 + || event->attr.exclude_kernel 918 + || event->attr.exclude_hv 919 + || event->attr.sample_period) 920 920 return 0; 921 921 922 922 if (ppmu->limited_pmc_event(ev)) 923 923 return 1; 924 924 925 925 /* 926 - * The requested event isn't on a limited PMC already; 926 + * The requested event_id isn't on a limited PMC already; 927 927 * see if any alternative code goes on a limited PMC. 928 928 */ 929 929 if (!ppmu->get_alternatives) ··· 936 936 } 937 937 938 938 /* 939 - * Find an alternative event that goes on a normal PMC, if possible, 940 - * and return the event code, or 0 if there is no such alternative. 941 - * (Note: event code 0 is "don't count" on all machines.) 939 + * Find an alternative event_id that goes on a normal PMC, if possible, 940 + * and return the event_id code, or 0 if there is no such alternative. 941 + * (Note: event_id code 0 is "don't count" on all machines.) 942 942 */ 943 943 static u64 normal_pmc_alternative(u64 ev, unsigned long flags) 944 944 { ··· 952 952 return alt[0]; 953 953 } 954 954 955 - /* Number of perf_counters counting hardware events */ 956 - static atomic_t num_counters; 955 + /* Number of perf_events counting hardware events */ 956 + static atomic_t num_events; 957 957 /* Used to avoid races in calling reserve/release_pmc_hardware */ 958 958 static DEFINE_MUTEX(pmc_reserve_mutex); 959 959 960 960 /* 961 - * Release the PMU if this is the last perf_counter. 961 + * Release the PMU if this is the last perf_event. 
962 962 */ 963 - static void hw_perf_counter_destroy(struct perf_counter *counter) 963 + static void hw_perf_event_destroy(struct perf_event *event) 964 964 { 965 - if (!atomic_add_unless(&num_counters, -1, 1)) { 965 + if (!atomic_add_unless(&num_events, -1, 1)) { 966 966 mutex_lock(&pmc_reserve_mutex); 967 - if (atomic_dec_return(&num_counters) == 0) 967 + if (atomic_dec_return(&num_events) == 0) 968 968 release_pmc_hardware(); 969 969 mutex_unlock(&pmc_reserve_mutex); 970 970 } 971 971 } 972 972 973 973 /* 974 - * Translate a generic cache event config to a raw event code. 974 + * Translate a generic cache event_id config to a raw event_id code. 975 975 */ 976 976 static int hw_perf_cache_event(u64 config, u64 *eventp) 977 977 { ··· 1000 1000 return 0; 1001 1001 } 1002 1002 1003 - const struct pmu *hw_perf_counter_init(struct perf_counter *counter) 1003 + const struct pmu *hw_perf_event_init(struct perf_event *event) 1004 1004 { 1005 1005 u64 ev; 1006 1006 unsigned long flags; 1007 - struct perf_counter *ctrs[MAX_HWCOUNTERS]; 1008 - u64 events[MAX_HWCOUNTERS]; 1009 - unsigned int cflags[MAX_HWCOUNTERS]; 1007 + struct perf_event *ctrs[MAX_HWEVENTS]; 1008 + u64 events[MAX_HWEVENTS]; 1009 + unsigned int cflags[MAX_HWEVENTS]; 1010 1010 int n; 1011 1011 int err; 1012 - struct cpu_hw_counters *cpuhw; 1012 + struct cpu_hw_events *cpuhw; 1013 1013 1014 1014 if (!ppmu) 1015 1015 return ERR_PTR(-ENXIO); 1016 - switch (counter->attr.type) { 1016 + switch (event->attr.type) { 1017 1017 case PERF_TYPE_HARDWARE: 1018 - ev = counter->attr.config; 1018 + ev = event->attr.config; 1019 1019 if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) 1020 1020 return ERR_PTR(-EOPNOTSUPP); 1021 1021 ev = ppmu->generic_events[ev]; 1022 1022 break; 1023 1023 case PERF_TYPE_HW_CACHE: 1024 - err = hw_perf_cache_event(counter->attr.config, &ev); 1024 + err = hw_perf_cache_event(event->attr.config, &ev); 1025 1025 if (err) 1026 1026 return ERR_PTR(err); 1027 1027 break; 1028 1028 case PERF_TYPE_RAW: 1029 - ev = counter->attr.config; 1029 + ev = event->attr.config; 1030 1030 break; 1031 1031 default: 1032 1032 return ERR_PTR(-EINVAL); 1033 1033 } 1034 - counter->hw.config_base = ev; 1035 - counter->hw.idx = 0; 1034 + event->hw.config_base = ev; 1035 + event->hw.idx = 0; 1036 1036 1037 1037 /* 1038 1038 * If we are not running on a hypervisor, force the ··· 1040 1040 * the user set it to. 1041 1041 */ 1042 1042 if (!firmware_has_feature(FW_FEATURE_LPAR)) 1043 - counter->attr.exclude_hv = 0; 1043 + event->attr.exclude_hv = 0; 1044 1044 1045 1045 /* 1046 - * If this is a per-task counter, then we can use 1046 + * If this is a per-task event, then we can use 1047 1047 * PM_RUN_* events interchangeably with their non RUN_* 1048 1048 * equivalents, e.g. PM_RUN_CYC instead of PM_CYC. 1049 1049 * XXX we should check if the task is an idle task. 1050 1050 */ 1051 1051 flags = 0; 1052 - if (counter->ctx->task) 1052 + if (event->ctx->task) 1053 1053 flags |= PPMU_ONLY_COUNT_RUN; 1054 1054 1055 1055 /* 1056 - * If this machine has limited counters, check whether this 1057 - * event could go on a limited counter. 1056 + * If this machine has limited events, check whether this 1057 + * event_id could go on a limited event. 
1058 1058 */ 1059 1059 if (ppmu->flags & PPMU_LIMITED_PMC5_6) { 1060 - if (can_go_on_limited_pmc(counter, ev, flags)) { 1060 + if (can_go_on_limited_pmc(event, ev, flags)) { 1061 1061 flags |= PPMU_LIMITED_PMC_OK; 1062 1062 } else if (ppmu->limited_pmc_event(ev)) { 1063 1063 /* 1064 - * The requested event is on a limited PMC, 1064 + * The requested event_id is on a limited PMC, 1065 1065 * but we can't use a limited PMC; see if any 1066 1066 * alternative goes on a normal PMC. 1067 1067 */ ··· 1073 1073 1074 1074 /* 1075 1075 * If this is in a group, check if it can go on with all the 1076 - * other hardware counters in the group. We assume the counter 1076 + * other hardware events in the group. We assume the event 1077 1077 * hasn't been linked into its leader's sibling list at this point. 1078 1078 */ 1079 1079 n = 0; 1080 - if (counter->group_leader != counter) { 1081 - n = collect_events(counter->group_leader, ppmu->n_counter - 1, 1080 + if (event->group_leader != event) { 1081 + n = collect_events(event->group_leader, ppmu->n_event - 1, 1082 1082 ctrs, events, cflags); 1083 1083 if (n < 0) 1084 1084 return ERR_PTR(-EINVAL); 1085 1085 } 1086 1086 events[n] = ev; 1087 - ctrs[n] = counter; 1087 + ctrs[n] = event; 1088 1088 cflags[n] = flags; 1089 1089 if (check_excludes(ctrs, cflags, n, 1)) 1090 1090 return ERR_PTR(-EINVAL); 1091 1091 1092 - cpuhw = &get_cpu_var(cpu_hw_counters); 1092 + cpuhw = &get_cpu_var(cpu_hw_events); 1093 1093 err = power_check_constraints(cpuhw, events, cflags, n + 1); 1094 - put_cpu_var(cpu_hw_counters); 1094 + put_cpu_var(cpu_hw_events); 1095 1095 if (err) 1096 1096 return ERR_PTR(-EINVAL); 1097 1097 1098 - counter->hw.config = events[n]; 1099 - counter->hw.counter_base = cflags[n]; 1100 - counter->hw.last_period = counter->hw.sample_period; 1101 - atomic64_set(&counter->hw.period_left, counter->hw.last_period); 1098 + event->hw.config = events[n]; 1099 + event->hw.event_base = cflags[n]; 1100 + event->hw.last_period = event->hw.sample_period; 1101 + atomic64_set(&event->hw.period_left, event->hw.last_period); 1102 1102 1103 1103 /* 1104 1104 * See if we need to reserve the PMU. 1105 - * If no counters are currently in use, then we have to take a 1105 + * If no events are currently in use, then we have to take a 1106 1106 * mutex to ensure that we don't race with another task doing 1107 1107 * reserve_pmc_hardware or release_pmc_hardware. 1108 1108 */ 1109 1109 err = 0; 1110 - if (!atomic_inc_not_zero(&num_counters)) { 1110 + if (!atomic_inc_not_zero(&num_events)) { 1111 1111 mutex_lock(&pmc_reserve_mutex); 1112 - if (atomic_read(&num_counters) == 0 && 1113 - reserve_pmc_hardware(perf_counter_interrupt)) 1112 + if (atomic_read(&num_events) == 0 && 1113 + reserve_pmc_hardware(perf_event_interrupt)) 1114 1114 err = -EBUSY; 1115 1115 else 1116 - atomic_inc(&num_counters); 1116 + atomic_inc(&num_events); 1117 1117 mutex_unlock(&pmc_reserve_mutex); 1118 1118 } 1119 - counter->destroy = hw_perf_counter_destroy; 1119 + event->destroy = hw_perf_event_destroy; 1120 1120 1121 1121 if (err) 1122 1122 return ERR_PTR(err); ··· 1128 1128 * things if requested. Note that interrupts are hard-disabled 1129 1129 * here so there is no possibility of being interrupted. 
1130 1130 */ 1131 - static void record_and_restart(struct perf_counter *counter, unsigned long val, 1131 + static void record_and_restart(struct perf_event *event, unsigned long val, 1132 1132 struct pt_regs *regs, int nmi) 1133 1133 { 1134 - u64 period = counter->hw.sample_period; 1134 + u64 period = event->hw.sample_period; 1135 1135 s64 prev, delta, left; 1136 1136 int record = 0; 1137 1137 1138 1138 /* we don't have to worry about interrupts here */ 1139 - prev = atomic64_read(&counter->hw.prev_count); 1139 + prev = atomic64_read(&event->hw.prev_count); 1140 1140 delta = (val - prev) & 0xfffffffful; 1141 - atomic64_add(delta, &counter->count); 1141 + atomic64_add(delta, &event->count); 1142 1142 1143 1143 /* 1144 - * See if the total period for this counter has expired, 1144 + * See if the total period for this event has expired, 1145 1145 * and update for the next period. 1146 1146 */ 1147 1147 val = 0; 1148 - left = atomic64_read(&counter->hw.period_left) - delta; 1148 + left = atomic64_read(&event->hw.period_left) - delta; 1149 1149 if (period) { 1150 1150 if (left <= 0) { 1151 1151 left += period; ··· 1163 1163 if (record) { 1164 1164 struct perf_sample_data data = { 1165 1165 .addr = 0, 1166 - .period = counter->hw.last_period, 1166 + .period = event->hw.last_period, 1167 1167 }; 1168 1168 1169 - if (counter->attr.sample_type & PERF_SAMPLE_ADDR) 1169 + if (event->attr.sample_type & PERF_SAMPLE_ADDR) 1170 1170 perf_get_data_addr(regs, &data.addr); 1171 1171 1172 - if (perf_counter_overflow(counter, nmi, &data, regs)) { 1172 + if (perf_event_overflow(event, nmi, &data, regs)) { 1173 1173 /* 1174 1174 * Interrupts are coming too fast - throttle them 1175 - * by setting the counter to 0, so it will be 1175 + * by setting the event to 0, so it will be 1176 1176 * at least 2^30 cycles until the next interrupt 1177 - * (assuming each counter counts at most 2 counts 1177 + * (assuming each event counts at most 2 counts 1178 1178 * per cycle). 1179 1179 */ 1180 1180 val = 0; ··· 1182 1182 } 1183 1183 } 1184 1184 1185 - write_pmc(counter->hw.idx, val); 1186 - atomic64_set(&counter->hw.prev_count, val); 1187 - atomic64_set(&counter->hw.period_left, left); 1188 - perf_counter_update_userpage(counter); 1185 + write_pmc(event->hw.idx, val); 1186 + atomic64_set(&event->hw.prev_count, val); 1187 + atomic64_set(&event->hw.period_left, left); 1188 + perf_event_update_userpage(event); 1189 1189 } 1190 1190 1191 1191 /* 1192 1192 * Called from generic code to get the misc flags (i.e. processor mode) 1193 - * for an event. 1193 + * for an event_id. 1194 1194 */ 1195 1195 unsigned long perf_misc_flags(struct pt_regs *regs) 1196 1196 { ··· 1198 1198 1199 1199 if (flags) 1200 1200 return flags; 1201 - return user_mode(regs) ? PERF_EVENT_MISC_USER : 1202 - PERF_EVENT_MISC_KERNEL; 1201 + return user_mode(regs) ? PERF_RECORD_MISC_USER : 1202 + PERF_RECORD_MISC_KERNEL; 1203 1203 } 1204 1204 1205 1205 /* 1206 1206 * Called from generic code to get the instruction pointer 1207 - * for an event. 1207 + * for an event_id. 
1208 1208 */ 1209 1209 unsigned long perf_instruction_pointer(struct pt_regs *regs) 1210 1210 { ··· 1220 1220 /* 1221 1221 * Performance monitor interrupt stuff 1222 1222 */ 1223 - static void perf_counter_interrupt(struct pt_regs *regs) 1223 + static void perf_event_interrupt(struct pt_regs *regs) 1224 1224 { 1225 1225 int i; 1226 - struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters); 1227 - struct perf_counter *counter; 1226 + struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); 1227 + struct perf_event *event; 1228 1228 unsigned long val; 1229 1229 int found = 0; 1230 1230 int nmi; 1231 1231 1232 1232 if (cpuhw->n_limited) 1233 - freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5), 1233 + freeze_limited_events(cpuhw, mfspr(SPRN_PMC5), 1234 1234 mfspr(SPRN_PMC6)); 1235 1235 1236 1236 perf_read_regs(regs); ··· 1241 1241 else 1242 1242 irq_enter(); 1243 1243 1244 - for (i = 0; i < cpuhw->n_counters; ++i) { 1245 - counter = cpuhw->counter[i]; 1246 - if (!counter->hw.idx || is_limited_pmc(counter->hw.idx)) 1244 + for (i = 0; i < cpuhw->n_events; ++i) { 1245 + event = cpuhw->event[i]; 1246 + if (!event->hw.idx || is_limited_pmc(event->hw.idx)) 1247 1247 continue; 1248 - val = read_pmc(counter->hw.idx); 1248 + val = read_pmc(event->hw.idx); 1249 1249 if ((int)val < 0) { 1250 - /* counter has overflowed */ 1250 + /* event has overflowed */ 1251 1251 found = 1; 1252 - record_and_restart(counter, val, regs, nmi); 1252 + record_and_restart(event, val, regs, nmi); 1253 1253 } 1254 1254 } 1255 1255 1256 1256 /* 1257 - * In case we didn't find and reset the counter that caused 1258 - * the interrupt, scan all counters and reset any that are 1257 + * In case we didn't find and reset the event that caused 1258 + * the interrupt, scan all events and reset any that are 1259 1259 * negative, to avoid getting continual interrupts. 1260 1260 * Any that we processed in the previous loop will not be negative. 1261 1261 */ 1262 1262 if (!found) { 1263 - for (i = 0; i < ppmu->n_counter; ++i) { 1263 + for (i = 0; i < ppmu->n_event; ++i) { 1264 1264 if (is_limited_pmc(i + 1)) 1265 1265 continue; 1266 1266 val = read_pmc(i + 1); ··· 1273 1273 * Reset MMCR0 to its normal value. This will set PMXE and 1274 1274 * clear FC (freeze counters) and PMAO (perf mon alert occurred) 1275 1275 * and thus allow interrupts to occur again. 1276 - * XXX might want to use MSR.PM to keep the counters frozen until 1276 + * XXX might want to use MSR.PM to keep the events frozen until 1277 1277 * we get back out of this interrupt. 1278 1278 */ 1279 1279 write_mmcr0(cpuhw, cpuhw->mmcr[0]); ··· 1284 1284 irq_exit(); 1285 1285 } 1286 1286 1287 - void hw_perf_counter_setup(int cpu) 1287 + void hw_perf_event_setup(int cpu) 1288 1288 { 1289 - struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu); 1289 + struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); 1290 1290 1291 1291 if (!ppmu) 1292 1292 return; ··· 1308 1308 * Use FCHV to ignore kernel events if MSR.HV is set. 1309 1309 */ 1310 1310 if (mfmsr() & MSR_HV) 1311 - freeze_counters_kernel = MMCR0_FCHV; 1311 + freeze_events_kernel = MMCR0_FCHV; 1312 1312 #endif /* CONFIG_PPC64 */ 1313 1313 1314 1314 return 0;
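The hw_perf_event_init()/hw_perf_event_destroy() pair above keeps the PMC hardware reserved only while at least one event exists: atomic_inc_not_zero() and atomic_add_unless() handle the common transitions locklessly, and only the 0 <-> 1 transitions fall back to pmc_reserve_mutex. A minimal user-space sketch of the same idiom, using C11 atomics and a pthread mutex in place of the kernel primitives (all names here are illustrative, not kernel API):

    #include <pthread.h>
    #include <stdatomic.h>

    static atomic_int num_users;
    static pthread_mutex_t reserve_mutex = PTHREAD_MUTEX_INITIALIZER;

    static int reserve_hw(void) { return 1; }   /* cf. reserve_pmc_hardware(); 1 = success */
    static void release_hw(void) { }            /* cf. release_pmc_hardware() */

    /* cf. the atomic_inc_not_zero() fast path in hw_perf_event_init() */
    static int get_hw(void)
    {
            int err = 0;
            int cur = atomic_load(&num_users);

            while (cur != 0 &&
                   !atomic_compare_exchange_weak(&num_users, &cur, cur + 1))
                    ;                            /* CAS failure reloads cur; retry */
            if (cur != 0)
                    return 0;                    /* hardware already reserved */

            pthread_mutex_lock(&reserve_mutex);
            if (atomic_load(&num_users) == 0 && !reserve_hw())
                    err = -1;
            else
                    atomic_fetch_add(&num_users, 1);
            pthread_mutex_unlock(&reserve_mutex);
            return err;
    }

    /* cf. the atomic_add_unless(&num_events, -1, 1) fast path above */
    static void put_hw(void)
    {
            int cur = atomic_load(&num_users);

            while (cur > 1 &&
                   !atomic_compare_exchange_weak(&num_users, &cur, cur - 1))
                    ;
            if (cur > 1)
                    return;                      /* not the last user */

            pthread_mutex_lock(&reserve_mutex);
            if (atomic_fetch_sub(&num_users, 1) == 1)
                    release_hw();                /* last user gone */
            pthread_mutex_unlock(&reserve_mutex);
    }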
+1 -1
arch/powerpc/kernel/power4-pmu.c
··· 9 9 * 2 of the License, or (at your option) any later version. 10 10 */ 11 11 #include <linux/kernel.h> 12 - #include <linux/perf_counter.h> 12 + #include <linux/perf_event.h> 13 13 #include <linux/string.h> 14 14 #include <asm/reg.h> 15 15 #include <asm/cputable.h>
+1 -1
arch/powerpc/kernel/power5+-pmu.c
··· 9 9 * 2 of the License, or (at your option) any later version. 10 10 */ 11 11 #include <linux/kernel.h> 12 - #include <linux/perf_counter.h> 12 + #include <linux/perf_event.h> 13 13 #include <linux/string.h> 14 14 #include <asm/reg.h> 15 15 #include <asm/cputable.h>
+1 -1
arch/powerpc/kernel/power5-pmu.c
··· 9 9 * 2 of the License, or (at your option) any later version. 10 10 */ 11 11 #include <linux/kernel.h> 12 - #include <linux/perf_counter.h> 12 + #include <linux/perf_event.h> 13 13 #include <linux/string.h> 14 14 #include <asm/reg.h> 15 15 #include <asm/cputable.h>
+1 -1
arch/powerpc/kernel/power6-pmu.c
··· 9 9 * 2 of the License, or (at your option) any later version. 10 10 */ 11 11 #include <linux/kernel.h> 12 - #include <linux/perf_counter.h> 12 + #include <linux/perf_event.h> 13 13 #include <linux/string.h> 14 14 #include <asm/reg.h> 15 15 #include <asm/cputable.h>
+1 -1
arch/powerpc/kernel/power7-pmu.c
··· 9 9 * 2 of the License, or (at your option) any later version. 10 10 */ 11 11 #include <linux/kernel.h> 12 - #include <linux/perf_counter.h> 12 + #include <linux/perf_event.h> 13 13 #include <linux/string.h> 14 14 #include <asm/reg.h> 15 15 #include <asm/cputable.h>
+1 -1
arch/powerpc/kernel/ppc970-pmu.c
··· 9 9 * 2 of the License, or (at your option) any later version. 10 10 */ 11 11 #include <linux/string.h> 12 - #include <linux/perf_counter.h> 12 + #include <linux/perf_event.h> 13 13 #include <asm/reg.h> 14 14 #include <asm/cputable.h> 15 15
+15 -15
arch/powerpc/kernel/time.c
··· 53 53 #include <linux/posix-timers.h> 54 54 #include <linux/irq.h> 55 55 #include <linux/delay.h> 56 - #include <linux/perf_counter.h> 56 + #include <linux/perf_event.h> 57 57 58 58 #include <asm/io.h> 59 59 #include <asm/processor.h> ··· 527 527 } 528 528 #endif /* CONFIG_PPC_ISERIES */ 529 529 530 - #if defined(CONFIG_PERF_COUNTERS) && defined(CONFIG_PPC32) 531 - DEFINE_PER_CPU(u8, perf_counter_pending); 530 + #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_PPC32) 531 + DEFINE_PER_CPU(u8, perf_event_pending); 532 532 533 - void set_perf_counter_pending(void) 533 + void set_perf_event_pending(void) 534 534 { 535 - get_cpu_var(perf_counter_pending) = 1; 535 + get_cpu_var(perf_event_pending) = 1; 536 536 set_dec(1); 537 - put_cpu_var(perf_counter_pending); 537 + put_cpu_var(perf_event_pending); 538 538 } 539 539 540 - #define test_perf_counter_pending() __get_cpu_var(perf_counter_pending) 541 - #define clear_perf_counter_pending() __get_cpu_var(perf_counter_pending) = 0 540 + #define test_perf_event_pending() __get_cpu_var(perf_event_pending) 541 + #define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0 542 542 543 - #else /* CONFIG_PERF_COUNTERS && CONFIG_PPC32 */ 543 + #else /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */ 544 544 545 - #define test_perf_counter_pending() 0 546 - #define clear_perf_counter_pending() 545 + #define test_perf_event_pending() 0 546 + #define clear_perf_event_pending() 547 547 548 - #endif /* CONFIG_PERF_COUNTERS && CONFIG_PPC32 */ 548 + #endif /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */ 549 549 550 550 /* 551 551 * For iSeries shared processors, we have to let the hypervisor ··· 573 573 set_dec(DECREMENTER_MAX); 574 574 575 575 #ifdef CONFIG_PPC32 576 - if (test_perf_counter_pending()) { 577 - clear_perf_counter_pending(); 578 - perf_counter_do_pending(); 576 + if (test_perf_event_pending()) { 577 + clear_perf_event_pending(); 578 + perf_event_do_pending(); 579 579 } 580 580 if (atomic_read(&ppc_n_lost_interrupts) != 0) 581 581 do_IRQ(regs);
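The PPC32 block above is the whole deferral mechanism in miniature: code running in a context where calling into perf directly is unsafe just sets a per-CPU flag and arms the decrementer with set_dec(1), and the next timer_interrupt() notices the flag and runs perf_event_do_pending(). A stand-alone sketch of that shape (stub functions and a single flag standing in for the per-CPU variable):

    #include <stdio.h>

    static volatile int pending;            /* cf. per_cpu(perf_event_pending) */

    static void arm_timer_soon(void) { }    /* cf. set_dec(1) */
    static void do_pending_work(void)       /* cf. perf_event_do_pending() */
    {
            printf("deferred perf work runs here\n");
    }

    static void set_pending(void)           /* cf. set_perf_event_pending() */
    {
            pending = 1;
            arm_timer_soon();
    }

    static void timer_irq(void)             /* cf. timer_interrupt() */
    {
            if (pending) {
                    pending = 0;
                    do_pending_work();
            }
    }

    int main(void)
    {
            set_pending();                  /* e.g. from PMU overflow context */
            timer_irq();                    /* the next tick picks the work up */
            return 0;
    }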
+4 -4
arch/powerpc/mm/fault.c
··· 29 29 #include <linux/module.h> 30 30 #include <linux/kprobes.h> 31 31 #include <linux/kdebug.h> 32 - #include <linux/perf_counter.h> 32 + #include <linux/perf_event.h> 33 33 34 34 #include <asm/firmware.h> 35 35 #include <asm/page.h> ··· 171 171 die("Weird page fault", regs, SIGSEGV); 172 172 } 173 173 174 - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); 174 + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); 175 175 176 176 /* When running in the kernel we expect faults to occur only to 177 177 * addresses in user space. All other faults represent errors in the ··· 312 312 } 313 313 if (ret & VM_FAULT_MAJOR) { 314 314 current->maj_flt++; 315 - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, 315 + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, 316 316 regs, address); 317 317 #ifdef CONFIG_PPC_SMLPAR 318 318 if (firmware_has_feature(FW_FEATURE_CMO)) { ··· 323 323 #endif 324 324 } else { 325 325 current->min_flt++; 326 - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, 326 + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, 327 327 regs, address); 328 328 } 329 329 up_read(&mm->mmap_sem);
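Every architecture's fault path gets the same mechanical treatment as this file: perf_swcounter_event() becomes perf_sw_event(), with the argument list (event id, count, nmi flag, regs, fault address) unchanged. Condensed down to just the instrumentation, the post-rename pattern a fault handler follows is (a sketch, not a complete handler):

    #include <linux/perf_event.h>

    static void account_fault(struct pt_regs *regs, unsigned long address,
                              int major)
    {
            /* one total page-fault event, then one major-or-minor event */
            perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);

            if (major)
                    perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
                                  regs, address);
            else
                    perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
                                  regs, address);
    }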
+2 -2
arch/powerpc/platforms/Kconfig.cputype
··· 280 280 281 281 config PPC_PERF_CTRS 282 282 def_bool y 283 - depends on PERF_COUNTERS && PPC_HAVE_PMU_SUPPORT 283 + depends on PERF_EVENTS && PPC_HAVE_PMU_SUPPORT 284 284 help 285 - This enables the powerpc-specific perf_counter back-end. 285 + This enables the powerpc-specific perf_event back-end. 286 286 287 287 config SMP 288 288 depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE
+1 -1
arch/s390/Kconfig
··· 94 94 select HAVE_KVM if 64BIT 95 95 select HAVE_ARCH_TRACEHOOK 96 96 select INIT_ALL_POSSIBLE 97 - select HAVE_PERF_COUNTERS 97 + select HAVE_PERF_EVENTS 98 98 99 99 config SCHED_OMIT_FRAME_POINTER 100 100 bool
-10
arch/s390/include/asm/perf_counter.h
··· 1 - /* 2 - * Performance counter support - s390 specific definitions. 3 - * 4 - * Copyright 2009 Martin Schwidefsky, IBM Corporation. 5 - */ 6 - 7 - static inline void set_perf_counter_pending(void) {} 8 - static inline void clear_perf_counter_pending(void) {} 9 - 10 - #define PERF_COUNTER_INDEX_OFFSET 0
+10
arch/s390/include/asm/perf_event.h
··· 1 + /* 2 + * Performance event support - s390 specific definitions. 3 + * 4 + * Copyright 2009 Martin Schwidefsky, IBM Corporation. 5 + */ 6 + 7 + static inline void set_perf_event_pending(void) {} 8 + static inline void clear_perf_event_pending(void) {} 9 + 10 + #define PERF_EVENT_INDEX_OFFSET 0
+1 -1
arch/s390/include/asm/unistd.h
··· 268 268 #define __NR_preadv 328 269 269 #define __NR_pwritev 329 270 270 #define __NR_rt_tgsigqueueinfo 330 271 - #define __NR_perf_counter_open 331 271 + #define __NR_perf_event_open 331 272 272 #define NR_syscalls 332 273 273 274 274 /*
+4 -4
arch/s390/kernel/compat_wrapper.S
··· 1832 1832 llgtr %r5,%r5 # struct compat_siginfo * 1833 1833 jg compat_sys_rt_tgsigqueueinfo_wrapper # branch to system call 1834 1834 1835 - .globl sys_perf_counter_open_wrapper 1836 - sys_perf_counter_open_wrapper: 1837 - llgtr %r2,%r2 # const struct perf_counter_attr * 1835 + .globl sys_perf_event_open_wrapper 1836 + sys_perf_event_open_wrapper: 1837 + llgtr %r2,%r2 # const struct perf_event_attr * 1838 1838 lgfr %r3,%r3 # pid_t 1839 1839 lgfr %r4,%r4 # int 1840 1840 lgfr %r5,%r5 # int 1841 1841 llgfr %r6,%r6 # unsigned long 1842 - jg sys_perf_counter_open # branch to system call 1842 + jg sys_perf_event_open # branch to system call
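The wrapper above is also the clearest statement of the renamed syscall's ABI: five arguments, a perf_event_attr pointer plus pid, cpu, group fd, and flags. From user space the call now looks like this (raw syscall(2) sketch; error handling and most attr fields elided):

    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    /* Open a hardware cycle counter on one task/CPU; returns an event fd. */
    static int open_cycle_counter(pid_t pid, int cpu)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_HARDWARE;
            attr.config = PERF_COUNT_HW_CPU_CYCLES;

            /* args: attr, pid, cpu, group_fd, flags */
            return syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0UL);
    }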
+1 -1
arch/s390/kernel/syscalls.S
··· 339 339 SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv_wrapper) 340 340 SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev_wrapper) 341 341 SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo_wrapper) /* 330 */ 342 - SYSCALL(sys_perf_counter_open,sys_perf_counter_open,sys_perf_counter_open_wrapper) 342 + SYSCALL(sys_perf_event_open,sys_perf_event_open,sys_perf_event_open_wrapper)
+4 -4
arch/s390/mm/fault.c
··· 10 10 * Copyright (C) 1995 Linus Torvalds 11 11 */ 12 12 13 - #include <linux/perf_counter.h> 13 + #include <linux/perf_event.h> 14 14 #include <linux/signal.h> 15 15 #include <linux/sched.h> 16 16 #include <linux/kernel.h> ··· 306 306 * interrupts again and then search the VMAs 307 307 */ 308 308 local_irq_enable(); 309 - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); 309 + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); 310 310 down_read(&mm->mmap_sem); 311 311 312 312 si_code = SEGV_MAPERR; ··· 366 366 } 367 367 if (fault & VM_FAULT_MAJOR) { 368 368 tsk->maj_flt++; 369 - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, 369 + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, 370 370 regs, address); 371 371 } else { 372 372 tsk->min_flt++; 373 - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, 373 + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, 374 374 regs, address); 375 375 } 376 376 up_read(&mm->mmap_sem);
+1 -1
arch/sh/Kconfig
··· 16 16 select HAVE_IOREMAP_PROT if MMU 17 17 select HAVE_ARCH_TRACEHOOK 18 18 select HAVE_DMA_API_DEBUG 19 - select HAVE_PERF_COUNTERS 19 + select HAVE_PERF_EVENTS 20 20 select HAVE_KERNEL_GZIP 21 21 select HAVE_KERNEL_BZIP2 22 22 select HAVE_KERNEL_LZMA
-9
arch/sh/include/asm/perf_counter.h
··· 1 - #ifndef __ASM_SH_PERF_COUNTER_H 2 - #define __ASM_SH_PERF_COUNTER_H 3 - 4 - /* SH only supports software counters through this interface. */ 5 - static inline void set_perf_counter_pending(void) {} 6 - 7 - #define PERF_COUNTER_INDEX_OFFSET 0 8 - 9 - #endif /* __ASM_SH_PERF_COUNTER_H */
+9
arch/sh/include/asm/perf_event.h
··· 1 + #ifndef __ASM_SH_PERF_EVENT_H 2 + #define __ASM_SH_PERF_EVENT_H 3 + 4 + /* SH only supports software events through this interface. */ 5 + static inline void set_perf_event_pending(void) {} 6 + 7 + #define PERF_EVENT_INDEX_OFFSET 0 8 + 9 + #endif /* __ASM_SH_PERF_EVENT_H */
+1 -1
arch/sh/include/asm/unistd_32.h
··· 344 344 #define __NR_preadv 333 345 345 #define __NR_pwritev 334 346 346 #define __NR_rt_tgsigqueueinfo 335 347 - #define __NR_perf_counter_open 336 347 + #define __NR_perf_event_open 336 348 348 349 349 #define NR_syscalls 337 350 350
+1 -1
arch/sh/include/asm/unistd_64.h
··· 384 384 #define __NR_preadv 361 385 385 #define __NR_pwritev 362 386 386 #define __NR_rt_tgsigqueueinfo 363 387 - #define __NR_perf_counter_open 364 387 + #define __NR_perf_event_open 364 388 388 389 389 #ifdef __KERNEL__ 390 390
+1 -1
arch/sh/kernel/syscalls_32.S
··· 352 352 .long sys_preadv 353 353 .long sys_pwritev 354 354 .long sys_rt_tgsigqueueinfo /* 335 */ 355 - .long sys_perf_counter_open 355 + .long sys_perf_event_open
+1 -1
arch/sh/kernel/syscalls_64.S
··· 390 390 .long sys_preadv 391 391 .long sys_pwritev 392 392 .long sys_rt_tgsigqueueinfo 393 - .long sys_perf_counter_open 393 + .long sys_perf_event_open
+4 -4
arch/sh/mm/fault_32.c
··· 15 15 #include <linux/mm.h> 16 16 #include <linux/hardirq.h> 17 17 #include <linux/kprobes.h> 18 - #include <linux/perf_counter.h> 18 + #include <linux/perf_event.h> 19 19 #include <asm/io_trapped.h> 20 20 #include <asm/system.h> 21 21 #include <asm/mmu_context.h> ··· 157 157 if ((regs->sr & SR_IMASK) != SR_IMASK) 158 158 local_irq_enable(); 159 159 160 - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); 160 + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); 161 161 162 162 /* 163 163 * If we're in an interrupt, have no user context or are running ··· 208 208 } 209 209 if (fault & VM_FAULT_MAJOR) { 210 210 tsk->maj_flt++; 211 - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, 211 + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, 212 212 regs, address); 213 213 } else { 214 214 tsk->min_flt++; 215 - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, 215 + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, 216 216 regs, address); 217 217 } 218 218
+4 -4
arch/sh/mm/tlbflush_64.c
··· 20 20 #include <linux/mman.h> 21 21 #include <linux/mm.h> 22 22 #include <linux/smp.h> 23 - #include <linux/perf_counter.h> 23 + #include <linux/perf_event.h> 24 24 #include <linux/interrupt.h> 25 25 #include <asm/system.h> 26 26 #include <asm/io.h> ··· 116 116 /* Not an IO address, so reenable interrupts */ 117 117 local_irq_enable(); 118 118 119 - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); 119 + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); 120 120 121 121 /* 122 122 * If we're in an interrupt or have no user ··· 201 201 202 202 if (fault & VM_FAULT_MAJOR) { 203 203 tsk->maj_flt++; 204 - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, 204 + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, 205 205 regs, address); 206 206 } else { 207 207 tsk->min_flt++; 208 - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, 208 + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, 209 209 regs, address); 210 210 } 211 211
+2 -2
arch/sparc/Kconfig
··· 25 25 select ARCH_WANT_OPTIONAL_GPIOLIB 26 26 select RTC_CLASS 27 27 select RTC_DRV_M48T59 28 - select HAVE_PERF_COUNTERS 28 + select HAVE_PERF_EVENTS 29 29 select HAVE_DMA_ATTRS 30 30 select HAVE_DMA_API_DEBUG 31 31 ··· 47 47 select RTC_DRV_BQ4802 48 48 select RTC_DRV_SUN4V 49 49 select RTC_DRV_STARFIRE 50 - select HAVE_PERF_COUNTERS 50 + select HAVE_PERF_EVENTS 51 51 52 52 config ARCH_DEFCONFIG 53 53 string
-14
arch/sparc/include/asm/perf_counter.h
··· 1 - #ifndef __ASM_SPARC_PERF_COUNTER_H 2 - #define __ASM_SPARC_PERF_COUNTER_H 3 - 4 - extern void set_perf_counter_pending(void); 5 - 6 - #define PERF_COUNTER_INDEX_OFFSET 0 7 - 8 - #ifdef CONFIG_PERF_COUNTERS 9 - extern void init_hw_perf_counters(void); 10 - #else 11 - static inline void init_hw_perf_counters(void) { } 12 - #endif 13 - 14 - #endif
+14
arch/sparc/include/asm/perf_event.h
··· 1 + #ifndef __ASM_SPARC_PERF_EVENT_H 2 + #define __ASM_SPARC_PERF_EVENT_H 3 + 4 + extern void set_perf_event_pending(void); 5 + 6 + #define PERF_EVENT_INDEX_OFFSET 0 7 + 8 + #ifdef CONFIG_PERF_EVENTS 9 + extern void init_hw_perf_events(void); 10 + #else 11 + static inline void init_hw_perf_events(void) { } 12 + #endif 13 + 14 + #endif
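As in the s390 and sh headers earlier, this header carries the usual compile-out idiom through the rename: with CONFIG_PERF_EVENTS enabled, init_hw_perf_events() is a real symbol; otherwise it collapses to an empty inline so call sites (here, sparc's nmi.c) need no #ifdefs. The generic shape, with FEATURE as a placeholder name:

    #ifdef CONFIG_FEATURE
    extern void feature_init(void);
    #else
    static inline void feature_init(void) { }  /* callers compile either way */
    #endif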
+1 -1
arch/sparc/include/asm/unistd.h
··· 395 395 #define __NR_preadv 324 396 396 #define __NR_pwritev 325 397 397 #define __NR_rt_tgsigqueueinfo 326 398 - #define __NR_perf_counter_open 327 398 + #define __NR_perf_event_open 327 399 399 400 400 #define NR_SYSCALLS 328 401 401
+1 -1
arch/sparc/kernel/Makefile
··· 104 104 audit--$(CONFIG_AUDIT) := compat_audit.o 105 105 obj-$(CONFIG_COMPAT) += $(audit--y) 106 106 107 - pc--$(CONFIG_PERF_COUNTERS) := perf_counter.o 107 + pc--$(CONFIG_PERF_EVENTS) := perf_event.o 108 108 obj-$(CONFIG_SPARC64) += $(pc--y)
+2 -2
arch/sparc/kernel/nmi.c
··· 19 19 #include <linux/delay.h> 20 20 #include <linux/smp.h> 21 21 22 - #include <asm/perf_counter.h> 22 + #include <asm/perf_event.h> 23 23 #include <asm/ptrace.h> 24 24 #include <asm/local.h> 25 25 #include <asm/pcr.h> ··· 265 265 } 266 266 } 267 267 if (!err) 268 - init_hw_perf_counters(); 268 + init_hw_perf_events(); 269 269 270 270 return err; 271 271 }
+5 -5
arch/sparc/kernel/pcr.c
··· 7 7 #include <linux/init.h> 8 8 #include <linux/irq.h> 9 9 10 - #include <linux/perf_counter.h> 10 + #include <linux/perf_event.h> 11 11 12 12 #include <asm/pil.h> 13 13 #include <asm/pcr.h> ··· 15 15 16 16 /* This code is shared between various users of the performance 17 17 * counters. Users will be oprofile, pseudo-NMI watchdog, and the 18 - * perf_counter support layer. 18 + * perf_event support layer. 19 19 */ 20 20 21 21 #define PCR_SUN4U_ENABLE (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE) ··· 42 42 43 43 old_regs = set_irq_regs(regs); 44 44 irq_enter(); 45 - #ifdef CONFIG_PERF_COUNTERS 46 - perf_counter_do_pending(); 45 + #ifdef CONFIG_PERF_EVENTS 46 + perf_event_do_pending(); 47 47 #endif 48 48 irq_exit(); 49 49 set_irq_regs(old_regs); 50 50 } 51 51 52 - void set_perf_counter_pending(void) 52 + void set_perf_event_pending(void) 53 53 { 54 54 set_softint(1 << PIL_DEFERRED_PCR_WORK); 55 55 }
+89 -89
arch/sparc/kernel/perf_counter.c arch/sparc/kernel/perf_event.c
··· 1 - /* Performance counter support for sparc64. 1 + /* Performance event support for sparc64. 2 2 * 3 3 * Copyright (C) 2009 David S. Miller <davem@davemloft.net> 4 4 * 5 - * This code is based almost entirely upon the x86 perf counter 5 + * This code is based almost entirely upon the x86 perf event 6 6 * code, which is: 7 7 * 8 8 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> ··· 12 12 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 13 13 */ 14 14 15 - #include <linux/perf_counter.h> 15 + #include <linux/perf_event.h> 16 16 #include <linux/kprobes.h> 17 17 #include <linux/kernel.h> 18 18 #include <linux/kdebug.h> ··· 46 46 * normal code. 47 47 */ 48 48 49 - #define MAX_HWCOUNTERS 2 49 + #define MAX_HWEVENTS 2 50 50 #define MAX_PERIOD ((1UL << 32) - 1) 51 51 52 52 #define PIC_UPPER_INDEX 0 53 53 #define PIC_LOWER_INDEX 1 54 54 55 - struct cpu_hw_counters { 56 - struct perf_counter *counters[MAX_HWCOUNTERS]; 57 - unsigned long used_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)]; 58 - unsigned long active_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)]; 55 + struct cpu_hw_events { 56 + struct perf_event *events[MAX_HWEVENTS]; 57 + unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; 58 + unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; 59 59 int enabled; 60 60 }; 61 - DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = { .enabled = 1, }; 61 + DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; 62 62 63 63 struct perf_event_map { 64 64 u16 encoding; ··· 87 87 [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER }, 88 88 }; 89 89 90 - static const struct perf_event_map *ultra3i_event_map(int event) 90 + static const struct perf_event_map *ultra3i_event_map(int event_id) 91 91 { 92 - return &ultra3i_perfmon_event_map[event]; 92 + return &ultra3i_perfmon_event_map[event_id]; 93 93 } 94 94 95 95 static const struct sparc_pmu ultra3i_pmu = { ··· 111 111 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER }, 112 112 }; 113 113 114 - static const struct perf_event_map *niagara2_event_map(int event) 114 + static const struct perf_event_map *niagara2_event_map(int event_id) 115 115 { 116 - return &niagara2_perfmon_event_map[event]; 116 + return &niagara2_perfmon_event_map[event_id]; 117 117 } 118 118 119 119 static const struct sparc_pmu niagara2_pmu = { ··· 130 130 131 131 static const struct sparc_pmu *sparc_pmu __read_mostly; 132 132 133 - static u64 event_encoding(u64 event, int idx) 133 + static u64 event_encoding(u64 event_id, int idx) 134 134 { 135 135 if (idx == PIC_UPPER_INDEX) 136 - event <<= sparc_pmu->upper_shift; 136 + event_id <<= sparc_pmu->upper_shift; 137 137 else 138 - event <<= sparc_pmu->lower_shift; 139 - return event; 138 + event_id <<= sparc_pmu->lower_shift; 139 + return event_id; 140 140 } 141 141 142 142 static u64 mask_for_index(int idx) ··· 151 151 sparc_pmu->lower_nop, idx); 152 152 } 153 153 154 - static inline void sparc_pmu_enable_counter(struct hw_perf_counter *hwc, 154 + static inline void sparc_pmu_enable_event(struct hw_perf_event *hwc, 155 155 int idx) 156 156 { 157 157 u64 val, mask = mask_for_index(idx); ··· 160 160 pcr_ops->write((val & ~mask) | hwc->config); 161 161 } 162 162 163 - static inline void sparc_pmu_disable_counter(struct hw_perf_counter *hwc, 163 + static inline void sparc_pmu_disable_event(struct hw_perf_event *hwc, 164 164 int idx) 165 165 { 166 166 u64 mask = mask_for_index(idx); ··· 172 172 173 173 void hw_perf_enable(void) 174 174 { 175 - struct cpu_hw_counters *cpuc = 
&__get_cpu_var(cpu_hw_counters); 175 + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 176 176 u64 val; 177 177 int i; 178 178 ··· 184 184 185 185 val = pcr_ops->read(); 186 186 187 - for (i = 0; i < MAX_HWCOUNTERS; i++) { 188 - struct perf_counter *cp = cpuc->counters[i]; 189 - struct hw_perf_counter *hwc; 187 + for (i = 0; i < MAX_HWEVENTS; i++) { 188 + struct perf_event *cp = cpuc->events[i]; 189 + struct hw_perf_event *hwc; 190 190 191 191 if (!cp) 192 192 continue; ··· 199 199 200 200 void hw_perf_disable(void) 201 201 { 202 - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 202 + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 203 203 u64 val; 204 204 205 205 if (!cpuc->enabled) ··· 241 241 write_pic(pic); 242 242 } 243 243 244 - static int sparc_perf_counter_set_period(struct perf_counter *counter, 245 - struct hw_perf_counter *hwc, int idx) 244 + static int sparc_perf_event_set_period(struct perf_event *event, 245 + struct hw_perf_event *hwc, int idx) 246 246 { 247 247 s64 left = atomic64_read(&hwc->period_left); 248 248 s64 period = hwc->sample_period; ··· 268 268 269 269 write_pmc(idx, (u64)(-left) & 0xffffffff); 270 270 271 - perf_counter_update_userpage(counter); 271 + perf_event_update_userpage(event); 272 272 273 273 return ret; 274 274 } 275 275 276 - static int sparc_pmu_enable(struct perf_counter *counter) 276 + static int sparc_pmu_enable(struct perf_event *event) 277 277 { 278 - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 279 - struct hw_perf_counter *hwc = &counter->hw; 278 + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 279 + struct hw_perf_event *hwc = &event->hw; 280 280 int idx = hwc->idx; 281 281 282 282 if (test_and_set_bit(idx, cpuc->used_mask)) 283 283 return -EAGAIN; 284 284 285 - sparc_pmu_disable_counter(hwc, idx); 285 + sparc_pmu_disable_event(hwc, idx); 286 286 287 - cpuc->counters[idx] = counter; 287 + cpuc->events[idx] = event; 288 288 set_bit(idx, cpuc->active_mask); 289 289 290 - sparc_perf_counter_set_period(counter, hwc, idx); 291 - sparc_pmu_enable_counter(hwc, idx); 292 - perf_counter_update_userpage(counter); 290 + sparc_perf_event_set_period(event, hwc, idx); 291 + sparc_pmu_enable_event(hwc, idx); 292 + perf_event_update_userpage(event); 293 293 return 0; 294 294 } 295 295 296 - static u64 sparc_perf_counter_update(struct perf_counter *counter, 297 - struct hw_perf_counter *hwc, int idx) 296 + static u64 sparc_perf_event_update(struct perf_event *event, 297 + struct hw_perf_event *hwc, int idx) 298 298 { 299 299 int shift = 64 - 32; 300 300 u64 prev_raw_count, new_raw_count; ··· 311 311 delta = (new_raw_count << shift) - (prev_raw_count << shift); 312 312 delta >>= shift; 313 313 314 - atomic64_add(delta, &counter->count); 314 + atomic64_add(delta, &event->count); 315 315 atomic64_sub(delta, &hwc->period_left); 316 316 317 317 return new_raw_count; 318 318 } 319 319 320 - static void sparc_pmu_disable(struct perf_counter *counter) 320 + static void sparc_pmu_disable(struct perf_event *event) 321 321 { 322 - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 323 - struct hw_perf_counter *hwc = &counter->hw; 322 + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 323 + struct hw_perf_event *hwc = &event->hw; 324 324 int idx = hwc->idx; 325 325 326 326 clear_bit(idx, cpuc->active_mask); 327 - sparc_pmu_disable_counter(hwc, idx); 327 + sparc_pmu_disable_event(hwc, idx); 328 328 329 329 barrier(); 330 330 331 - sparc_perf_counter_update(counter, hwc, idx); 
332 - cpuc->counters[idx] = NULL; 331 + sparc_perf_event_update(event, hwc, idx); 332 + cpuc->events[idx] = NULL; 333 333 clear_bit(idx, cpuc->used_mask); 334 334 335 - perf_counter_update_userpage(counter); 335 + perf_event_update_userpage(event); 336 336 } 337 337 338 - static void sparc_pmu_read(struct perf_counter *counter) 338 + static void sparc_pmu_read(struct perf_event *event) 339 339 { 340 - struct hw_perf_counter *hwc = &counter->hw; 341 - sparc_perf_counter_update(counter, hwc, hwc->idx); 340 + struct hw_perf_event *hwc = &event->hw; 341 + sparc_perf_event_update(event, hwc, hwc->idx); 342 342 } 343 343 344 - static void sparc_pmu_unthrottle(struct perf_counter *counter) 344 + static void sparc_pmu_unthrottle(struct perf_event *event) 345 345 { 346 - struct hw_perf_counter *hwc = &counter->hw; 347 - sparc_pmu_enable_counter(hwc, hwc->idx); 346 + struct hw_perf_event *hwc = &event->hw; 347 + sparc_pmu_enable_event(hwc, hwc->idx); 348 348 } 349 349 350 - static atomic_t active_counters = ATOMIC_INIT(0); 350 + static atomic_t active_events = ATOMIC_INIT(0); 351 351 static DEFINE_MUTEX(pmc_grab_mutex); 352 352 353 - void perf_counter_grab_pmc(void) 353 + void perf_event_grab_pmc(void) 354 354 { 355 - if (atomic_inc_not_zero(&active_counters)) 355 + if (atomic_inc_not_zero(&active_events)) 356 356 return; 357 357 358 358 mutex_lock(&pmc_grab_mutex); 359 - if (atomic_read(&active_counters) == 0) { 359 + if (atomic_read(&active_events) == 0) { 360 360 if (atomic_read(&nmi_active) > 0) { 361 361 on_each_cpu(stop_nmi_watchdog, NULL, 1); 362 362 BUG_ON(atomic_read(&nmi_active) != 0); 363 363 } 364 - atomic_inc(&active_counters); 364 + atomic_inc(&active_events); 365 365 } 366 366 mutex_unlock(&pmc_grab_mutex); 367 367 } 368 368 369 - void perf_counter_release_pmc(void) 369 + void perf_event_release_pmc(void) 370 370 { 371 - if (atomic_dec_and_mutex_lock(&active_counters, &pmc_grab_mutex)) { 371 + if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) { 372 372 if (atomic_read(&nmi_active) == 0) 373 373 on_each_cpu(start_nmi_watchdog, NULL, 1); 374 374 mutex_unlock(&pmc_grab_mutex); 375 375 } 376 376 } 377 377 378 - static void hw_perf_counter_destroy(struct perf_counter *counter) 378 + static void hw_perf_event_destroy(struct perf_event *event) 379 379 { 380 - perf_counter_release_pmc(); 380 + perf_event_release_pmc(); 381 381 } 382 382 383 - static int __hw_perf_counter_init(struct perf_counter *counter) 383 + static int __hw_perf_event_init(struct perf_event *event) 384 384 { 385 - struct perf_counter_attr *attr = &counter->attr; 386 - struct hw_perf_counter *hwc = &counter->hw; 385 + struct perf_event_attr *attr = &event->attr; 386 + struct hw_perf_event *hwc = &event->hw; 387 387 const struct perf_event_map *pmap; 388 388 u64 enc; 389 389 ··· 396 396 if (attr->config >= sparc_pmu->max_events) 397 397 return -EINVAL; 398 398 399 - perf_counter_grab_pmc(); 400 - counter->destroy = hw_perf_counter_destroy; 399 + perf_event_grab_pmc(); 400 + event->destroy = hw_perf_event_destroy; 401 401 402 402 /* We save the enable bits in the config_base. 
So to 403 403 * turn off sampling just write 'config', and to enable ··· 439 439 .unthrottle = sparc_pmu_unthrottle, 440 440 }; 441 441 442 - const struct pmu *hw_perf_counter_init(struct perf_counter *counter) 442 + const struct pmu *hw_perf_event_init(struct perf_event *event) 443 443 { 444 - int err = __hw_perf_counter_init(counter); 444 + int err = __hw_perf_event_init(event); 445 445 446 446 if (err) 447 447 return ERR_PTR(err); 448 448 return &pmu; 449 449 } 450 450 451 - void perf_counter_print_debug(void) 451 + void perf_event_print_debug(void) 452 452 { 453 453 unsigned long flags; 454 454 u64 pcr, pic; ··· 471 471 local_irq_restore(flags); 472 472 } 473 473 474 - static int __kprobes perf_counter_nmi_handler(struct notifier_block *self, 474 + static int __kprobes perf_event_nmi_handler(struct notifier_block *self, 475 475 unsigned long cmd, void *__args) 476 476 { 477 477 struct die_args *args = __args; 478 478 struct perf_sample_data data; 479 - struct cpu_hw_counters *cpuc; 479 + struct cpu_hw_events *cpuc; 480 480 struct pt_regs *regs; 481 481 int idx; 482 482 483 - if (!atomic_read(&active_counters)) 483 + if (!atomic_read(&active_events)) 484 484 return NOTIFY_DONE; 485 485 486 486 switch (cmd) { ··· 495 495 496 496 data.addr = 0; 497 497 498 - cpuc = &__get_cpu_var(cpu_hw_counters); 499 - for (idx = 0; idx < MAX_HWCOUNTERS; idx++) { 500 - struct perf_counter *counter = cpuc->counters[idx]; 501 - struct hw_perf_counter *hwc; 498 + cpuc = &__get_cpu_var(cpu_hw_events); 499 + for (idx = 0; idx < MAX_HWEVENTS; idx++) { 500 + struct perf_event *event = cpuc->events[idx]; 501 + struct hw_perf_event *hwc; 502 502 u64 val; 503 503 504 504 if (!test_bit(idx, cpuc->active_mask)) 505 505 continue; 506 - hwc = &counter->hw; 507 - val = sparc_perf_counter_update(counter, hwc, idx); 506 + hwc = &event->hw; 507 + val = sparc_perf_event_update(event, hwc, idx); 508 508 if (val & (1ULL << 31)) 509 509 continue; 510 510 511 - data.period = counter->hw.last_period; 512 - if (!sparc_perf_counter_set_period(counter, hwc, idx)) 511 + data.period = event->hw.last_period; 512 + if (!sparc_perf_event_set_period(event, hwc, idx)) 513 513 continue; 514 514 515 - if (perf_counter_overflow(counter, 1, &data, regs)) 516 - sparc_pmu_disable_counter(hwc, idx); 515 + if (perf_event_overflow(event, 1, &data, regs)) 516 + sparc_pmu_disable_event(hwc, idx); 517 517 } 518 518 519 519 return NOTIFY_STOP; 520 520 } 521 521 522 - static __read_mostly struct notifier_block perf_counter_nmi_notifier = { 523 - .notifier_call = perf_counter_nmi_handler, 522 + static __read_mostly struct notifier_block perf_event_nmi_notifier = { 523 + .notifier_call = perf_event_nmi_handler, 524 524 }; 525 525 526 526 static bool __init supported_pmu(void) ··· 536 536 return false; 537 537 } 538 538 539 - void __init init_hw_perf_counters(void) 539 + void __init init_hw_perf_events(void) 540 540 { 541 - pr_info("Performance counters: "); 541 + pr_info("Performance events: "); 542 542 543 543 if (!supported_pmu()) { 544 544 pr_cont("No support for PMU type '%s'\n", sparc_pmu_type); ··· 547 547 548 548 pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type); 549 549 550 - /* All sparc64 PMUs currently have 2 counters. But this simple 551 - * driver only supports one active counter at a time. 550 + /* All sparc64 PMUs currently have 2 events. But this simple 551 + * driver only supports one active event at a time. 
552 552 */ 553 - perf_max_counters = 1; 553 + perf_max_events = 1; 554 554 555 - register_die_notifier(&perf_counter_nmi_notifier); 555 + register_die_notifier(&perf_event_nmi_notifier); 556 556 }
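One detail of sparc_perf_event_update() above is worth spelling out: the PIC is 32 bits wide, so the delta is formed by left-justifying both raw samples in 64 bits, subtracting, and shifting back down; that way a counter wrap falls out of ordinary two's-complement arithmetic. A stand-alone check of the trick (assumes an arithmetic right shift on signed values, as the kernel code does):

    #include <stdio.h>
    #include <stdint.h>

    static int64_t delta32(uint64_t prev_raw, uint64_t new_raw)
    {
            const int shift = 64 - 32;   /* 32-bit hardware counter */
            int64_t delta = (int64_t)(new_raw << shift) -
                            (int64_t)(prev_raw << shift);

            return delta >> shift;       /* arithmetic shift extends the sign */
    }

    int main(void)
    {
            /* counter wrapped: 0xfffffff0 -> 0x00000010, true delta is 0x20 */
            printf("0x%llx\n",
                   (unsigned long long)delta32(0xfffffff0u, 0x10u));
            return 0;
    }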
+1 -1
arch/sparc/kernel/systbls_32.S
··· 82 82 /*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate 83 83 /*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 84 84 /*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv 85 - /*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_counter_open 85 + /*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open 86 86
+2 -2
arch/sparc/kernel/systbls_64.S
··· 83 83 /*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate 84 84 .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1 85 85 /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv 86 - .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_counter_open 86 + .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open 87 87 88 88 #endif /* CONFIG_COMPAT */ 89 89 ··· 158 158 /*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate 159 159 .word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 160 160 /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv 161 - .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_counter_open 161 + .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open
+1 -1
arch/x86/Kconfig
··· 24 24 select HAVE_UNSTABLE_SCHED_CLOCK 25 25 select HAVE_IDE 26 26 select HAVE_OPROFILE 27 - select HAVE_PERF_COUNTERS if (!M386 && !M486) 27 + select HAVE_PERF_EVENTS if (!M386 && !M486) 28 28 select HAVE_IOREMAP_PROT 29 29 select HAVE_KPROBES 30 30 select ARCH_WANT_OPTIONAL_GPIOLIB
+1 -1
arch/x86/ia32/ia32entry.S
··· 831 831 .quad compat_sys_preadv 832 832 .quad compat_sys_pwritev 833 833 .quad compat_sys_rt_tgsigqueueinfo /* 335 */ 834 - .quad sys_perf_counter_open 834 + .quad sys_perf_event_open 835 835 ia32_syscall_end:
+1 -1
arch/x86/include/asm/entry_arch.h
··· 49 49 BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR) 50 50 BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR) 51 51 52 - #ifdef CONFIG_PERF_COUNTERS 52 + #ifdef CONFIG_PERF_EVENTS 53 53 BUILD_INTERRUPT(perf_pending_interrupt, LOCAL_PENDING_VECTOR) 54 54 #endif 55 55
+15 -15
arch/x86/include/asm/perf_counter.h arch/x86/include/asm/perf_event.h
··· 1 - #ifndef _ASM_X86_PERF_COUNTER_H 2 - #define _ASM_X86_PERF_COUNTER_H 1 + #ifndef _ASM_X86_PERF_EVENT_H 2 + #define _ASM_X86_PERF_EVENT_H 3 3 4 4 /* 5 - * Performance counter hw details: 5 + * Performance event hw details: 6 6 */ 7 7 8 8 #define X86_PMC_MAX_GENERIC 8 ··· 43 43 union cpuid10_eax { 44 44 struct { 45 45 unsigned int version_id:8; 46 - unsigned int num_counters:8; 46 + unsigned int num_events:8; 47 47 unsigned int bit_width:8; 48 48 unsigned int mask_length:8; 49 49 } split; ··· 52 52 53 53 union cpuid10_edx { 54 54 struct { 55 - unsigned int num_counters_fixed:4; 55 + unsigned int num_events_fixed:4; 56 56 unsigned int reserved:28; 57 57 } split; 58 58 unsigned int full; ··· 60 60 61 61 62 62 /* 63 - * Fixed-purpose performance counters: 63 + * Fixed-purpose performance events: 64 64 */ 65 65 66 66 /* ··· 87 87 /* 88 88 * We model BTS tracing as another fixed-mode PMC. 89 89 * 90 - * We choose a value in the middle of the fixed counter range, since lower 91 - * values are used by actual fixed counters and higher values are used 90 + * We choose a value in the middle of the fixed event range, since lower 91 + * values are used by actual fixed events and higher values are used 92 92 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr. 93 93 */ 94 94 #define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16) 95 95 96 96 97 - #ifdef CONFIG_PERF_COUNTERS 98 - extern void init_hw_perf_counters(void); 99 - extern void perf_counters_lapic_init(void); 97 + #ifdef CONFIG_PERF_EVENTS 98 + extern void init_hw_perf_events(void); 99 + extern void perf_events_lapic_init(void); 100 100 101 - #define PERF_COUNTER_INDEX_OFFSET 0 101 + #define PERF_EVENT_INDEX_OFFSET 0 102 102 103 103 #else 104 - static inline void init_hw_perf_counters(void) { } 105 - static inline void perf_counters_lapic_init(void) { } 104 + static inline void init_hw_perf_events(void) { } 105 + static inline void perf_events_lapic_init(void) { } 106 106 #endif 107 107 108 - #endif /* _ASM_X86_PERF_COUNTER_H */ 108 + #endif /* _ASM_X86_PERF_EVENT_H */
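The cpuid10_eax/cpuid10_edx unions above (fields renamed to num_events and num_events_fixed) decode CPUID leaf 0xA, which is what the x86 init code reads to discover how many counters the CPU has and how wide they are. The same fields can be probed from user space (illustrative; requires a CPU that reports architectural performance monitoring):

    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid(0xa, &eax, &ebx, &ecx, &edx))
                    return 1;            /* leaf 0xA not available */

            printf("version_id:       %u\n", eax & 0xff);
            printf("num_events:       %u\n", (eax >> 8) & 0xff);
            printf("bit_width:        %u\n", (eax >> 16) & 0xff);
            printf("mask_length:      %u\n", (eax >> 24) & 0xff);
            printf("num_events_fixed: %u\n", edx & 0xf);
            return 0;
    }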
+1 -1
arch/x86/include/asm/unistd_32.h
··· 341 341 #define __NR_preadv 333 342 342 #define __NR_pwritev 334 343 343 #define __NR_rt_tgsigqueueinfo 335 344 - #define __NR_perf_counter_open 336 344 + #define __NR_perf_event_open 336 345 345 346 346 #ifdef __KERNEL__ 347 347
+2 -2
arch/x86/include/asm/unistd_64.h
··· 659 659 __SYSCALL(__NR_pwritev, sys_pwritev) 660 660 #define __NR_rt_tgsigqueueinfo 297 661 661 __SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo) 662 - #define __NR_perf_counter_open 298 663 - __SYSCALL(__NR_perf_counter_open, sys_perf_counter_open) 662 + #define __NR_perf_event_open 298 663 + __SYSCALL(__NR_perf_event_open, sys_perf_event_open) 664 664 665 665 #ifndef __NO_STUBS 666 666 #define __ARCH_WANT_OLD_READDIR
+3 -3
arch/x86/kernel/apic/apic.c
··· 14 14 * Mikael Pettersson : PM converted to driver model. 15 15 */ 16 16 17 - #include <linux/perf_counter.h> 17 + #include <linux/perf_event.h> 18 18 #include <linux/kernel_stat.h> 19 19 #include <linux/mc146818rtc.h> 20 20 #include <linux/acpi_pmtmr.h> ··· 35 35 #include <linux/smp.h> 36 36 #include <linux/mm.h> 37 37 38 - #include <asm/perf_counter.h> 38 + #include <asm/perf_event.h> 39 39 #include <asm/x86_init.h> 40 40 #include <asm/pgalloc.h> 41 41 #include <asm/atomic.h> ··· 1189 1189 apic_write(APIC_ESR, 0); 1190 1190 } 1191 1191 #endif 1192 - perf_counters_lapic_init(); 1192 + perf_events_lapic_init(); 1193 1193 1194 1194 preempt_disable(); 1195 1195
+1 -1
arch/x86/kernel/cpu/Makefile
··· 27 27 obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o 28 28 obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o 29 29 30 - obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o 30 + obj-$(CONFIG_PERF_EVENTS) += perf_event.o 31 31 32 32 obj-$(CONFIG_X86_MCE) += mcheck/ 33 33 obj-$(CONFIG_MTRR) += mtrr/
+2 -2
arch/x86/kernel/cpu/common.c
··· 13 13 #include <linux/io.h> 14 14 15 15 #include <asm/stackprotector.h> 16 - #include <asm/perf_counter.h> 16 + #include <asm/perf_event.h> 17 17 #include <asm/mmu_context.h> 18 18 #include <asm/hypervisor.h> 19 19 #include <asm/processor.h> ··· 869 869 #else 870 870 vgetcpu_set_mode(); 871 871 #endif 872 - init_hw_perf_counters(); 872 + init_hw_perf_events(); 873 873 } 874 874 875 875 void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
+278 -278
arch/x86/kernel/cpu/perf_counter.c arch/x86/kernel/cpu/perf_event.c
··· 1 1 /* 2 - * Performance counter x86 architecture code 2 + * Performance events x86 architecture code 3 3 * 4 4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> 5 5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar ··· 11 11 * For licencing details see kernel-base/COPYING 12 12 */ 13 13 14 - #include <linux/perf_counter.h> 14 + #include <linux/perf_event.h> 15 15 #include <linux/capability.h> 16 16 #include <linux/notifier.h> 17 17 #include <linux/hardirq.h> ··· 27 27 #include <asm/stacktrace.h> 28 28 #include <asm/nmi.h> 29 29 30 - static u64 perf_counter_mask __read_mostly; 30 + static u64 perf_event_mask __read_mostly; 31 31 32 - /* The maximal number of PEBS counters: */ 33 - #define MAX_PEBS_COUNTERS 4 32 + /* The maximal number of PEBS events: */ 33 + #define MAX_PEBS_EVENTS 4 34 34 35 35 /* The size of a BTS record in bytes: */ 36 36 #define BTS_RECORD_SIZE 24 ··· 65 65 u64 pebs_index; 66 66 u64 pebs_absolute_maximum; 67 67 u64 pebs_interrupt_threshold; 68 - u64 pebs_counter_reset[MAX_PEBS_COUNTERS]; 68 + u64 pebs_event_reset[MAX_PEBS_EVENTS]; 69 69 }; 70 70 71 - struct cpu_hw_counters { 72 - struct perf_counter *counters[X86_PMC_IDX_MAX]; 71 + struct cpu_hw_events { 72 + struct perf_event *events[X86_PMC_IDX_MAX]; 73 73 unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; 74 74 unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; 75 75 unsigned long interrupts; ··· 86 86 int (*handle_irq)(struct pt_regs *); 87 87 void (*disable_all)(void); 88 88 void (*enable_all)(void); 89 - void (*enable)(struct hw_perf_counter *, int); 90 - void (*disable)(struct hw_perf_counter *, int); 89 + void (*enable)(struct hw_perf_event *, int); 90 + void (*disable)(struct hw_perf_event *, int); 91 91 unsigned eventsel; 92 92 unsigned perfctr; 93 93 u64 (*event_map)(int); 94 94 u64 (*raw_event)(u64); 95 95 int max_events; 96 - int num_counters; 97 - int num_counters_fixed; 98 - int counter_bits; 99 - u64 counter_mask; 96 + int num_events; 97 + int num_events_fixed; 98 + int event_bits; 99 + u64 event_mask; 100 100 int apic; 101 101 u64 max_period; 102 102 u64 intel_ctrl; ··· 106 106 107 107 static struct x86_pmu x86_pmu __read_mostly; 108 108 109 - static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = { 109 + static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { 110 110 .enabled = 1, 111 111 }; 112 112 ··· 124 124 [PERF_COUNT_HW_BUS_CYCLES] = 0x0062, 125 125 }; 126 126 127 - static u64 p6_pmu_event_map(int event) 127 + static u64 p6_pmu_event_map(int hw_event) 128 128 { 129 - return p6_perfmon_event_map[event]; 129 + return p6_perfmon_event_map[hw_event]; 130 130 } 131 131 132 132 /* 133 - * Counter setting that is specified not to count anything. 133 + * Event setting that is specified not to count anything. 134 134 * We use this to effectively disable a counter. 135 135 * 136 136 * L2_RQSTS with 0 MESI unit mask. 
137 137 */ 138 - #define P6_NOP_COUNTER 0x0000002EULL 138 + #define P6_NOP_EVENT 0x0000002EULL 139 139 140 - static u64 p6_pmu_raw_event(u64 event) 140 + static u64 p6_pmu_raw_event(u64 hw_event) 141 141 { 142 142 #define P6_EVNTSEL_EVENT_MASK 0x000000FFULL 143 143 #define P6_EVNTSEL_UNIT_MASK 0x0000FF00ULL 144 144 #define P6_EVNTSEL_EDGE_MASK 0x00040000ULL 145 145 #define P6_EVNTSEL_INV_MASK 0x00800000ULL 146 - #define P6_EVNTSEL_COUNTER_MASK 0xFF000000ULL 146 + #define P6_EVNTSEL_REG_MASK 0xFF000000ULL 147 147 148 148 #define P6_EVNTSEL_MASK \ 149 149 (P6_EVNTSEL_EVENT_MASK | \ 150 150 P6_EVNTSEL_UNIT_MASK | \ 151 151 P6_EVNTSEL_EDGE_MASK | \ 152 152 P6_EVNTSEL_INV_MASK | \ 153 - P6_EVNTSEL_COUNTER_MASK) 153 + P6_EVNTSEL_REG_MASK) 154 154 155 - return event & P6_EVNTSEL_MASK; 155 + return hw_event & P6_EVNTSEL_MASK; 156 156 } 157 157 158 158 ··· 170 170 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c, 171 171 }; 172 172 173 - static u64 intel_pmu_event_map(int event) 173 + static u64 intel_pmu_event_map(int hw_event) 174 174 { 175 - return intel_perfmon_event_map[event]; 175 + return intel_perfmon_event_map[hw_event]; 176 176 } 177 177 178 178 /* 179 - * Generalized hw caching related event table, filled 179 + * Generalized hw caching related hw_event table, filled 180 180 * in on a per model basis. A value of 0 means 181 - * 'not supported', -1 means 'event makes no sense on 182 - * this CPU', any other value means the raw event 181 + * 'not supported', -1 means 'hw_event makes no sense on 182 + * this CPU', any other value means the raw hw_event 183 183 * ID. 184 184 */ 185 185 ··· 463 463 }, 464 464 }; 465 465 466 - static u64 intel_pmu_raw_event(u64 event) 466 + static u64 intel_pmu_raw_event(u64 hw_event) 467 467 { 468 468 #define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL 469 469 #define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL 470 470 #define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL 471 471 #define CORE_EVNTSEL_INV_MASK 0x00800000ULL 472 - #define CORE_EVNTSEL_COUNTER_MASK 0xFF000000ULL 472 + #define CORE_EVNTSEL_REG_MASK 0xFF000000ULL 473 473 474 474 #define CORE_EVNTSEL_MASK \ 475 475 (CORE_EVNTSEL_EVENT_MASK | \ 476 476 CORE_EVNTSEL_UNIT_MASK | \ 477 477 CORE_EVNTSEL_EDGE_MASK | \ 478 478 CORE_EVNTSEL_INV_MASK | \ 479 - CORE_EVNTSEL_COUNTER_MASK) 479 + CORE_EVNTSEL_REG_MASK) 480 480 481 - return event & CORE_EVNTSEL_MASK; 481 + return hw_event & CORE_EVNTSEL_MASK; 482 482 } 483 483 484 484 static const u64 amd_hw_cache_event_ids ··· 585 585 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, 586 586 }; 587 587 588 - static u64 amd_pmu_event_map(int event) 588 + static u64 amd_pmu_event_map(int hw_event) 589 589 { 590 - return amd_perfmon_event_map[event]; 590 + return amd_perfmon_event_map[hw_event]; 591 591 } 592 592 593 - static u64 amd_pmu_raw_event(u64 event) 593 + static u64 amd_pmu_raw_event(u64 hw_event) 594 594 { 595 595 #define K7_EVNTSEL_EVENT_MASK 0x7000000FFULL 596 596 #define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL 597 597 #define K7_EVNTSEL_EDGE_MASK 0x000040000ULL 598 598 #define K7_EVNTSEL_INV_MASK 0x000800000ULL 599 - #define K7_EVNTSEL_COUNTER_MASK 0x0FF000000ULL 599 + #define K7_EVNTSEL_REG_MASK 0x0FF000000ULL 600 600 601 601 #define K7_EVNTSEL_MASK \ 602 602 (K7_EVNTSEL_EVENT_MASK | \ 603 603 K7_EVNTSEL_UNIT_MASK | \ 604 604 K7_EVNTSEL_EDGE_MASK | \ 605 605 K7_EVNTSEL_INV_MASK | \ 606 - K7_EVNTSEL_COUNTER_MASK) 606 + K7_EVNTSEL_REG_MASK) 607 607 608 - return event & K7_EVNTSEL_MASK; 608 + return hw_event & K7_EVNTSEL_MASK; 609 609 } 610 610 611 611 /* 612 - * Propagate counter elapsed time into the 
generic counter. 613 - * Can only be executed on the CPU where the counter is active. 612 + * Propagate event elapsed time into the generic event. 613 + * Can only be executed on the CPU where the event is active. 614 614 * Returns the delta events processed. 615 615 */ 616 616 static u64 617 - x86_perf_counter_update(struct perf_counter *counter, 618 - struct hw_perf_counter *hwc, int idx) 617 + x86_perf_event_update(struct perf_event *event, 618 + struct hw_perf_event *hwc, int idx) 619 619 { 620 - int shift = 64 - x86_pmu.counter_bits; 620 + int shift = 64 - x86_pmu.event_bits; 621 621 u64 prev_raw_count, new_raw_count; 622 622 s64 delta; 623 623 ··· 625 625 return 0; 626 626 627 627 /* 628 - * Careful: an NMI might modify the previous counter value. 628 + * Careful: an NMI might modify the previous event value. 629 629 * 630 630 * Our tactic to handle this is to first atomically read and 631 631 * exchange a new raw count - then add that new-prev delta 632 - * count to the generic counter atomically: 632 + * count to the generic event atomically: 633 633 */ 634 634 again: 635 635 prev_raw_count = atomic64_read(&hwc->prev_count); 636 - rdmsrl(hwc->counter_base + idx, new_raw_count); 636 + rdmsrl(hwc->event_base + idx, new_raw_count); 637 637 638 638 if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, 639 639 new_raw_count) != prev_raw_count) ··· 642 642 /* 643 643 * Now we have the new raw value and have updated the prev 644 644 * timestamp already. We can now calculate the elapsed delta 645 - * (counter-)time and add that to the generic counter. 645 + * (event-)time and add that to the generic event. 646 646 * 647 647 * Careful, not all hw sign-extends above the physical width 648 648 * of the count. ··· 650 650 delta = (new_raw_count << shift) - (prev_raw_count << shift); 651 651 delta >>= shift; 652 652 653 - atomic64_add(delta, &counter->count); 653 + atomic64_add(delta, &event->count); 654 654 atomic64_sub(delta, &hwc->period_left); 655 655 656 656 return new_raw_count; 657 657 } 658 658 659 - static atomic_t active_counters; 659 + static atomic_t active_events; 660 660 static DEFINE_MUTEX(pmc_reserve_mutex); 661 661 662 662 static bool reserve_pmc_hardware(void) ··· 667 667 if (nmi_watchdog == NMI_LOCAL_APIC) 668 668 disable_lapic_nmi_watchdog(); 669 669 670 - for (i = 0; i < x86_pmu.num_counters; i++) { 670 + for (i = 0; i < x86_pmu.num_events; i++) { 671 671 if (!reserve_perfctr_nmi(x86_pmu.perfctr + i)) 672 672 goto perfctr_fail; 673 673 } 674 674 675 - for (i = 0; i < x86_pmu.num_counters; i++) { 675 + for (i = 0; i < x86_pmu.num_events; i++) { 676 676 if (!reserve_evntsel_nmi(x86_pmu.eventsel + i)) 677 677 goto eventsel_fail; 678 678 } ··· 685 685 for (i--; i >= 0; i--) 686 686 release_evntsel_nmi(x86_pmu.eventsel + i); 687 687 688 - i = x86_pmu.num_counters; 688 + i = x86_pmu.num_events; 689 689 690 690 perfctr_fail: 691 691 for (i--; i >= 0; i--) ··· 703 703 #ifdef CONFIG_X86_LOCAL_APIC 704 704 int i; 705 705 706 - for (i = 0; i < x86_pmu.num_counters; i++) { 706 + for (i = 0; i < x86_pmu.num_events; i++) { 707 707 release_perfctr_nmi(x86_pmu.perfctr + i); 708 708 release_evntsel_nmi(x86_pmu.eventsel + i); 709 709 } ··· 720 720 721 721 static inline void init_debug_store_on_cpu(int cpu) 722 722 { 723 - struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds; 723 + struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; 724 724 725 725 if (!ds) 726 726 return; ··· 732 732 733 733 static inline void fini_debug_store_on_cpu(int cpu) 734 734 { 735 - if 
(!per_cpu(cpu_hw_counters, cpu).ds) 735 + if (!per_cpu(cpu_hw_events, cpu).ds) 736 736 return; 737 737 738 738 wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0); ··· 751 751 fini_debug_store_on_cpu(cpu); 752 752 753 753 for_each_possible_cpu(cpu) { 754 - struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds; 754 + struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; 755 755 756 756 if (!ds) 757 757 continue; 758 758 759 - per_cpu(cpu_hw_counters, cpu).ds = NULL; 759 + per_cpu(cpu_hw_events, cpu).ds = NULL; 760 760 761 761 kfree((void *)(unsigned long)ds->bts_buffer_base); 762 762 kfree(ds); ··· 796 796 ds->bts_interrupt_threshold = 797 797 ds->bts_absolute_maximum - BTS_OVFL_TH; 798 798 799 - per_cpu(cpu_hw_counters, cpu).ds = ds; 799 + per_cpu(cpu_hw_events, cpu).ds = ds; 800 800 err = 0; 801 801 } 802 802 ··· 812 812 return err; 813 813 } 814 814 815 - static void hw_perf_counter_destroy(struct perf_counter *counter) 815 + static void hw_perf_event_destroy(struct perf_event *event) 816 816 { 817 - if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) { 817 + if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) { 818 818 release_pmc_hardware(); 819 819 release_bts_hardware(); 820 820 mutex_unlock(&pmc_reserve_mutex); ··· 827 827 } 828 828 829 829 static inline int 830 - set_ext_hw_attr(struct hw_perf_counter *hwc, struct perf_counter_attr *attr) 830 + set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr) 831 831 { 832 832 unsigned int cache_type, cache_op, cache_result; 833 833 u64 config, val; ··· 880 880 881 881 static void intel_pmu_disable_bts(void) 882 882 { 883 - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 883 + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 884 884 unsigned long debugctlmsr; 885 885 886 886 if (!cpuc->ds) ··· 898 898 /* 899 899 * Setup the hardware configuration for a given attr_type 900 900 */ 901 - static int __hw_perf_counter_init(struct perf_counter *counter) 901 + static int __hw_perf_event_init(struct perf_event *event) 902 902 { 903 - struct perf_counter_attr *attr = &counter->attr; 904 - struct hw_perf_counter *hwc = &counter->hw; 903 + struct perf_event_attr *attr = &event->attr; 904 + struct hw_perf_event *hwc = &event->hw; 905 905 u64 config; 906 906 int err; 907 907 ··· 909 909 return -ENODEV; 910 910 911 911 err = 0; 912 - if (!atomic_inc_not_zero(&active_counters)) { 912 + if (!atomic_inc_not_zero(&active_events)) { 913 913 mutex_lock(&pmc_reserve_mutex); 914 - if (atomic_read(&active_counters) == 0) { 914 + if (atomic_read(&active_events) == 0) { 915 915 if (!reserve_pmc_hardware()) 916 916 err = -EBUSY; 917 917 else 918 918 err = reserve_bts_hardware(); 919 919 } 920 920 if (!err) 921 - atomic_inc(&active_counters); 921 + atomic_inc(&active_events); 922 922 mutex_unlock(&pmc_reserve_mutex); 923 923 } 924 924 if (err) 925 925 return err; 926 926 927 - counter->destroy = hw_perf_counter_destroy; 927 + event->destroy = hw_perf_event_destroy; 928 928 929 929 /* 930 930 * Generate PMC IRQs: ··· 948 948 /* 949 949 * If we have a PMU initialized but no APIC 950 950 * interrupts, we cannot sample hardware 951 - * counters (user-space has to fall back and 952 - * sample via a hrtimer based software counter): 951 + * events (user-space has to fall back and 952 + * sample via a hrtimer based software event): 953 953 */ 954 954 if (!x86_pmu.apic) 955 955 return -EOPNOTSUPP; 956 956 } 957 957 958 958 /* 959 - * Raw event type provide the config in the event structure 959 + * Raw 
hw_event type provide the config in the hw_event structure 960 960 */ 961 961 if (attr->type == PERF_TYPE_RAW) { 962 962 hwc->config |= x86_pmu.raw_event(attr->config); ··· 1001 1001 1002 1002 static void p6_pmu_disable_all(void) 1003 1003 { 1004 - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1004 + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1005 1005 u64 val; 1006 1006 1007 1007 if (!cpuc->enabled) ··· 1018 1018 1019 1019 static void intel_pmu_disable_all(void) 1020 1020 { 1021 - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1021 + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1022 1022 1023 1023 if (!cpuc->enabled) 1024 1024 return; ··· 1034 1034 1035 1035 static void amd_pmu_disable_all(void) 1036 1036 { 1037 - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1037 + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1038 1038 int idx; 1039 1039 1040 1040 if (!cpuc->enabled) ··· 1043 1043 cpuc->enabled = 0; 1044 1044 /* 1045 1045 * ensure we write the disable before we start disabling the 1046 - * counters proper, so that amd_pmu_enable_counter() does the 1046 + * events proper, so that amd_pmu_enable_event() does the 1047 1047 * right thing. 1048 1048 */ 1049 1049 barrier(); 1050 1050 1051 - for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1051 + for (idx = 0; idx < x86_pmu.num_events; idx++) { 1052 1052 u64 val; 1053 1053 1054 1054 if (!test_bit(idx, cpuc->active_mask)) ··· 1070 1070 1071 1071 static void p6_pmu_enable_all(void) 1072 1072 { 1073 - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1073 + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1074 1074 unsigned long val; 1075 1075 1076 1076 if (cpuc->enabled) ··· 1087 1087 1088 1088 static void intel_pmu_enable_all(void) 1089 1089 { 1090 - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1090 + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1091 1091 1092 1092 if (cpuc->enabled) 1093 1093 return; ··· 1098 1098 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); 1099 1099 1100 1100 if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { 1101 - struct perf_counter *counter = 1102 - cpuc->counters[X86_PMC_IDX_FIXED_BTS]; 1101 + struct perf_event *event = 1102 + cpuc->events[X86_PMC_IDX_FIXED_BTS]; 1103 1103 1104 - if (WARN_ON_ONCE(!counter)) 1104 + if (WARN_ON_ONCE(!event)) 1105 1105 return; 1106 1106 1107 - intel_pmu_enable_bts(counter->hw.config); 1107 + intel_pmu_enable_bts(event->hw.config); 1108 1108 } 1109 1109 } 1110 1110 1111 1111 static void amd_pmu_enable_all(void) 1112 1112 { 1113 - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1113 + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1114 1114 int idx; 1115 1115 1116 1116 if (cpuc->enabled) ··· 1119 1119 cpuc->enabled = 1; 1120 1120 barrier(); 1121 1121 1122 - for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1123 - struct perf_counter *counter = cpuc->counters[idx]; 1122 + for (idx = 0; idx < x86_pmu.num_events; idx++) { 1123 + struct perf_event *event = cpuc->events[idx]; 1124 1124 u64 val; 1125 1125 1126 1126 if (!test_bit(idx, cpuc->active_mask)) 1127 1127 continue; 1128 1128 1129 - val = counter->hw.config; 1129 + val = event->hw.config; 1130 1130 val |= ARCH_PERFMON_EVENTSEL0_ENABLE; 1131 1131 wrmsrl(MSR_K7_EVNTSEL0 + idx, val); 1132 1132 } ··· 1153 1153 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); 1154 1154 } 1155 1155 1156 - static inline void x86_pmu_enable_counter(struct 
hw_perf_counter *hwc, int idx) 1156 + static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx) 1157 1157 { 1158 1158 (void)checking_wrmsrl(hwc->config_base + idx, 1159 1159 hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE); 1160 1160 } 1161 1161 1162 - static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) 1162 + static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx) 1163 1163 { 1164 1164 (void)checking_wrmsrl(hwc->config_base + idx, hwc->config); 1165 1165 } 1166 1166 1167 1167 static inline void 1168 - intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx) 1168 + intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx) 1169 1169 { 1170 1170 int idx = __idx - X86_PMC_IDX_FIXED; 1171 1171 u64 ctrl_val, mask; ··· 1178 1178 } 1179 1179 1180 1180 static inline void 1181 - p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) 1181 + p6_pmu_disable_event(struct hw_perf_event *hwc, int idx) 1182 1182 { 1183 - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1184 - u64 val = P6_NOP_COUNTER; 1183 + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1184 + u64 val = P6_NOP_EVENT; 1185 1185 1186 1186 if (cpuc->enabled) 1187 1187 val |= ARCH_PERFMON_EVENTSEL0_ENABLE; ··· 1190 1190 } 1191 1191 1192 1192 static inline void 1193 - intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) 1193 + intel_pmu_disable_event(struct hw_perf_event *hwc, int idx) 1194 1194 { 1195 1195 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { 1196 1196 intel_pmu_disable_bts(); ··· 1202 1202 return; 1203 1203 } 1204 1204 1205 - x86_pmu_disable_counter(hwc, idx); 1205 + x86_pmu_disable_event(hwc, idx); 1206 1206 } 1207 1207 1208 1208 static inline void 1209 - amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) 1209 + amd_pmu_disable_event(struct hw_perf_event *hwc, int idx) 1210 1210 { 1211 - x86_pmu_disable_counter(hwc, idx); 1211 + x86_pmu_disable_event(hwc, idx); 1212 1212 } 1213 1213 1214 1214 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); 1215 1215 1216 1216 /* 1217 1217 * Set the next IRQ period, based on the hwc->period_left value. 
1218 - * To be called with the counter disabled in hw: 1218 + * To be called with the event disabled in hw: 1219 1219 */ 1220 1220 static int 1221 - x86_perf_counter_set_period(struct perf_counter *counter, 1222 - struct hw_perf_counter *hwc, int idx) 1221 + x86_perf_event_set_period(struct perf_event *event, 1222 + struct hw_perf_event *hwc, int idx) 1223 1223 { 1224 1224 s64 left = atomic64_read(&hwc->period_left); 1225 1225 s64 period = hwc->sample_period; ··· 1245 1245 ret = 1; 1246 1246 } 1247 1247 /* 1248 - * Quirk: certain CPUs dont like it if just 1 event is left: 1248 + * Quirk: certain CPUs dont like it if just 1 hw_event is left: 1249 1249 */ 1250 1250 if (unlikely(left < 2)) 1251 1251 left = 2; ··· 1256 1256 per_cpu(pmc_prev_left[idx], smp_processor_id()) = left; 1257 1257 1258 1258 /* 1259 - * The hw counter starts counting from this counter offset, 1259 + * The hw event starts counting from this event offset, 1260 1260 * mark it to be able to extra future deltas: 1261 1261 */ 1262 1262 atomic64_set(&hwc->prev_count, (u64)-left); 1263 1263 1264 - err = checking_wrmsrl(hwc->counter_base + idx, 1265 - (u64)(-left) & x86_pmu.counter_mask); 1264 + err = checking_wrmsrl(hwc->event_base + idx, 1265 + (u64)(-left) & x86_pmu.event_mask); 1266 1266 1267 - perf_counter_update_userpage(counter); 1267 + perf_event_update_userpage(event); 1268 1268 1269 1269 return ret; 1270 1270 } 1271 1271 1272 1272 static inline void 1273 - intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx) 1273 + intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx) 1274 1274 { 1275 1275 int idx = __idx - X86_PMC_IDX_FIXED; 1276 1276 u64 ctrl_val, bits, mask; ··· 1295 1295 err = checking_wrmsrl(hwc->config_base, ctrl_val); 1296 1296 } 1297 1297 1298 - static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) 1298 + static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx) 1299 1299 { 1300 - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1300 + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1301 1301 u64 val; 1302 1302 1303 1303 val = hwc->config; ··· 1308 1308 } 1309 1309 1310 1310 1311 - static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) 1311 + static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx) 1312 1312 { 1313 1313 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { 1314 - if (!__get_cpu_var(cpu_hw_counters).enabled) 1314 + if (!__get_cpu_var(cpu_hw_events).enabled) 1315 1315 return; 1316 1316 1317 1317 intel_pmu_enable_bts(hwc->config); ··· 1323 1323 return; 1324 1324 } 1325 1325 1326 - x86_pmu_enable_counter(hwc, idx); 1326 + x86_pmu_enable_event(hwc, idx); 1327 1327 } 1328 1328 1329 - static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) 1329 + static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx) 1330 1330 { 1331 - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1331 + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1332 1332 1333 1333 if (cpuc->enabled) 1334 - x86_pmu_enable_counter(hwc, idx); 1334 + x86_pmu_enable_event(hwc, idx); 1335 1335 } 1336 1336 1337 1337 static int 1338 - fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc) 1338 + fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc) 1339 1339 { 1340 - unsigned int event; 1340 + unsigned int hw_event; 1341 1341 1342 - event = hwc->config & ARCH_PERFMON_EVENT_MASK; 1342 + hw_event = hwc->config & ARCH_PERFMON_EVENT_MASK; 1343 1343 1344 - if 
(unlikely((event == 1344 + if (unlikely((hw_event == 1345 1345 x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) && 1346 1346 (hwc->sample_period == 1))) 1347 1347 return X86_PMC_IDX_FIXED_BTS; 1348 1348 1349 - if (!x86_pmu.num_counters_fixed) 1349 + if (!x86_pmu.num_events_fixed) 1350 1350 return -1; 1351 1351 1352 - if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS))) 1352 + if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS))) 1353 1353 return X86_PMC_IDX_FIXED_INSTRUCTIONS; 1354 - if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES))) 1354 + if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES))) 1355 1355 return X86_PMC_IDX_FIXED_CPU_CYCLES; 1356 - if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES))) 1356 + if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES))) 1357 1357 return X86_PMC_IDX_FIXED_BUS_CYCLES; 1358 1358 1359 1359 return -1; 1360 1360 } 1361 1361 1362 1362 /* 1363 - * Find a PMC slot for the freshly enabled / scheduled in counter: 1363 + * Find a PMC slot for the freshly enabled / scheduled in event: 1364 1364 */ 1365 - static int x86_pmu_enable(struct perf_counter *counter) 1365 + static int x86_pmu_enable(struct perf_event *event) 1366 1366 { 1367 - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1368 - struct hw_perf_counter *hwc = &counter->hw; 1367 + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1368 + struct hw_perf_event *hwc = &event->hw; 1369 1369 int idx; 1370 1370 1371 - idx = fixed_mode_idx(counter, hwc); 1371 + idx = fixed_mode_idx(event, hwc); 1372 1372 if (idx == X86_PMC_IDX_FIXED_BTS) { 1373 1373 /* BTS is already occupied. */ 1374 1374 if (test_and_set_bit(idx, cpuc->used_mask)) 1375 1375 return -EAGAIN; 1376 1376 1377 1377 hwc->config_base = 0; 1378 - hwc->counter_base = 0; 1378 + hwc->event_base = 0; 1379 1379 hwc->idx = idx; 1380 1380 } else if (idx >= 0) { 1381 1381 /* 1382 - * Try to get the fixed counter, if that is already taken 1383 - * then try to get a generic counter: 1382 + * Try to get the fixed event, if that is already taken 1383 + * then try to get a generic event: 1384 1384 */ 1385 1385 if (test_and_set_bit(idx, cpuc->used_mask)) 1386 1386 goto try_generic; 1387 1387 1388 1388 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; 1389 1389 /* 1390 - * We set it so that counter_base + idx in wrmsr/rdmsr maps to 1390 + * We set it so that event_base + idx in wrmsr/rdmsr maps to 1391 1391 * MSR_ARCH_PERFMON_FIXED_CTR0 ... 
CTR2: 1392 1392 */ 1393 - hwc->counter_base = 1393 + hwc->event_base = 1394 1394 MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED; 1395 1395 hwc->idx = idx; 1396 1396 } else { 1397 1397 idx = hwc->idx; 1398 - /* Try to get the previous generic counter again */ 1398 + /* Try to get the previous generic event again */ 1399 1399 if (test_and_set_bit(idx, cpuc->used_mask)) { 1400 1400 try_generic: 1401 1401 idx = find_first_zero_bit(cpuc->used_mask, 1402 - x86_pmu.num_counters); 1403 - if (idx == x86_pmu.num_counters) 1402 + x86_pmu.num_events); 1403 + if (idx == x86_pmu.num_events) 1404 1404 return -EAGAIN; 1405 1405 1406 1406 set_bit(idx, cpuc->used_mask); 1407 1407 hwc->idx = idx; 1408 1408 } 1409 1409 hwc->config_base = x86_pmu.eventsel; 1410 - hwc->counter_base = x86_pmu.perfctr; 1410 + hwc->event_base = x86_pmu.perfctr; 1411 1411 } 1412 1412 1413 - perf_counters_lapic_init(); 1413 + perf_events_lapic_init(); 1414 1414 1415 1415 x86_pmu.disable(hwc, idx); 1416 1416 1417 - cpuc->counters[idx] = counter; 1417 + cpuc->events[idx] = event; 1418 1418 set_bit(idx, cpuc->active_mask); 1419 1419 1420 - x86_perf_counter_set_period(counter, hwc, idx); 1420 + x86_perf_event_set_period(event, hwc, idx); 1421 1421 x86_pmu.enable(hwc, idx); 1422 1422 1423 - perf_counter_update_userpage(counter); 1423 + perf_event_update_userpage(event); 1424 1424 1425 1425 return 0; 1426 1426 } 1427 1427 1428 - static void x86_pmu_unthrottle(struct perf_counter *counter) 1428 + static void x86_pmu_unthrottle(struct perf_event *event) 1429 1429 { 1430 - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1431 - struct hw_perf_counter *hwc = &counter->hw; 1430 + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1431 + struct hw_perf_event *hwc = &event->hw; 1432 1432 1433 1433 if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX || 1434 - cpuc->counters[hwc->idx] != counter)) 1434 + cpuc->events[hwc->idx] != event)) 1435 1435 return; 1436 1436 1437 1437 x86_pmu.enable(hwc, hwc->idx); 1438 1438 } 1439 1439 1440 - void perf_counter_print_debug(void) 1440 + void perf_event_print_debug(void) 1441 1441 { 1442 1442 u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed; 1443 - struct cpu_hw_counters *cpuc; 1443 + struct cpu_hw_events *cpuc; 1444 1444 unsigned long flags; 1445 1445 int cpu, idx; 1446 1446 1447 - if (!x86_pmu.num_counters) 1447 + if (!x86_pmu.num_events) 1448 1448 return; 1449 1449 1450 1450 local_irq_save(flags); 1451 1451 1452 1452 cpu = smp_processor_id(); 1453 - cpuc = &per_cpu(cpu_hw_counters, cpu); 1453 + cpuc = &per_cpu(cpu_hw_events, cpu); 1454 1454 1455 1455 if (x86_pmu.version >= 2) { 1456 1456 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); ··· 1466 1466 } 1467 1467 pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask); 1468 1468 1469 - for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1469 + for (idx = 0; idx < x86_pmu.num_events; idx++) { 1470 1470 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl); 1471 1471 rdmsrl(x86_pmu.perfctr + idx, pmc_count); 1472 1472 ··· 1479 1479 pr_info("CPU#%d: gen-PMC%d left: %016llx\n", 1480 1480 cpu, idx, prev_left); 1481 1481 } 1482 - for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { 1482 + for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) { 1483 1483 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count); 1484 1484 1485 1485 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n", ··· 1488 1488 local_irq_restore(flags); 1489 1489 } 1490 1490 1491 - static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc) 1491 + static void 
intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc) 1492 1492 { 1493 1493 struct debug_store *ds = cpuc->ds; 1494 1494 struct bts_record { ··· 1496 1496 u64 to; 1497 1497 u64 flags; 1498 1498 }; 1499 - struct perf_counter *counter = cpuc->counters[X86_PMC_IDX_FIXED_BTS]; 1499 + struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS]; 1500 1500 struct bts_record *at, *top; 1501 1501 struct perf_output_handle handle; 1502 1502 struct perf_event_header header; 1503 1503 struct perf_sample_data data; 1504 1504 struct pt_regs regs; 1505 1505 1506 - if (!counter) 1506 + if (!event) 1507 1507 return; 1508 1508 1509 1509 if (!ds) ··· 1518 1518 ds->bts_index = ds->bts_buffer_base; 1519 1519 1520 1520 1521 - data.period = counter->hw.last_period; 1521 + data.period = event->hw.last_period; 1522 1522 data.addr = 0; 1523 1523 regs.ip = 0; 1524 1524 ··· 1527 1527 * We will overwrite the from and to address before we output 1528 1528 * the sample. 1529 1529 */ 1530 - perf_prepare_sample(&header, &data, counter, &regs); 1530 + perf_prepare_sample(&header, &data, event, &regs); 1531 1531 1532 - if (perf_output_begin(&handle, counter, 1532 + if (perf_output_begin(&handle, event, 1533 1533 header.size * (top - at), 1, 1)) 1534 1534 return; 1535 1535 ··· 1537 1537 data.ip = at->from; 1538 1538 data.addr = at->to; 1539 1539 1540 - perf_output_sample(&handle, &header, &data, counter); 1540 + perf_output_sample(&handle, &header, &data, event); 1541 1541 } 1542 1542 1543 1543 perf_output_end(&handle); 1544 1544 1545 1545 /* There's new data available. */ 1546 - counter->hw.interrupts++; 1547 - counter->pending_kill = POLL_IN; 1546 + event->hw.interrupts++; 1547 + event->pending_kill = POLL_IN; 1548 1548 } 1549 1549 1550 - static void x86_pmu_disable(struct perf_counter *counter) 1550 + static void x86_pmu_disable(struct perf_event *event) 1551 1551 { 1552 - struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); 1553 - struct hw_perf_counter *hwc = &counter->hw; 1552 + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1553 + struct hw_perf_event *hwc = &event->hw; 1554 1554 int idx = hwc->idx; 1555 1555 1556 1556 /* ··· 1562 1562 1563 1563 /* 1564 1564 * Make sure the cleared pointer becomes visible before we 1565 - * (potentially) free the counter: 1565 + * (potentially) free the event: 1566 1566 */ 1567 1567 barrier(); 1568 1568 1569 1569 /* 1570 - * Drain the remaining delta count out of a counter 1570 + * Drain the remaining delta count out of a event 1571 1571 * that we are disabling: 1572 1572 */ 1573 - x86_perf_counter_update(counter, hwc, idx); 1573 + x86_perf_event_update(event, hwc, idx); 1574 1574 1575 1575 /* Drain the remaining BTS records. */ 1576 1576 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) 1577 1577 intel_pmu_drain_bts_buffer(cpuc); 1578 1578 1579 - cpuc->counters[idx] = NULL; 1579 + cpuc->events[idx] = NULL; 1580 1580 clear_bit(idx, cpuc->used_mask); 1581 1581 1582 - perf_counter_update_userpage(counter); 1582 + perf_event_update_userpage(event); 1583 1583 } 1584 1584 1585 1585 /* 1586 - * Save and restart an expired counter. Called by NMI contexts, 1587 - * so it has to be careful about preempting normal counter ops: 1586 + * Save and restart an expired event. 
Called by NMI contexts, 1587 + * so it has to be careful about preempting normal event ops: 1588 1588 */ 1589 - static int intel_pmu_save_and_restart(struct perf_counter *counter) 1589 + static int intel_pmu_save_and_restart(struct perf_event *event) 1590 1590 { 1591 - struct hw_perf_counter *hwc = &counter->hw; 1591 + struct hw_perf_event *hwc = &event->hw; 1592 1592 int idx = hwc->idx; 1593 1593 int ret; 1594 1594 1595 - x86_perf_counter_update(counter, hwc, idx); 1596 - ret = x86_perf_counter_set_period(counter, hwc, idx); 1595 + x86_perf_event_update(event, hwc, idx); 1596 + ret = x86_perf_event_set_period(event, hwc, idx); 1597 1597 1598 - if (counter->state == PERF_COUNTER_STATE_ACTIVE) 1599 - intel_pmu_enable_counter(hwc, idx); 1598 + if (event->state == PERF_EVENT_STATE_ACTIVE) 1599 + intel_pmu_enable_event(hwc, idx); 1600 1600 1601 1601 return ret; 1602 1602 } 1603 1603 1604 1604 static void intel_pmu_reset(void) 1605 1605 { 1606 - struct debug_store *ds = __get_cpu_var(cpu_hw_counters).ds; 1606 + struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds; 1607 1607 unsigned long flags; 1608 1608 int idx; 1609 1609 1610 - if (!x86_pmu.num_counters) 1610 + if (!x86_pmu.num_events) 1611 1611 return; 1612 1612 1613 1613 local_irq_save(flags); 1614 1614 1615 1615 printk("clearing PMU state on CPU#%d\n", smp_processor_id()); 1616 1616 1617 - for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1617 + for (idx = 0; idx < x86_pmu.num_events; idx++) { 1618 1618 checking_wrmsrl(x86_pmu.eventsel + idx, 0ull); 1619 1619 checking_wrmsrl(x86_pmu.perfctr + idx, 0ull); 1620 1620 } 1621 - for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { 1621 + for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) { 1622 1622 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); 1623 1623 } 1624 1624 if (ds) ··· 1630 1630 static int p6_pmu_handle_irq(struct pt_regs *regs) 1631 1631 { 1632 1632 struct perf_sample_data data; 1633 - struct cpu_hw_counters *cpuc; 1634 - struct perf_counter *counter; 1635 - struct hw_perf_counter *hwc; 1633 + struct cpu_hw_events *cpuc; 1634 + struct perf_event *event; 1635 + struct hw_perf_event *hwc; 1636 1636 int idx, handled = 0; 1637 1637 u64 val; 1638 1638 1639 1639 data.addr = 0; 1640 1640 1641 - cpuc = &__get_cpu_var(cpu_hw_counters); 1641 + cpuc = &__get_cpu_var(cpu_hw_events); 1642 1642 1643 - for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1643 + for (idx = 0; idx < x86_pmu.num_events; idx++) { 1644 1644 if (!test_bit(idx, cpuc->active_mask)) 1645 1645 continue; 1646 1646 1647 - counter = cpuc->counters[idx]; 1648 - hwc = &counter->hw; 1647 + event = cpuc->events[idx]; 1648 + hwc = &event->hw; 1649 1649 1650 - val = x86_perf_counter_update(counter, hwc, idx); 1651 - if (val & (1ULL << (x86_pmu.counter_bits - 1))) 1650 + val = x86_perf_event_update(event, hwc, idx); 1651 + if (val & (1ULL << (x86_pmu.event_bits - 1))) 1652 1652 continue; 1653 1653 1654 1654 /* 1655 - * counter overflow 1655 + * event overflow 1656 1656 */ 1657 1657 handled = 1; 1658 - data.period = counter->hw.last_period; 1658 + data.period = event->hw.last_period; 1659 1659 1660 - if (!x86_perf_counter_set_period(counter, hwc, idx)) 1660 + if (!x86_perf_event_set_period(event, hwc, idx)) 1661 1661 continue; 1662 1662 1663 - if (perf_counter_overflow(counter, 1, &data, regs)) 1664 - p6_pmu_disable_counter(hwc, idx); 1663 + if (perf_event_overflow(event, 1, &data, regs)) 1664 + p6_pmu_disable_event(hwc, idx); 1665 1665 } 1666 1666 1667 1667 if (handled) ··· 1677 1677 static int 
intel_pmu_handle_irq(struct pt_regs *regs) 1678 1678 { 1679 1679 struct perf_sample_data data; 1680 - struct cpu_hw_counters *cpuc; 1680 + struct cpu_hw_events *cpuc; 1681 1681 int bit, loops; 1682 1682 u64 ack, status; 1683 1683 1684 1684 data.addr = 0; 1685 1685 1686 - cpuc = &__get_cpu_var(cpu_hw_counters); 1686 + cpuc = &__get_cpu_var(cpu_hw_events); 1687 1687 1688 1688 perf_disable(); 1689 1689 intel_pmu_drain_bts_buffer(cpuc); ··· 1696 1696 loops = 0; 1697 1697 again: 1698 1698 if (++loops > 100) { 1699 - WARN_ONCE(1, "perfcounters: irq loop stuck!\n"); 1700 - perf_counter_print_debug(); 1699 + WARN_ONCE(1, "perfevents: irq loop stuck!\n"); 1700 + perf_event_print_debug(); 1701 1701 intel_pmu_reset(); 1702 1702 perf_enable(); 1703 1703 return 1; ··· 1706 1706 inc_irq_stat(apic_perf_irqs); 1707 1707 ack = status; 1708 1708 for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { 1709 - struct perf_counter *counter = cpuc->counters[bit]; 1709 + struct perf_event *event = cpuc->events[bit]; 1710 1710 1711 1711 clear_bit(bit, (unsigned long *) &status); 1712 1712 if (!test_bit(bit, cpuc->active_mask)) 1713 1713 continue; 1714 1714 1715 - if (!intel_pmu_save_and_restart(counter)) 1715 + if (!intel_pmu_save_and_restart(event)) 1716 1716 continue; 1717 1717 1718 - data.period = counter->hw.last_period; 1718 + data.period = event->hw.last_period; 1719 1719 1720 - if (perf_counter_overflow(counter, 1, &data, regs)) 1721 - intel_pmu_disable_counter(&counter->hw, bit); 1720 + if (perf_event_overflow(event, 1, &data, regs)) 1721 + intel_pmu_disable_event(&event->hw, bit); 1722 1722 } 1723 1723 1724 1724 intel_pmu_ack_status(ack); ··· 1738 1738 static int amd_pmu_handle_irq(struct pt_regs *regs) 1739 1739 { 1740 1740 struct perf_sample_data data; 1741 - struct cpu_hw_counters *cpuc; 1742 - struct perf_counter *counter; 1743 - struct hw_perf_counter *hwc; 1741 + struct cpu_hw_events *cpuc; 1742 + struct perf_event *event; 1743 + struct hw_perf_event *hwc; 1744 1744 int idx, handled = 0; 1745 1745 u64 val; 1746 1746 1747 1747 data.addr = 0; 1748 1748 1749 - cpuc = &__get_cpu_var(cpu_hw_counters); 1749 + cpuc = &__get_cpu_var(cpu_hw_events); 1750 1750 1751 - for (idx = 0; idx < x86_pmu.num_counters; idx++) { 1751 + for (idx = 0; idx < x86_pmu.num_events; idx++) { 1752 1752 if (!test_bit(idx, cpuc->active_mask)) 1753 1753 continue; 1754 1754 1755 - counter = cpuc->counters[idx]; 1756 - hwc = &counter->hw; 1755 + event = cpuc->events[idx]; 1756 + hwc = &event->hw; 1757 1757 1758 - val = x86_perf_counter_update(counter, hwc, idx); 1759 - if (val & (1ULL << (x86_pmu.counter_bits - 1))) 1758 + val = x86_perf_event_update(event, hwc, idx); 1759 + if (val & (1ULL << (x86_pmu.event_bits - 1))) 1760 1760 continue; 1761 1761 1762 1762 /* 1763 - * counter overflow 1763 + * event overflow 1764 1764 */ 1765 1765 handled = 1; 1766 - data.period = counter->hw.last_period; 1766 + data.period = event->hw.last_period; 1767 1767 1768 - if (!x86_perf_counter_set_period(counter, hwc, idx)) 1768 + if (!x86_perf_event_set_period(event, hwc, idx)) 1769 1769 continue; 1770 1770 1771 - if (perf_counter_overflow(counter, 1, &data, regs)) 1772 - amd_pmu_disable_counter(hwc, idx); 1771 + if (perf_event_overflow(event, 1, &data, regs)) 1772 + amd_pmu_disable_event(hwc, idx); 1773 1773 } 1774 1774 1775 1775 if (handled) ··· 1783 1783 irq_enter(); 1784 1784 ack_APIC_irq(); 1785 1785 inc_irq_stat(apic_pending_irqs); 1786 - perf_counter_do_pending(); 1786 + perf_event_do_pending(); 1787 1787 irq_exit(); 1788 1788 } 1789 1789 
1790 - void set_perf_counter_pending(void) 1790 + void set_perf_event_pending(void) 1791 1791 { 1792 1792 #ifdef CONFIG_X86_LOCAL_APIC 1793 1793 apic->send_IPI_self(LOCAL_PENDING_VECTOR); 1794 1794 #endif 1795 1795 } 1796 1796 1797 - void perf_counters_lapic_init(void) 1797 + void perf_events_lapic_init(void) 1798 1798 { 1799 1799 #ifdef CONFIG_X86_LOCAL_APIC 1800 1800 if (!x86_pmu.apic || !x86_pmu_initialized()) ··· 1808 1808 } 1809 1809 1810 1810 static int __kprobes 1811 - perf_counter_nmi_handler(struct notifier_block *self, 1811 + perf_event_nmi_handler(struct notifier_block *self, 1812 1812 unsigned long cmd, void *__args) 1813 1813 { 1814 1814 struct die_args *args = __args; 1815 1815 struct pt_regs *regs; 1816 1816 1817 - if (!atomic_read(&active_counters)) 1817 + if (!atomic_read(&active_events)) 1818 1818 return NOTIFY_DONE; 1819 1819 1820 1820 switch (cmd) { ··· 1833 1833 #endif 1834 1834 /* 1835 1835 * Can't rely on the handled return value to say it was our NMI, two 1836 - * counters could trigger 'simultaneously' raising two back-to-back NMIs. 1836 + * events could trigger 'simultaneously' raising two back-to-back NMIs. 1837 1837 * 1838 1838 * If the first NMI handles both, the latter will be empty and daze 1839 1839 * the CPU. ··· 1843 1843 return NOTIFY_STOP; 1844 1844 } 1845 1845 1846 - static __read_mostly struct notifier_block perf_counter_nmi_notifier = { 1847 - .notifier_call = perf_counter_nmi_handler, 1846 + static __read_mostly struct notifier_block perf_event_nmi_notifier = { 1847 + .notifier_call = perf_event_nmi_handler, 1848 1848 .next = NULL, 1849 1849 .priority = 1 1850 1850 }; ··· 1854 1854 .handle_irq = p6_pmu_handle_irq, 1855 1855 .disable_all = p6_pmu_disable_all, 1856 1856 .enable_all = p6_pmu_enable_all, 1857 - .enable = p6_pmu_enable_counter, 1858 - .disable = p6_pmu_disable_counter, 1857 + .enable = p6_pmu_enable_event, 1858 + .disable = p6_pmu_disable_event, 1859 1859 .eventsel = MSR_P6_EVNTSEL0, 1860 1860 .perfctr = MSR_P6_PERFCTR0, 1861 1861 .event_map = p6_pmu_event_map, ··· 1864 1864 .apic = 1, 1865 1865 .max_period = (1ULL << 31) - 1, 1866 1866 .version = 0, 1867 - .num_counters = 2, 1867 + .num_events = 2, 1868 1868 /* 1869 - * Counters have 40 bits implemented. However they are designed such 1869 + * Events have 40 bits implemented. However they are designed such 1870 1870 * that bits [32-39] are sign extensions of bit 31. As such the 1871 - * effective width of a counter for P6-like PMU is 32 bits only. 1871 + * effective width of a event for P6-like PMU is 32 bits only. 
1872 1872 * 1873 1873 * See IA-32 Intel Architecture Software developer manual Vol 3B 1874 1874 */ 1875 - .counter_bits = 32, 1876 - .counter_mask = (1ULL << 32) - 1, 1875 + .event_bits = 32, 1876 + .event_mask = (1ULL << 32) - 1, 1877 1877 }; 1878 1878 1879 1879 static struct x86_pmu intel_pmu = { ··· 1881 1881 .handle_irq = intel_pmu_handle_irq, 1882 1882 .disable_all = intel_pmu_disable_all, 1883 1883 .enable_all = intel_pmu_enable_all, 1884 - .enable = intel_pmu_enable_counter, 1885 - .disable = intel_pmu_disable_counter, 1884 + .enable = intel_pmu_enable_event, 1885 + .disable = intel_pmu_disable_event, 1886 1886 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, 1887 1887 .perfctr = MSR_ARCH_PERFMON_PERFCTR0, 1888 1888 .event_map = intel_pmu_event_map, ··· 1892 1892 /* 1893 1893 * Intel PMCs cannot be accessed sanely above 32 bit width, 1894 1894 * so we install an artificial 1<<31 period regardless of 1895 - * the generic counter period: 1895 + * the generic event period: 1896 1896 */ 1897 1897 .max_period = (1ULL << 31) - 1, 1898 1898 .enable_bts = intel_pmu_enable_bts, ··· 1904 1904 .handle_irq = amd_pmu_handle_irq, 1905 1905 .disable_all = amd_pmu_disable_all, 1906 1906 .enable_all = amd_pmu_enable_all, 1907 - .enable = amd_pmu_enable_counter, 1908 - .disable = amd_pmu_disable_counter, 1907 + .enable = amd_pmu_enable_event, 1908 + .disable = amd_pmu_disable_event, 1909 1909 .eventsel = MSR_K7_EVNTSEL0, 1910 1910 .perfctr = MSR_K7_PERFCTR0, 1911 1911 .event_map = amd_pmu_event_map, 1912 1912 .raw_event = amd_pmu_raw_event, 1913 1913 .max_events = ARRAY_SIZE(amd_perfmon_event_map), 1914 - .num_counters = 4, 1915 - .counter_bits = 48, 1916 - .counter_mask = (1ULL << 48) - 1, 1914 + .num_events = 4, 1915 + .event_bits = 48, 1916 + .event_mask = (1ULL << 48) - 1, 1917 1917 .apic = 1, 1918 1918 /* use highest bit to detect overflow */ 1919 1919 .max_period = (1ULL << 47) - 1, ··· 1970 1970 1971 1971 /* 1972 1972 * Check whether the Architectural PerfMon supports 1973 - * Branch Misses Retired Event or not. 1973 + * Branch Misses Retired hw_event or not. 
1974 1974 */ 1975 1975 cpuid(10, &eax.full, &ebx, &unused, &edx.full); 1976 1976 if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED) ··· 1982 1982 1983 1983 x86_pmu = intel_pmu; 1984 1984 x86_pmu.version = version; 1985 - x86_pmu.num_counters = eax.split.num_counters; 1986 - x86_pmu.counter_bits = eax.split.bit_width; 1987 - x86_pmu.counter_mask = (1ULL << eax.split.bit_width) - 1; 1985 + x86_pmu.num_events = eax.split.num_events; 1986 + x86_pmu.event_bits = eax.split.bit_width; 1987 + x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1; 1988 1988 1989 1989 /* 1990 - * Quirk: v2 perfmon does not report fixed-purpose counters, so 1991 - * assume at least 3 counters: 1990 + * Quirk: v2 perfmon does not report fixed-purpose events, so 1991 + * assume at least 3 events: 1992 1992 */ 1993 - x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3); 1993 + x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3); 1994 1994 1995 1995 /* 1996 1996 * Install the hw-cache-events table: ··· 2037 2037 return 0; 2038 2038 } 2039 2039 2040 - void __init init_hw_perf_counters(void) 2040 + void __init init_hw_perf_events(void) 2041 2041 { 2042 2042 int err; 2043 2043 2044 - pr_info("Performance Counters: "); 2044 + pr_info("Performance Events: "); 2045 2045 2046 2046 switch (boot_cpu_data.x86_vendor) { 2047 2047 case X86_VENDOR_INTEL: ··· 2054 2054 return; 2055 2055 } 2056 2056 if (err != 0) { 2057 - pr_cont("no PMU driver, software counters only.\n"); 2057 + pr_cont("no PMU driver, software events only.\n"); 2058 2058 return; 2059 2059 } 2060 2060 2061 2061 pr_cont("%s PMU driver.\n", x86_pmu.name); 2062 2062 2063 - if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) { 2064 - WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!", 2065 - x86_pmu.num_counters, X86_PMC_MAX_GENERIC); 2066 - x86_pmu.num_counters = X86_PMC_MAX_GENERIC; 2063 + if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) { 2064 + WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!", 2065 + x86_pmu.num_events, X86_PMC_MAX_GENERIC); 2066 + x86_pmu.num_events = X86_PMC_MAX_GENERIC; 2067 2067 } 2068 - perf_counter_mask = (1 << x86_pmu.num_counters) - 1; 2069 - perf_max_counters = x86_pmu.num_counters; 2068 + perf_event_mask = (1 << x86_pmu.num_events) - 1; 2069 + perf_max_events = x86_pmu.num_events; 2070 2070 2071 - if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) { 2072 - WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!", 2073 - x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED); 2074 - x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED; 2071 + if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) { 2072 + WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!", 2073 + x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED); 2074 + x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED; 2075 2075 } 2076 2076 2077 - perf_counter_mask |= 2078 - ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED; 2079 - x86_pmu.intel_ctrl = perf_counter_mask; 2077 + perf_event_mask |= 2078 + ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED; 2079 + x86_pmu.intel_ctrl = perf_event_mask; 2080 2080 2081 - perf_counters_lapic_init(); 2082 - register_die_notifier(&perf_counter_nmi_notifier); 2081 + perf_events_lapic_init(); 2082 + register_die_notifier(&perf_event_nmi_notifier); 2083 2083 2084 - pr_info("... version: %d\n", x86_pmu.version); 2085 - pr_info("... bit width: %d\n", x86_pmu.counter_bits); 2086 - pr_info("... generic counters: %d\n", x86_pmu.num_counters); 2087 - pr_info("... 
value mask: %016Lx\n", x86_pmu.counter_mask); 2088 - pr_info("... max period: %016Lx\n", x86_pmu.max_period); 2089 - pr_info("... fixed-purpose counters: %d\n", x86_pmu.num_counters_fixed); 2090 - pr_info("... counter mask: %016Lx\n", perf_counter_mask); 2084 + pr_info("... version: %d\n", x86_pmu.version); 2085 + pr_info("... bit width: %d\n", x86_pmu.event_bits); 2086 + pr_info("... generic registers: %d\n", x86_pmu.num_events); 2087 + pr_info("... value mask: %016Lx\n", x86_pmu.event_mask); 2088 + pr_info("... max period: %016Lx\n", x86_pmu.max_period); 2089 + pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed); 2090 + pr_info("... event mask: %016Lx\n", perf_event_mask); 2091 2091 } 2092 2092 2093 - static inline void x86_pmu_read(struct perf_counter *counter) 2093 + static inline void x86_pmu_read(struct perf_event *event) 2094 2094 { 2095 - x86_perf_counter_update(counter, &counter->hw, counter->hw.idx); 2095 + x86_perf_event_update(event, &event->hw, event->hw.idx); 2096 2096 } 2097 2097 2098 2098 static const struct pmu pmu = { ··· 2102 2102 .unthrottle = x86_pmu_unthrottle, 2103 2103 }; 2104 2104 2105 - const struct pmu *hw_perf_counter_init(struct perf_counter *counter) 2105 + const struct pmu *hw_perf_event_init(struct perf_event *event) 2106 2106 { 2107 2107 int err; 2108 2108 2109 - err = __hw_perf_counter_init(counter); 2109 + err = __hw_perf_event_init(event); 2110 2110 if (err) { 2111 - if (counter->destroy) 2112 - counter->destroy(counter); 2111 + if (event->destroy) 2112 + event->destroy(event); 2113 2113 return ERR_PTR(err); 2114 2114 } 2115 2115 ··· 2292 2292 return entry; 2293 2293 } 2294 2294 2295 - void hw_perf_counter_setup_online(int cpu) 2295 + void hw_perf_event_setup_online(int cpu) 2296 2296 { 2297 2297 init_debug_store_on_cpu(cpu); 2298 2298 }
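The period logic in the file above preloads the PMC with the *negated* period: x86 counters count upward and raise their interrupt on overflow, so writing (u64)(-left) & x86_pmu.event_mask makes the counter wrap to zero after exactly 'left' increments. A minimal stand-alone illustration of that arithmetic (plain user-space C, not kernel code; a 32-bit effective counter width is assumed, as on the P6-class PMU described above):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t event_mask = (1ULL << 32) - 1;	/* 32-bit effective width (P6) */
	int64_t left = 100000;				/* events until the next PMI */

	/* What x86_perf_event_set_period() writes into the counter: */
	uint64_t preload = (uint64_t)(-left) & event_mask;

	printf("preload:            %#llx\n", (unsigned long long)preload);
	/* After 'left' increments the masked value wraps to 0 == overflow: */
	printf("after %lld increments: %#llx\n", (long long)left,
	       (unsigned long long)((preload + (uint64_t)left) & event_mask));
	return 0;
}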
+1 -1
arch/x86/kernel/cpu/perfctr-watchdog.c
··· 20 20 #include <linux/kprobes.h> 21 21 22 22 #include <asm/apic.h> 23 - #include <asm/perf_counter.h> 23 + #include <asm/perf_event.h> 24 24 25 25 struct nmi_watchdog_ctlblk { 26 26 unsigned int cccr_msr;
+1 -1
arch/x86/kernel/entry_64.S
··· 1021 1021 apicinterrupt SPURIOUS_APIC_VECTOR \ 1022 1022 spurious_interrupt smp_spurious_interrupt 1023 1023 1024 - #ifdef CONFIG_PERF_COUNTERS 1024 + #ifdef CONFIG_PERF_EVENTS 1025 1025 apicinterrupt LOCAL_PENDING_VECTOR \ 1026 1026 perf_pending_interrupt smp_perf_pending_interrupt 1027 1027 #endif
+1 -1
arch/x86/kernel/irqinit.c
··· 208 208 alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt); 209 209 210 210 /* Performance monitoring interrupts: */ 211 - # ifdef CONFIG_PERF_COUNTERS 211 + # ifdef CONFIG_PERF_EVENTS 212 212 alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt); 213 213 # endif 214 214
+1 -1
arch/x86/kernel/syscall_table_32.S
··· 335 335 .long sys_preadv 336 336 .long sys_pwritev 337 337 .long sys_rt_tgsigqueueinfo /* 335 */ 338 - .long sys_perf_counter_open 338 + .long sys_perf_event_open
+4 -4
arch/x86/mm/fault.c
··· 10 10 #include <linux/bootmem.h> /* max_low_pfn */ 11 11 #include <linux/kprobes.h> /* __kprobes, ... */ 12 12 #include <linux/mmiotrace.h> /* kmmio_handler, ... */ 13 - #include <linux/perf_counter.h> /* perf_swcounter_event */ 13 + #include <linux/perf_event.h> /* perf_sw_event */ 14 14 15 15 #include <asm/traps.h> /* dotraplinkage, ... */ 16 16 #include <asm/pgalloc.h> /* pgd_*(), ... */ ··· 1017 1017 if (unlikely(error_code & PF_RSVD)) 1018 1018 pgtable_bad(regs, error_code, address); 1019 1019 1020 - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); 1020 + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); 1021 1021 1022 1022 /* 1023 1023 * If we're in an interrupt, have no user context or are running ··· 1114 1114 1115 1115 if (fault & VM_FAULT_MAJOR) { 1116 1116 tsk->maj_flt++; 1117 - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, 1117 + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, 1118 1118 regs, address); 1119 1119 } else { 1120 1120 tsk->min_flt++; 1121 - perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, 1121 + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, 1122 1122 regs, address); 1123 1123 } 1124 1124
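With the fault handler feeding the renamed software events, those counts are reachable from user space through the renamed syscall as well. A minimal sketch that counts its own page faults (assumes a post-rename kernel with the new <linux/perf_event.h> installed; there is no glibc wrapper, so the raw syscall is used; error handling mostly omitted):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	unsigned long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_PAGE_FAULTS;	/* fed by perf_sw_event() above */
	attr.disabled = 1;

	/* pid 0 = this task, cpu -1 = any CPU, no group, no flags */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... the workload to be measured ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	read(fd, &count, sizeof(count));
	printf("page faults: %llu\n", count);
	close(fd);
	return 0;
}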
+2 -2
arch/x86/oprofile/op_model_ppro.c
··· 234 234 if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 && 235 235 current_cpu_data.x86_model == 15) { 236 236 eax.split.version_id = 2; 237 - eax.split.num_counters = 2; 237 + eax.split.num_events = 2; 238 238 eax.split.bit_width = 40; 239 239 } 240 240 241 - num_counters = eax.split.num_counters; 241 + num_counters = eax.split.num_events; 242 242 243 243 op_arch_perfmon_spec.num_counters = num_counters; 244 244 op_arch_perfmon_spec.num_controls = num_counters;
+1 -1
arch/x86/oprofile/op_x86_model.h
··· 13 13 #define OP_X86_MODEL_H 14 14 15 15 #include <asm/types.h> 16 - #include <asm/perf_counter.h> 16 + #include <asm/perf_event.h> 17 17 18 18 struct op_msr { 19 19 unsigned long addr;
+2 -2
drivers/char/sysrq.c
··· 26 26 #include <linux/proc_fs.h> 27 27 #include <linux/nmi.h> 28 28 #include <linux/quotaops.h> 29 - #include <linux/perf_counter.h> 29 + #include <linux/perf_event.h> 30 30 #include <linux/kernel.h> 31 31 #include <linux/module.h> 32 32 #include <linux/suspend.h> ··· 252 252 struct pt_regs *regs = get_irq_regs(); 253 253 if (regs) 254 254 show_regs(regs); 255 - perf_counter_print_debug(); 255 + perf_event_print_debug(); 256 256 } 257 257 static struct sysrq_key_op sysrq_showregs_op = { 258 258 .handler = sysrq_handle_showregs,
+3 -3
fs/exec.c
··· 33 33 #include <linux/string.h> 34 34 #include <linux/init.h> 35 35 #include <linux/pagemap.h> 36 - #include <linux/perf_counter.h> 36 + #include <linux/perf_event.h> 37 37 #include <linux/highmem.h> 38 38 #include <linux/spinlock.h> 39 39 #include <linux/key.h> ··· 923 923 task_lock(tsk); 924 924 strlcpy(tsk->comm, buf, sizeof(tsk->comm)); 925 925 task_unlock(tsk); 926 - perf_counter_comm(tsk); 926 + perf_event_comm(tsk); 927 927 } 928 928 929 929 int flush_old_exec(struct linux_binprm * bprm) ··· 997 997 * security domain: 998 998 */ 999 999 if (!get_dumpable(current->mm)) 1000 - perf_counter_exit_task(current); 1000 + perf_event_exit_task(current); 1001 1001 1002 1002 /* An exec changes our domain. We are no longer part of the thread 1003 1003 group */
+2 -2
include/asm-generic/unistd.h
··· 620 620 621 621 #define __NR_rt_tgsigqueueinfo 240 622 622 __SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo) 623 - #define __NR_perf_counter_open 241 624 - __SYSCALL(__NR_perf_counter_open, sys_perf_counter_open) 623 + #define __NR_perf_event_open 241 624 + __SYSCALL(__NR_perf_event_open, sys_perf_event_open) 625 625 626 626 #undef __NR_syscalls 627 627 #define __NR_syscalls 242
+7 -7
include/linux/init_task.h
··· 106 106 107 107 extern struct cred init_cred; 108 108 109 - #ifdef CONFIG_PERF_COUNTERS 110 - # define INIT_PERF_COUNTERS(tsk) \ 111 - .perf_counter_mutex = \ 112 - __MUTEX_INITIALIZER(tsk.perf_counter_mutex), \ 113 - .perf_counter_list = LIST_HEAD_INIT(tsk.perf_counter_list), 109 + #ifdef CONFIG_PERF_EVENTS 110 + # define INIT_PERF_EVENTS(tsk) \ 111 + .perf_event_mutex = \ 112 + __MUTEX_INITIALIZER(tsk.perf_event_mutex), \ 113 + .perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list), 114 114 #else 115 - # define INIT_PERF_COUNTERS(tsk) 115 + # define INIT_PERF_EVENTS(tsk) 116 116 #endif 117 117 118 118 /* ··· 178 178 }, \ 179 179 .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \ 180 180 INIT_IDS \ 181 - INIT_PERF_COUNTERS(tsk) \ 181 + INIT_PERF_EVENTS(tsk) \ 182 182 INIT_TRACE_IRQFLAGS \ 183 183 INIT_LOCKDEP \ 184 184 INIT_FTRACE_GRAPH \
+40 -457
include/linux/perf_counter.h
··· 1 1 /* 2 - * Performance counters: 2 + * NOTE: this file will be removed in a future kernel release, it is 3 + * provided as a courtesy copy of user-space code that relies on the 4 + * old (pre-rename) symbols and constants. 5 + * 6 + * Performance events: 3 7 * 4 8 * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de> 5 9 * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar ··· 135 131 * as specified by attr.read_format: 136 132 * 137 133 * struct read_format { 138 - * { u64 value; 139 - * { u64 time_enabled; } && PERF_FORMAT_ENABLED 140 - * { u64 time_running; } && PERF_FORMAT_RUNNING 141 - * { u64 id; } && PERF_FORMAT_ID 142 - * } && !PERF_FORMAT_GROUP 134 + * { u64 value; 135 + * { u64 time_enabled; } && PERF_FORMAT_ENABLED 136 + * { u64 time_running; } && PERF_FORMAT_RUNNING 137 + * { u64 id; } && PERF_FORMAT_ID 138 + * } && !PERF_FORMAT_GROUP 143 139 * 144 - * { u64 nr; 145 - * { u64 time_enabled; } && PERF_FORMAT_ENABLED 146 - * { u64 time_running; } && PERF_FORMAT_RUNNING 147 - * { u64 value; 148 - * { u64 id; } && PERF_FORMAT_ID 149 - * } cntr[nr]; 150 - * } && PERF_FORMAT_GROUP 140 + * { u64 nr; 141 + * { u64 time_enabled; } && PERF_FORMAT_ENABLED 142 + * { u64 time_running; } && PERF_FORMAT_RUNNING 143 + * { u64 value; 144 + * { u64 id; } && PERF_FORMAT_ID 145 + * } cntr[nr]; 146 + * } && PERF_FORMAT_GROUP 151 147 * }; 152 148 */ 153 149 enum perf_counter_read_format { ··· 318 314 319 315 /* 320 316 * struct { 321 - * struct perf_event_header header; 322 - * u64 id; 323 - * u64 lost; 317 + * struct perf_event_header header; 318 + * u64 id; 319 + * u64 lost; 324 320 * }; 325 321 */ 326 322 PERF_EVENT_LOST = 2, ··· 368 364 369 365 /* 370 366 * struct { 371 - * struct perf_event_header header; 372 - * u32 pid, tid; 367 + * struct perf_event_header header; 368 + * u32 pid, tid; 373 369 * 374 - * struct read_format values; 370 + * struct read_format values; 375 371 * }; 376 372 */ 377 373 PERF_EVENT_READ = 8, ··· 387 383 * { u64 id; } && PERF_SAMPLE_ID 388 384 * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID 389 385 * { u32 cpu, res; } && PERF_SAMPLE_CPU 390 - * { u64 period; } && PERF_SAMPLE_PERIOD 386 + * { u64 period; } && PERF_SAMPLE_PERIOD 391 387 * 392 388 * { struct read_format values; } && PERF_SAMPLE_READ 393 389 * 394 390 * { u64 nr, 395 391 * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN 396 392 * 397 - * # 398 - * # The RAW record below is opaque data wrt the ABI 399 - * # 400 - * # That is, the ABI doesn't make any promises wrt to 401 - * # the stability of its content, it may vary depending 402 - * # on event, hardware, kernel version and phase of 403 - * # the moon. 404 - * # 405 - * # In other words, PERF_SAMPLE_RAW contents are not an ABI. 406 - * # 393 + * # 394 + * # The RAW record below is opaque data wrt the ABI 395 + * # 396 + * # That is, the ABI doesn't make any promises wrt to 397 + * # the stability of its content, it may vary depending 398 + * # on event, hardware, kernel version and phase of 399 + * # the moon. 400 + * # 401 + * # In other words, PERF_SAMPLE_RAW contents are not an ABI. 
402 + * # 407 403 * 408 404 * { u32 size; 409 405 * char data[size];}&& PERF_SAMPLE_RAW ··· 426 422 PERF_CONTEXT_MAX = (__u64)-4095, 427 423 }; 428 424 429 - #define PERF_FLAG_FD_NO_GROUP (1U << 0) 430 - #define PERF_FLAG_FD_OUTPUT (1U << 1) 431 - 432 - #ifdef __KERNEL__ 433 - /* 434 - * Kernel-internal data types and definitions: 435 - */ 436 - 437 - #ifdef CONFIG_PERF_COUNTERS 438 - # include <asm/perf_counter.h> 439 - #endif 440 - 441 - #include <linux/list.h> 442 - #include <linux/mutex.h> 443 - #include <linux/rculist.h> 444 - #include <linux/rcupdate.h> 445 - #include <linux/spinlock.h> 446 - #include <linux/hrtimer.h> 447 - #include <linux/fs.h> 448 - #include <linux/pid_namespace.h> 449 - #include <asm/atomic.h> 450 - 451 - #define PERF_MAX_STACK_DEPTH 255 452 - 453 - struct perf_callchain_entry { 454 - __u64 nr; 455 - __u64 ip[PERF_MAX_STACK_DEPTH]; 456 - }; 457 - 458 - struct perf_raw_record { 459 - u32 size; 460 - void *data; 461 - }; 462 - 463 - struct task_struct; 464 - 465 - /** 466 - * struct hw_perf_counter - performance counter hardware details: 467 - */ 468 - struct hw_perf_counter { 469 - #ifdef CONFIG_PERF_COUNTERS 470 - union { 471 - struct { /* hardware */ 472 - u64 config; 473 - unsigned long config_base; 474 - unsigned long counter_base; 475 - int idx; 476 - }; 477 - union { /* software */ 478 - atomic64_t count; 479 - struct hrtimer hrtimer; 480 - }; 481 - }; 482 - atomic64_t prev_count; 483 - u64 sample_period; 484 - u64 last_period; 485 - atomic64_t period_left; 486 - u64 interrupts; 487 - 488 - u64 freq_count; 489 - u64 freq_interrupts; 490 - u64 freq_stamp; 491 - #endif 492 - }; 493 - 494 - struct perf_counter; 495 - 496 - /** 497 - * struct pmu - generic performance monitoring unit 498 - */ 499 - struct pmu { 500 - int (*enable) (struct perf_counter *counter); 501 - void (*disable) (struct perf_counter *counter); 502 - void (*read) (struct perf_counter *counter); 503 - void (*unthrottle) (struct perf_counter *counter); 504 - }; 505 - 506 - /** 507 - * enum perf_counter_active_state - the states of a counter 508 - */ 509 - enum perf_counter_active_state { 510 - PERF_COUNTER_STATE_ERROR = -2, 511 - PERF_COUNTER_STATE_OFF = -1, 512 - PERF_COUNTER_STATE_INACTIVE = 0, 513 - PERF_COUNTER_STATE_ACTIVE = 1, 514 - }; 515 - 516 - struct file; 517 - 518 - struct perf_mmap_data { 519 - struct rcu_head rcu_head; 520 - int nr_pages; /* nr of data pages */ 521 - int writable; /* are we writable */ 522 - int nr_locked; /* nr pages mlocked */ 523 - 524 - atomic_t poll; /* POLL_ for wakeups */ 525 - atomic_t events; /* event limit */ 526 - 527 - atomic_long_t head; /* write position */ 528 - atomic_long_t done_head; /* completed head */ 529 - 530 - atomic_t lock; /* concurrent writes */ 531 - atomic_t wakeup; /* needs a wakeup */ 532 - atomic_t lost; /* nr records lost */ 533 - 534 - long watermark; /* wakeup watermark */ 535 - 536 - struct perf_counter_mmap_page *user_page; 537 - void *data_pages[0]; 538 - }; 539 - 540 - struct perf_pending_entry { 541 - struct perf_pending_entry *next; 542 - void (*func)(struct perf_pending_entry *); 543 - }; 544 - 545 - /** 546 - * struct perf_counter - performance counter kernel representation: 547 - */ 548 - struct perf_counter { 549 - #ifdef CONFIG_PERF_COUNTERS 550 - struct list_head list_entry; 551 - struct list_head event_entry; 552 - struct list_head sibling_list; 553 - int nr_siblings; 554 - struct perf_counter *group_leader; 555 - struct perf_counter *output; 556 - const struct pmu *pmu; 557 - 558 - enum perf_counter_active_state 
state; 559 - atomic64_t count; 560 - 561 - /* 562 - * These are the total time in nanoseconds that the counter 563 - * has been enabled (i.e. eligible to run, and the task has 564 - * been scheduled in, if this is a per-task counter) 565 - * and running (scheduled onto the CPU), respectively. 566 - * 567 - * They are computed from tstamp_enabled, tstamp_running and 568 - * tstamp_stopped when the counter is in INACTIVE or ACTIVE state. 569 - */ 570 - u64 total_time_enabled; 571 - u64 total_time_running; 572 - 573 - /* 574 - * These are timestamps used for computing total_time_enabled 575 - * and total_time_running when the counter is in INACTIVE or 576 - * ACTIVE state, measured in nanoseconds from an arbitrary point 577 - * in time. 578 - * tstamp_enabled: the notional time when the counter was enabled 579 - * tstamp_running: the notional time when the counter was scheduled on 580 - * tstamp_stopped: in INACTIVE state, the notional time when the 581 - * counter was scheduled off. 582 - */ 583 - u64 tstamp_enabled; 584 - u64 tstamp_running; 585 - u64 tstamp_stopped; 586 - 587 - struct perf_counter_attr attr; 588 - struct hw_perf_counter hw; 589 - 590 - struct perf_counter_context *ctx; 591 - struct file *filp; 592 - 593 - /* 594 - * These accumulate total time (in nanoseconds) that children 595 - * counters have been enabled and running, respectively. 596 - */ 597 - atomic64_t child_total_time_enabled; 598 - atomic64_t child_total_time_running; 599 - 600 - /* 601 - * Protect attach/detach and child_list: 602 - */ 603 - struct mutex child_mutex; 604 - struct list_head child_list; 605 - struct perf_counter *parent; 606 - 607 - int oncpu; 608 - int cpu; 609 - 610 - struct list_head owner_entry; 611 - struct task_struct *owner; 612 - 613 - /* mmap bits */ 614 - struct mutex mmap_mutex; 615 - atomic_t mmap_count; 616 - struct perf_mmap_data *data; 617 - 618 - /* poll related */ 619 - wait_queue_head_t waitq; 620 - struct fasync_struct *fasync; 621 - 622 - /* delayed work for NMIs and such */ 623 - int pending_wakeup; 624 - int pending_kill; 625 - int pending_disable; 626 - struct perf_pending_entry pending; 627 - 628 - atomic_t event_limit; 629 - 630 - void (*destroy)(struct perf_counter *); 631 - struct rcu_head rcu_head; 632 - 633 - struct pid_namespace *ns; 634 - u64 id; 635 - #endif 636 - }; 637 - 638 - /** 639 - * struct perf_counter_context - counter context structure 640 - * 641 - * Used as a container for task counters and CPU counters as well: 642 - */ 643 - struct perf_counter_context { 644 - /* 645 - * Protect the states of the counters in the list, 646 - * nr_active, and the list: 647 - */ 648 - spinlock_t lock; 649 - /* 650 - * Protect the list of counters. Locking either mutex or lock 651 - * is sufficient to ensure the list doesn't change; to change 652 - * the list you need to lock both the mutex and the spinlock. 653 - */ 654 - struct mutex mutex; 655 - 656 - struct list_head counter_list; 657 - struct list_head event_list; 658 - int nr_counters; 659 - int nr_active; 660 - int is_active; 661 - int nr_stat; 662 - atomic_t refcount; 663 - struct task_struct *task; 664 - 665 - /* 666 - * Context clock, runs when context enabled. 667 - */ 668 - u64 time; 669 - u64 timestamp; 670 - 671 - /* 672 - * These fields let us detect when two contexts have both 673 - * been cloned (inherited) from a common ancestor. 
674 - */ 675 - struct perf_counter_context *parent_ctx; 676 - u64 parent_gen; 677 - u64 generation; 678 - int pin_count; 679 - struct rcu_head rcu_head; 680 - }; 681 - 682 - /** 683 - * struct perf_counter_cpu_context - per cpu counter context structure 684 - */ 685 - struct perf_cpu_context { 686 - struct perf_counter_context ctx; 687 - struct perf_counter_context *task_ctx; 688 - int active_oncpu; 689 - int max_pertask; 690 - int exclusive; 691 - 692 - /* 693 - * Recursion avoidance: 694 - * 695 - * task, softirq, irq, nmi context 696 - */ 697 - int recursion[4]; 698 - }; 699 - 700 - struct perf_output_handle { 701 - struct perf_counter *counter; 702 - struct perf_mmap_data *data; 703 - unsigned long head; 704 - unsigned long offset; 705 - int nmi; 706 - int sample; 707 - int locked; 708 - unsigned long flags; 709 - }; 710 - 711 - #ifdef CONFIG_PERF_COUNTERS 425 + #define PERF_FLAG_FD_NO_GROUP (1U << 0) 426 + #define PERF_FLAG_FD_OUTPUT (1U << 1) 712 427 713 428 /* 714 - * Set by architecture code: 429 + * In case some app still references the old symbols: 715 430 */ 716 - extern int perf_max_counters; 717 431 718 - extern const struct pmu *hw_perf_counter_init(struct perf_counter *counter); 432 + #define __NR_perf_counter_open __NR_perf_event_open 719 433 720 - extern void perf_counter_task_sched_in(struct task_struct *task, int cpu); 721 - extern void perf_counter_task_sched_out(struct task_struct *task, 722 - struct task_struct *next, int cpu); 723 - extern void perf_counter_task_tick(struct task_struct *task, int cpu); 724 - extern int perf_counter_init_task(struct task_struct *child); 725 - extern void perf_counter_exit_task(struct task_struct *child); 726 - extern void perf_counter_free_task(struct task_struct *task); 727 - extern void set_perf_counter_pending(void); 728 - extern void perf_counter_do_pending(void); 729 - extern void perf_counter_print_debug(void); 730 - extern void __perf_disable(void); 731 - extern bool __perf_enable(void); 732 - extern void perf_disable(void); 733 - extern void perf_enable(void); 734 - extern int perf_counter_task_disable(void); 735 - extern int perf_counter_task_enable(void); 736 - extern int hw_perf_group_sched_in(struct perf_counter *group_leader, 737 - struct perf_cpu_context *cpuctx, 738 - struct perf_counter_context *ctx, int cpu); 739 - extern void perf_counter_update_userpage(struct perf_counter *counter); 434 + #define PR_TASK_PERF_COUNTERS_DISABLE PR_TASK_PERF_EVENTS_DISABLE 435 + #define PR_TASK_PERF_COUNTERS_ENABLE PR_TASK_PERF_EVENTS_ENABLE 740 436 741 - struct perf_sample_data { 742 - u64 type; 743 - 744 - u64 ip; 745 - struct { 746 - u32 pid; 747 - u32 tid; 748 - } tid_entry; 749 - u64 time; 750 - u64 addr; 751 - u64 id; 752 - u64 stream_id; 753 - struct { 754 - u32 cpu; 755 - u32 reserved; 756 - } cpu_entry; 757 - u64 period; 758 - struct perf_callchain_entry *callchain; 759 - struct perf_raw_record *raw; 760 - }; 761 - 762 - extern void perf_output_sample(struct perf_output_handle *handle, 763 - struct perf_event_header *header, 764 - struct perf_sample_data *data, 765 - struct perf_counter *counter); 766 - extern void perf_prepare_sample(struct perf_event_header *header, 767 - struct perf_sample_data *data, 768 - struct perf_counter *counter, 769 - struct pt_regs *regs); 770 - 771 - extern int perf_counter_overflow(struct perf_counter *counter, int nmi, 772 - struct perf_sample_data *data, 773 - struct pt_regs *regs); 774 - 775 - /* 776 - * Return 1 for a software counter, 0 for a hardware counter 777 - */ 778 - static inline 
int is_software_counter(struct perf_counter *counter) 779 - { 780 - return (counter->attr.type != PERF_TYPE_RAW) && 781 - (counter->attr.type != PERF_TYPE_HARDWARE) && 782 - (counter->attr.type != PERF_TYPE_HW_CACHE); 783 - } 784 - 785 - extern atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX]; 786 - 787 - extern void __perf_swcounter_event(u32, u64, int, struct pt_regs *, u64); 788 - 789 - static inline void 790 - perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr) 791 - { 792 - if (atomic_read(&perf_swcounter_enabled[event])) 793 - __perf_swcounter_event(event, nr, nmi, regs, addr); 794 - } 795 - 796 - extern void __perf_counter_mmap(struct vm_area_struct *vma); 797 - 798 - static inline void perf_counter_mmap(struct vm_area_struct *vma) 799 - { 800 - if (vma->vm_flags & VM_EXEC) 801 - __perf_counter_mmap(vma); 802 - } 803 - 804 - extern void perf_counter_comm(struct task_struct *tsk); 805 - extern void perf_counter_fork(struct task_struct *tsk); 806 - 807 - extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); 808 - 809 - extern int sysctl_perf_counter_paranoid; 810 - extern int sysctl_perf_counter_mlock; 811 - extern int sysctl_perf_counter_sample_rate; 812 - 813 - extern void perf_counter_init(void); 814 - extern void perf_tpcounter_event(int event_id, u64 addr, u64 count, 815 - void *record, int entry_size); 816 - 817 - #ifndef perf_misc_flags 818 - #define perf_misc_flags(regs) (user_mode(regs) ? PERF_EVENT_MISC_USER : \ 819 - PERF_EVENT_MISC_KERNEL) 820 - #define perf_instruction_pointer(regs) instruction_pointer(regs) 821 - #endif 822 - 823 - extern int perf_output_begin(struct perf_output_handle *handle, 824 - struct perf_counter *counter, unsigned int size, 825 - int nmi, int sample); 826 - extern void perf_output_end(struct perf_output_handle *handle); 827 - extern void perf_output_copy(struct perf_output_handle *handle, 828 - const void *buf, unsigned int len); 829 - #else 830 - static inline void 831 - perf_counter_task_sched_in(struct task_struct *task, int cpu) { } 832 - static inline void 833 - perf_counter_task_sched_out(struct task_struct *task, 834 - struct task_struct *next, int cpu) { } 835 - static inline void 836 - perf_counter_task_tick(struct task_struct *task, int cpu) { } 837 - static inline int perf_counter_init_task(struct task_struct *child) { return 0; } 838 - static inline void perf_counter_exit_task(struct task_struct *child) { } 839 - static inline void perf_counter_free_task(struct task_struct *task) { } 840 - static inline void perf_counter_do_pending(void) { } 841 - static inline void perf_counter_print_debug(void) { } 842 - static inline void perf_disable(void) { } 843 - static inline void perf_enable(void) { } 844 - static inline int perf_counter_task_disable(void) { return -EINVAL; } 845 - static inline int perf_counter_task_enable(void) { return -EINVAL; } 846 - 847 - static inline void 848 - perf_swcounter_event(u32 event, u64 nr, int nmi, 849 - struct pt_regs *regs, u64 addr) { } 850 - 851 - static inline void perf_counter_mmap(struct vm_area_struct *vma) { } 852 - static inline void perf_counter_comm(struct task_struct *tsk) { } 853 - static inline void perf_counter_fork(struct task_struct *tsk) { } 854 - static inline void perf_counter_init(void) { } 855 - 856 - #endif 857 - 858 - #define perf_output_put(handle, x) \ 859 - perf_output_copy((handle), &(x), sizeof(x)) 860 - 861 - #endif /* __KERNEL__ */ 862 437 #endif /* _LINUX_PERF_COUNTER_H */
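The courtesy header above keeps unconverted user-space building: a pre-rename source that still says "counter" picks up the aliases and lands on the same syscall entry point. A hedged check of that (assumes post-rename kernel headers are installed, so both names are visible):

#include <stdio.h>
#include <sys/syscall.h>
#include <linux/perf_counter.h>	/* courtesy copy, scheduled for removal */

int main(void)
{
	/* The #define alias makes both names resolve to the same number: */
	printf("__NR_perf_counter_open = %d, __NR_perf_event_open = %d\n",
	       __NR_perf_counter_open, __NR_perf_event_open);
	return 0;
}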
+858
include/linux/perf_event.h
··· 1 + /* 2 + * Performance events: 3 + * 4 + * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de> 5 + * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar 6 + * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra 7 + * 8 + * Data type definitions, declarations, prototypes. 9 + * 10 + * Started by: Thomas Gleixner and Ingo Molnar 11 + * 12 + * For licencing details see kernel-base/COPYING 13 + */ 14 + #ifndef _LINUX_PERF_EVENT_H 15 + #define _LINUX_PERF_EVENT_H 16 + 17 + #include <linux/types.h> 18 + #include <linux/ioctl.h> 19 + #include <asm/byteorder.h> 20 + 21 + /* 22 + * User-space ABI bits: 23 + */ 24 + 25 + /* 26 + * attr.type 27 + */ 28 + enum perf_type_id { 29 + PERF_TYPE_HARDWARE = 0, 30 + PERF_TYPE_SOFTWARE = 1, 31 + PERF_TYPE_TRACEPOINT = 2, 32 + PERF_TYPE_HW_CACHE = 3, 33 + PERF_TYPE_RAW = 4, 34 + 35 + PERF_TYPE_MAX, /* non-ABI */ 36 + }; 37 + 38 + /* 39 + * Generalized performance event event_id types, used by the 40 + * attr.event_id parameter of the sys_perf_event_open() 41 + * syscall: 42 + */ 43 + enum perf_hw_id { 44 + /* 45 + * Common hardware events, generalized by the kernel: 46 + */ 47 + PERF_COUNT_HW_CPU_CYCLES = 0, 48 + PERF_COUNT_HW_INSTRUCTIONS = 1, 49 + PERF_COUNT_HW_CACHE_REFERENCES = 2, 50 + PERF_COUNT_HW_CACHE_MISSES = 3, 51 + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, 52 + PERF_COUNT_HW_BRANCH_MISSES = 5, 53 + PERF_COUNT_HW_BUS_CYCLES = 6, 54 + 55 + PERF_COUNT_HW_MAX, /* non-ABI */ 56 + }; 57 + 58 + /* 59 + * Generalized hardware cache events: 60 + * 61 + * { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x 62 + * { read, write, prefetch } x 63 + * { accesses, misses } 64 + */ 65 + enum perf_hw_cache_id { 66 + PERF_COUNT_HW_CACHE_L1D = 0, 67 + PERF_COUNT_HW_CACHE_L1I = 1, 68 + PERF_COUNT_HW_CACHE_LL = 2, 69 + PERF_COUNT_HW_CACHE_DTLB = 3, 70 + PERF_COUNT_HW_CACHE_ITLB = 4, 71 + PERF_COUNT_HW_CACHE_BPU = 5, 72 + 73 + PERF_COUNT_HW_CACHE_MAX, /* non-ABI */ 74 + }; 75 + 76 + enum perf_hw_cache_op_id { 77 + PERF_COUNT_HW_CACHE_OP_READ = 0, 78 + PERF_COUNT_HW_CACHE_OP_WRITE = 1, 79 + PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, 80 + 81 + PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */ 82 + }; 83 + 84 + enum perf_hw_cache_op_result_id { 85 + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, 86 + PERF_COUNT_HW_CACHE_RESULT_MISS = 1, 87 + 88 + PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */ 89 + }; 90 + 91 + /* 92 + * Special "software" events provided by the kernel, even if the hardware 93 + * does not support performance events. These events measure various 94 + * physical and sw events of the kernel (and allow the profiling of them as 95 + * well): 96 + */ 97 + enum perf_sw_ids { 98 + PERF_COUNT_SW_CPU_CLOCK = 0, 99 + PERF_COUNT_SW_TASK_CLOCK = 1, 100 + PERF_COUNT_SW_PAGE_FAULTS = 2, 101 + PERF_COUNT_SW_CONTEXT_SWITCHES = 3, 102 + PERF_COUNT_SW_CPU_MIGRATIONS = 4, 103 + PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, 104 + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, 105 + 106 + PERF_COUNT_SW_MAX, /* non-ABI */ 107 + }; 108 + 109 + /* 110 + * Bits that can be set in attr.sample_type to request information 111 + * in the overflow packets. 
112 + */ 113 + enum perf_event_sample_format { 114 + PERF_SAMPLE_IP = 1U << 0, 115 + PERF_SAMPLE_TID = 1U << 1, 116 + PERF_SAMPLE_TIME = 1U << 2, 117 + PERF_SAMPLE_ADDR = 1U << 3, 118 + PERF_SAMPLE_READ = 1U << 4, 119 + PERF_SAMPLE_CALLCHAIN = 1U << 5, 120 + PERF_SAMPLE_ID = 1U << 6, 121 + PERF_SAMPLE_CPU = 1U << 7, 122 + PERF_SAMPLE_PERIOD = 1U << 8, 123 + PERF_SAMPLE_STREAM_ID = 1U << 9, 124 + PERF_SAMPLE_RAW = 1U << 10, 125 + 126 + PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */ 127 + }; 128 + 129 + /* 130 + * The format of the data returned by read() on a perf event fd, 131 + * as specified by attr.read_format: 132 + * 133 + * struct read_format { 134 + * { u64 value; 135 + * { u64 time_enabled; } && PERF_FORMAT_ENABLED 136 + * { u64 time_running; } && PERF_FORMAT_RUNNING 137 + * { u64 id; } && PERF_FORMAT_ID 138 + * } && !PERF_FORMAT_GROUP 139 + * 140 + * { u64 nr; 141 + * { u64 time_enabled; } && PERF_FORMAT_ENABLED 142 + * { u64 time_running; } && PERF_FORMAT_RUNNING 143 + * { u64 value; 144 + * { u64 id; } && PERF_FORMAT_ID 145 + * } cntr[nr]; 146 + * } && PERF_FORMAT_GROUP 147 + * }; 148 + */ 149 + enum perf_event_read_format { 150 + PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0, 151 + PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1, 152 + PERF_FORMAT_ID = 1U << 2, 153 + PERF_FORMAT_GROUP = 1U << 3, 154 + 155 + PERF_FORMAT_MAX = 1U << 4, /* non-ABI */ 156 + }; 157 + 158 + #define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */ 159 + 160 + /* 161 + * Hardware event_id to monitor via a performance monitoring event: 162 + */ 163 + struct perf_event_attr { 164 + 165 + /* 166 + * Major type: hardware/software/tracepoint/etc. 167 + */ 168 + __u32 type; 169 + 170 + /* 171 + * Size of the attr structure, for fwd/bwd compat. 172 + */ 173 + __u32 size; 174 + 175 + /* 176 + * Type specific configuration information. 
177 + */ 178 + __u64 config; 179 + 180 + union { 181 + __u64 sample_period; 182 + __u64 sample_freq; 183 + }; 184 + 185 + __u64 sample_type; 186 + __u64 read_format; 187 + 188 + __u64 disabled : 1, /* off by default */ 189 + inherit : 1, /* children inherit it */ 190 + pinned : 1, /* must always be on PMU */ 191 + exclusive : 1, /* only group on PMU */ 192 + exclude_user : 1, /* don't count user */ 193 + exclude_kernel : 1, /* ditto kernel */ 194 + exclude_hv : 1, /* ditto hypervisor */ 195 + exclude_idle : 1, /* don't count when idle */ 196 + mmap : 1, /* include mmap data */ 197 + comm : 1, /* include comm data */ 198 + freq : 1, /* use freq, not period */ 199 + inherit_stat : 1, /* per task counts */ 200 + enable_on_exec : 1, /* next exec enables */ 201 + task : 1, /* trace fork/exit */ 202 + watermark : 1, /* wakeup_watermark */ 203 + 204 + __reserved_1 : 49; 205 + 206 + union { 207 + __u32 wakeup_events; /* wakeup every n events */ 208 + __u32 wakeup_watermark; /* bytes before wakeup */ 209 + }; 210 + __u32 __reserved_2; 211 + 212 + __u64 __reserved_3; 213 + }; 214 + 215 + /* 216 + * Ioctls that can be done on a perf event fd: 217 + */ 218 + #define PERF_EVENT_IOC_ENABLE _IO ('$', 0) 219 + #define PERF_EVENT_IOC_DISABLE _IO ('$', 1) 220 + #define PERF_EVENT_IOC_REFRESH _IO ('$', 2) 221 + #define PERF_EVENT_IOC_RESET _IO ('$', 3) 222 + #define PERF_EVENT_IOC_PERIOD _IOW('$', 4, u64) 223 + #define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5) 224 + 225 + enum perf_event_ioc_flags { 226 + PERF_IOC_FLAG_GROUP = 1U << 0, 227 + }; 228 + 229 + /* 230 + * Structure of the page that can be mapped via mmap 231 + */ 232 + struct perf_event_mmap_page { 233 + __u32 version; /* version number of this structure */ 234 + __u32 compat_version; /* lowest version this is compat with */ 235 + 236 + /* 237 + * Bits needed to read the hw events in user-space. 238 + * 239 + * u32 seq; 240 + * s64 count; 241 + * 242 + * do { 243 + * seq = pc->lock; 244 + * 245 + * barrier() 246 + * if (pc->index) { 247 + * count = pmc_read(pc->index - 1); 248 + * count += pc->offset; 249 + * } else 250 + * goto regular_read; 251 + * 252 + * barrier(); 253 + * } while (pc->lock != seq); 254 + * 255 + * NOTE: for obvious reason this only works on self-monitoring 256 + * processes. 257 + */ 258 + __u32 lock; /* seqlock for synchronization */ 259 + __u32 index; /* hardware event identifier */ 260 + __s64 offset; /* add to hardware event value */ 261 + __u64 time_enabled; /* time event active */ 262 + __u64 time_running; /* time event on cpu */ 263 + 264 + /* 265 + * Hole for extension of the self monitor capabilities 266 + */ 267 + 268 + __u64 __reserved[123]; /* align to 1k */ 269 + 270 + /* 271 + * Control data for the mmap() data buffer. 272 + * 273 + * User-space reading the @data_head value should issue an rmb(), on 274 + * SMP capable platforms, after reading this value -- see 275 + * perf_event_wakeup(). 276 + * 277 + * When the mapping is PROT_WRITE the @data_tail value should be 278 + * written by userspace to reflect the last read data. In this case 279 + * the kernel will not over-write unread data. 
280 + */ 281 + __u64 data_head; /* head in the data section */ 282 + __u64 data_tail; /* user-space written tail */ 283 + }; 284 + 285 + #define PERF_RECORD_MISC_CPUMODE_MASK (3 << 0) 286 + #define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0) 287 + #define PERF_RECORD_MISC_KERNEL (1 << 0) 288 + #define PERF_RECORD_MISC_USER (2 << 0) 289 + #define PERF_RECORD_MISC_HYPERVISOR (3 << 0) 290 + 291 + struct perf_event_header { 292 + __u32 type; 293 + __u16 misc; 294 + __u16 size; 295 + }; 296 + 297 + enum perf_event_type { 298 + 299 + /* 300 + * The MMAP events record the PROT_EXEC mappings so that we can 301 + * correlate userspace IPs to code. They have the following structure: 302 + * 303 + * struct { 304 + * struct perf_event_header header; 305 + * 306 + * u32 pid, tid; 307 + * u64 addr; 308 + * u64 len; 309 + * u64 pgoff; 310 + * char filename[]; 311 + * }; 312 + */ 313 + PERF_RECORD_MMAP = 1, 314 + 315 + /* 316 + * struct { 317 + * struct perf_event_header header; 318 + * u64 id; 319 + * u64 lost; 320 + * }; 321 + */ 322 + PERF_RECORD_LOST = 2, 323 + 324 + /* 325 + * struct { 326 + * struct perf_event_header header; 327 + * 328 + * u32 pid, tid; 329 + * char comm[]; 330 + * }; 331 + */ 332 + PERF_RECORD_COMM = 3, 333 + 334 + /* 335 + * struct { 336 + * struct perf_event_header header; 337 + * u32 pid, ppid; 338 + * u32 tid, ptid; 339 + * u64 time; 340 + * }; 341 + */ 342 + PERF_RECORD_EXIT = 4, 343 + 344 + /* 345 + * struct { 346 + * struct perf_event_header header; 347 + * u64 time; 348 + * u64 id; 349 + * u64 stream_id; 350 + * }; 351 + */ 352 + PERF_RECORD_THROTTLE = 5, 353 + PERF_RECORD_UNTHROTTLE = 6, 354 + 355 + /* 356 + * struct { 357 + * struct perf_event_header header; 358 + * u32 pid, ppid; 359 + * u32 tid, ptid; 360 + * { u64 time; } && PERF_SAMPLE_TIME 361 + * }; 362 + */ 363 + PERF_RECORD_FORK = 7, 364 + 365 + /* 366 + * struct { 367 + * struct perf_event_header header; 368 + * u32 pid, tid; 369 + * 370 + * struct read_format values; 371 + * }; 372 + */ 373 + PERF_RECORD_READ = 8, 374 + 375 + /* 376 + * struct { 377 + * struct perf_event_header header; 378 + * 379 + * { u64 ip; } && PERF_SAMPLE_IP 380 + * { u32 pid, tid; } && PERF_SAMPLE_TID 381 + * { u64 time; } && PERF_SAMPLE_TIME 382 + * { u64 addr; } && PERF_SAMPLE_ADDR 383 + * { u64 id; } && PERF_SAMPLE_ID 384 + * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID 385 + * { u32 cpu, res; } && PERF_SAMPLE_CPU 386 + * { u64 period; } && PERF_SAMPLE_PERIOD 387 + * 388 + * { struct read_format values; } && PERF_SAMPLE_READ 389 + * 390 + * { u64 nr, 391 + * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN 392 + * 393 + * # 394 + * # The RAW record below is opaque data wrt the ABI 395 + * # 396 + * # That is, the ABI doesn't make any promises wrt to 397 + * # the stability of its content, it may vary depending 398 + * # on event, hardware, kernel version and phase of 399 + * # the moon. 400 + * # 401 + * # In other words, PERF_SAMPLE_RAW contents are not an ABI. 
402 + * # 403 + * 404 + * { u32 size; 405 + * char data[size];}&& PERF_SAMPLE_RAW 406 + * }; 407 + */ 408 + PERF_RECORD_SAMPLE = 9, 409 + 410 + PERF_RECORD_MAX, /* non-ABI */ 411 + }; 412 + 413 + enum perf_callchain_context { 414 + PERF_CONTEXT_HV = (__u64)-32, 415 + PERF_CONTEXT_KERNEL = (__u64)-128, 416 + PERF_CONTEXT_USER = (__u64)-512, 417 + 418 + PERF_CONTEXT_GUEST = (__u64)-2048, 419 + PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176, 420 + PERF_CONTEXT_GUEST_USER = (__u64)-2560, 421 + 422 + PERF_CONTEXT_MAX = (__u64)-4095, 423 + }; 424 + 425 + #define PERF_FLAG_FD_NO_GROUP (1U << 0) 426 + #define PERF_FLAG_FD_OUTPUT (1U << 1) 427 + 428 + #ifdef __KERNEL__ 429 + /* 430 + * Kernel-internal data types and definitions: 431 + */ 432 + 433 + #ifdef CONFIG_PERF_EVENTS 434 + # include <asm/perf_event.h> 435 + #endif 436 + 437 + #include <linux/list.h> 438 + #include <linux/mutex.h> 439 + #include <linux/rculist.h> 440 + #include <linux/rcupdate.h> 441 + #include <linux/spinlock.h> 442 + #include <linux/hrtimer.h> 443 + #include <linux/fs.h> 444 + #include <linux/pid_namespace.h> 445 + #include <asm/atomic.h> 446 + 447 + #define PERF_MAX_STACK_DEPTH 255 448 + 449 + struct perf_callchain_entry { 450 + __u64 nr; 451 + __u64 ip[PERF_MAX_STACK_DEPTH]; 452 + }; 453 + 454 + struct perf_raw_record { 455 + u32 size; 456 + void *data; 457 + }; 458 + 459 + struct task_struct; 460 + 461 + /** 462 + * struct hw_perf_event - performance event hardware details: 463 + */ 464 + struct hw_perf_event { 465 + #ifdef CONFIG_PERF_EVENTS 466 + union { 467 + struct { /* hardware */ 468 + u64 config; 469 + unsigned long config_base; 470 + unsigned long event_base; 471 + int idx; 472 + }; 473 + union { /* software */ 474 + atomic64_t count; 475 + struct hrtimer hrtimer; 476 + }; 477 + }; 478 + atomic64_t prev_count; 479 + u64 sample_period; 480 + u64 last_period; 481 + atomic64_t period_left; 482 + u64 interrupts; 483 + 484 + u64 freq_count; 485 + u64 freq_interrupts; 486 + u64 freq_stamp; 487 + #endif 488 + }; 489 + 490 + struct perf_event; 491 + 492 + /** 493 + * struct pmu - generic performance monitoring unit 494 + */ 495 + struct pmu { 496 + int (*enable) (struct perf_event *event); 497 + void (*disable) (struct perf_event *event); 498 + void (*read) (struct perf_event *event); 499 + void (*unthrottle) (struct perf_event *event); 500 + }; 501 + 502 + /** 503 + * enum perf_event_active_state - the states of a event 504 + */ 505 + enum perf_event_active_state { 506 + PERF_EVENT_STATE_ERROR = -2, 507 + PERF_EVENT_STATE_OFF = -1, 508 + PERF_EVENT_STATE_INACTIVE = 0, 509 + PERF_EVENT_STATE_ACTIVE = 1, 510 + }; 511 + 512 + struct file; 513 + 514 + struct perf_mmap_data { 515 + struct rcu_head rcu_head; 516 + int nr_pages; /* nr of data pages */ 517 + int writable; /* are we writable */ 518 + int nr_locked; /* nr pages mlocked */ 519 + 520 + atomic_t poll; /* POLL_ for wakeups */ 521 + atomic_t events; /* event_id limit */ 522 + 523 + atomic_long_t head; /* write position */ 524 + atomic_long_t done_head; /* completed head */ 525 + 526 + atomic_t lock; /* concurrent writes */ 527 + atomic_t wakeup; /* needs a wakeup */ 528 + atomic_t lost; /* nr records lost */ 529 + 530 + long watermark; /* wakeup watermark */ 531 + 532 + struct perf_event_mmap_page *user_page; 533 + void *data_pages[0]; 534 + }; 535 + 536 + struct perf_pending_entry { 537 + struct perf_pending_entry *next; 538 + void (*func)(struct perf_pending_entry *); 539 + }; 540 + 541 + /** 542 + * struct perf_event - performance event kernel representation: 543 + */ 
544 + struct perf_event { 545 + #ifdef CONFIG_PERF_EVENTS 546 + struct list_head group_entry; 547 + struct list_head event_entry; 548 + struct list_head sibling_list; 549 + int nr_siblings; 550 + struct perf_event *group_leader; 551 + struct perf_event *output; 552 + const struct pmu *pmu; 553 + 554 + enum perf_event_active_state state; 555 + atomic64_t count; 556 + 557 + /* 558 + * These are the total time in nanoseconds that the event 559 + * has been enabled (i.e. eligible to run, and the task has 560 + * been scheduled in, if this is a per-task event) 561 + * and running (scheduled onto the CPU), respectively. 562 + * 563 + * They are computed from tstamp_enabled, tstamp_running and 564 + * tstamp_stopped when the event is in INACTIVE or ACTIVE state. 565 + */ 566 + u64 total_time_enabled; 567 + u64 total_time_running; 568 + 569 + /* 570 + * These are timestamps used for computing total_time_enabled 571 + * and total_time_running when the event is in INACTIVE or 572 + * ACTIVE state, measured in nanoseconds from an arbitrary point 573 + * in time. 574 + * tstamp_enabled: the notional time when the event was enabled 575 + * tstamp_running: the notional time when the event was scheduled on 576 + * tstamp_stopped: in INACTIVE state, the notional time when the 577 + * event was scheduled off. 578 + */ 579 + u64 tstamp_enabled; 580 + u64 tstamp_running; 581 + u64 tstamp_stopped; 582 + 583 + struct perf_event_attr attr; 584 + struct hw_perf_event hw; 585 + 586 + struct perf_event_context *ctx; 587 + struct file *filp; 588 + 589 + /* 590 + * These accumulate total time (in nanoseconds) that children 591 + * events have been enabled and running, respectively. 592 + */ 593 + atomic64_t child_total_time_enabled; 594 + atomic64_t child_total_time_running; 595 + 596 + /* 597 + * Protect attach/detach and child_list: 598 + */ 599 + struct mutex child_mutex; 600 + struct list_head child_list; 601 + struct perf_event *parent; 602 + 603 + int oncpu; 604 + int cpu; 605 + 606 + struct list_head owner_entry; 607 + struct task_struct *owner; 608 + 609 + /* mmap bits */ 610 + struct mutex mmap_mutex; 611 + atomic_t mmap_count; 612 + struct perf_mmap_data *data; 613 + 614 + /* poll related */ 615 + wait_queue_head_t waitq; 616 + struct fasync_struct *fasync; 617 + 618 + /* delayed work for NMIs and such */ 619 + int pending_wakeup; 620 + int pending_kill; 621 + int pending_disable; 622 + struct perf_pending_entry pending; 623 + 624 + atomic_t event_limit; 625 + 626 + void (*destroy)(struct perf_event *); 627 + struct rcu_head rcu_head; 628 + 629 + struct pid_namespace *ns; 630 + u64 id; 631 + #endif 632 + }; 633 + 634 + /** 635 + * struct perf_event_context - event context structure 636 + * 637 + * Used as a container for task events and CPU events as well: 638 + */ 639 + struct perf_event_context { 640 + /* 641 + * Protect the states of the events in the list, 642 + * nr_active, and the list: 643 + */ 644 + spinlock_t lock; 645 + /* 646 + * Protect the list of events. Locking either mutex or lock 647 + * is sufficient to ensure the list doesn't change; to change 648 + * the list you need to lock both the mutex and the spinlock. 649 + */ 650 + struct mutex mutex; 651 + 652 + struct list_head group_list; 653 + struct list_head event_list; 654 + int nr_events; 655 + int nr_active; 656 + int is_active; 657 + int nr_stat; 658 + atomic_t refcount; 659 + struct task_struct *task; 660 + 661 + /* 662 + * Context clock, runs when context enabled. 
663 + */ 664 + u64 time; 665 + u64 timestamp; 666 + 667 + /* 668 + * These fields let us detect when two contexts have both 669 + * been cloned (inherited) from a common ancestor. 670 + */ 671 + struct perf_event_context *parent_ctx; 672 + u64 parent_gen; 673 + u64 generation; 674 + int pin_count; 675 + struct rcu_head rcu_head; 676 + }; 677 + 678 + /** 679 + * struct perf_event_cpu_context - per cpu event context structure 680 + */ 681 + struct perf_cpu_context { 682 + struct perf_event_context ctx; 683 + struct perf_event_context *task_ctx; 684 + int active_oncpu; 685 + int max_pertask; 686 + int exclusive; 687 + 688 + /* 689 + * Recursion avoidance: 690 + * 691 + * task, softirq, irq, nmi context 692 + */ 693 + int recursion[4]; 694 + }; 695 + 696 + struct perf_output_handle { 697 + struct perf_event *event; 698 + struct perf_mmap_data *data; 699 + unsigned long head; 700 + unsigned long offset; 701 + int nmi; 702 + int sample; 703 + int locked; 704 + unsigned long flags; 705 + }; 706 + 707 + #ifdef CONFIG_PERF_EVENTS 708 + 709 + /* 710 + * Set by architecture code: 711 + */ 712 + extern int perf_max_events; 713 + 714 + extern const struct pmu *hw_perf_event_init(struct perf_event *event); 715 + 716 + extern void perf_event_task_sched_in(struct task_struct *task, int cpu); 717 + extern void perf_event_task_sched_out(struct task_struct *task, 718 + struct task_struct *next, int cpu); 719 + extern void perf_event_task_tick(struct task_struct *task, int cpu); 720 + extern int perf_event_init_task(struct task_struct *child); 721 + extern void perf_event_exit_task(struct task_struct *child); 722 + extern void perf_event_free_task(struct task_struct *task); 723 + extern void set_perf_event_pending(void); 724 + extern void perf_event_do_pending(void); 725 + extern void perf_event_print_debug(void); 726 + extern void __perf_disable(void); 727 + extern bool __perf_enable(void); 728 + extern void perf_disable(void); 729 + extern void perf_enable(void); 730 + extern int perf_event_task_disable(void); 731 + extern int perf_event_task_enable(void); 732 + extern int hw_perf_group_sched_in(struct perf_event *group_leader, 733 + struct perf_cpu_context *cpuctx, 734 + struct perf_event_context *ctx, int cpu); 735 + extern void perf_event_update_userpage(struct perf_event *event); 736 + 737 + struct perf_sample_data { 738 + u64 type; 739 + 740 + u64 ip; 741 + struct { 742 + u32 pid; 743 + u32 tid; 744 + } tid_entry; 745 + u64 time; 746 + u64 addr; 747 + u64 id; 748 + u64 stream_id; 749 + struct { 750 + u32 cpu; 751 + u32 reserved; 752 + } cpu_entry; 753 + u64 period; 754 + struct perf_callchain_entry *callchain; 755 + struct perf_raw_record *raw; 756 + }; 757 + 758 + extern void perf_output_sample(struct perf_output_handle *handle, 759 + struct perf_event_header *header, 760 + struct perf_sample_data *data, 761 + struct perf_event *event); 762 + extern void perf_prepare_sample(struct perf_event_header *header, 763 + struct perf_sample_data *data, 764 + struct perf_event *event, 765 + struct pt_regs *regs); 766 + 767 + extern int perf_event_overflow(struct perf_event *event, int nmi, 768 + struct perf_sample_data *data, 769 + struct pt_regs *regs); 770 + 771 + /* 772 + * Return 1 for a software event, 0 for a hardware event 773 + */ 774 + static inline int is_software_event(struct perf_event *event) 775 + { 776 + return (event->attr.type != PERF_TYPE_RAW) && 777 + (event->attr.type != PERF_TYPE_HARDWARE) && 778 + (event->attr.type != PERF_TYPE_HW_CACHE); 779 + } 780 + 781 + extern atomic_t 
perf_swevent_enabled[PERF_COUNT_SW_MAX]; 782 + 783 + extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64); 784 + 785 + static inline void 786 + perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) 787 + { 788 + if (atomic_read(&perf_swevent_enabled[event_id])) 789 + __perf_sw_event(event_id, nr, nmi, regs, addr); 790 + } 791 + 792 + extern void __perf_event_mmap(struct vm_area_struct *vma); 793 + 794 + static inline void perf_event_mmap(struct vm_area_struct *vma) 795 + { 796 + if (vma->vm_flags & VM_EXEC) 797 + __perf_event_mmap(vma); 798 + } 799 + 800 + extern void perf_event_comm(struct task_struct *tsk); 801 + extern void perf_event_fork(struct task_struct *tsk); 802 + 803 + extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); 804 + 805 + extern int sysctl_perf_event_paranoid; 806 + extern int sysctl_perf_event_mlock; 807 + extern int sysctl_perf_event_sample_rate; 808 + 809 + extern void perf_event_init(void); 810 + extern void perf_tp_event(int event_id, u64 addr, u64 count, 811 + void *record, int entry_size); 812 + 813 + #ifndef perf_misc_flags 814 + #define perf_misc_flags(regs) (user_mode(regs) ? PERF_RECORD_MISC_USER : \ 815 + PERF_RECORD_MISC_KERNEL) 816 + #define perf_instruction_pointer(regs) instruction_pointer(regs) 817 + #endif 818 + 819 + extern int perf_output_begin(struct perf_output_handle *handle, 820 + struct perf_event *event, unsigned int size, 821 + int nmi, int sample); 822 + extern void perf_output_end(struct perf_output_handle *handle); 823 + extern void perf_output_copy(struct perf_output_handle *handle, 824 + const void *buf, unsigned int len); 825 + #else 826 + static inline void 827 + perf_event_task_sched_in(struct task_struct *task, int cpu) { } 828 + static inline void 829 + perf_event_task_sched_out(struct task_struct *task, 830 + struct task_struct *next, int cpu) { } 831 + static inline void 832 + perf_event_task_tick(struct task_struct *task, int cpu) { } 833 + static inline int perf_event_init_task(struct task_struct *child) { return 0; } 834 + static inline void perf_event_exit_task(struct task_struct *child) { } 835 + static inline void perf_event_free_task(struct task_struct *task) { } 836 + static inline void perf_event_do_pending(void) { } 837 + static inline void perf_event_print_debug(void) { } 838 + static inline void perf_disable(void) { } 839 + static inline void perf_enable(void) { } 840 + static inline int perf_event_task_disable(void) { return -EINVAL; } 841 + static inline int perf_event_task_enable(void) { return -EINVAL; } 842 + 843 + static inline void 844 + perf_sw_event(u32 event_id, u64 nr, int nmi, 845 + struct pt_regs *regs, u64 addr) { } 846 + 847 + static inline void perf_event_mmap(struct vm_area_struct *vma) { } 848 + static inline void perf_event_comm(struct task_struct *tsk) { } 849 + static inline void perf_event_fork(struct task_struct *tsk) { } 850 + static inline void perf_event_init(void) { } 851 + 852 + #endif 853 + 854 + #define perf_output_put(handle, x) \ 855 + perf_output_copy((handle), &(x), sizeof(x)) 856 + 857 + #endif /* __KERNEL__ */ 858 + #endif /* _LINUX_PERF_EVENT_H */
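A note on the cache enums above: they are meant to be packed into a single attr.config value for a PERF_TYPE_HW_CACHE event. A minimal sketch, assuming the (id) | (op << 8) | (result << 16) layout the perf ABI uses for this packing; the helper name is ours:

    #include <linux/perf_event.h>

    /* Pack the cache id, op and result enums into one attr.config word;
     * layout assumed to be id | (op << 8) | (result << 16). */
    static __u64 hw_cache_config(__u64 id, __u64 op, __u64 result)
    {
            return id | (op << 8) | (result << 16);
    }

    /* e.g. L1-D read misses, with attr.type = PERF_TYPE_HW_CACHE:
     *      hw_cache_config(PERF_COUNT_HW_CACHE_L1D,
     *                      PERF_COUNT_HW_CACHE_OP_READ,
     *                      PERF_COUNT_HW_CACHE_RESULT_MISS);
     */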
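The read_format comment in the header fixes the layout of what read() returns on an event fd. A sketch of decoding a group read, assuming read_format was set to exactly PERF_FORMAT_GROUP | PERF_FORMAT_ID (struct and function names are ours):

    #include <stdint.h>
    #include <unistd.h>

    struct group_value {
            uint64_t value;                 /* per-event count */
            uint64_t id;                    /* PERF_FORMAT_ID cookie */
    };

    struct group_read {
            uint64_t nr;                    /* number of events in the group */
            struct group_value cnt[32];     /* upper bound picked arbitrarily */
    };

    static int read_group(int group_fd, struct group_read *gr)
    {
            ssize_t n = read(group_fd, gr, sizeof(*gr));

            /* at minimum the nr field must have been filled in */
            return n >= (ssize_t)sizeof(gr->nr) ? 0 : -1;
    }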
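The locking recipe in the perf_event_mmap_page comment, written out as user-space C. This is only a sketch: pmc_read() stands in for an arch-specific user-space counter read (RDPMC-style) and is not a real API, and barrier() here is a plain compiler barrier:

    #include <stdint.h>
    #include <linux/perf_event.h>

    #define barrier() __asm__ __volatile__("" ::: "memory")

    /* hypothetical: read raw counter n directly from user space */
    extern uint64_t pmc_read(uint32_t n);

    static int mmap_read_self(volatile struct perf_event_mmap_page *pc,
                              int64_t *count)
    {
            uint32_t seq;

            do {
                    seq = pc->lock;
                    barrier();
                    if (!pc->index)
                            return -1;      /* not on a PMC: use read() instead */
                    *count = pmc_read(pc->index - 1) + pc->offset;
                    barrier();
            } while (pc->lock != seq);      /* retry if the event moved */

            return 0;
    }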
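Record parsing follows directly from the perf_event_type layouts: every record starts with struct perf_event_header, and header.size spans the whole record. A sketch of a consumer loop, assuming a power-of-two data area and ignoring records that wrap the buffer edge (the function name is ours):

    #include <stdint.h>
    #include <linux/perf_event.h>

    static uint64_t walk_records(char *base, uint64_t mask,
                                 uint64_t tail, uint64_t head)
    {
            /* 'base' is the first data page, 'mask' is the data-area size
             * minus one; head/tail come from the perf_event_mmap_page.
             * Records straddling the end of the buffer would need to be
             * copied out first; that case is elided in this sketch. */
            while (tail < head) {
                    struct perf_event_header *hdr =
                            (struct perf_event_header *)(base + (tail & mask));

                    switch (hdr->type) {
                    case PERF_RECORD_SAMPLE:
                            /* body laid out per attr.sample_type, see above */
                            break;
                    case PERF_RECORD_LOST:
                            /* u64 id, lost follow the header */
                            break;
                    default:
                            break;
                    }
                    tail += hdr->size;      /* size covers the whole record */
            }
            return tail;    /* write back to data_tail when mapped PROT_WRITE */
    }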
+2 -2
include/linux/prctl.h
··· 85 85 #define PR_SET_TIMERSLACK 29 86 86 #define PR_GET_TIMERSLACK 30 87 87 88 - #define PR_TASK_PERF_COUNTERS_DISABLE 31 89 - #define PR_TASK_PERF_COUNTERS_ENABLE 32 88 + #define PR_TASK_PERF_EVENTS_DISABLE 31 89 + #define PR_TASK_PERF_EVENTS_ENABLE 32 90 90 91 91 #endif /* _LINUX_PRCTL_H */
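These two prctls map onto the perf_event_task_disable()/perf_event_task_enable() calls declared in perf_event.h above, muting or re-arming every event attached to the calling task. A minimal sketch; the wrapper name is ours and the extra prctl arguments are unused by this pair:

    #include <sys/prctl.h>          /* pulls in the constants above */

    static int with_events_disabled(void (*fn)(void))
    {
            if (prctl(PR_TASK_PERF_EVENTS_DISABLE, 0, 0, 0, 0))
                    return -1;
            fn();                   /* runs while the task's events are off */
            return prctl(PR_TASK_PERF_EVENTS_ENABLE, 0, 0, 0, 0);
    }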
+6 -6
include/linux/sched.h
··· 100 100 struct bio; 101 101 struct fs_struct; 102 102 struct bts_context; 103 - struct perf_counter_context; 103 + struct perf_event_context; 104 104 105 105 /* 106 106 * List of flags we want to share for kernel threads, ··· 701 701 #endif 702 702 #endif 703 703 704 - #ifdef CONFIG_PERF_COUNTERS 704 + #ifdef CONFIG_PERF_EVENTS 705 705 atomic_long_t locked_vm; 706 706 #endif 707 707 }; ··· 1451 1451 struct list_head pi_state_list; 1452 1452 struct futex_pi_state *pi_state_cache; 1453 1453 #endif 1454 - #ifdef CONFIG_PERF_COUNTERS 1455 - struct perf_counter_context *perf_counter_ctxp; 1456 - struct mutex perf_counter_mutex; 1457 - struct list_head perf_counter_list; 1454 + #ifdef CONFIG_PERF_EVENTS 1455 + struct perf_event_context *perf_event_ctxp; 1456 + struct mutex perf_event_mutex; 1457 + struct list_head perf_event_list; 1458 1458 #endif 1459 1459 #ifdef CONFIG_NUMA 1460 1460 struct mempolicy *mempolicy; /* Protected by alloc_lock */
+3 -3
include/linux/syscalls.h
··· 55 55 struct robust_list_head; 56 56 struct getcpu_cache; 57 57 struct old_linux_dirent; 58 - struct perf_counter_attr; 58 + struct perf_event_attr; 59 59 60 60 #include <linux/types.h> 61 61 #include <linux/aio_abi.h> ··· 877 877 int kernel_execve(const char *filename, char *const argv[], char *const envp[]); 878 878 879 879 880 - asmlinkage long sys_perf_counter_open( 881 - struct perf_counter_attr __user *attr_uptr, 880 + asmlinkage long sys_perf_event_open( 881 + struct perf_event_attr __user *attr_uptr, 882 882 pid_t pid, int cpu, int group_fd, unsigned long flags); 883 883 #endif
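glibc provides no wrapper for sys_perf_event_open(), so user space conventionally goes through syscall(2). A sketch of that wrapper plus a self-counting example that exercises the PERF_EVENT_IOC_ENABLE/DISABLE ioctls from perf_event.h (helper names are ours):

    #define _GNU_SOURCE
    #include <string.h>
    #include <unistd.h>
    #include <sys/types.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                               int cpu, int group_fd, unsigned long flags)
    {
            return syscall(__NR_perf_event_open, attr, pid, cpu,
                           group_fd, flags);
    }

    /* Count instructions retired by the calling thread across a workload. */
    static long long count_instructions(void (*workload)(void))
    {
            struct perf_event_attr attr;
            long long count = -1;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_HARDWARE;
            attr.config = PERF_COUNT_HW_INSTRUCTIONS;
            attr.disabled = 1;
            attr.exclude_kernel = 1;

            fd = perf_event_open(&attr, 0 /* self */, -1 /* any cpu */,
                                 -1 /* no group */, 0);
            if (fd < 0)
                    return -1;

            ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
            workload();
            ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

            if (read(fd, &count, sizeof(count)) != sizeof(count))
                    count = -1;
            close(fd);
            return count;
    }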
+5 -5
include/trace/ftrace.h
··· 378 378 #ifdef CONFIG_EVENT_PROFILE 379 379 380 380 /* 381 - * Generate the functions needed for tracepoint perf_counter support. 381 + * Generate the functions needed for tracepoint perf_event support. 382 382 * 383 383 * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later 384 384 * ··· 644 644 * { 645 645 * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets; 646 646 * struct ftrace_event_call *event_call = &event_<call>; 647 - * extern void perf_tpcounter_event(int, u64, u64, void *, int); 647 + * extern void perf_tp_event(int, u64, u64, void *, int); 648 648 * struct ftrace_raw_##call *entry; 649 649 * u64 __addr = 0, __count = 1; 650 650 * unsigned long irq_flags; ··· 690 690 * 691 691 * <assign> <- affect our values 692 692 * 693 - * perf_tpcounter_event(event_call->id, __addr, __count, entry, 693 + * perf_tp_event(event_call->id, __addr, __count, entry, 694 694 * __entry_size); <- submit them to perf counter 695 695 * 696 696 * } ··· 710 710 { \ 711 711 struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ 712 712 struct ftrace_event_call *event_call = &event_##call; \ 713 - extern void perf_tpcounter_event(int, u64, u64, void *, int); \ 713 + extern void perf_tp_event(int, u64, u64, void *, int); \ 714 714 struct ftrace_raw_##call *entry; \ 715 715 u64 __addr = 0, __count = 1; \ 716 716 unsigned long irq_flags; \ ··· 755 755 \ 756 756 { assign; } \ 757 757 \ 758 - perf_tpcounter_event(event_call->id, __addr, __count, entry, \ 758 + perf_tp_event(event_call->id, __addr, __count, entry, \ 759 759 __entry_size); \ 760 760 \ 761 761 end: \
+31 -14
init/Kconfig
··· 916 916 by some high performance threaded applications. Disabling
917 917 this option saves about 7k.
918 918 
919 - config HAVE_PERF_COUNTERS
919 + config HAVE_PERF_EVENTS
920 920 bool
921 921 help
922 922 See tools/perf/design.txt for details.
923 923 
924 - menu "Performance Counters"
924 + menu "Kernel Performance Events And Counters"
925 925 
926 - config PERF_COUNTERS
927 - bool "Kernel Performance Counters"
928 - default y if PROFILING
929 - depends on HAVE_PERF_COUNTERS
926 + config PERF_EVENTS
927 + bool "Kernel performance events and counters"
928 + default y if (PROFILING || PERF_COUNTERS)
929 + depends on HAVE_PERF_EVENTS
930 930 select ANON_INODES
931 931 help
932 - Enable kernel support for performance counter hardware.
932 + Enable kernel support for various performance events provided
933 + by software and hardware.
933 934 
934 - Performance counters are special hardware registers available
935 - on most modern CPUs. These registers count the number of certain
935 + Software events are supported either built-in or via the
936 + use of generic tracepoints.
937 + 
938 + Most modern CPUs support performance events via performance
939 + counter registers. These registers count the number of certain
936 940 types of hw events: such as instructions executed, cache misses
937 941 suffered, or branches mis-predicted - without slowing down the
938 942 kernel or applications. These registers can also trigger interrupts
939 943 when a threshold number of events have passed - and can thus be
940 944 used to profile the code that runs on that CPU.
941 945 
942 - The Linux Performance Counter subsystem provides an abstraction of
943 - these hardware capabilities, available via a system call. It
946 + The Linux Performance Event subsystem provides an abstraction of
947 + these software and hardware event capabilities, available via a
948 + system call and used by the "perf" utility in tools/perf/. It
944 949 provides per task and per CPU counters, and it provides event
945 950 capabilities on top of those.
946 951 
··· 953 948 
954 949 config EVENT_PROFILE
955 950 bool "Tracepoint profiling sources"
956 - depends on PERF_COUNTERS && EVENT_TRACING
951 + depends on PERF_EVENTS && EVENT_TRACING
957 952 default y
958 953 help
959 - Allow the use of tracepoints as software performance counters.
954 + Allow the use of tracepoints as software performance events.
960 955 
961 - When this is enabled, you can create perf counters based on
956 + When this is enabled, you can create perf events based on
962 957 tracepoints using PERF_TYPE_TRACEPOINT and the tracepoint ID
963 958 found in debugfs://tracing/events/*/*/id. (The -e/--events
964 959 option to the perf tool can parse and interpret symbolic
965 960 tracepoints, in the subsystem:tracepoint_name format.)
961 + 
962 + config PERF_COUNTERS
963 + bool "Kernel performance counters (old config option)"
964 + depends on HAVE_PERF_EVENTS
965 + help
966 + This config has been obsoleted by the PERF_EVENTS
967 + config option - please see that one for details.
968 + 
969 + It has no effect on the kernel whether you enable
970 + it or not, it is a compatibility placeholder.
971 + 
972 + Say N if unsure.
966 973 
967 974 endmenu
968 975 
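What the EVENT_PROFILE help text describes, from the user side: read a tracepoint's numeric id out of debugfs and hand it to perf_event_open() as a PERF_TYPE_TRACEPOINT config. A sketch reusing the syscall stub from the syscalls.h note above; the debugfs mount point and the tracepoint path are illustrative:

    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>
    #include <linux/perf_event.h>

    /* the raw-syscall wrapper from the earlier sketch */
    int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                        int cpu, int group_fd, unsigned long flags);

    static int open_tracepoint(const char *id_path)
    {
            struct perf_event_attr attr;
            FILE *f;
            int id = -1;

            /* e.g. "/sys/kernel/debug/tracing/events/sched/sched_switch/id" */
            f = fopen(id_path, "r");
            if (!f)
                    return -1;
            if (fscanf(f, "%d", &id) != 1)
                    id = -1;
            fclose(f);
            if (id < 0)
                    return -1;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_TRACEPOINT;
            attr.config = id;
            attr.sample_period = 1;

            return perf_event_open(&attr, 0, -1, -1, 0);
    }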
+1 -1
kernel/Makefile
··· 95 95 obj-$(CONFIG_RING_BUFFER) += trace/ 96 96 obj-$(CONFIG_SMP) += sched_cpupri.o 97 97 obj-$(CONFIG_SLOW_WORK) += slow-work.o 98 - obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o 98 + obj-$(CONFIG_PERF_EVENTS) += perf_event.o 99 99 100 100 ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y) 101 101 # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
+4 -4
kernel/exit.c
··· 47 47 #include <linux/tracehook.h> 48 48 #include <linux/fs_struct.h> 49 49 #include <linux/init_task.h> 50 - #include <linux/perf_counter.h> 50 + #include <linux/perf_event.h> 51 51 #include <trace/events/sched.h> 52 52 53 53 #include <asm/uaccess.h> ··· 154 154 { 155 155 struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); 156 156 157 - #ifdef CONFIG_PERF_COUNTERS 158 - WARN_ON_ONCE(tsk->perf_counter_ctxp); 157 + #ifdef CONFIG_PERF_EVENTS 158 + WARN_ON_ONCE(tsk->perf_event_ctxp); 159 159 #endif 160 160 trace_sched_process_free(tsk); 161 161 put_task_struct(tsk); ··· 981 981 * Flush inherited counters to the parent - before the parent 982 982 * gets woken up by child-exit notifications. 983 983 */ 984 - perf_counter_exit_task(tsk); 984 + perf_event_exit_task(tsk); 985 985 986 986 exit_notify(tsk, group_dead); 987 987 #ifdef CONFIG_NUMA
+4 -4
kernel/fork.c
··· 61 61 #include <linux/blkdev.h> 62 62 #include <linux/fs_struct.h> 63 63 #include <linux/magic.h> 64 - #include <linux/perf_counter.h> 64 + #include <linux/perf_event.h> 65 65 66 66 #include <asm/pgtable.h> 67 67 #include <asm/pgalloc.h> ··· 1078 1078 /* Perform scheduler related setup. Assign this task to a CPU. */ 1079 1079 sched_fork(p, clone_flags); 1080 1080 1081 - retval = perf_counter_init_task(p); 1081 + retval = perf_event_init_task(p); 1082 1082 if (retval) 1083 1083 goto bad_fork_cleanup_policy; 1084 1084 ··· 1253 1253 write_unlock_irq(&tasklist_lock); 1254 1254 proc_fork_connector(p); 1255 1255 cgroup_post_fork(p); 1256 - perf_counter_fork(p); 1256 + perf_event_fork(p); 1257 1257 return p; 1258 1258 1259 1259 bad_fork_free_pid: ··· 1280 1280 bad_fork_cleanup_audit: 1281 1281 audit_free(p); 1282 1282 bad_fork_cleanup_policy: 1283 - perf_counter_free_task(p); 1283 + perf_event_free_task(p); 1284 1284 #ifdef CONFIG_NUMA 1285 1285 mpol_put(p->mempolicy); 1286 1286 bad_fork_cleanup_cgroup:
+1224 -1225
kernel/perf_counter.c kernel/perf_event.c
··· 1 1 /*
2 - * Performance counter core code
2 + * Performance events core code:
3 3 *
4 4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6 6 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
7 7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8 8 *
9 - * For licensing details see kernel-base/COPYING
9 + * For licensing details see kernel-base/COPYING
10 10 */
11 11 
12 12 #include <linux/fs.h>
··· 26 26 #include <linux/syscalls.h>
27 27 #include <linux/anon_inodes.h>
28 28 #include <linux/kernel_stat.h>
29 - #include <linux/perf_counter.h>
29 + #include <linux/perf_event.h>
30 30 
31 31 #include <asm/irq_regs.h>
32 32 
33 33 /*
34 - * Each CPU has a list of per CPU counters:
34 + * Each CPU has a list of per CPU events:
35 35 */
36 36 DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
37 37 
38 - int perf_max_counters __read_mostly = 1;
38 + int perf_max_events __read_mostly = 1;
39 39 static int perf_reserved_percpu __read_mostly;
40 40 static int perf_overcommit __read_mostly = 1;
41 41 
42 - static atomic_t nr_counters __read_mostly;
43 - static atomic_t nr_mmap_counters __read_mostly;
44 - static atomic_t nr_comm_counters __read_mostly;
45 - static atomic_t nr_task_counters __read_mostly;
42 + static atomic_t nr_events __read_mostly;
43 + static atomic_t nr_mmap_events __read_mostly;
44 + static atomic_t nr_comm_events __read_mostly;
45 + static atomic_t nr_task_events __read_mostly;
46 46 
47 47 /*
48 - * perf counter paranoia level:
48 + * perf event paranoia level:
49 49 * -1 - not paranoid at all
50 50 * 0 - disallow raw tracepoint access for unpriv
51 - * 1 - disallow cpu counters for unpriv
51 + * 1 - disallow cpu events for unpriv
52 52 * 2 - disallow kernel profiling for unpriv
53 53 */
54 - int sysctl_perf_counter_paranoid __read_mostly = 1;
54 + int sysctl_perf_event_paranoid __read_mostly = 1;
55 55 
56 56 static inline bool perf_paranoid_tracepoint_raw(void)
57 57 {
58 - return sysctl_perf_counter_paranoid > -1;
58 + return sysctl_perf_event_paranoid > -1;
59 59 }
60 60 
61 61 static inline bool perf_paranoid_cpu(void)
62 62 {
63 - return sysctl_perf_counter_paranoid > 0;
63 + return sysctl_perf_event_paranoid > 0;
64 64 }
65 65 
66 66 static inline bool perf_paranoid_kernel(void)
67 67 {
68 - return sysctl_perf_counter_paranoid > 1;
68 + return sysctl_perf_event_paranoid > 1;
69 69 }
70 70 
71 - int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
71 + int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */
72 72 
73 73 /*
74 - * max perf counter sample rate
74 + * max perf event sample rate
75 75 */
76 - int sysctl_perf_counter_sample_rate __read_mostly = 100000;
76 + int sysctl_perf_event_sample_rate __read_mostly = 100000;
77 77 
78 - static atomic64_t perf_counter_id;
78 + static atomic64_t perf_event_id;
79 79 
80 80 /*
81 - * Lock for (sysadmin-configurable) counter reservations:
81 + * Lock for (sysadmin-configurable) event reservations:
82 82 */
83 83 static DEFINE_SPINLOCK(perf_resource_lock);
84 84 
85 85 /*
86 86 * Architecture provided APIs - weak aliases:
87 87 */
88 - extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
88 + extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
89 89 {
90 90 return NULL;
91 91 }
··· 93 93 void __weak hw_perf_disable(void) { barrier(); }
94 94 void __weak hw_perf_enable(void) { barrier(); }
95 95 
96 - void __weak 
hw_perf_counter_setup(int cpu) { barrier(); }
97 - void __weak hw_perf_counter_setup_online(int cpu) { barrier(); }
96 + void __weak hw_perf_event_setup(int cpu) { barrier(); }
97 + void __weak hw_perf_event_setup_online(int cpu) { barrier(); }
98 98 
99 99 int __weak
100 - hw_perf_group_sched_in(struct perf_counter *group_leader,
100 + hw_perf_group_sched_in(struct perf_event *group_leader,
101 101 struct perf_cpu_context *cpuctx,
102 - struct perf_counter_context *ctx, int cpu)
102 + struct perf_event_context *ctx, int cpu)
103 103 {
104 104 return 0;
105 105 }
106 106 
107 - void __weak perf_counter_print_debug(void) { }
107 + void __weak perf_event_print_debug(void) { }
108 108 
109 109 static DEFINE_PER_CPU(int, perf_disable_count);
110 110 
··· 130 130 hw_perf_enable();
131 131 }
132 132 
133 - static void get_ctx(struct perf_counter_context *ctx)
133 + static void get_ctx(struct perf_event_context *ctx)
134 134 {
135 135 WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
136 136 }
137 137 
138 138 static void free_ctx(struct rcu_head *head)
139 139 {
140 - struct perf_counter_context *ctx;
140 + struct perf_event_context *ctx;
141 141 
142 - ctx = container_of(head, struct perf_counter_context, rcu_head);
142 + ctx = container_of(head, struct perf_event_context, rcu_head);
143 143 kfree(ctx);
144 144 }
145 145 
146 - static void put_ctx(struct perf_counter_context *ctx)
146 + static void put_ctx(struct perf_event_context *ctx)
147 147 {
148 148 if (atomic_dec_and_test(&ctx->refcount)) {
149 149 if (ctx->parent_ctx)
··· 154 154 }
155 155 }
156 156 
157 - static void unclone_ctx(struct perf_counter_context *ctx)
157 + static void unclone_ctx(struct perf_event_context *ctx)
158 158 {
159 159 if (ctx->parent_ctx) {
160 160 put_ctx(ctx->parent_ctx);
··· 163 163 }
164 164 
165 165 /*
166 - * If we inherit counters we want to return the parent counter id
166 + * If we inherit events we want to return the parent event id
167 167 * to userspace.
168 168 */
169 - static u64 primary_counter_id(struct perf_counter *counter)
169 + static u64 primary_event_id(struct perf_event *event)
170 170 {
171 - u64 id = counter->id;
171 + u64 id = event->id;
172 172 
173 - if (counter->parent)
174 - id = counter->parent->id;
173 + if (event->parent)
174 + id = event->parent->id;
175 175 
176 176 return id;
177 177 }
178 178 
179 179 /*
180 - * Get the perf_counter_context for a task and lock it.
180 + * Get the perf_event_context for a task and lock it.
181 181 * This has to cope with the fact that until it is locked,
182 182 * the context could get moved to another task.
183 183 */
184 - static struct perf_counter_context *
184 + static struct perf_event_context *
185 185 perf_lock_task_context(struct task_struct *task, unsigned long *flags)
186 186 {
187 - struct perf_counter_context *ctx;
187 + struct perf_event_context *ctx;
188 188 
189 189 rcu_read_lock();
190 190 retry:
191 - ctx = rcu_dereference(task->perf_counter_ctxp);
191 + ctx = rcu_dereference(task->perf_event_ctxp);
192 192 if (ctx) {
193 193 /*
194 194 * If this context is a clone of another, it might
195 195 * get swapped for another underneath us by
196 - * perf_counter_task_sched_out, though the
196 + * perf_event_task_sched_out, though the
197 197 * rcu_read_lock() protects us from any context
198 198 * getting freed. Lock the context and check if it
199 199 * got swapped before we could get the lock, and retry
··· 201 201 * can't get swapped on us any more. 
202 202 */
203 203 spin_lock_irqsave(&ctx->lock, *flags);
204 - if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
204 + if (ctx != rcu_dereference(task->perf_event_ctxp)) {
205 205 spin_unlock_irqrestore(&ctx->lock, *flags);
206 206 goto retry;
207 207 }
··· 220 220 * can't get swapped to another task. This also increments its
221 221 * reference count so that the context can't get freed.
222 222 */
223 - static struct perf_counter_context *perf_pin_task_context(struct task_struct *task)
223 + static struct perf_event_context *perf_pin_task_context(struct task_struct *task)
224 224 {
225 - struct perf_counter_context *ctx;
225 + struct perf_event_context *ctx;
226 226 unsigned long flags;
227 227 
228 228 ctx = perf_lock_task_context(task, &flags);
··· 233 233 return ctx;
234 234 }
235 235 
236 - static void perf_unpin_context(struct perf_counter_context *ctx)
236 + static void perf_unpin_context(struct perf_event_context *ctx)
237 237 {
238 238 unsigned long flags;
239 239 
··· 244 244 }
245 245 
246 246 /*
247 - * Add a counter from the lists for its context.
247 + * Add an event to the lists for its context.
248 248 * Must be called with ctx->mutex and ctx->lock held.
249 249 */
250 250 static void
251 - list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
251 + list_add_event(struct perf_event *event, struct perf_event_context *ctx)
252 252 {
253 - struct perf_counter *group_leader = counter->group_leader;
253 + struct perf_event *group_leader = event->group_leader;
254 254 
255 255 /*
256 - * Depending on whether it is a standalone or sibling counter,
257 - * add it straight to the context's counter list, or to the group
256 + * Depending on whether it is a standalone or sibling event,
257 + * add it straight to the context's event list, or to the group
258 258 * leader's sibling list:
259 259 */
260 - if (group_leader == counter)
261 - list_add_tail(&counter->list_entry, &ctx->counter_list);
260 + if (group_leader == event)
261 + list_add_tail(&event->group_entry, &ctx->group_list);
262 262 else {
263 - list_add_tail(&counter->list_entry, &group_leader->sibling_list);
263 + list_add_tail(&event->group_entry, &group_leader->sibling_list);
264 264 group_leader->nr_siblings++;
265 265 }
266 266 
267 - list_add_rcu(&counter->event_entry, &ctx->event_list);
268 - ctx->nr_counters++;
269 - if (counter->attr.inherit_stat)
267 + list_add_rcu(&event->event_entry, &ctx->event_list);
268 + ctx->nr_events++;
269 + if (event->attr.inherit_stat)
270 270 ctx->nr_stat++;
271 271 }
272 272 
273 273 /*
274 - * Remove a counter from the lists for its context.
274 + * Remove an event from the lists for its context.
275 275 * Must be called with ctx->mutex and ctx->lock held. 
276 276 */ 277 277 static void 278 - list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx) 278 + list_del_event(struct perf_event *event, struct perf_event_context *ctx) 279 279 { 280 - struct perf_counter *sibling, *tmp; 280 + struct perf_event *sibling, *tmp; 281 281 282 - if (list_empty(&counter->list_entry)) 282 + if (list_empty(&event->group_entry)) 283 283 return; 284 - ctx->nr_counters--; 285 - if (counter->attr.inherit_stat) 284 + ctx->nr_events--; 285 + if (event->attr.inherit_stat) 286 286 ctx->nr_stat--; 287 287 288 - list_del_init(&counter->list_entry); 289 - list_del_rcu(&counter->event_entry); 288 + list_del_init(&event->group_entry); 289 + list_del_rcu(&event->event_entry); 290 290 291 - if (counter->group_leader != counter) 292 - counter->group_leader->nr_siblings--; 291 + if (event->group_leader != event) 292 + event->group_leader->nr_siblings--; 293 293 294 294 /* 295 - * If this was a group counter with sibling counters then 296 - * upgrade the siblings to singleton counters by adding them 295 + * If this was a group event with sibling events then 296 + * upgrade the siblings to singleton events by adding them 297 297 * to the context list directly: 298 298 */ 299 - list_for_each_entry_safe(sibling, tmp, 300 - &counter->sibling_list, list_entry) { 299 + list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) { 301 300 302 - list_move_tail(&sibling->list_entry, &ctx->counter_list); 301 + list_move_tail(&sibling->group_entry, &ctx->group_list); 303 302 sibling->group_leader = sibling; 304 303 } 305 304 } 306 305 307 306 static void 308 - counter_sched_out(struct perf_counter *counter, 307 + event_sched_out(struct perf_event *event, 309 308 struct perf_cpu_context *cpuctx, 310 - struct perf_counter_context *ctx) 309 + struct perf_event_context *ctx) 311 310 { 312 - if (counter->state != PERF_COUNTER_STATE_ACTIVE) 311 + if (event->state != PERF_EVENT_STATE_ACTIVE) 313 312 return; 314 313 315 - counter->state = PERF_COUNTER_STATE_INACTIVE; 316 - if (counter->pending_disable) { 317 - counter->pending_disable = 0; 318 - counter->state = PERF_COUNTER_STATE_OFF; 314 + event->state = PERF_EVENT_STATE_INACTIVE; 315 + if (event->pending_disable) { 316 + event->pending_disable = 0; 317 + event->state = PERF_EVENT_STATE_OFF; 319 318 } 320 - counter->tstamp_stopped = ctx->time; 321 - counter->pmu->disable(counter); 322 - counter->oncpu = -1; 319 + event->tstamp_stopped = ctx->time; 320 + event->pmu->disable(event); 321 + event->oncpu = -1; 323 322 324 - if (!is_software_counter(counter)) 323 + if (!is_software_event(event)) 325 324 cpuctx->active_oncpu--; 326 325 ctx->nr_active--; 327 - if (counter->attr.exclusive || !cpuctx->active_oncpu) 326 + if (event->attr.exclusive || !cpuctx->active_oncpu) 328 327 cpuctx->exclusive = 0; 329 328 } 330 329 331 330 static void 332 - group_sched_out(struct perf_counter *group_counter, 331 + group_sched_out(struct perf_event *group_event, 333 332 struct perf_cpu_context *cpuctx, 334 - struct perf_counter_context *ctx) 333 + struct perf_event_context *ctx) 335 334 { 336 - struct perf_counter *counter; 335 + struct perf_event *event; 337 336 338 - if (group_counter->state != PERF_COUNTER_STATE_ACTIVE) 337 + if (group_event->state != PERF_EVENT_STATE_ACTIVE) 339 338 return; 340 339 341 - counter_sched_out(group_counter, cpuctx, ctx); 340 + event_sched_out(group_event, cpuctx, ctx); 342 341 343 342 /* 344 343 * Schedule out siblings (if any): 345 344 */ 346 - list_for_each_entry(counter, 
&group_counter->sibling_list, list_entry) 347 - counter_sched_out(counter, cpuctx, ctx); 345 + list_for_each_entry(event, &group_event->sibling_list, group_entry) 346 + event_sched_out(event, cpuctx, ctx); 348 347 349 - if (group_counter->attr.exclusive) 348 + if (group_event->attr.exclusive) 350 349 cpuctx->exclusive = 0; 351 350 } 352 351 353 352 /* 354 - * Cross CPU call to remove a performance counter 353 + * Cross CPU call to remove a performance event 355 354 * 356 - * We disable the counter on the hardware level first. After that we 355 + * We disable the event on the hardware level first. After that we 357 356 * remove it from the context list. 358 357 */ 359 - static void __perf_counter_remove_from_context(void *info) 358 + static void __perf_event_remove_from_context(void *info) 360 359 { 361 360 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); 362 - struct perf_counter *counter = info; 363 - struct perf_counter_context *ctx = counter->ctx; 361 + struct perf_event *event = info; 362 + struct perf_event_context *ctx = event->ctx; 364 363 365 364 /* 366 365 * If this is a task context, we need to check whether it is ··· 372 373 spin_lock(&ctx->lock); 373 374 /* 374 375 * Protect the list operation against NMI by disabling the 375 - * counters on a global level. 376 + * events on a global level. 376 377 */ 377 378 perf_disable(); 378 379 379 - counter_sched_out(counter, cpuctx, ctx); 380 + event_sched_out(event, cpuctx, ctx); 380 381 381 - list_del_counter(counter, ctx); 382 + list_del_event(event, ctx); 382 383 383 384 if (!ctx->task) { 384 385 /* 385 - * Allow more per task counters with respect to the 386 + * Allow more per task events with respect to the 386 387 * reservation: 387 388 */ 388 389 cpuctx->max_pertask = 389 - min(perf_max_counters - ctx->nr_counters, 390 - perf_max_counters - perf_reserved_percpu); 390 + min(perf_max_events - ctx->nr_events, 391 + perf_max_events - perf_reserved_percpu); 391 392 } 392 393 393 394 perf_enable(); ··· 396 397 397 398 398 399 /* 399 - * Remove the counter from a task's (or a CPU's) list of counters. 400 + * Remove the event from a task's (or a CPU's) list of events. 400 401 * 401 402 * Must be called with ctx->mutex held. 402 403 * 403 - * CPU counters are removed with a smp call. For task counters we only 404 + * CPU events are removed with a smp call. For task events we only 404 405 * call when the task is on a CPU. 405 406 * 406 - * If counter->ctx is a cloned context, callers must make sure that 407 - * every task struct that counter->ctx->task could possibly point to 407 + * If event->ctx is a cloned context, callers must make sure that 408 + * every task struct that event->ctx->task could possibly point to 408 409 * remains valid. This is OK when called from perf_release since 409 410 * that only calls us on the top-level context, which can't be a clone. 410 - * When called from perf_counter_exit_task, it's OK because the 411 + * When called from perf_event_exit_task, it's OK because the 411 412 * context has been detached from its task. 
412 413 */
413 - static void perf_counter_remove_from_context(struct perf_counter *counter)
414 + static void perf_event_remove_from_context(struct perf_event *event)
414 415 {
415 - struct perf_counter_context *ctx = counter->ctx;
416 + struct perf_event_context *ctx = event->ctx;
416 417 struct task_struct *task = ctx->task;
417 418 
418 419 if (!task) {
419 420 /*
420 - * Per cpu counters are removed via an smp call and
421 + * Per cpu events are removed via an smp call and
421 422 * the removal is always successful.
422 423 */
423 - smp_call_function_single(counter->cpu,
424 - __perf_counter_remove_from_context,
425 - counter, 1);
424 + smp_call_function_single(event->cpu,
425 + __perf_event_remove_from_context,
426 + event, 1);
426 427 return;
427 428 }
428 429 
429 430 retry:
430 - task_oncpu_function_call(task, __perf_counter_remove_from_context,
431 - counter);
431 + task_oncpu_function_call(task, __perf_event_remove_from_context,
432 + event);
432 433 
433 434 spin_lock_irq(&ctx->lock);
434 435 /*
435 436 * If the context is active we need to retry the smp call.
436 437 */
437 - if (ctx->nr_active && !list_empty(&counter->list_entry)) {
438 + if (ctx->nr_active && !list_empty(&event->group_entry)) {
438 439 spin_unlock_irq(&ctx->lock);
439 440 goto retry;
440 441 }
441 442 
442 443 /*
443 444 * The lock prevents that this context is scheduled in so we
444 - * can remove the counter safely, if the call above did not
445 + * can remove the event safely, if the call above did not
445 446 * succeed.
446 447 */
447 - if (!list_empty(&counter->list_entry)) {
448 - list_del_counter(counter, ctx);
448 + if (!list_empty(&event->group_entry)) {
449 + list_del_event(event, ctx);
449 450 }
450 451 spin_unlock_irq(&ctx->lock);
451 452 }
··· 458 459 /*
459 460 * Update the record of the current time in a context.
460 461 */
461 - static void update_context_time(struct perf_counter_context *ctx)
462 + static void update_context_time(struct perf_event_context *ctx)
462 463 {
463 464 u64 now = perf_clock();
464 465 
··· 467 468 }
468 469 
469 470 /*
470 - * Update the total_time_enabled and total_time_running fields for a counter.
471 + * Update the total_time_enabled and total_time_running fields for an event.
471 472 */
472 - static void update_counter_times(struct perf_counter *counter)
473 + static void update_event_times(struct perf_event *event)
473 474 {
474 - struct perf_counter_context *ctx = counter->ctx;
475 + struct perf_event_context *ctx = event->ctx;
475 476 u64 run_end;
476 477 
477 - if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
478 - counter->group_leader->state < PERF_COUNTER_STATE_INACTIVE)
478 + if (event->state < PERF_EVENT_STATE_INACTIVE ||
479 + event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
479 480 return;
480 481 
481 - counter->total_time_enabled = ctx->time - counter->tstamp_enabled;
482 + event->total_time_enabled = ctx->time - event->tstamp_enabled;
482 483 
483 - if (counter->state == PERF_COUNTER_STATE_INACTIVE)
484 - run_end = counter->tstamp_stopped;
484 + if (event->state == PERF_EVENT_STATE_INACTIVE)
485 + run_end = event->tstamp_stopped;
485 486 else
486 487 run_end = ctx->time;
487 488 
488 - counter->total_time_running = run_end - counter->tstamp_running;
489 + event->total_time_running = run_end - event->tstamp_running;
489 490 }
490 491 
491 492 /*
492 - * Update total_time_enabled and total_time_running for all counters in a group.
493 + * Update total_time_enabled and total_time_running for all events in a group. 
493 494 */
494 - static void update_group_times(struct perf_counter *leader)
495 + static void update_group_times(struct perf_event *leader)
495 496 {
496 - struct perf_counter *counter;
497 + struct perf_event *event;
497 498 
498 - update_counter_times(leader);
499 - list_for_each_entry(counter, &leader->sibling_list, list_entry)
500 - update_counter_times(counter);
499 + update_event_times(leader);
500 + list_for_each_entry(event, &leader->sibling_list, group_entry)
501 + update_event_times(event);
501 502 }
502 503 
503 504 /*
504 - * Cross CPU call to disable a performance counter
505 + * Cross CPU call to disable a performance event
505 506 */
506 - static void __perf_counter_disable(void *info)
507 + static void __perf_event_disable(void *info)
507 508 {
508 - struct perf_counter *counter = info;
509 + struct perf_event *event = info;
509 510 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
510 - struct perf_counter_context *ctx = counter->ctx;
511 + struct perf_event_context *ctx = event->ctx;
511 512 
512 513 /*
513 - * If this is a per-task counter, need to check whether this
514 - * counter's task is the current task on this cpu.
514 + * If this is a per-task event, need to check whether this
515 + * event's task is the current task on this cpu.
515 516 */
516 517 if (ctx->task && cpuctx->task_ctx != ctx)
517 518 return;
··· 519 520 spin_lock(&ctx->lock);
520 521 
521 522 /*
522 - * If the counter is on, turn it off.
523 + * If the event is on, turn it off.
523 524 * If it is in error state, leave it in error state.
524 525 */
525 - if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
526 + if (event->state >= PERF_EVENT_STATE_INACTIVE) {
526 527 update_context_time(ctx);
527 - update_group_times(counter);
528 - if (counter == counter->group_leader)
529 - group_sched_out(counter, cpuctx, ctx);
528 + update_group_times(event);
529 + if (event == event->group_leader)
530 + group_sched_out(event, cpuctx, ctx);
530 531 else
531 - counter_sched_out(counter, cpuctx, ctx);
532 - counter->state = PERF_COUNTER_STATE_OFF;
532 + event_sched_out(event, cpuctx, ctx);
533 + event->state = PERF_EVENT_STATE_OFF;
533 534 }
534 535 
535 536 spin_unlock(&ctx->lock);
536 537 }
537 538 
538 539 /*
539 - * Disable a counter.
540 + * Disable an event.
540 541 *
541 - * If counter->ctx is a cloned context, callers must make sure that
542 - * every task struct that counter->ctx->task could possibly point to
542 + * If event->ctx is a cloned context, callers must make sure that
543 + * every task struct that event->ctx->task could possibly point to
543 544 * remains valid. This condition is satisfied when called through
544 - * perf_counter_for_each_child or perf_counter_for_each because they
545 - * hold the top-level counter's child_mutex, so any descendant that
546 - * goes to exit will block in sync_child_counter.
547 - * When called from perf_pending_counter it's OK because counter->ctx
545 + * perf_event_for_each_child or perf_event_for_each because they
546 + * hold the top-level event's child_mutex, so any descendant that
547 + * goes to exit will block in sync_child_event.
548 + * When called from perf_pending_event it's OK because event->ctx
548 549 * is the current context on this CPU and preemption is disabled,
549 - * hence we can't get into perf_counter_task_sched_out for this context.
550 + * hence we can't get into perf_event_task_sched_out for this context. 
550 551 */ 551 - static void perf_counter_disable(struct perf_counter *counter) 552 + static void perf_event_disable(struct perf_event *event) 552 553 { 553 - struct perf_counter_context *ctx = counter->ctx; 554 + struct perf_event_context *ctx = event->ctx; 554 555 struct task_struct *task = ctx->task; 555 556 556 557 if (!task) { 557 558 /* 558 - * Disable the counter on the cpu that it's on 559 + * Disable the event on the cpu that it's on 559 560 */ 560 - smp_call_function_single(counter->cpu, __perf_counter_disable, 561 - counter, 1); 561 + smp_call_function_single(event->cpu, __perf_event_disable, 562 + event, 1); 562 563 return; 563 564 } 564 565 565 566 retry: 566 - task_oncpu_function_call(task, __perf_counter_disable, counter); 567 + task_oncpu_function_call(task, __perf_event_disable, event); 567 568 568 569 spin_lock_irq(&ctx->lock); 569 570 /* 570 - * If the counter is still active, we need to retry the cross-call. 571 + * If the event is still active, we need to retry the cross-call. 571 572 */ 572 - if (counter->state == PERF_COUNTER_STATE_ACTIVE) { 573 + if (event->state == PERF_EVENT_STATE_ACTIVE) { 573 574 spin_unlock_irq(&ctx->lock); 574 575 goto retry; 575 576 } ··· 578 579 * Since we have the lock this context can't be scheduled 579 580 * in, so we can change the state safely. 580 581 */ 581 - if (counter->state == PERF_COUNTER_STATE_INACTIVE) { 582 - update_group_times(counter); 583 - counter->state = PERF_COUNTER_STATE_OFF; 582 + if (event->state == PERF_EVENT_STATE_INACTIVE) { 583 + update_group_times(event); 584 + event->state = PERF_EVENT_STATE_OFF; 584 585 } 585 586 586 587 spin_unlock_irq(&ctx->lock); 587 588 } 588 589 589 590 static int 590 - counter_sched_in(struct perf_counter *counter, 591 + event_sched_in(struct perf_event *event, 591 592 struct perf_cpu_context *cpuctx, 592 - struct perf_counter_context *ctx, 593 + struct perf_event_context *ctx, 593 594 int cpu) 594 595 { 595 - if (counter->state <= PERF_COUNTER_STATE_OFF) 596 + if (event->state <= PERF_EVENT_STATE_OFF) 596 597 return 0; 597 598 598 - counter->state = PERF_COUNTER_STATE_ACTIVE; 599 - counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */ 599 + event->state = PERF_EVENT_STATE_ACTIVE; 600 + event->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */ 600 601 /* 601 602 * The new state must be visible before we turn it on in the hardware: 602 603 */ 603 604 smp_wmb(); 604 605 605 - if (counter->pmu->enable(counter)) { 606 - counter->state = PERF_COUNTER_STATE_INACTIVE; 607 - counter->oncpu = -1; 606 + if (event->pmu->enable(event)) { 607 + event->state = PERF_EVENT_STATE_INACTIVE; 608 + event->oncpu = -1; 608 609 return -EAGAIN; 609 610 } 610 611 611 - counter->tstamp_running += ctx->time - counter->tstamp_stopped; 612 + event->tstamp_running += ctx->time - event->tstamp_stopped; 612 613 613 - if (!is_software_counter(counter)) 614 + if (!is_software_event(event)) 614 615 cpuctx->active_oncpu++; 615 616 ctx->nr_active++; 616 617 617 - if (counter->attr.exclusive) 618 + if (event->attr.exclusive) 618 619 cpuctx->exclusive = 1; 619 620 620 621 return 0; 621 622 } 622 623 623 624 static int 624 - group_sched_in(struct perf_counter *group_counter, 625 + group_sched_in(struct perf_event *group_event, 625 626 struct perf_cpu_context *cpuctx, 626 - struct perf_counter_context *ctx, 627 + struct perf_event_context *ctx, 627 628 int cpu) 628 629 { 629 - struct perf_counter *counter, *partial_group; 630 + struct perf_event *event, *partial_group; 630 631 int ret; 631 632 632 - if 
(group_counter->state == PERF_COUNTER_STATE_OFF) 633 + if (group_event->state == PERF_EVENT_STATE_OFF) 633 634 return 0; 634 635 635 - ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu); 636 + ret = hw_perf_group_sched_in(group_event, cpuctx, ctx, cpu); 636 637 if (ret) 637 638 return ret < 0 ? ret : 0; 638 639 639 - if (counter_sched_in(group_counter, cpuctx, ctx, cpu)) 640 + if (event_sched_in(group_event, cpuctx, ctx, cpu)) 640 641 return -EAGAIN; 641 642 642 643 /* 643 644 * Schedule in siblings as one group (if any): 644 645 */ 645 - list_for_each_entry(counter, &group_counter->sibling_list, list_entry) { 646 - if (counter_sched_in(counter, cpuctx, ctx, cpu)) { 647 - partial_group = counter; 646 + list_for_each_entry(event, &group_event->sibling_list, group_entry) { 647 + if (event_sched_in(event, cpuctx, ctx, cpu)) { 648 + partial_group = event; 648 649 goto group_error; 649 650 } 650 651 } ··· 656 657 * Groups can be scheduled in as one unit only, so undo any 657 658 * partial group before returning: 658 659 */ 659 - list_for_each_entry(counter, &group_counter->sibling_list, list_entry) { 660 - if (counter == partial_group) 660 + list_for_each_entry(event, &group_event->sibling_list, group_entry) { 661 + if (event == partial_group) 661 662 break; 662 - counter_sched_out(counter, cpuctx, ctx); 663 + event_sched_out(event, cpuctx, ctx); 663 664 } 664 - counter_sched_out(group_counter, cpuctx, ctx); 665 + event_sched_out(group_event, cpuctx, ctx); 665 666 666 667 return -EAGAIN; 667 668 } 668 669 669 670 /* 670 - * Return 1 for a group consisting entirely of software counters, 671 - * 0 if the group contains any hardware counters. 671 + * Return 1 for a group consisting entirely of software events, 672 + * 0 if the group contains any hardware events. 672 673 */ 673 - static int is_software_only_group(struct perf_counter *leader) 674 + static int is_software_only_group(struct perf_event *leader) 674 675 { 675 - struct perf_counter *counter; 676 + struct perf_event *event; 676 677 677 - if (!is_software_counter(leader)) 678 + if (!is_software_event(leader)) 678 679 return 0; 679 680 680 - list_for_each_entry(counter, &leader->sibling_list, list_entry) 681 - if (!is_software_counter(counter)) 681 + list_for_each_entry(event, &leader->sibling_list, group_entry) 682 + if (!is_software_event(event)) 682 683 return 0; 683 684 684 685 return 1; 685 686 } 686 687 687 688 /* 688 - * Work out whether we can put this counter group on the CPU now. 689 + * Work out whether we can put this event group on the CPU now. 689 690 */ 690 - static int group_can_go_on(struct perf_counter *counter, 691 + static int group_can_go_on(struct perf_event *event, 691 692 struct perf_cpu_context *cpuctx, 692 693 int can_add_hw) 693 694 { 694 695 /* 695 - * Groups consisting entirely of software counters can always go on. 696 + * Groups consisting entirely of software events can always go on. 696 697 */ 697 - if (is_software_only_group(counter)) 698 + if (is_software_only_group(event)) 698 699 return 1; 699 700 /* 700 701 * If an exclusive group is already on, no other hardware 701 - * counters can go on. 702 + * events can go on. 702 703 */ 703 704 if (cpuctx->exclusive) 704 705 return 0; 705 706 /* 706 707 * If this group is exclusive and there are already 707 - * counters on the CPU, it can't go on. 708 + * events on the CPU, it can't go on. 
708 709 */ 709 - if (counter->attr.exclusive && cpuctx->active_oncpu) 710 + if (event->attr.exclusive && cpuctx->active_oncpu) 710 711 return 0; 711 712 /* 712 713 * Otherwise, try to add it if all previous groups were able ··· 715 716 return can_add_hw; 716 717 } 717 718 718 - static void add_counter_to_ctx(struct perf_counter *counter, 719 - struct perf_counter_context *ctx) 719 + static void add_event_to_ctx(struct perf_event *event, 720 + struct perf_event_context *ctx) 720 721 { 721 - list_add_counter(counter, ctx); 722 - counter->tstamp_enabled = ctx->time; 723 - counter->tstamp_running = ctx->time; 724 - counter->tstamp_stopped = ctx->time; 722 + list_add_event(event, ctx); 723 + event->tstamp_enabled = ctx->time; 724 + event->tstamp_running = ctx->time; 725 + event->tstamp_stopped = ctx->time; 725 726 } 726 727 727 728 /* 728 - * Cross CPU call to install and enable a performance counter 729 + * Cross CPU call to install and enable a performance event 729 730 * 730 731 * Must be called with ctx->mutex held 731 732 */ 732 733 static void __perf_install_in_context(void *info) 733 734 { 734 735 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); 735 - struct perf_counter *counter = info; 736 - struct perf_counter_context *ctx = counter->ctx; 737 - struct perf_counter *leader = counter->group_leader; 736 + struct perf_event *event = info; 737 + struct perf_event_context *ctx = event->ctx; 738 + struct perf_event *leader = event->group_leader; 738 739 int cpu = smp_processor_id(); 739 740 int err; 740 741 ··· 743 744 * the current task context of this cpu. If not it has been 744 745 * scheduled out before the smp call arrived. 745 746 * Or possibly this is the right context but it isn't 746 - * on this cpu because it had no counters. 747 + * on this cpu because it had no events. 747 748 */ 748 749 if (ctx->task && cpuctx->task_ctx != ctx) { 749 750 if (cpuctx->task_ctx || ctx->task != current) ··· 757 758 758 759 /* 759 760 * Protect the list operation against NMI by disabling the 760 - * counters on a global level. NOP for non NMI based counters. 761 + * events on a global level. NOP for non NMI based events. 761 762 */ 762 763 perf_disable(); 763 764 764 - add_counter_to_ctx(counter, ctx); 765 + add_event_to_ctx(event, ctx); 765 766 766 767 /* 767 - * Don't put the counter on if it is disabled or if 768 + * Don't put the event on if it is disabled or if 768 769 * it is in a group and the group isn't on. 769 770 */ 770 - if (counter->state != PERF_COUNTER_STATE_INACTIVE || 771 - (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)) 771 + if (event->state != PERF_EVENT_STATE_INACTIVE || 772 + (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)) 772 773 goto unlock; 773 774 774 775 /* 775 - * An exclusive counter can't go on if there are already active 776 - * hardware counters, and no hardware counter can go on if there 777 - * is already an exclusive counter on. 776 + * An exclusive event can't go on if there are already active 777 + * hardware events, and no hardware event can go on if there 778 + * is already an exclusive event on. 778 779 */ 779 - if (!group_can_go_on(counter, cpuctx, 1)) 780 + if (!group_can_go_on(event, cpuctx, 1)) 780 781 err = -EEXIST; 781 782 else 782 - err = counter_sched_in(counter, cpuctx, ctx, cpu); 783 + err = event_sched_in(event, cpuctx, ctx, cpu); 783 784 784 785 if (err) { 785 786 /* 786 - * This counter couldn't go on. If it is in a group 787 + * This event couldn't go on. 
If it is in a group 787 788 * then we have to pull the whole group off. 788 - * If the counter group is pinned then put it in error state. 789 + * If the event group is pinned then put it in error state. 789 790 */ 790 - if (leader != counter) 791 + if (leader != event) 791 792 group_sched_out(leader, cpuctx, ctx); 792 793 if (leader->attr.pinned) { 793 794 update_group_times(leader); 794 - leader->state = PERF_COUNTER_STATE_ERROR; 795 + leader->state = PERF_EVENT_STATE_ERROR; 795 796 } 796 797 } 797 798 ··· 805 806 } 806 807 807 808 /* 808 - * Attach a performance counter to a context 809 + * Attach a performance event to a context 809 810 * 810 - * First we add the counter to the list with the hardware enable bit 811 - * in counter->hw_config cleared. 811 + * First we add the event to the list with the hardware enable bit 812 + * in event->hw_config cleared. 812 813 * 813 - * If the counter is attached to a task which is on a CPU we use a smp 814 + * If the event is attached to a task which is on a CPU we use an smp 814 815 * call to enable it in the task context. The task might have been 815 816 * scheduled away, but we check this in the smp call again. 816 817 * 817 818 * Must be called with ctx->mutex held. 818 819 */ 819 820 static void 820 - perf_install_in_context(struct perf_counter_context *ctx, 821 - struct perf_counter *counter, 821 + perf_install_in_context(struct perf_event_context *ctx, 822 + struct perf_event *event, 822 823 int cpu) 823 824 { 824 825 struct task_struct *task = ctx->task; 825 826 826 827 if (!task) { 827 828 /* 828 - * Per cpu counters are installed via an smp call and 829 + * Per cpu events are installed via an smp call and 829 830 * the install is always successful. 830 831 */ 831 832 smp_call_function_single(cpu, __perf_install_in_context, 832 - counter, 1); 833 + event, 1); 833 834 return; 834 835 } 835 836 836 837 retry: 837 838 task_oncpu_function_call(task, __perf_install_in_context, 838 - counter); 839 + event); 839 840 840 841 spin_lock_irq(&ctx->lock); 841 842 /* 842 843 * we need to retry the smp call. 843 844 */ 844 - if (ctx->is_active && list_empty(&counter->list_entry)) { 845 + if (ctx->is_active && list_empty(&event->group_entry)) { 845 846 spin_unlock_irq(&ctx->lock); 846 847 goto retry; 847 848 } 848 849 849 850 /* 850 851 * The lock prevents this context from being scheduled in, so we 851 - * can add the counter safely, if it the call above did not 852 + * can add the event safely, if the call above did not 852 853 * succeed. 853 854 */ 854 - if (list_empty(&counter->list_entry)) 855 - add_counter_to_ctx(counter, ctx); 855 + if (list_empty(&event->group_entry)) 856 + add_event_to_ctx(event, ctx); 856 857 spin_unlock_irq(&ctx->lock); 857 858 } 858 859 859 860 /* 860 - * Put a counter into inactive state and update time fields. 861 + * Put an event into inactive state and update time fields. 861 862 * Enabling the leader of a group effectively enables all 862 863 * the group members that aren't explicitly disabled, so we 863 864 * have to update their ->tstamp_enabled also. 864 865 * Note: this works for group members as well as group leaders 865 866 * since the non-leader members' sibling_lists will be empty. 
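[Editor's note — illustration only, not part of this commit. The tstamp_enabled/tstamp_running bookkeeping installed by add_event_to_ctx() is what user space later sees as PERF_FORMAT_TOTAL_TIME_ENABLED/RUNNING, and it is what lets callers scale counts when events were multiplexed. A sketch, assuming a read_format that carries both time fields; the sample numbers in main() are made up.]

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Layout written by perf_event_read_one() when read_format is
     * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING.
     */
    struct read_one {
        uint64_t value;
        uint64_t time_enabled;
        uint64_t time_running;
    };

    /* Estimate the count as if the event had been on the PMU for the
     * whole time it was enabled (running < enabled under multiplexing). */
    static uint64_t scaled(const struct read_one *r)
    {
        if (!r->time_running)
            return 0;   /* never scheduled at all */
        return (uint64_t)((double)r->value * r->time_enabled / r->time_running);
    }

    int main(void)
    {
        struct read_one r = { 500000, 2000000, 1000000 };  /* made-up numbers */
        printf("scaled count: %llu\n", (unsigned long long)scaled(&r));
        return 0;
    }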
866 867 */ 867 - static void __perf_counter_mark_enabled(struct perf_counter *counter, 868 - struct perf_counter_context *ctx) 868 + static void __perf_event_mark_enabled(struct perf_event *event, 869 + struct perf_event_context *ctx) 869 870 { 870 - struct perf_counter *sub; 871 + struct perf_event *sub; 871 872 872 - counter->state = PERF_COUNTER_STATE_INACTIVE; 873 - counter->tstamp_enabled = ctx->time - counter->total_time_enabled; 874 - list_for_each_entry(sub, &counter->sibling_list, list_entry) 875 - if (sub->state >= PERF_COUNTER_STATE_INACTIVE) 873 + event->state = PERF_EVENT_STATE_INACTIVE; 874 + event->tstamp_enabled = ctx->time - event->total_time_enabled; 875 + list_for_each_entry(sub, &event->sibling_list, group_entry) 876 + if (sub->state >= PERF_EVENT_STATE_INACTIVE) 876 877 sub->tstamp_enabled = 877 878 ctx->time - sub->total_time_enabled; 878 879 } 879 880 880 881 /* 881 - * Cross CPU call to enable a performance counter 882 + * Cross CPU call to enable a performance event 882 883 */ 883 - static void __perf_counter_enable(void *info) 884 + static void __perf_event_enable(void *info) 884 885 { 885 - struct perf_counter *counter = info; 886 + struct perf_event *event = info; 886 887 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); 887 - struct perf_counter_context *ctx = counter->ctx; 888 - struct perf_counter *leader = counter->group_leader; 888 + struct perf_event_context *ctx = event->ctx; 889 + struct perf_event *leader = event->group_leader; 889 890 int err; 890 891 891 892 /* 892 - * If this is a per-task counter, need to check whether this 893 - * counter's task is the current task on this cpu. 893 + * If this is a per-task event, need to check whether this 894 + * event's task is the current task on this cpu. 894 895 */ 895 896 if (ctx->task && cpuctx->task_ctx != ctx) { 896 897 if (cpuctx->task_ctx || ctx->task != current) ··· 902 903 ctx->is_active = 1; 903 904 update_context_time(ctx); 904 905 905 - if (counter->state >= PERF_COUNTER_STATE_INACTIVE) 906 + if (event->state >= PERF_EVENT_STATE_INACTIVE) 906 907 goto unlock; 907 - __perf_counter_mark_enabled(counter, ctx); 908 + __perf_event_mark_enabled(event, ctx); 908 909 909 910 /* 910 - * If the counter is in a group and isn't the group leader, 911 + * If the event is in a group and isn't the group leader, 911 912 * then don't put it on unless the group is on. 912 913 */ 913 - if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE) 914 + if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) 914 915 goto unlock; 915 916 916 - if (!group_can_go_on(counter, cpuctx, 1)) { 917 + if (!group_can_go_on(event, cpuctx, 1)) { 917 918 err = -EEXIST; 918 919 } else { 919 920 perf_disable(); 920 - if (counter == leader) 921 - err = group_sched_in(counter, cpuctx, ctx, 921 + if (event == leader) 922 + err = group_sched_in(event, cpuctx, ctx, 922 923 smp_processor_id()); 923 924 else 924 - err = counter_sched_in(counter, cpuctx, ctx, 925 + err = event_sched_in(event, cpuctx, ctx, 925 926 smp_processor_id()); 926 927 perf_enable(); 927 928 } 928 929 929 930 if (err) { 930 931 /* 931 - * If this counter can't go on and it's part of a 932 + * If this event can't go on and it's part of a 932 933 * group, then the whole group has to come off. 
933 934 */ 934 - if (leader != counter) 935 + if (leader != event) 935 936 group_sched_out(leader, cpuctx, ctx); 936 937 if (leader->attr.pinned) { 937 938 update_group_times(leader); 938 - leader->state = PERF_COUNTER_STATE_ERROR; 939 + leader->state = PERF_EVENT_STATE_ERROR; 939 940 } 940 941 941 942 ··· 944 945 } 945 946 946 947 /* 947 - * Enable a counter. 948 + * Enable an event. 948 949 * 949 - * If counter->ctx is a cloned context, callers must make sure that 950 - * every task struct that counter->ctx->task could possibly point to 950 + * If event->ctx is a cloned context, callers must make sure that 951 + * every task struct that event->ctx->task could possibly point to 951 952 * remains valid. This condition is satisfied when called through 952 - * perf_counter_for_each_child or perf_counter_for_each as described 953 - * for perf_counter_disable. 953 + * perf_event_for_each_child or perf_event_for_each as described 954 + * for perf_event_disable. 954 955 */ 955 - static void perf_counter_enable(struct perf_counter *counter) 956 + static void perf_event_enable(struct perf_event *event) 956 957 { 957 - struct perf_counter_context *ctx = counter->ctx; 958 + struct perf_event_context *ctx = event->ctx; 958 959 struct task_struct *task = ctx->task; 959 960 960 961 if (!task) { 961 962 /* 962 - * Enable the counter on the cpu that it's on 963 + * Enable the event on the cpu that it's on 963 964 */ 964 - smp_call_function_single(counter->cpu, __perf_counter_enable, 965 - counter, 1); 965 + smp_call_function_single(event->cpu, __perf_event_enable, 966 + event, 1); 966 967 return; 967 968 } 968 969 969 970 spin_lock_irq(&ctx->lock); 970 - if (counter->state >= PERF_COUNTER_STATE_INACTIVE) 971 + if (event->state >= PERF_EVENT_STATE_INACTIVE) 971 972 goto out; 972 973 973 974 /* 974 - * If the counter is in error state, clear that first. 975 - * That way, if we see the counter in error state below, we 975 + * If the event is in error state, clear that first. 976 + * That way, if we see the event in error state below, we 976 977 * know that it has gone back into error state, as distinct 977 978 * from the task having been scheduled away before the 978 979 * cross-call arrived. 979 980 */ 980 - if (counter->state == PERF_COUNTER_STATE_ERROR) 981 - counter->state = PERF_COUNTER_STATE_OFF; 981 + if (event->state == PERF_EVENT_STATE_ERROR) 982 + event->state = PERF_EVENT_STATE_OFF; 982 983 983 984 retry: 984 985 spin_unlock_irq(&ctx->lock); 985 - task_oncpu_function_call(task, __perf_counter_enable, counter); 986 + task_oncpu_function_call(task, __perf_event_enable, event); 986 987 987 988 spin_lock_irq(&ctx->lock); 988 989 989 990 /* 990 - * If the context is active and the counter is still off, 991 + * If the context is active and the event is still off, 991 992 * we need to retry the cross-call. 992 993 */ 993 - if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF) 994 + if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) 994 995 goto retry; 995 996 996 997 /* 997 998 * Since we have the lock this context can't be scheduled 998 999 * in, so we can change the state safely. 
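[Editor's note — illustration only, not part of this commit. group_sched_in()/group_sched_out() treat a leader and its siblings as one unit, and the ioctl layer exposes that via PERF_IOC_FLAG_GROUP (dispatched through perf_event_for_each() later in this file). A sketch; the two software events chosen are assumptions.]

    #include <linux/perf_event.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <string.h>
    #include <unistd.h>

    static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
    {
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
        struct perf_event_attr attr;
        int leader, sibling;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_SOFTWARE;
        attr.size = sizeof(attr);
        attr.config = PERF_COUNT_SW_TASK_CLOCK;
        attr.disabled = 1;              /* the leader gates the whole group */
        leader = perf_event_open(&attr, 0, -1, -1, 0);

        attr.config = PERF_COUNT_SW_PAGE_FAULTS;
        attr.disabled = 0;              /* siblings follow the leader's state */
        sibling = perf_event_open(&attr, 0, -1, leader, 0);

        /* One ioctl toggles the leader and every sibling together. */
        ioctl(leader, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
        /* ... workload ... */
        ioctl(leader, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);

        close(sibling);
        close(leader);
        return 0;
    }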
999 1000 */ 1000 - if (counter->state == PERF_COUNTER_STATE_OFF) 1001 - __perf_counter_mark_enabled(counter, ctx); 1001 + if (event->state == PERF_EVENT_STATE_OFF) 1002 + __perf_event_mark_enabled(event, ctx); 1002 1003 1003 1004 out: 1004 1005 spin_unlock_irq(&ctx->lock); 1005 1006 } 1006 1007 1007 - static int perf_counter_refresh(struct perf_counter *counter, int refresh) 1008 + static int perf_event_refresh(struct perf_event *event, int refresh) 1008 1009 { 1009 1010 /* 1010 - * not supported on inherited counters 1011 + * not supported on inherited events 1011 1012 */ 1012 - if (counter->attr.inherit) 1013 + if (event->attr.inherit) 1013 1014 return -EINVAL; 1014 1015 1015 - atomic_add(refresh, &counter->event_limit); 1016 - perf_counter_enable(counter); 1016 + atomic_add(refresh, &event->event_limit); 1017 + perf_event_enable(event); 1017 1018 1018 1019 return 0; 1019 1020 } 1020 1021 1021 - void __perf_counter_sched_out(struct perf_counter_context *ctx, 1022 + void __perf_event_sched_out(struct perf_event_context *ctx, 1022 1023 struct perf_cpu_context *cpuctx) 1023 1024 { 1024 - struct perf_counter *counter; 1025 + struct perf_event *event; 1025 1026 1026 1027 spin_lock(&ctx->lock); 1027 1028 ctx->is_active = 0; 1028 - if (likely(!ctx->nr_counters)) 1029 + if (likely(!ctx->nr_events)) 1029 1030 goto out; 1030 1031 update_context_time(ctx); 1031 1032 1032 1033 perf_disable(); 1033 1034 if (ctx->nr_active) { 1034 - list_for_each_entry(counter, &ctx->counter_list, list_entry) { 1035 - if (counter != counter->group_leader) 1036 - counter_sched_out(counter, cpuctx, ctx); 1035 + list_for_each_entry(event, &ctx->group_list, group_entry) { 1036 + if (event != event->group_leader) 1037 + event_sched_out(event, cpuctx, ctx); 1037 1038 else 1038 - group_sched_out(counter, cpuctx, ctx); 1039 + group_sched_out(event, cpuctx, ctx); 1039 1040 } 1040 1041 } 1041 1042 perf_enable(); ··· 1046 1047 /* 1047 1048 * Test whether two contexts are equivalent, i.e. whether they 1048 1049 * have both been cloned from the same version of the same context 1049 - * and they both have the same number of enabled counters. 1050 - * If the number of enabled counters is the same, then the set 1051 - * of enabled counters should be the same, because these are both 1052 - * inherited contexts, therefore we can't access individual counters 1050 + * and they both have the same number of enabled events. 1051 + * If the number of enabled events is the same, then the set 1052 + * of enabled events should be the same, because these are both 1053 + * inherited contexts, therefore we can't access individual events 1053 1054 * in them directly with an fd; we can only enable/disable all 1054 - * counters via prctl, or enable/disable all counters in a family 1055 + * events via prctl, or enable/disable all events in a family 1055 1056 * via ioctl, which will have the same effect on both contexts. 
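[Editor's note — illustration only, not part of this commit. The "enable/disable all events via prctl" mentioned in the comment above maps to perf_event_task_enable()/perf_event_task_disable() later in this file; the prctl constants are assumed to be the renamed PR_TASK_PERF_EVENTS_* names (older libc headers may require <linux/prctl.h>).]

    #include <sys/prctl.h>

    int main(void)
    {
        /* Disable, then re-enable, every perf event attached to this
         * task; these land in perf_event_task_disable() and
         * perf_event_task_enable(). */
        prctl(PR_TASK_PERF_EVENTS_DISABLE);
        /* ... region excluded from measurement ... */
        prctl(PR_TASK_PERF_EVENTS_ENABLE);
        return 0;
    }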
1056 1057 */ 1057 - static int context_equiv(struct perf_counter_context *ctx1, 1058 - struct perf_counter_context *ctx2) 1058 + static int context_equiv(struct perf_event_context *ctx1, 1059 + struct perf_event_context *ctx2) 1059 1060 { 1060 1061 return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx 1061 1062 && ctx1->parent_gen == ctx2->parent_gen 1062 1063 && !ctx1->pin_count && !ctx2->pin_count; 1063 1064 } 1064 1065 1065 - static void __perf_counter_read(void *counter); 1066 + static void __perf_event_read(void *event); 1066 1067 1067 - static void __perf_counter_sync_stat(struct perf_counter *counter, 1068 - struct perf_counter *next_counter) 1068 + static void __perf_event_sync_stat(struct perf_event *event, 1069 + struct perf_event *next_event) 1069 1070 { 1070 1071 u64 value; 1071 1072 1072 - if (!counter->attr.inherit_stat) 1073 + if (!event->attr.inherit_stat) 1073 1074 return; 1074 1075 1075 1076 /* 1076 - * Update the counter value, we cannot use perf_counter_read() 1077 + * Update the event value, we cannot use perf_event_read() 1077 1078 * because we're in the middle of a context switch and have IRQs 1078 1079 * disabled, which upsets smp_call_function_single(), however 1079 - * we know the counter must be on the current CPU, therefore we 1080 + * we know the event must be on the current CPU, therefore we 1080 1081 * don't need to use it. 1081 1082 */ 1082 - switch (counter->state) { 1083 - case PERF_COUNTER_STATE_ACTIVE: 1084 - __perf_counter_read(counter); 1083 + switch (event->state) { 1084 + case PERF_EVENT_STATE_ACTIVE: 1085 + __perf_event_read(event); 1085 1086 break; 1086 1087 1087 - case PERF_COUNTER_STATE_INACTIVE: 1088 - update_counter_times(counter); 1088 + case PERF_EVENT_STATE_INACTIVE: 1089 + update_event_times(event); 1089 1090 break; 1090 1091 1091 1092 default: ··· 1093 1094 } 1094 1095 1095 1096 /* 1096 - * In order to keep per-task stats reliable we need to flip the counter 1097 + * In order to keep per-task stats reliable we need to flip the event 1097 1098 * values when we flip the contexts. 1098 1099 */ 1099 - value = atomic64_read(&next_counter->count); 1100 - value = atomic64_xchg(&counter->count, value); 1101 - atomic64_set(&next_counter->count, value); 1100 + value = atomic64_read(&next_event->count); 1101 + value = atomic64_xchg(&event->count, value); 1102 + atomic64_set(&next_event->count, value); 1102 1103 1103 - swap(counter->total_time_enabled, next_counter->total_time_enabled); 1104 - swap(counter->total_time_running, next_counter->total_time_running); 1104 + swap(event->total_time_enabled, next_event->total_time_enabled); 1105 + swap(event->total_time_running, next_event->total_time_running); 1105 1106 1106 1107 /* 1107 1108 * Since we swizzled the values, update the user visible data too. 
1108 1109 */ 1109 - perf_counter_update_userpage(counter); 1110 - perf_counter_update_userpage(next_counter); 1110 + perf_event_update_userpage(event); 1111 + perf_event_update_userpage(next_event); 1111 1112 } 1112 1113 1113 1114 #define list_next_entry(pos, member) \ 1114 1115 list_entry(pos->member.next, typeof(*pos), member) 1115 1116 1116 - static void perf_counter_sync_stat(struct perf_counter_context *ctx, 1117 - struct perf_counter_context *next_ctx) 1117 + static void perf_event_sync_stat(struct perf_event_context *ctx, 1118 + struct perf_event_context *next_ctx) 1118 1119 { 1119 - struct perf_counter *counter, *next_counter; 1120 + struct perf_event *event, *next_event; 1120 1121 1121 1122 if (!ctx->nr_stat) 1122 1123 return; 1123 1124 1124 - counter = list_first_entry(&ctx->event_list, 1125 - struct perf_counter, event_entry); 1125 + event = list_first_entry(&ctx->event_list, 1126 + struct perf_event, event_entry); 1126 1127 1127 - next_counter = list_first_entry(&next_ctx->event_list, 1128 - struct perf_counter, event_entry); 1128 + next_event = list_first_entry(&next_ctx->event_list, 1129 + struct perf_event, event_entry); 1129 1130 1130 - while (&counter->event_entry != &ctx->event_list && 1131 - &next_counter->event_entry != &next_ctx->event_list) { 1131 + while (&event->event_entry != &ctx->event_list && 1132 + &next_event->event_entry != &next_ctx->event_list) { 1132 1133 1133 - __perf_counter_sync_stat(counter, next_counter); 1134 + __perf_event_sync_stat(event, next_event); 1134 1135 1135 - counter = list_next_entry(counter, event_entry); 1136 - next_counter = list_next_entry(next_counter, event_entry); 1136 + event = list_next_entry(event, event_entry); 1137 + next_event = list_next_entry(next_event, event_entry); 1137 1138 } 1138 1139 } 1139 1140 1140 1141 /* 1141 - * Called from scheduler to remove the counters of the current task, 1142 + * Called from scheduler to remove the events of the current task, 1142 1143 * with interrupts disabled. 1143 1144 * 1144 - * We stop each counter and update the counter value in counter->count. 1145 + * We stop each event and update the event value in event->count. 1145 1146 * 1146 1147 * This does not protect us against NMI, but disable() 1147 - * sets the disabled bit in the control field of counter _before_ 1148 - * accessing the counter control register. If a NMI hits, then it will 1149 - * not restart the counter. 1148 + * sets the disabled bit in the control field of event _before_ 1149 + * accessing the event control register. If an NMI hits, then it will 1150 + * not restart the event. 
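[Editor's note — illustration only, not part of this commit. The sched-out path just below feeds perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, ...), which is directly observable from user space. A sketch; the blocking-sleep loop as a way to force switches is an assumption.]

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
    {
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
        struct perf_event_attr attr;
        long long switches = 0;
        int fd, i;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_SOFTWARE;
        attr.size = sizeof(attr);
        attr.config = PERF_COUNT_SW_CONTEXT_SWITCHES;

        fd = perf_event_open(&attr, 0, -1, -1, 0);
        if (fd < 0)
            return 1;
        for (i = 0; i < 5; i++)
            usleep(1000);       /* each blocking sleep forces a switch */
        read(fd, &switches, sizeof(switches));
        printf("context switches: %lld\n", switches);
        close(fd);
        return 0;
    }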
1150 1151 */ 1151 - void perf_counter_task_sched_out(struct task_struct *task, 1152 + void perf_event_task_sched_out(struct task_struct *task, 1152 1153 struct task_struct *next, int cpu) 1153 1154 { 1154 1155 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); 1155 - struct perf_counter_context *ctx = task->perf_counter_ctxp; 1156 - struct perf_counter_context *next_ctx; 1157 - struct perf_counter_context *parent; 1156 + struct perf_event_context *ctx = task->perf_event_ctxp; 1157 + struct perf_event_context *next_ctx; 1158 + struct perf_event_context *parent; 1158 1159 struct pt_regs *regs; 1159 1160 int do_switch = 1; 1160 1161 1161 1162 regs = task_pt_regs(task); 1162 - perf_swcounter_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0); 1163 + perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0); 1163 1164 1164 1165 if (likely(!ctx || !cpuctx->task_ctx)) 1165 1166 return; ··· 1168 1169 1169 1170 rcu_read_lock(); 1170 1171 parent = rcu_dereference(ctx->parent_ctx); 1171 - next_ctx = next->perf_counter_ctxp; 1172 + next_ctx = next->perf_event_ctxp; 1172 1173 if (parent && next_ctx && 1173 1174 rcu_dereference(next_ctx->parent_ctx) == parent) { 1174 1175 /* ··· 1185 1186 if (context_equiv(ctx, next_ctx)) { 1186 1187 /* 1187 1188 * XXX do we need a memory barrier of sorts 1188 - * wrt to rcu_dereference() of perf_counter_ctxp 1189 + * wrt to rcu_dereference() of perf_event_ctxp 1189 1190 */ 1190 - task->perf_counter_ctxp = next_ctx; 1191 - next->perf_counter_ctxp = ctx; 1191 + task->perf_event_ctxp = next_ctx; 1192 + next->perf_event_ctxp = ctx; 1192 1193 ctx->task = next; 1193 1194 next_ctx->task = task; 1194 1195 do_switch = 0; 1195 1196 1196 - perf_counter_sync_stat(ctx, next_ctx); 1197 + perf_event_sync_stat(ctx, next_ctx); 1197 1198 } 1198 1199 spin_unlock(&next_ctx->lock); 1199 1200 spin_unlock(&ctx->lock); ··· 1201 1202 rcu_read_unlock(); 1202 1203 1203 1204 if (do_switch) { 1204 - __perf_counter_sched_out(ctx, cpuctx); 1205 + __perf_event_sched_out(ctx, cpuctx); 1205 1206 cpuctx->task_ctx = NULL; 1206 1207 } 1207 1208 } ··· 1209 1210 /* 1210 1211 * Called with IRQs disabled 1211 1212 */ 1212 - static void __perf_counter_task_sched_out(struct perf_counter_context *ctx) 1213 + static void __perf_event_task_sched_out(struct perf_event_context *ctx) 1213 1214 { 1214 1215 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); 1215 1216 ··· 1219 1220 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) 1220 1221 return; 1221 1222 1222 - __perf_counter_sched_out(ctx, cpuctx); 1223 + __perf_event_sched_out(ctx, cpuctx); 1223 1224 cpuctx->task_ctx = NULL; 1224 1225 } 1225 1226 1226 1227 /* 1227 1228 * Called with IRQs disabled 1228 1229 */ 1229 - static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx) 1230 + static void perf_event_cpu_sched_out(struct perf_cpu_context *cpuctx) 1230 1231 { 1231 - __perf_counter_sched_out(&cpuctx->ctx, cpuctx); 1232 + __perf_event_sched_out(&cpuctx->ctx, cpuctx); 1232 1233 } 1233 1234 1234 1235 static void 1235 - __perf_counter_sched_in(struct perf_counter_context *ctx, 1236 + __perf_event_sched_in(struct perf_event_context *ctx, 1236 1237 struct perf_cpu_context *cpuctx, int cpu) 1237 1238 { 1238 - struct perf_counter *counter; 1239 + struct perf_event *event; 1239 1240 int can_add_hw = 1; 1240 1241 1241 1242 spin_lock(&ctx->lock); 1242 1243 ctx->is_active = 1; 1243 - if (likely(!ctx->nr_counters)) 1244 + if (likely(!ctx->nr_events)) 1244 1245 goto out; 1245 1246 1246 1247 ctx->timestamp = perf_clock(); ··· 1251 
1252 * First go through the list and put on any pinned groups 1252 1253 * in order to give them the best chance of going on. 1253 1254 */ 1254 - list_for_each_entry(counter, &ctx->counter_list, list_entry) { 1255 - if (counter->state <= PERF_COUNTER_STATE_OFF || 1256 - !counter->attr.pinned) 1255 + list_for_each_entry(event, &ctx->group_list, group_entry) { 1256 + if (event->state <= PERF_EVENT_STATE_OFF || 1257 + !event->attr.pinned) 1257 1258 continue; 1258 - if (counter->cpu != -1 && counter->cpu != cpu) 1259 + if (event->cpu != -1 && event->cpu != cpu) 1259 1260 continue; 1260 1261 1261 - if (counter != counter->group_leader) 1262 - counter_sched_in(counter, cpuctx, ctx, cpu); 1262 + if (event != event->group_leader) 1263 + event_sched_in(event, cpuctx, ctx, cpu); 1263 1264 else { 1264 - if (group_can_go_on(counter, cpuctx, 1)) 1265 - group_sched_in(counter, cpuctx, ctx, cpu); 1265 + if (group_can_go_on(event, cpuctx, 1)) 1266 + group_sched_in(event, cpuctx, ctx, cpu); 1266 1267 } 1267 1268 1268 1269 /* 1269 1270 * If this pinned group hasn't been scheduled, 1270 1271 * put it in error state. 1271 1272 */ 1272 - if (counter->state == PERF_COUNTER_STATE_INACTIVE) { 1273 - update_group_times(counter); 1274 - counter->state = PERF_COUNTER_STATE_ERROR; 1273 + if (event->state == PERF_EVENT_STATE_INACTIVE) { 1274 + update_group_times(event); 1275 + event->state = PERF_EVENT_STATE_ERROR; 1275 1276 } 1276 1277 } 1277 1278 1278 - list_for_each_entry(counter, &ctx->counter_list, list_entry) { 1279 + list_for_each_entry(event, &ctx->group_list, group_entry) { 1279 1280 /* 1280 - * Ignore counters in OFF or ERROR state, and 1281 - * ignore pinned counters since we did them already. 1281 + * Ignore events in OFF or ERROR state, and 1282 + * ignore pinned events since we did them already. 1282 1283 */ 1283 - if (counter->state <= PERF_COUNTER_STATE_OFF || 1284 - counter->attr.pinned) 1284 + if (event->state <= PERF_EVENT_STATE_OFF || 1285 + event->attr.pinned) 1285 1286 continue; 1286 1287 1287 1288 /* 1288 1289 * Listen to the 'cpu' scheduling filter constraint 1289 - * of counters: 1290 + * of events: 1290 1291 */ 1291 - if (counter->cpu != -1 && counter->cpu != cpu) 1292 + if (event->cpu != -1 && event->cpu != cpu) 1292 1293 continue; 1293 1294 1294 - if (counter != counter->group_leader) { 1295 - if (counter_sched_in(counter, cpuctx, ctx, cpu)) 1295 + if (event != event->group_leader) { 1296 + if (event_sched_in(event, cpuctx, ctx, cpu)) 1296 1297 can_add_hw = 0; 1297 1298 } else { 1298 - if (group_can_go_on(counter, cpuctx, can_add_hw)) { 1299 - if (group_sched_in(counter, cpuctx, ctx, cpu)) 1299 + if (group_can_go_on(event, cpuctx, can_add_hw)) { 1300 + if (group_sched_in(event, cpuctx, ctx, cpu)) 1300 1301 can_add_hw = 0; 1301 1302 } 1302 1303 } ··· 1307 1308 } 1308 1309 1309 1310 /* 1310 - * Called from scheduler to add the counters of the current task 1311 + * Called from scheduler to add the events of the current task 1311 1312 * with interrupts disabled. 1312 1313 * 1313 - * We restore the counter value and then enable it. 1314 + * We restore the event value and then enable it. 1314 1315 * 1315 1316 * This does not protect us against NMI, but enable() 1316 - * sets the enabled bit in the control field of counter _before_ 1317 - * accessing the counter control register. If a NMI hits, then it will 1318 - * keep the counter running. 1317 + * sets the enabled bit in the control field of event _before_ 1318 + * accessing the event control register. 
If an NMI hits, then it will 1319 + * keep the event running. 1319 1320 */ 1320 - void perf_counter_task_sched_in(struct task_struct *task, int cpu) 1321 + void perf_event_task_sched_in(struct task_struct *task, int cpu) 1321 1322 { 1322 1323 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); 1323 - struct perf_counter_context *ctx = task->perf_counter_ctxp; 1324 + struct perf_event_context *ctx = task->perf_event_ctxp; 1324 1325 1325 1326 if (likely(!ctx)) 1326 1327 return; 1327 1328 if (cpuctx->task_ctx == ctx) 1328 1329 return; 1329 - __perf_counter_sched_in(ctx, cpuctx, cpu); 1330 + __perf_event_sched_in(ctx, cpuctx, cpu); 1330 1331 cpuctx->task_ctx = ctx; 1331 1332 } 1332 1333 1333 - static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu) 1334 + static void perf_event_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu) 1334 1335 { 1335 - struct perf_counter_context *ctx = &cpuctx->ctx; 1336 + struct perf_event_context *ctx = &cpuctx->ctx; 1336 1337 1337 - __perf_counter_sched_in(ctx, cpuctx, cpu); 1338 + __perf_event_sched_in(ctx, cpuctx, cpu); 1338 1339 } 1339 1340 1340 1341 #define MAX_INTERRUPTS (~0ULL) 1341 1342 1342 - static void perf_log_throttle(struct perf_counter *counter, int enable); 1343 + static void perf_log_throttle(struct perf_event *event, int enable); 1343 1344 1344 - static void perf_adjust_period(struct perf_counter *counter, u64 events) 1345 + static void perf_adjust_period(struct perf_event *event, u64 events) 1345 1346 { 1346 - struct hw_perf_counter *hwc = &counter->hw; 1347 + struct hw_perf_event *hwc = &event->hw; 1347 1348 u64 period, sample_period; 1348 1349 s64 delta; 1349 1350 1350 1351 events *= hwc->sample_period; 1351 - period = div64_u64(events, counter->attr.sample_freq); 1352 + period = div64_u64(events, event->attr.sample_freq); 1352 1353 1353 1354 delta = (s64)(period - hwc->sample_period); 1354 1355 delta = (delta + 7) / 8; /* low pass filter */ ··· 1361 1362 hwc->sample_period = sample_period; 1362 1363 } 1363 1364 1364 - static void perf_ctx_adjust_freq(struct perf_counter_context *ctx) 1365 + static void perf_ctx_adjust_freq(struct perf_event_context *ctx) 1365 1366 { 1366 - struct perf_counter *counter; 1367 - struct hw_perf_counter *hwc; 1367 + struct perf_event *event; 1368 + struct hw_perf_event *hwc; 1368 1369 u64 interrupts, freq; 1369 1370 1370 1371 spin_lock(&ctx->lock); 1371 - list_for_each_entry(counter, &ctx->counter_list, list_entry) { 1372 - if (counter->state != PERF_COUNTER_STATE_ACTIVE) 1372 + list_for_each_entry(event, &ctx->group_list, group_entry) { 1373 + if (event->state != PERF_EVENT_STATE_ACTIVE) 1373 1374 continue; 1374 1375 1375 - hwc = &counter->hw; 1376 + hwc = &event->hw; 1376 1377 1377 1378 interrupts = hwc->interrupts; 1378 1379 hwc->interrupts = 0; 1379 1380 1380 1381 /* 1381 - * unthrottle counters on the tick 1382 + * unthrottle events on the tick 1382 1383 */ 1383 1384 if (interrupts == MAX_INTERRUPTS) { 1384 - perf_log_throttle(counter, 1); 1385 + perf_log_throttle(event, 1); 1385 - counter->pmu->unthrottle(counter); 1386 + event->pmu->unthrottle(event); 1386 - interrupts = 2*sysctl_perf_counter_sample_rate/HZ; 1387 + interrupts = 2*sysctl_perf_event_sample_rate/HZ; 1387 1388 } 1388 1389 1389 - if (!counter->attr.freq || !counter->attr.sample_freq) 1390 + if (!event->attr.freq || !event->attr.sample_freq) 1390 1391 continue; 1391 1392 1392 1393 /* 1393 1394 * if the specified freq < HZ then we need to skip ticks 1394 1395 */ 1395 - if (counter->attr.sample_freq < HZ)
{ 1396 - freq = counter->attr.sample_freq; 1396 + if (event->attr.sample_freq < HZ) { 1397 + freq = event->attr.sample_freq; 1397 1398 1398 1399 hwc->freq_count += freq; 1399 1400 hwc->freq_interrupts += interrupts; ··· 1407 1408 } else 1408 1409 freq = HZ; 1409 1410 1410 - perf_adjust_period(counter, freq * interrupts); 1411 + perf_adjust_period(event, freq * interrupts); 1411 1412 1412 1413 /* 1413 1414 * In order to avoid being stalled by an (accidental) huge ··· 1416 1417 */ 1417 1418 if (!interrupts) { 1418 1419 perf_disable(); 1419 - counter->pmu->disable(counter); 1420 + event->pmu->disable(event); 1420 1421 atomic64_set(&hwc->period_left, 0); 1421 - counter->pmu->enable(counter); 1422 + event->pmu->enable(event); 1422 1423 perf_enable(); 1423 1424 } 1424 1425 } ··· 1426 1427 } 1427 1428 1428 1429 /* 1429 - * Round-robin a context's counters: 1430 + * Round-robin a context's events: 1430 1431 */ 1431 - static void rotate_ctx(struct perf_counter_context *ctx) 1432 + static void rotate_ctx(struct perf_event_context *ctx) 1432 1433 { 1433 - struct perf_counter *counter; 1434 + struct perf_event *event; 1434 1435 1435 - if (!ctx->nr_counters) 1436 + if (!ctx->nr_events) 1436 1437 return; 1437 1438 1438 1439 spin_lock(&ctx->lock); 1439 1440 /* 1440 - * Rotate the first entry last (works just fine for group counters too): 1441 + * Rotate the first entry last (works just fine for group events too): 1441 1442 */ 1442 1443 perf_disable(); 1443 - list_for_each_entry(counter, &ctx->counter_list, list_entry) { 1444 - list_move_tail(&counter->list_entry, &ctx->counter_list); 1444 + list_for_each_entry(event, &ctx->group_list, group_entry) { 1445 + list_move_tail(&event->group_entry, &ctx->group_list); 1445 1446 break; 1446 1447 } 1447 1448 perf_enable(); ··· 1449 1450 spin_unlock(&ctx->lock); 1450 1451 } 1451 1452 1452 - void perf_counter_task_tick(struct task_struct *curr, int cpu) 1453 + void perf_event_task_tick(struct task_struct *curr, int cpu) 1453 1454 { 1454 1455 struct perf_cpu_context *cpuctx; 1455 - struct perf_counter_context *ctx; 1456 + struct perf_event_context *ctx; 1456 1457 1457 - if (!atomic_read(&nr_counters)) 1458 + if (!atomic_read(&nr_events)) 1458 1459 return; 1459 1460 1460 1461 cpuctx = &per_cpu(perf_cpu_context, cpu); 1461 - ctx = curr->perf_counter_ctxp; 1462 + ctx = curr->perf_event_ctxp; 1462 1463 1463 1464 perf_ctx_adjust_freq(&cpuctx->ctx); 1464 1465 if (ctx) 1465 1466 perf_ctx_adjust_freq(ctx); 1466 1467 1467 - perf_counter_cpu_sched_out(cpuctx); 1468 + perf_event_cpu_sched_out(cpuctx); 1468 1469 if (ctx) 1469 - __perf_counter_task_sched_out(ctx); 1470 + __perf_event_task_sched_out(ctx); 1470 1471 1471 1472 rotate_ctx(&cpuctx->ctx); 1472 1473 if (ctx) 1473 1474 rotate_ctx(ctx); 1474 1475 1475 - perf_counter_cpu_sched_in(cpuctx, cpu); 1476 + perf_event_cpu_sched_in(cpuctx, cpu); 1476 1477 if (ctx) 1477 - perf_counter_task_sched_in(curr, cpu); 1478 + perf_event_task_sched_in(curr, cpu); 1478 1479 } 1479 1480 1480 1481 /* 1481 - * Enable all of a task's counters that have been marked enable-on-exec. 1482 + * Enable all of a task's events that have been marked enable-on-exec. 1482 1483 * This expects task == current. 
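[Editor's note — illustration only, not part of this commit. perf_event_enable_on_exec() below fires when a task calls execve(); the usual user-space pattern is to open a disabled event on a forked child with attr.enable_on_exec set, so that only the exec'd image is measured. A sketch; the usleep() as a stand-in for a proper pipe handshake, and the /bin/true target, are assumptions.]

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <sys/wait.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
    {
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
        struct perf_event_attr attr;
        long long count = 0;
        pid_t child;
        int fd;

        child = fork();
        if (child == 0) {
            usleep(50000);      /* crude sync; real code would handshake */
            execl("/bin/true", "true", (char *)NULL);
            _exit(127);
        }

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_SOFTWARE;
        attr.size = sizeof(attr);
        attr.config = PERF_COUNT_SW_TASK_CLOCK;
        attr.disabled = 1;          /* stays off through fork()... */
        attr.enable_on_exec = 1;    /* ...and is switched on at execve() */

        fd = perf_event_open(&attr, child, -1, -1, 0);
        if (fd < 0)
            return 1;
        waitpid(child, NULL, 0);
        read(fd, &count, sizeof(count));    /* covers only the exec'd image */
        printf("child task clock (ns): %lld\n", count);
        close(fd);
        return 0;
    }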
1483 1484 */ 1484 - static void perf_counter_enable_on_exec(struct task_struct *task) 1485 + static void perf_event_enable_on_exec(struct task_struct *task) 1485 1486 { 1486 - struct perf_counter_context *ctx; 1487 - struct perf_counter *counter; 1487 + struct perf_event_context *ctx; 1488 + struct perf_event *event; 1488 1489 unsigned long flags; 1489 1490 int enabled = 0; 1490 1491 1491 1492 local_irq_save(flags); 1492 - ctx = task->perf_counter_ctxp; 1493 - if (!ctx || !ctx->nr_counters) 1493 + ctx = task->perf_event_ctxp; 1494 + if (!ctx || !ctx->nr_events) 1494 1495 goto out; 1495 1496 1496 - __perf_counter_task_sched_out(ctx); 1497 + __perf_event_task_sched_out(ctx); 1497 1498 1498 1499 spin_lock(&ctx->lock); 1499 1500 1500 - list_for_each_entry(counter, &ctx->counter_list, list_entry) { 1501 - if (!counter->attr.enable_on_exec) 1501 + list_for_each_entry(event, &ctx->group_list, group_entry) { 1502 + if (!event->attr.enable_on_exec) 1502 1503 continue; 1503 - counter->attr.enable_on_exec = 0; 1504 - if (counter->state >= PERF_COUNTER_STATE_INACTIVE) 1504 + event->attr.enable_on_exec = 0; 1505 + if (event->state >= PERF_EVENT_STATE_INACTIVE) 1505 1506 continue; 1506 - __perf_counter_mark_enabled(counter, ctx); 1507 + __perf_event_mark_enabled(event, ctx); 1507 1508 enabled = 1; 1508 1509 } 1509 1510 1510 1511 /* 1511 - * Unclone this context if we enabled any counter. 1512 + * Unclone this context if we enabled any event. 1512 1513 */ 1513 1514 if (enabled) 1514 1515 unclone_ctx(ctx); 1515 1516 1516 1517 spin_unlock(&ctx->lock); 1517 1518 1518 - perf_counter_task_sched_in(task, smp_processor_id()); 1519 + perf_event_task_sched_in(task, smp_processor_id()); 1519 1520 out: 1520 1521 local_irq_restore(flags); 1521 1522 } 1522 1523 1523 1524 /* 1524 - * Cross CPU call to read the hardware counter 1525 + * Cross CPU call to read the hardware event 1525 1526 */ 1526 - static void __perf_counter_read(void *info) 1527 + static void __perf_event_read(void *info) 1527 1528 { 1528 1529 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); 1529 - struct perf_counter *counter = info; 1530 - struct perf_counter_context *ctx = counter->ctx; 1530 + struct perf_event *event = info; 1531 + struct perf_event_context *ctx = event->ctx; 1531 1532 unsigned long flags; 1532 1533 1533 1534 /* 1534 1535 * If this is a task context, we need to check whether it is 1535 1536 * the current task context of this cpu. If not it has been 1536 1537 * scheduled out before the smp call arrived. In that case 1537 - * counter->count would have been updated to a recent sample 1538 - * when the counter was scheduled out. 1538 + * event->count would have been updated to a recent sample 1539 + * when the event was scheduled out. 
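[Editor's note — illustration only, not part of this commit. perf_event_read() below IPIs the CPU where the event is active, so a plain read() always returns an up-to-date count; paired with PERF_EVENT_IOC_RESET (backed by perf_event_reset() later in this file) it gives a simple delta sampler. A sketch; the page-fault event and the once-per-second cadence are assumptions.]

    #include <linux/perf_event.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
    {
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
        struct perf_event_attr attr;
        int fd, i;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_SOFTWARE;
        attr.size = sizeof(attr);
        attr.config = PERF_COUNT_SW_PAGE_FAULTS;

        fd = perf_event_open(&attr, 0, -1, -1, 0);
        if (fd < 0)
            return 1;
        for (i = 0; i < 3; i++) {
            long long delta = 0;
            sleep(1);
            read(fd, &delta, sizeof(delta));        /* perf_event_read() path */
            ioctl(fd, PERF_EVENT_IOC_RESET, 0);     /* perf_event_reset() path */
            printf("page faults in last second: %lld\n", delta);
        }
        close(fd);
        return 0;
    }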
1539 1540 */ 1540 1541 if (ctx->task && cpuctx->task_ctx != ctx) 1541 1542 return; ··· 1543 1544 local_irq_save(flags); 1544 1545 if (ctx->is_active) 1545 1546 update_context_time(ctx); 1546 - counter->pmu->read(counter); 1547 - update_counter_times(counter); 1547 + event->pmu->read(event); 1548 + update_event_times(event); 1548 1549 local_irq_restore(flags); 1549 1550 } 1550 1551 1551 - static u64 perf_counter_read(struct perf_counter *counter) 1552 + static u64 perf_event_read(struct perf_event *event) 1552 1553 { 1553 1554 /* 1554 - * If counter is enabled and currently active on a CPU, update the 1555 - * value in the counter structure: 1555 + * If event is enabled and currently active on a CPU, update the 1556 + * value in the event structure: 1556 1557 */ 1557 - if (counter->state == PERF_COUNTER_STATE_ACTIVE) { 1558 - smp_call_function_single(counter->oncpu, 1559 - __perf_counter_read, counter, 1); 1560 - } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) { 1561 - update_counter_times(counter); 1558 + if (event->state == PERF_EVENT_STATE_ACTIVE) { 1559 + smp_call_function_single(event->oncpu, 1560 + __perf_event_read, event, 1); 1561 + } else if (event->state == PERF_EVENT_STATE_INACTIVE) { 1562 + update_event_times(event); 1562 1563 } 1563 1564 1564 - return atomic64_read(&counter->count); 1565 + return atomic64_read(&event->count); 1565 1566 } 1566 1567 1567 1568 /* 1568 - * Initialize the perf_counter context in a task_struct: 1569 + * Initialize the perf_event context in a task_struct: 1569 1570 */ 1570 1571 static void 1571 - __perf_counter_init_context(struct perf_counter_context *ctx, 1572 + __perf_event_init_context(struct perf_event_context *ctx, 1572 1573 struct task_struct *task) 1573 1574 { 1574 1575 memset(ctx, 0, sizeof(*ctx)); 1575 1576 spin_lock_init(&ctx->lock); 1576 1577 mutex_init(&ctx->mutex); 1577 - INIT_LIST_HEAD(&ctx->counter_list); 1578 + INIT_LIST_HEAD(&ctx->group_list); 1578 1579 INIT_LIST_HEAD(&ctx->event_list); 1579 1580 atomic_set(&ctx->refcount, 1); 1580 1581 ctx->task = task; 1581 1582 } 1582 1583 1583 - static struct perf_counter_context *find_get_context(pid_t pid, int cpu) 1584 + static struct perf_event_context *find_get_context(pid_t pid, int cpu) 1584 1585 { 1585 - struct perf_counter_context *ctx; 1586 + struct perf_event_context *ctx; 1586 1587 struct perf_cpu_context *cpuctx; 1587 1588 struct task_struct *task; 1588 1589 unsigned long flags; 1589 1590 int err; 1590 1591 1591 1592 /* 1592 - * If cpu is not a wildcard then this is a percpu counter: 1593 + * If cpu is not a wildcard then this is a percpu event: 1593 1594 */ 1594 1595 if (cpu != -1) { 1595 - /* Must be root to operate on a CPU counter: */ 1596 + /* Must be root to operate on a CPU event: */ 1596 1597 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) 1597 1598 return ERR_PTR(-EACCES); 1598 1599 ··· 1600 1601 return ERR_PTR(-EINVAL); 1601 1602 1602 1603 /* 1603 - * We could be clever and allow to attach a counter to an 1604 + * We could be clever and allow attaching an event to an 1604 1605 * offline CPU and activate it when the CPU comes up, but 1605 1606 * that's for later. 1606 1607 */ ··· 1627 1628 return ERR_PTR(-ESRCH); 1628 1629 1629 1630 /* 1630 - * Can't attach counters to a dying task. 1631 + * Can't attach events to a dying task. 
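[Editor's note — illustration only, not part of this commit. find_get_context() above is what gives the syscall's (pid, cpu) pair its meaning: pid selects a task context, cpu == -1 is the wildcard, and a per-CPU context needs CAP_SYS_ADMIN when paranoia is enabled. A sketch of the combinations; the event choice is an assumption.]

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
    {
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
        struct perf_event_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_SOFTWARE;
        attr.size = sizeof(attr);
        attr.config = PERF_COUNT_SW_TASK_CLOCK;

        /* pid == 0, cpu == -1: the calling task, on whatever CPU it runs. */
        fd = perf_event_open(&attr, 0, -1, -1, 0);
        if (fd >= 0)
            close(fd);

        /* pid == -1, cpu == 0: everything on CPU 0; find_get_context()
         * demands CAP_SYS_ADMIN for this unless paranoia is relaxed. */
        fd = perf_event_open(&attr, -1, 0, -1, 0);
        if (fd < 0)
            printf("per-CPU open failed: %s\n", strerror(errno));
        else
            close(fd);
        return 0;
    }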
1631 1632 */ 1632 1633 err = -ESRCH; 1633 1634 if (task->flags & PF_EXITING) ··· 1646 1647 } 1647 1648 1648 1649 if (!ctx) { 1649 - ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL); 1650 + ctx = kmalloc(sizeof(struct perf_event_context), GFP_KERNEL); 1650 1651 err = -ENOMEM; 1651 1652 if (!ctx) 1652 1653 goto errout; 1653 - __perf_counter_init_context(ctx, task); 1654 + __perf_event_init_context(ctx, task); 1654 1655 get_ctx(ctx); 1655 - if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) { 1656 + if (cmpxchg(&task->perf_event_ctxp, NULL, ctx)) { 1656 1657 /* 1657 1658 * We raced with some other task; use 1658 1659 * the context they set. ··· 1671 1672 return ERR_PTR(err); 1672 1673 } 1673 1674 1674 - static void free_counter_rcu(struct rcu_head *head) 1675 + static void free_event_rcu(struct rcu_head *head) 1675 1676 { 1676 - struct perf_counter *counter; 1677 + struct perf_event *event; 1677 1678 1678 - counter = container_of(head, struct perf_counter, rcu_head); 1679 - if (counter->ns) 1680 - put_pid_ns(counter->ns); 1681 - kfree(counter); 1679 + event = container_of(head, struct perf_event, rcu_head); 1680 + if (event->ns) 1681 + put_pid_ns(event->ns); 1682 + kfree(event); 1682 1683 } 1683 1684 1684 - static void perf_pending_sync(struct perf_counter *counter); 1685 + static void perf_pending_sync(struct perf_event *event); 1685 1686 1686 - static void free_counter(struct perf_counter *counter) 1687 + static void free_event(struct perf_event *event) 1687 1688 { 1688 - perf_pending_sync(counter); 1689 + perf_pending_sync(event); 1689 1690 1690 - if (!counter->parent) { 1691 - atomic_dec(&nr_counters); 1692 - if (counter->attr.mmap) 1693 - atomic_dec(&nr_mmap_counters); 1694 - if (counter->attr.comm) 1695 - atomic_dec(&nr_comm_counters); 1696 - if (counter->attr.task) 1697 - atomic_dec(&nr_task_counters); 1691 + if (!event->parent) { 1692 + atomic_dec(&nr_events); 1693 + if (event->attr.mmap) 1694 + atomic_dec(&nr_mmap_events); 1695 + if (event->attr.comm) 1696 + atomic_dec(&nr_comm_events); 1697 + if (event->attr.task) 1698 + atomic_dec(&nr_task_events); 1698 1699 } 1699 1700 1700 - if (counter->output) { 1701 - fput(counter->output->filp); 1702 - counter->output = NULL; 1701 + if (event->output) { 1702 + fput(event->output->filp); 1703 + event->output = NULL; 1703 1704 } 1704 1705 1705 - if (counter->destroy) 1706 - counter->destroy(counter); 1706 + if (event->destroy) 1707 + event->destroy(event); 1707 1708 1708 - put_ctx(counter->ctx); 1709 - call_rcu(&counter->rcu_head, free_counter_rcu); 1709 + put_ctx(event->ctx); 1710 + call_rcu(&event->rcu_head, free_event_rcu); 1710 1711 } 1711 1712 1712 1713 /* ··· 1714 1715 */ 1715 1716 static int perf_release(struct inode *inode, struct file *file) 1716 1717 { 1717 - struct perf_counter *counter = file->private_data; 1718 - struct perf_counter_context *ctx = counter->ctx; 1718 + struct perf_event *event = file->private_data; 1719 + struct perf_event_context *ctx = event->ctx; 1719 1720 1720 1721 file->private_data = NULL; 1721 1722 1722 1723 WARN_ON_ONCE(ctx->parent_ctx); 1723 1724 mutex_lock(&ctx->mutex); 1724 - perf_counter_remove_from_context(counter); 1725 + perf_event_remove_from_context(event); 1725 1726 mutex_unlock(&ctx->mutex); 1726 1727 1727 - mutex_lock(&counter->owner->perf_counter_mutex); 1728 - list_del_init(&counter->owner_entry); 1729 - mutex_unlock(&counter->owner->perf_counter_mutex); 1730 - put_task_struct(counter->owner); 1728 + mutex_lock(&event->owner->perf_event_mutex); 1729 + 
list_del_init(&event->owner_entry); 1730 + mutex_unlock(&event->owner->perf_event_mutex); 1731 + put_task_struct(event->owner); 1731 1732 1732 - free_counter(counter); 1733 + free_event(event); 1733 1734 1734 1735 return 0; 1735 1736 } 1736 1737 1737 - static int perf_counter_read_size(struct perf_counter *counter) 1738 + static int perf_event_read_size(struct perf_event *event) 1738 1739 { 1739 1740 int entry = sizeof(u64); /* value */ 1740 1741 int size = 0; 1741 1742 int nr = 1; 1742 1743 1743 - if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 1744 + if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 1744 1745 size += sizeof(u64); 1745 1746 1746 - if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 1747 + if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 1747 1748 size += sizeof(u64); 1748 1749 1749 - if (counter->attr.read_format & PERF_FORMAT_ID) 1750 + if (event->attr.read_format & PERF_FORMAT_ID) 1750 1751 entry += sizeof(u64); 1751 1752 1752 - if (counter->attr.read_format & PERF_FORMAT_GROUP) { 1753 - nr += counter->group_leader->nr_siblings; 1753 + if (event->attr.read_format & PERF_FORMAT_GROUP) { 1754 + nr += event->group_leader->nr_siblings; 1754 1755 size += sizeof(u64); 1755 1756 } 1756 1757 ··· 1759 1760 return size; 1760 1761 } 1761 1762 1762 - static u64 perf_counter_read_value(struct perf_counter *counter) 1763 + static u64 perf_event_read_value(struct perf_event *event) 1763 1764 { 1764 - struct perf_counter *child; 1765 + struct perf_event *child; 1765 1766 u64 total = 0; 1766 1767 1767 - total += perf_counter_read(counter); 1768 - list_for_each_entry(child, &counter->child_list, child_list) 1769 - total += perf_counter_read(child); 1768 + total += perf_event_read(event); 1769 + list_for_each_entry(child, &event->child_list, child_list) 1770 + total += perf_event_read(child); 1770 1771 1771 1772 return total; 1772 1773 } 1773 1774 1774 - static int perf_counter_read_entry(struct perf_counter *counter, 1775 + static int perf_event_read_entry(struct perf_event *event, 1775 1776 u64 read_format, char __user *buf) 1776 1777 { 1777 1778 int n = 0, count = 0; 1778 1779 u64 values[2]; 1779 1780 1780 - values[n++] = perf_counter_read_value(counter); 1781 + values[n++] = perf_event_read_value(event); 1781 1782 if (read_format & PERF_FORMAT_ID) 1782 - values[n++] = primary_counter_id(counter); 1783 + values[n++] = primary_event_id(event); 1783 1784 1784 1785 count = n * sizeof(u64); 1785 1786 ··· 1789 1790 return count; 1790 1791 } 1791 1792 1792 - static int perf_counter_read_group(struct perf_counter *counter, 1793 + static int perf_event_read_group(struct perf_event *event, 1793 1794 u64 read_format, char __user *buf) 1794 1795 { 1795 - struct perf_counter *leader = counter->group_leader, *sub; 1796 + struct perf_event *leader = event->group_leader, *sub; 1796 1797 int n = 0, size = 0, err = -EFAULT; 1797 1798 u64 values[3]; 1798 1799 ··· 1811 1812 if (copy_to_user(buf, values, size)) 1812 1813 return -EFAULT; 1813 1814 1814 - err = perf_counter_read_entry(leader, read_format, buf + size); 1815 + err = perf_event_read_entry(leader, read_format, buf + size); 1815 1816 if (err < 0) 1816 1817 return err; 1817 1818 1818 1819 size += err; 1819 1820 1820 - list_for_each_entry(sub, &leader->sibling_list, list_entry) { 1821 - err = perf_counter_read_entry(sub, read_format, 1821 + list_for_each_entry(sub, &leader->sibling_list, group_entry) { 1822 + err = perf_event_read_entry(sub, read_format, 1822 1823 buf + size); 1823 1824 if (err 
< 0) 1824 1825 return err; ··· 1829 1830 return size; 1830 1831 } 1831 1832 1832 - static int perf_counter_read_one(struct perf_counter *counter, 1833 + static int perf_event_read_one(struct perf_event *event, 1833 1834 u64 read_format, char __user *buf) 1834 1835 { 1835 1836 u64 values[4]; 1836 1837 int n = 0; 1837 1838 1838 - values[n++] = perf_counter_read_value(counter); 1839 + values[n++] = perf_event_read_value(event); 1839 1840 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 1840 - values[n++] = counter->total_time_enabled + 1841 - atomic64_read(&counter->child_total_time_enabled); 1841 + values[n++] = event->total_time_enabled + 1842 + atomic64_read(&event->child_total_time_enabled); 1842 1843 } 1843 1844 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 1844 - values[n++] = counter->total_time_running + 1845 - atomic64_read(&counter->child_total_time_running); 1845 + values[n++] = event->total_time_running + 1846 + atomic64_read(&event->child_total_time_running); 1846 1847 } 1847 1848 if (read_format & PERF_FORMAT_ID) 1848 - values[n++] = primary_counter_id(counter); 1849 + values[n++] = primary_event_id(event); 1849 1850 1850 1851 if (copy_to_user(buf, values, n * sizeof(u64))) 1851 1852 return -EFAULT; ··· 1854 1855 } 1855 1856 1856 1857 /* 1857 - * Read the performance counter - simple non blocking version for now 1858 + * Read the performance event - simple non-blocking version for now 1858 1859 */ 1859 1860 static ssize_t 1860 - perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count) 1861 + perf_read_hw(struct perf_event *event, char __user *buf, size_t count) 1861 1862 { 1862 - u64 read_format = counter->attr.read_format; 1863 + u64 read_format = event->attr.read_format; 1863 1864 int ret; 1864 1865 1865 1866 /* 1866 - * Return end-of-file for a read on a counter that is in 1867 + * Return end-of-file for a read on an event that is in 1867 1868 * error state (i.e. because it was pinned but it couldn't be 1868 1869 * scheduled on to the CPU at some point). 
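[Editor's note — illustration only, not part of this commit. perf_event_read_group() above writes nr first, then one value (plus optional id) per group member, leader first, as sized by perf_event_read_size(). A user-space reader for that layout; the 16-event buffer cap and the two chosen events are assumptions.]

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
    {
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    /*
     * Layout written by perf_event_read_group() for
     * read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID:
     *     u64 nr;                          // 1 + nr_siblings
     *     { u64 value; u64 id; }[nr];      // leader first, then siblings
     */
    struct group_ent { uint64_t value, id; };

    int main(void)
    {
        struct perf_event_attr attr;
        uint64_t buf[1 + 2 * 16];   /* room for 16 events -- an assumption */
        struct group_ent *ent = (struct group_ent *)&buf[1];
        int leader, sibling;
        uint64_t i;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_SOFTWARE;
        attr.size = sizeof(attr);
        attr.config = PERF_COUNT_SW_TASK_CLOCK;
        attr.read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID;
        leader = perf_event_open(&attr, 0, -1, -1, 0);

        attr.config = PERF_COUNT_SW_PAGE_FAULTS;
        sibling = perf_event_open(&attr, 0, -1, leader, 0);

        /* ... workload ... */

        if (read(leader, buf, sizeof(buf)) > 0)
            for (i = 0; i < buf[0]; i++)
                printf("id %llu -> %llu\n",
                       (unsigned long long)ent[i].id,
                       (unsigned long long)ent[i].value);
        close(sibling);
        close(leader);
        return 0;
    }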
1869 1870 */ 1870 - if (counter->state == PERF_COUNTER_STATE_ERROR) 1871 + if (event->state == PERF_EVENT_STATE_ERROR) 1871 1872 return 0; 1872 1873 1873 - if (count < perf_counter_read_size(counter)) 1874 + if (count < perf_event_read_size(event)) 1874 1875 return -ENOSPC; 1875 1876 1876 - WARN_ON_ONCE(counter->ctx->parent_ctx); 1877 - mutex_lock(&counter->child_mutex); 1877 + WARN_ON_ONCE(event->ctx->parent_ctx); 1878 + mutex_lock(&event->child_mutex); 1878 1879 if (read_format & PERF_FORMAT_GROUP) 1879 - ret = perf_counter_read_group(counter, read_format, buf); 1880 + ret = perf_event_read_group(event, read_format, buf); 1880 1881 else 1881 - ret = perf_counter_read_one(counter, read_format, buf); 1882 - mutex_unlock(&counter->child_mutex); 1882 + ret = perf_event_read_one(event, read_format, buf); 1883 + mutex_unlock(&event->child_mutex); 1883 1884 1884 1885 return ret; 1885 1886 } ··· 1887 1888 static ssize_t 1888 1889 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) 1889 1890 { 1890 - struct perf_counter *counter = file->private_data; 1891 + struct perf_event *event = file->private_data; 1891 1892 1892 - return perf_read_hw(counter, buf, count); 1893 + return perf_read_hw(event, buf, count); 1893 1894 } 1894 1895 1895 1896 static unsigned int perf_poll(struct file *file, poll_table *wait) 1896 1897 { 1897 - struct perf_counter *counter = file->private_data; 1898 + struct perf_event *event = file->private_data; 1898 1899 struct perf_mmap_data *data; 1899 1900 unsigned int events = POLL_HUP; 1900 1901 1901 1902 rcu_read_lock(); 1902 - data = rcu_dereference(counter->data); 1903 + data = rcu_dereference(event->data); 1903 1904 if (data) 1904 1905 events = atomic_xchg(&data->poll, 0); 1905 1906 rcu_read_unlock(); 1906 1907 1907 - poll_wait(file, &counter->waitq, wait); 1908 + poll_wait(file, &event->waitq, wait); 1908 1909 1909 1910 return events; 1910 1911 } 1911 1912 1912 - static void perf_counter_reset(struct perf_counter *counter) 1913 + static void perf_event_reset(struct perf_event *event) 1913 1914 { 1914 - (void)perf_counter_read(counter); 1915 - atomic64_set(&counter->count, 0); 1916 - perf_counter_update_userpage(counter); 1915 + (void)perf_event_read(event); 1916 + atomic64_set(&event->count, 0); 1917 + perf_event_update_userpage(event); 1917 1918 } 1918 1919 1919 1920 /* 1920 - * Holding the top-level counter's child_mutex means that any 1921 - * descendant process that has inherited this counter will block 1922 - * in sync_child_counter if it goes to exit, thus satisfying the 1923 - * task existence requirements of perf_counter_enable/disable. 1921 + * Holding the top-level event's child_mutex means that any 1922 + * descendant process that has inherited this event will block 1923 + * in sync_child_event if it goes to exit, thus satisfying the 1924 + * task existence requirements of perf_event_enable/disable. 
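[Editor's note — illustration only, not part of this commit. The child_mutex/child_list machinery referenced above backs attr.inherit: forked children get clones of the event, and perf_event_read_value() (earlier in this hunk) folds their counts back into the parent's fd; perf_event_refresh() rejects such inherited events with -EINVAL. A sketch; the busy child and timings are assumptions.]

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <sys/wait.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
    {
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
        struct perf_event_attr attr;
        long long total = 0;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_SOFTWARE;
        attr.size = sizeof(attr);
        attr.config = PERF_COUNT_SW_TASK_CLOCK;
        attr.inherit = 1;   /* children forked from now on are counted too */

        fd = perf_event_open(&attr, 0, -1, -1, 0);
        if (fd < 0)
            return 1;

        if (fork() == 0) {  /* child inherits a clone of the event */
            usleep(10000);
            _exit(0);
        }
        wait(NULL);

        /* sync_child_event()/perf_event_read_value() fold the exited
         * child's count back into this fd. */
        read(fd, &total, sizeof(total));
        printf("parent+children task clock (ns): %lld\n", total);
        close(fd);
        return 0;
    }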
1924 1925 */ 1925 - static void perf_counter_for_each_child(struct perf_counter *counter, 1926 - void (*func)(struct perf_counter *)) 1926 + static void perf_event_for_each_child(struct perf_event *event, 1927 + void (*func)(struct perf_event *)) 1927 1928 { 1928 - struct perf_counter *child; 1929 + struct perf_event *child; 1929 1930 1930 - WARN_ON_ONCE(counter->ctx->parent_ctx); 1931 - mutex_lock(&counter->child_mutex); 1932 - func(counter); 1933 - list_for_each_entry(child, &counter->child_list, child_list) 1931 + WARN_ON_ONCE(event->ctx->parent_ctx); 1932 + mutex_lock(&event->child_mutex); 1933 + func(event); 1934 + list_for_each_entry(child, &event->child_list, child_list) 1934 1935 func(child); 1935 - mutex_unlock(&counter->child_mutex); 1936 + mutex_unlock(&event->child_mutex); 1936 1937 } 1937 1938 1938 - static void perf_counter_for_each(struct perf_counter *counter, 1939 - void (*func)(struct perf_counter *)) 1939 + static void perf_event_for_each(struct perf_event *event, 1940 + void (*func)(struct perf_event *)) 1940 1941 { 1941 - struct perf_counter_context *ctx = counter->ctx; 1942 - struct perf_counter *sibling; 1942 + struct perf_event_context *ctx = event->ctx; 1943 + struct perf_event *sibling; 1943 1944 1944 1945 WARN_ON_ONCE(ctx->parent_ctx); 1945 1946 mutex_lock(&ctx->mutex); 1946 - counter = counter->group_leader; 1947 + event = event->group_leader; 1947 1948 1948 - perf_counter_for_each_child(counter, func); 1949 - func(counter); 1950 - list_for_each_entry(sibling, &counter->sibling_list, list_entry) 1951 - perf_counter_for_each_child(counter, func); 1949 + perf_event_for_each_child(event, func); 1950 + func(event); 1951 + list_for_each_entry(sibling, &event->sibling_list, group_entry) 1952 + perf_event_for_each_child(event, func); 1952 1953 mutex_unlock(&ctx->mutex); 1953 1954 } 1954 1955 1955 - static int perf_counter_period(struct perf_counter *counter, u64 __user *arg) 1956 + static int perf_event_period(struct perf_event *event, u64 __user *arg) 1956 1957 { 1957 - struct perf_counter_context *ctx = counter->ctx; 1958 + struct perf_event_context *ctx = event->ctx; 1958 1959 unsigned long size; 1959 1960 int ret = 0; 1960 1961 u64 value; 1961 1962 1962 - if (!counter->attr.sample_period) 1963 + if (!event->attr.sample_period) 1963 1964 return -EINVAL; 1964 1965 1965 1966 size = copy_from_user(&value, arg, sizeof(value)); ··· 1970 1971 return -EINVAL; 1971 1972 1972 1973 spin_lock_irq(&ctx->lock); 1973 - if (counter->attr.freq) { 1974 - if (value > sysctl_perf_counter_sample_rate) { 1974 + if (event->attr.freq) { 1975 + if (value > sysctl_perf_event_sample_rate) { 1975 1976 ret = -EINVAL; 1976 1977 goto unlock; 1977 1978 } 1978 1979 1979 - counter->attr.sample_freq = value; 1980 + event->attr.sample_freq = value; 1980 1981 } else { 1981 - counter->attr.sample_period = value; 1982 - counter->hw.sample_period = value; 1982 + event->attr.sample_period = value; 1983 + event->hw.sample_period = value; 1983 1984 } 1984 1985 unlock: 1985 1986 spin_unlock_irq(&ctx->lock); ··· 1987 1988 return ret; 1988 1989 } 1989 1990 1990 - int perf_counter_set_output(struct perf_counter *counter, int output_fd); 1991 + int perf_event_set_output(struct perf_event *event, int output_fd); 1991 1992 1992 1993 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 1993 1994 { 1994 - struct perf_counter *counter = file->private_data; 1995 - void (*func)(struct perf_counter *); 1995 + struct perf_event *event = file->private_data; 1996 + void (*func)(struct perf_event 
*); 1996 1997 u32 flags = arg; 1997 1998 1998 1999 switch (cmd) { 1999 - case PERF_COUNTER_IOC_ENABLE: 2000 - func = perf_counter_enable; 2000 + case PERF_EVENT_IOC_ENABLE: 2001 + func = perf_event_enable; 2001 2002 break; 2002 - case PERF_COUNTER_IOC_DISABLE: 2003 - func = perf_counter_disable; 2003 + case PERF_EVENT_IOC_DISABLE: 2004 + func = perf_event_disable; 2004 2005 break; 2005 - case PERF_COUNTER_IOC_RESET: 2006 - func = perf_counter_reset; 2006 + case PERF_EVENT_IOC_RESET: 2007 + func = perf_event_reset; 2007 2008 break; 2008 2009 2009 - case PERF_COUNTER_IOC_REFRESH: 2010 - return perf_counter_refresh(counter, arg); 2010 + case PERF_EVENT_IOC_REFRESH: 2011 + return perf_event_refresh(event, arg); 2011 2012 2012 - case PERF_COUNTER_IOC_PERIOD: 2013 - return perf_counter_period(counter, (u64 __user *)arg); 2013 + case PERF_EVENT_IOC_PERIOD: 2014 + return perf_event_period(event, (u64 __user *)arg); 2014 2015 2015 - case PERF_COUNTER_IOC_SET_OUTPUT: 2016 - return perf_counter_set_output(counter, arg); 2016 + case PERF_EVENT_IOC_SET_OUTPUT: 2017 + return perf_event_set_output(event, arg); 2017 2018 2018 2019 default: 2019 2020 return -ENOTTY; 2020 2021 } 2021 2022 2022 2023 if (flags & PERF_IOC_FLAG_GROUP) 2023 - perf_counter_for_each(counter, func); 2024 + perf_event_for_each(event, func); 2024 2025 else 2025 - perf_counter_for_each_child(counter, func); 2026 + perf_event_for_each_child(event, func); 2026 2027 2027 2028 return 0; 2028 2029 } 2029 2030 2030 - int perf_counter_task_enable(void) 2031 + int perf_event_task_enable(void) 2031 2032 { 2032 - struct perf_counter *counter; 2033 + struct perf_event *event; 2033 2034 2034 - mutex_lock(&current->perf_counter_mutex); 2035 - list_for_each_entry(counter, &current->perf_counter_list, owner_entry) 2036 - perf_counter_for_each_child(counter, perf_counter_enable); 2037 - mutex_unlock(&current->perf_counter_mutex); 2035 + mutex_lock(&current->perf_event_mutex); 2036 + list_for_each_entry(event, &current->perf_event_list, owner_entry) 2037 + perf_event_for_each_child(event, perf_event_enable); 2038 + mutex_unlock(&current->perf_event_mutex); 2038 2039 2039 2040 return 0; 2040 2041 } 2041 2042 2042 - int perf_counter_task_disable(void) 2043 + int perf_event_task_disable(void) 2043 2044 { 2044 - struct perf_counter *counter; 2045 + struct perf_event *event; 2045 2046 2046 - mutex_lock(&current->perf_counter_mutex); 2047 - list_for_each_entry(counter, &current->perf_counter_list, owner_entry) 2048 - perf_counter_for_each_child(counter, perf_counter_disable); 2049 - mutex_unlock(&current->perf_counter_mutex); 2047 + mutex_lock(&current->perf_event_mutex); 2048 + list_for_each_entry(event, &current->perf_event_list, owner_entry) 2049 + perf_event_for_each_child(event, perf_event_disable); 2050 + mutex_unlock(&current->perf_event_mutex); 2050 2051 2051 2052 return 0; 2052 2053 } 2053 2054 2054 - #ifndef PERF_COUNTER_INDEX_OFFSET 2055 - # define PERF_COUNTER_INDEX_OFFSET 0 2055 + #ifndef PERF_EVENT_INDEX_OFFSET 2056 + # define PERF_EVENT_INDEX_OFFSET 0 2056 2057 #endif 2057 2058 2058 - static int perf_counter_index(struct perf_counter *counter) 2059 + static int perf_event_index(struct perf_event *event) 2059 2060 { 2060 - if (counter->state != PERF_COUNTER_STATE_ACTIVE) 2061 + if (event->state != PERF_EVENT_STATE_ACTIVE) 2061 2062 return 0; 2062 2063 2063 - return counter->hw.idx + 1 - PERF_COUNTER_INDEX_OFFSET; 2064 + return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET; 2064 2065 } 2065 2066 2066 2067 /* ··· 2068 2069 * the seqlock logic goes 
bad. We can not serialize this because the arch 2069 2070 * code calls this from NMI context. 2070 2071 */ 2071 - void perf_counter_update_userpage(struct perf_counter *counter) 2072 + void perf_event_update_userpage(struct perf_event *event) 2072 2073 { 2073 - struct perf_counter_mmap_page *userpg; 2074 + struct perf_event_mmap_page *userpg; 2074 2075 struct perf_mmap_data *data; 2075 2076 2076 2077 rcu_read_lock(); 2077 - data = rcu_dereference(counter->data); 2078 + data = rcu_dereference(event->data); 2078 2079 if (!data) 2079 2080 goto unlock; 2080 2081 ··· 2087 2088 preempt_disable(); 2088 2089 ++userpg->lock; 2089 2090 barrier(); 2090 - userpg->index = perf_counter_index(counter); 2091 - userpg->offset = atomic64_read(&counter->count); 2092 - if (counter->state == PERF_COUNTER_STATE_ACTIVE) 2093 - userpg->offset -= atomic64_read(&counter->hw.prev_count); 2091 + userpg->index = perf_event_index(event); 2092 + userpg->offset = atomic64_read(&event->count); 2093 + if (event->state == PERF_EVENT_STATE_ACTIVE) 2094 + userpg->offset -= atomic64_read(&event->hw.prev_count); 2094 2095 2095 - userpg->time_enabled = counter->total_time_enabled + 2096 - atomic64_read(&counter->child_total_time_enabled); 2096 + userpg->time_enabled = event->total_time_enabled + 2097 + atomic64_read(&event->child_total_time_enabled); 2097 2098 2098 - userpg->time_running = counter->total_time_running + 2099 - atomic64_read(&counter->child_total_time_running); 2099 + userpg->time_running = event->total_time_running + 2100 + atomic64_read(&event->child_total_time_running); 2100 2101 2101 2102 barrier(); 2102 2103 ++userpg->lock; ··· 2107 2108 2108 2109 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 2109 2110 { 2110 - struct perf_counter *counter = vma->vm_file->private_data; 2111 + struct perf_event *event = vma->vm_file->private_data; 2111 2112 struct perf_mmap_data *data; 2112 2113 int ret = VM_FAULT_SIGBUS; 2113 2114 ··· 2118 2119 } 2119 2120 2120 2121 rcu_read_lock(); 2121 - data = rcu_dereference(counter->data); 2122 + data = rcu_dereference(event->data); 2122 2123 if (!data) 2123 2124 goto unlock; 2124 2125 ··· 2147 2148 return ret; 2148 2149 } 2149 2150 2150 - static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages) 2151 + static int perf_mmap_data_alloc(struct perf_event *event, int nr_pages) 2151 2152 { 2152 2153 struct perf_mmap_data *data; 2153 2154 unsigned long size; 2154 2155 int i; 2155 2156 2156 - WARN_ON(atomic_read(&counter->mmap_count)); 2157 + WARN_ON(atomic_read(&event->mmap_count)); 2157 2158 2158 2159 size = sizeof(struct perf_mmap_data); 2159 2160 size += nr_pages * sizeof(void *); ··· 2175 2176 data->nr_pages = nr_pages; 2176 2177 atomic_set(&data->lock, -1); 2177 2178 2178 - if (counter->attr.watermark) { 2179 + if (event->attr.watermark) { 2179 2180 data->watermark = min_t(long, PAGE_SIZE * nr_pages, 2180 - counter->attr.wakeup_watermark); 2181 + event->attr.wakeup_watermark); 2181 2182 } 2182 2183 if (!data->watermark) 2183 2184 data->watermark = max(PAGE_SIZE, PAGE_SIZE * nr_pages / 4); 2184 2185 2185 - rcu_assign_pointer(counter->data, data); 2186 + rcu_assign_pointer(event->data, data); 2186 2187 2187 2188 return 0; 2188 2189 ··· 2221 2222 kfree(data); 2222 2223 } 2223 2224 2224 - static void perf_mmap_data_free(struct perf_counter *counter) 2225 + static void perf_mmap_data_free(struct perf_event *event) 2225 2226 { 2226 - struct perf_mmap_data *data = counter->data; 2227 + struct perf_mmap_data *data = event->data; 2227 2228 2228 - 
WARN_ON(atomic_read(&counter->mmap_count)); 2229 + WARN_ON(atomic_read(&event->mmap_count)); 2229 2230 2230 - rcu_assign_pointer(counter->data, NULL); 2231 + rcu_assign_pointer(event->data, NULL); 2231 2232 call_rcu(&data->rcu_head, __perf_mmap_data_free); 2232 2233 } 2233 2234 2234 2235 static void perf_mmap_open(struct vm_area_struct *vma) 2235 2236 { 2236 - struct perf_counter *counter = vma->vm_file->private_data; 2237 + struct perf_event *event = vma->vm_file->private_data; 2237 2238 2238 - atomic_inc(&counter->mmap_count); 2239 + atomic_inc(&event->mmap_count); 2239 2240 } 2240 2241 2241 2242 static void perf_mmap_close(struct vm_area_struct *vma) 2242 2243 { 2243 - struct perf_counter *counter = vma->vm_file->private_data; 2244 + struct perf_event *event = vma->vm_file->private_data; 2244 2245 2245 - WARN_ON_ONCE(counter->ctx->parent_ctx); 2246 - if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) { 2246 + WARN_ON_ONCE(event->ctx->parent_ctx); 2247 + if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) { 2247 2248 struct user_struct *user = current_user(); 2248 2249 2249 - atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm); 2250 - vma->vm_mm->locked_vm -= counter->data->nr_locked; 2251 - perf_mmap_data_free(counter); 2252 - mutex_unlock(&counter->mmap_mutex); 2250 + atomic_long_sub(event->data->nr_pages + 1, &user->locked_vm); 2251 + vma->vm_mm->locked_vm -= event->data->nr_locked; 2252 + perf_mmap_data_free(event); 2253 + mutex_unlock(&event->mmap_mutex); 2253 2254 } 2254 2255 } 2255 2256 ··· 2262 2263 2263 2264 static int perf_mmap(struct file *file, struct vm_area_struct *vma) 2264 2265 { 2265 - struct perf_counter *counter = file->private_data; 2266 + struct perf_event *event = file->private_data; 2266 2267 unsigned long user_locked, user_lock_limit; 2267 2268 struct user_struct *user = current_user(); 2268 2269 unsigned long locked, lock_limit; ··· 2290 2291 if (vma->vm_pgoff != 0) 2291 2292 return -EINVAL; 2292 2293 2293 - WARN_ON_ONCE(counter->ctx->parent_ctx); 2294 - mutex_lock(&counter->mmap_mutex); 2295 - if (counter->output) { 2294 + WARN_ON_ONCE(event->ctx->parent_ctx); 2295 + mutex_lock(&event->mmap_mutex); 2296 + if (event->output) { 2296 2297 ret = -EINVAL; 2297 2298 goto unlock; 2298 2299 } 2299 2300 2300 - if (atomic_inc_not_zero(&counter->mmap_count)) { 2301 - if (nr_pages != counter->data->nr_pages) 2301 + if (atomic_inc_not_zero(&event->mmap_count)) { 2302 + if (nr_pages != event->data->nr_pages) 2302 2303 ret = -EINVAL; 2303 2304 goto unlock; 2304 2305 } 2305 2306 2306 2307 user_extra = nr_pages + 1; 2307 - user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10); 2308 + user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10); 2308 2309 2309 2310 /* 2310 2311 * Increase the limit linearly with more CPUs: ··· 2327 2328 goto unlock; 2328 2329 } 2329 2330 2330 - WARN_ON(counter->data); 2331 - ret = perf_mmap_data_alloc(counter, nr_pages); 2331 + WARN_ON(event->data); 2332 + ret = perf_mmap_data_alloc(event, nr_pages); 2332 2333 if (ret) 2333 2334 goto unlock; 2334 2335 2335 - atomic_set(&counter->mmap_count, 1); 2336 + atomic_set(&event->mmap_count, 1); 2336 2337 atomic_long_add(user_extra, &user->locked_vm); 2337 2338 vma->vm_mm->locked_vm += extra; 2338 - counter->data->nr_locked = extra; 2339 + event->data->nr_locked = extra; 2339 2340 if (vma->vm_flags & VM_WRITE) 2340 - counter->data->writable = 1; 2341 + event->data->writable = 1; 2341 2342 2342 2343 unlock: 2343 - 
mutex_unlock(&counter->mmap_mutex); 2344 + mutex_unlock(&event->mmap_mutex); 2344 2345 2345 2346 vma->vm_flags |= VM_RESERVED; 2346 2347 vma->vm_ops = &perf_mmap_vmops; ··· 2351 2352 static int perf_fasync(int fd, struct file *filp, int on) 2352 2353 { 2353 2354 struct inode *inode = filp->f_path.dentry->d_inode; 2354 - struct perf_counter *counter = filp->private_data; 2355 + struct perf_event *event = filp->private_data; 2355 2356 int retval; 2356 2357 2357 2358 mutex_lock(&inode->i_mutex); 2358 - retval = fasync_helper(fd, filp, on, &counter->fasync); 2359 + retval = fasync_helper(fd, filp, on, &event->fasync); 2359 2360 mutex_unlock(&inode->i_mutex); 2360 2361 2361 2362 if (retval < 0) ··· 2375 2376 }; 2376 2377 2377 2378 /* 2378 - * Perf counter wakeup 2379 + * Perf event wakeup 2379 2380 * 2380 2381 * If there's data, ensure we set the poll() state and publish everything 2381 2382 * to user-space before waking everybody up. 2382 2383 */ 2383 2384 2384 - void perf_counter_wakeup(struct perf_counter *counter) 2385 + void perf_event_wakeup(struct perf_event *event) 2385 2386 { 2386 - wake_up_all(&counter->waitq); 2387 + wake_up_all(&event->waitq); 2387 2388 2388 - if (counter->pending_kill) { 2389 - kill_fasync(&counter->fasync, SIGIO, counter->pending_kill); 2390 - counter->pending_kill = 0; 2389 + if (event->pending_kill) { 2390 + kill_fasync(&event->fasync, SIGIO, event->pending_kill); 2391 + event->pending_kill = 0; 2391 2392 } 2392 2393 } 2393 2394 ··· 2400 2401 * single linked list and use cmpxchg() to add entries lockless. 2401 2402 */ 2402 2403 2403 - static void perf_pending_counter(struct perf_pending_entry *entry) 2404 + static void perf_pending_event(struct perf_pending_entry *entry) 2404 2405 { 2405 - struct perf_counter *counter = container_of(entry, 2406 - struct perf_counter, pending); 2406 + struct perf_event *event = container_of(entry, 2407 + struct perf_event, pending); 2407 2408 2408 - if (counter->pending_disable) { 2409 - counter->pending_disable = 0; 2410 - __perf_counter_disable(counter); 2409 + if (event->pending_disable) { 2410 + event->pending_disable = 0; 2411 + __perf_event_disable(event); 2411 2412 } 2412 2413 2413 - if (counter->pending_wakeup) { 2414 - counter->pending_wakeup = 0; 2415 - perf_counter_wakeup(counter); 2414 + if (event->pending_wakeup) { 2415 + event->pending_wakeup = 0; 2416 + perf_event_wakeup(event); 2416 2417 } 2417 2418 } 2418 2419 ··· 2438 2439 entry->next = *head; 2439 2440 } while (cmpxchg(head, entry->next, entry) != entry->next); 2440 2441 2441 - set_perf_counter_pending(); 2442 + set_perf_event_pending(); 2442 2443 2443 2444 put_cpu_var(perf_pending_head); 2444 2445 } ··· 2471 2472 return nr; 2472 2473 } 2473 2474 2474 - static inline int perf_not_pending(struct perf_counter *counter) 2475 + static inline int perf_not_pending(struct perf_event *event) 2475 2476 { 2476 2477 /* 2477 2478 * If we flush on whatever cpu we run, there is a chance we don't ··· 2486 2487 * so that we do not miss the wakeup. 
-- see perf_pending_handle() 2487 2488 */ 2488 2489 smp_rmb(); 2489 - return counter->pending.next == NULL; 2490 + return event->pending.next == NULL; 2490 2491 } 2491 2492 2492 - static void perf_pending_sync(struct perf_counter *counter) 2493 + static void perf_pending_sync(struct perf_event *event) 2493 2494 { 2494 - wait_event(counter->waitq, perf_not_pending(counter)); 2495 + wait_event(event->waitq, perf_not_pending(event)); 2495 2496 } 2496 2497 2497 - void perf_counter_do_pending(void) 2498 + void perf_event_do_pending(void) 2498 2499 { 2499 2500 __perf_pending_run(); 2500 2501 } ··· 2535 2536 atomic_set(&handle->data->poll, POLL_IN); 2536 2537 2537 2538 if (handle->nmi) { 2538 - handle->counter->pending_wakeup = 1; 2539 - perf_pending_queue(&handle->counter->pending, 2540 - perf_pending_counter); 2539 + handle->event->pending_wakeup = 1; 2540 + perf_pending_queue(&handle->event->pending, 2541 + perf_pending_event); 2541 2542 } else 2542 - perf_counter_wakeup(handle->counter); 2543 + perf_event_wakeup(handle->event); 2543 2544 } 2544 2545 2545 2546 /* 2546 2547 * Curious locking construct. 2547 2548 * 2548 - * We need to ensure a later event doesn't publish a head when a former 2549 - * event isn't done writing. However since we need to deal with NMIs we 2549 + * We need to ensure a later event_id doesn't publish a head when a former 2550 + * event_id isn't done writing. However since we need to deal with NMIs we 2550 2551 * cannot fully serialize things. 2551 2552 * 2552 2553 * What we do is serialize between CPUs so we only have to deal with NMI 2553 2554 * nesting on a single CPU. 2554 2555 * 2555 2556 * We only publish the head (and generate a wakeup) when the outer-most 2556 - * event completes. 2557 + * event_id completes. 2557 2558 */ 2558 2559 static void perf_output_lock(struct perf_output_handle *handle) 2559 2560 { ··· 2657 2658 } 2658 2659 2659 2660 int perf_output_begin(struct perf_output_handle *handle, 2660 - struct perf_counter *counter, unsigned int size, 2661 + struct perf_event *event, unsigned int size, 2661 2662 int nmi, int sample) 2662 2663 { 2663 - struct perf_counter *output_counter; 2664 + struct perf_event *output_event; 2664 2665 struct perf_mmap_data *data; 2665 2666 unsigned long tail, offset, head; 2666 2667 int have_lost; ··· 2672 2673 2673 2674 rcu_read_lock(); 2674 2675 /* 2675 - * For inherited counters we send all the output towards the parent. 2676 + * For inherited events we send all the output towards the parent. 
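When perf_output_begin() cannot reserve buffer space it bumps data->lost, and the next successful reservation emits a PERF_RECORD_LOST header carrying the drop count, as the hunk below shows. A hedged sketch of what a user-space consumer of the mmap buffer would see; struct perf_event_lost is an assumed local name mirroring the lost_event written by perf_output_begin(), not a uapi type:

#include <stdio.h>
#include <stdint.h>
#include <linux/perf_event.h>

/* assumed layout mirroring lost_event in perf_output_begin() */
struct perf_event_lost {
	struct perf_event_header header;	/* .type == PERF_RECORD_LOST */
	uint64_t id;				/* primary id of the event */
	uint64_t lost;				/* records dropped */
};

static void handle_record(const struct perf_event_header *hdr)
{
	if (hdr->type == PERF_RECORD_LOST) {
		const struct perf_event_lost *l = (const void *)hdr;

		fprintf(stderr, "lost %llu records on id %llu\n",
			(unsigned long long)l->lost,
			(unsigned long long)l->id);
	}
}

int main(void)
{
	struct perf_event_lost l = {
		.header = { .type = PERF_RECORD_LOST, .misc = 0,
			    .size = sizeof(l) },
		.id = 1, .lost = 42,
	};

	handle_record(&l.header);	/* demo invocation */
	return 0;
}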
2676 2677 */ 2677 - if (counter->parent) 2678 - counter = counter->parent; 2678 + if (event->parent) 2679 + event = event->parent; 2679 2680 2680 - output_counter = rcu_dereference(counter->output); 2681 - if (output_counter) 2682 - counter = output_counter; 2681 + output_event = rcu_dereference(event->output); 2682 + if (output_event) 2683 + event = output_event; 2683 2684 2684 - data = rcu_dereference(counter->data); 2685 + data = rcu_dereference(event->data); 2685 2686 if (!data) 2686 2687 goto out; 2687 2688 2688 2689 handle->data = data; 2689 - handle->counter = counter; 2690 + handle->event = event; 2690 2691 handle->nmi = nmi; 2691 2692 handle->sample = sample; 2692 2693 ··· 2720 2721 atomic_set(&data->wakeup, 1); 2721 2722 2722 2723 if (have_lost) { 2723 - lost_event.header.type = PERF_EVENT_LOST; 2724 + lost_event.header.type = PERF_RECORD_LOST; 2724 2725 lost_event.header.misc = 0; 2725 2726 lost_event.header.size = sizeof(lost_event); 2726 - lost_event.id = counter->id; 2727 + lost_event.id = event->id; 2727 2728 lost_event.lost = atomic_xchg(&data->lost, 0); 2728 2729 2729 2730 perf_output_put(handle, lost_event); ··· 2742 2743 2743 2744 void perf_output_end(struct perf_output_handle *handle) 2744 2745 { 2745 - struct perf_counter *counter = handle->counter; 2746 + struct perf_event *event = handle->event; 2746 2747 struct perf_mmap_data *data = handle->data; 2747 2748 2748 - int wakeup_events = counter->attr.wakeup_events; 2749 + int wakeup_events = event->attr.wakeup_events; 2749 2750 2750 2751 if (handle->sample && wakeup_events) { 2751 2752 int events = atomic_inc_return(&data->events); ··· 2759 2760 rcu_read_unlock(); 2760 2761 } 2761 2762 2762 - static u32 perf_counter_pid(struct perf_counter *counter, struct task_struct *p) 2763 + static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) 2763 2764 { 2764 2765 /* 2765 - * only top level counters have the pid namespace they were created in 2766 + * only top level events have the pid namespace they were created in 2766 2767 */ 2767 - if (counter->parent) 2768 - counter = counter->parent; 2768 + if (event->parent) 2769 + event = event->parent; 2769 2770 2770 - return task_tgid_nr_ns(p, counter->ns); 2771 + return task_tgid_nr_ns(p, event->ns); 2771 2772 } 2772 2773 2773 - static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p) 2774 + static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) 2774 2775 { 2775 2776 /* 2776 - * only top level counters have the pid namespace they were created in 2777 + * only top level events have the pid namespace they were created in 2777 2778 */ 2778 - if (counter->parent) 2779 - counter = counter->parent; 2779 + if (event->parent) 2780 + event = event->parent; 2780 2781 2781 - return task_pid_nr_ns(p, counter->ns); 2782 + return task_pid_nr_ns(p, event->ns); 2782 2783 } 2783 2784 2784 2785 static void perf_output_read_one(struct perf_output_handle *handle, 2785 - struct perf_counter *counter) 2786 + struct perf_event *event) 2786 2787 { 2787 - u64 read_format = counter->attr.read_format; 2788 + u64 read_format = event->attr.read_format; 2788 2789 u64 values[4]; 2789 2790 int n = 0; 2790 2791 2791 - values[n++] = atomic64_read(&counter->count); 2792 + values[n++] = atomic64_read(&event->count); 2792 2793 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 2793 - values[n++] = counter->total_time_enabled + 2794 - atomic64_read(&counter->child_total_time_enabled); 2794 + values[n++] = event->total_time_enabled + 2795 + 
atomic64_read(&event->child_total_time_enabled); 2795 2796 } 2796 2797 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 2797 - values[n++] = counter->total_time_running + 2798 - atomic64_read(&counter->child_total_time_running); 2798 + values[n++] = event->total_time_running + 2799 + atomic64_read(&event->child_total_time_running); 2799 2800 } 2800 2801 if (read_format & PERF_FORMAT_ID) 2801 - values[n++] = primary_counter_id(counter); 2802 + values[n++] = primary_event_id(event); 2802 2803 2803 2804 perf_output_copy(handle, values, n * sizeof(u64)); 2804 2805 } 2805 2806 2806 2807 /* 2807 - * XXX PERF_FORMAT_GROUP vs inherited counters seems difficult. 2808 + * XXX PERF_FORMAT_GROUP vs inherited events seems difficult. 2808 2809 */ 2809 2810 static void perf_output_read_group(struct perf_output_handle *handle, 2810 - struct perf_counter *counter) 2811 + struct perf_event *event) 2811 2812 { 2812 - struct perf_counter *leader = counter->group_leader, *sub; 2813 - u64 read_format = counter->attr.read_format; 2813 + struct perf_event *leader = event->group_leader, *sub; 2814 + u64 read_format = event->attr.read_format; 2814 2815 u64 values[5]; 2815 2816 int n = 0; 2816 2817 ··· 2822 2823 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 2823 2824 values[n++] = leader->total_time_running; 2824 2825 2825 - if (leader != counter) 2826 + if (leader != event) 2826 2827 leader->pmu->read(leader); 2827 2828 2828 2829 values[n++] = atomic64_read(&leader->count); 2829 2830 if (read_format & PERF_FORMAT_ID) 2830 - values[n++] = primary_counter_id(leader); 2831 + values[n++] = primary_event_id(leader); 2831 2832 2832 2833 perf_output_copy(handle, values, n * sizeof(u64)); 2833 2834 2834 - list_for_each_entry(sub, &leader->sibling_list, list_entry) { 2835 + list_for_each_entry(sub, &leader->sibling_list, group_entry) { 2835 2836 n = 0; 2836 2837 2837 - if (sub != counter) 2838 + if (sub != event) 2838 2839 sub->pmu->read(sub); 2839 2840 2840 2841 values[n++] = atomic64_read(&sub->count); 2841 2842 if (read_format & PERF_FORMAT_ID) 2842 - values[n++] = primary_counter_id(sub); 2843 + values[n++] = primary_event_id(sub); 2843 2844 2844 2845 perf_output_copy(handle, values, n * sizeof(u64)); 2845 2846 } 2846 2847 } 2847 2848 2848 2849 static void perf_output_read(struct perf_output_handle *handle, 2849 - struct perf_counter *counter) 2850 + struct perf_event *event) 2850 2851 { 2851 - if (counter->attr.read_format & PERF_FORMAT_GROUP) 2852 - perf_output_read_group(handle, counter); 2852 + if (event->attr.read_format & PERF_FORMAT_GROUP) 2853 + perf_output_read_group(handle, event); 2853 2854 else 2854 - perf_output_read_one(handle, counter); 2855 + perf_output_read_one(handle, event); 2855 2856 } 2856 2857 2857 2858 void perf_output_sample(struct perf_output_handle *handle, 2858 2859 struct perf_event_header *header, 2859 2860 struct perf_sample_data *data, 2860 - struct perf_counter *counter) 2861 + struct perf_event *event) 2861 2862 { 2862 2863 u64 sample_type = data->type; 2863 2864 ··· 2888 2889 perf_output_put(handle, data->period); 2889 2890 2890 2891 if (sample_type & PERF_SAMPLE_READ) 2891 - perf_output_read(handle, counter); 2892 + perf_output_read(handle, event); 2892 2893 2893 2894 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 2894 2895 if (data->callchain) { ··· 2926 2927 2927 2928 void perf_prepare_sample(struct perf_event_header *header, 2928 2929 struct perf_sample_data *data, 2929 - struct perf_counter *counter, 2930 + struct perf_event *event, 2930 2931 struct pt_regs *regs) 2931 2932 { 
2932 - u64 sample_type = counter->attr.sample_type; 2933 + u64 sample_type = event->attr.sample_type; 2933 2934 2934 2935 data->type = sample_type; 2935 2936 2936 - header->type = PERF_EVENT_SAMPLE; 2937 + header->type = PERF_RECORD_SAMPLE; 2937 2938 header->size = sizeof(*header); 2938 2939 2939 2940 header->misc = 0; ··· 2947 2948 2948 2949 if (sample_type & PERF_SAMPLE_TID) { 2949 2950 /* namespace issues */ 2950 - data->tid_entry.pid = perf_counter_pid(counter, current); 2951 - data->tid_entry.tid = perf_counter_tid(counter, current); 2951 + data->tid_entry.pid = perf_event_pid(event, current); 2952 + data->tid_entry.tid = perf_event_tid(event, current); 2952 2953 2953 2954 header->size += sizeof(data->tid_entry); 2954 2955 } ··· 2963 2964 header->size += sizeof(data->addr); 2964 2965 2965 2966 if (sample_type & PERF_SAMPLE_ID) { 2966 - data->id = primary_counter_id(counter); 2967 + data->id = primary_event_id(event); 2967 2968 2968 2969 header->size += sizeof(data->id); 2969 2970 } 2970 2971 2971 2972 if (sample_type & PERF_SAMPLE_STREAM_ID) { 2972 - data->stream_id = counter->id; 2973 + data->stream_id = event->id; 2973 2974 2974 2975 header->size += sizeof(data->stream_id); 2975 2976 } ··· 2985 2986 header->size += sizeof(data->period); 2986 2987 2987 2988 if (sample_type & PERF_SAMPLE_READ) 2988 - header->size += perf_counter_read_size(counter); 2989 + header->size += perf_event_read_size(event); 2989 2990 2990 2991 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 2991 2992 int size = 1; ··· 3011 3012 } 3012 3013 } 3013 3014 3014 - static void perf_counter_output(struct perf_counter *counter, int nmi, 3015 + static void perf_event_output(struct perf_event *event, int nmi, 3015 3016 struct perf_sample_data *data, 3016 3017 struct pt_regs *regs) 3017 3018 { 3018 3019 struct perf_output_handle handle; 3019 3020 struct perf_event_header header; 3020 3021 3021 - perf_prepare_sample(&header, data, counter, regs); 3022 + perf_prepare_sample(&header, data, event, regs); 3022 3023 3023 - if (perf_output_begin(&handle, counter, header.size, nmi, 1)) 3024 + if (perf_output_begin(&handle, event, header.size, nmi, 1)) 3024 3025 return; 3025 3026 3026 - perf_output_sample(&handle, &header, data, counter); 3027 + perf_output_sample(&handle, &header, data, event); 3027 3028 3028 3029 perf_output_end(&handle); 3029 3030 } 3030 3031 3031 3032 /* 3032 - * read event 3033 + * read event_id 3033 3034 */ 3034 3035 3035 3036 struct perf_read_event { ··· 3040 3041 }; 3041 3042 3042 3043 static void 3043 - perf_counter_read_event(struct perf_counter *counter, 3044 + perf_event_read_event(struct perf_event *event, 3044 3045 struct task_struct *task) 3045 3046 { 3046 3047 struct perf_output_handle handle; 3047 - struct perf_read_event event = { 3048 + struct perf_read_event read_event = { 3048 3049 .header = { 3049 - .type = PERF_EVENT_READ, 3050 + .type = PERF_RECORD_READ, 3050 3051 .misc = 0, 3051 - .size = sizeof(event) + perf_counter_read_size(counter), 3052 + .size = sizeof(read_event) + perf_event_read_size(event), 3052 3053 }, 3053 - .pid = perf_counter_pid(counter, task), 3054 - .tid = perf_counter_tid(counter, task), 3054 + .pid = perf_event_pid(event, task), 3055 + .tid = perf_event_tid(event, task), 3055 3056 }; 3056 3057 int ret; 3057 3058 3058 - ret = perf_output_begin(&handle, counter, event.header.size, 0, 0); 3059 + ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0); 3059 3060 if (ret) 3060 3061 return; 3061 3062 3062 - perf_output_put(&handle, event); 3063 - 
perf_output_read(&handle, counter); 3063 + perf_output_put(&handle, read_event); 3064 + perf_output_read(&handle, event); 3064 3065 3065 3066 perf_output_end(&handle); 3066 3067 } ··· 3073 3074 3074 3075 struct perf_task_event { 3075 3076 struct task_struct *task; 3076 - struct perf_counter_context *task_ctx; 3077 + struct perf_event_context *task_ctx; 3077 3078 3078 3079 struct { 3079 3080 struct perf_event_header header; ··· 3083 3084 u32 tid; 3084 3085 u32 ptid; 3085 3086 u64 time; 3086 - } event; 3087 + } event_id; 3087 3088 }; 3088 3089 3089 - static void perf_counter_task_output(struct perf_counter *counter, 3090 + static void perf_event_task_output(struct perf_event *event, 3090 3091 struct perf_task_event *task_event) 3091 3092 { 3092 3093 struct perf_output_handle handle; ··· 3094 3095 struct task_struct *task = task_event->task; 3095 3096 int ret; 3096 3097 3097 - size = task_event->event.header.size; 3098 - ret = perf_output_begin(&handle, counter, size, 0, 0); 3098 + size = task_event->event_id.header.size; 3099 + ret = perf_output_begin(&handle, event, size, 0, 0); 3099 3100 3100 3101 if (ret) 3101 3102 return; 3102 3103 3103 - task_event->event.pid = perf_counter_pid(counter, task); 3104 - task_event->event.ppid = perf_counter_pid(counter, current); 3104 + task_event->event_id.pid = perf_event_pid(event, task); 3105 + task_event->event_id.ppid = perf_event_pid(event, current); 3105 3106 3106 - task_event->event.tid = perf_counter_tid(counter, task); 3107 - task_event->event.ptid = perf_counter_tid(counter, current); 3107 + task_event->event_id.tid = perf_event_tid(event, task); 3108 + task_event->event_id.ptid = perf_event_tid(event, current); 3108 3109 3109 - task_event->event.time = perf_clock(); 3110 + task_event->event_id.time = perf_clock(); 3110 3111 3111 - perf_output_put(&handle, task_event->event); 3112 + perf_output_put(&handle, task_event->event_id); 3112 3113 3113 3114 perf_output_end(&handle); 3114 3115 } 3115 3116 3116 - static int perf_counter_task_match(struct perf_counter *counter) 3117 + static int perf_event_task_match(struct perf_event *event) 3117 3118 { 3118 - if (counter->attr.comm || counter->attr.mmap || counter->attr.task) 3119 + if (event->attr.comm || event->attr.mmap || event->attr.task) 3119 3120 return 1; 3120 3121 3121 3122 return 0; 3122 3123 } 3123 3124 3124 - static void perf_counter_task_ctx(struct perf_counter_context *ctx, 3125 + static void perf_event_task_ctx(struct perf_event_context *ctx, 3125 3126 struct perf_task_event *task_event) 3126 3127 { 3127 - struct perf_counter *counter; 3128 + struct perf_event *event; 3128 3129 3129 3130 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) 3130 3131 return; 3131 3132 3132 3133 rcu_read_lock(); 3133 - list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { 3134 - if (perf_counter_task_match(counter)) 3135 - perf_counter_task_output(counter, task_event); 3134 + list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 3135 + if (perf_event_task_match(event)) 3136 + perf_event_task_output(event, task_event); 3136 3137 } 3137 3138 rcu_read_unlock(); 3138 3139 } 3139 3140 3140 - static void perf_counter_task_event(struct perf_task_event *task_event) 3141 + static void perf_event_task_event(struct perf_task_event *task_event) 3141 3142 { 3142 3143 struct perf_cpu_context *cpuctx; 3143 - struct perf_counter_context *ctx = task_event->task_ctx; 3144 + struct perf_event_context *ctx = task_event->task_ctx; 3144 3145 3145 3146 cpuctx = &get_cpu_var(perf_cpu_context); 
3146 - perf_counter_task_ctx(&cpuctx->ctx, task_event); 3147 + perf_event_task_ctx(&cpuctx->ctx, task_event); 3147 3148 put_cpu_var(perf_cpu_context); 3148 3149 3149 3150 rcu_read_lock(); 3150 3151 if (!ctx) 3151 - ctx = rcu_dereference(task_event->task->perf_counter_ctxp); 3152 + ctx = rcu_dereference(task_event->task->perf_event_ctxp); 3152 3153 if (ctx) 3153 - perf_counter_task_ctx(ctx, task_event); 3154 + perf_event_task_ctx(ctx, task_event); 3154 3155 rcu_read_unlock(); 3155 3156 } 3156 3157 3157 - static void perf_counter_task(struct task_struct *task, 3158 - struct perf_counter_context *task_ctx, 3158 + static void perf_event_task(struct task_struct *task, 3159 + struct perf_event_context *task_ctx, 3159 3160 int new) 3160 3161 { 3161 3162 struct perf_task_event task_event; 3162 3163 3163 - if (!atomic_read(&nr_comm_counters) && 3164 - !atomic_read(&nr_mmap_counters) && 3165 - !atomic_read(&nr_task_counters)) 3164 + if (!atomic_read(&nr_comm_events) && 3165 + !atomic_read(&nr_mmap_events) && 3166 + !atomic_read(&nr_task_events)) 3166 3167 return; 3167 3168 3168 3169 task_event = (struct perf_task_event){ 3169 3170 .task = task, 3170 3171 .task_ctx = task_ctx, 3171 - .event = { 3172 + .event_id = { 3172 3173 .header = { 3173 - .type = new ? PERF_EVENT_FORK : PERF_EVENT_EXIT, 3174 + .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT, 3174 3175 .misc = 0, 3175 - .size = sizeof(task_event.event), 3176 + .size = sizeof(task_event.event_id), 3176 3177 }, 3177 3178 /* .pid */ 3178 3179 /* .ppid */ ··· 3181 3182 }, 3182 3183 }; 3183 3184 3184 - perf_counter_task_event(&task_event); 3185 + perf_event_task_event(&task_event); 3185 3186 } 3186 3187 3187 - void perf_counter_fork(struct task_struct *task) 3188 + void perf_event_fork(struct task_struct *task) 3188 3189 { 3189 - perf_counter_task(task, NULL, 1); 3190 + perf_event_task(task, NULL, 1); 3190 3191 } 3191 3192 3192 3193 /* ··· 3203 3204 3204 3205 u32 pid; 3205 3206 u32 tid; 3206 - } event; 3207 + } event_id; 3207 3208 }; 3208 3209 3209 - static void perf_counter_comm_output(struct perf_counter *counter, 3210 + static void perf_event_comm_output(struct perf_event *event, 3210 3211 struct perf_comm_event *comm_event) 3211 3212 { 3212 3213 struct perf_output_handle handle; 3213 - int size = comm_event->event.header.size; 3214 - int ret = perf_output_begin(&handle, counter, size, 0, 0); 3214 + int size = comm_event->event_id.header.size; 3215 + int ret = perf_output_begin(&handle, event, size, 0, 0); 3215 3216 3216 3217 if (ret) 3217 3218 return; 3218 3219 3219 - comm_event->event.pid = perf_counter_pid(counter, comm_event->task); 3220 - comm_event->event.tid = perf_counter_tid(counter, comm_event->task); 3220 + comm_event->event_id.pid = perf_event_pid(event, comm_event->task); 3221 + comm_event->event_id.tid = perf_event_tid(event, comm_event->task); 3221 3222 3222 - perf_output_put(&handle, comm_event->event); 3223 + perf_output_put(&handle, comm_event->event_id); 3223 3224 perf_output_copy(&handle, comm_event->comm, 3224 3225 comm_event->comm_size); 3225 3226 perf_output_end(&handle); 3226 3227 } 3227 3228 3228 - static int perf_counter_comm_match(struct perf_counter *counter) 3229 + static int perf_event_comm_match(struct perf_event *event) 3229 3230 { 3230 - if (counter->attr.comm) 3231 + if (event->attr.comm) 3231 3232 return 1; 3232 3233 3233 3234 return 0; 3234 3235 } 3235 3236 3236 - static void perf_counter_comm_ctx(struct perf_counter_context *ctx, 3237 + static void perf_event_comm_ctx(struct perf_event_context *ctx, 3237 3238 
struct perf_comm_event *comm_event) 3238 3239 { 3239 - struct perf_counter *counter; 3240 + struct perf_event *event; 3240 3241 3241 3242 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) 3242 3243 return; 3243 3244 3244 3245 rcu_read_lock(); 3245 - list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { 3246 - if (perf_counter_comm_match(counter)) 3247 - perf_counter_comm_output(counter, comm_event); 3246 + list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 3247 + if (perf_event_comm_match(event)) 3248 + perf_event_comm_output(event, comm_event); 3248 3249 } 3249 3250 rcu_read_unlock(); 3250 3251 } 3251 3252 3252 - static void perf_counter_comm_event(struct perf_comm_event *comm_event) 3253 + static void perf_event_comm_event(struct perf_comm_event *comm_event) 3253 3254 { 3254 3255 struct perf_cpu_context *cpuctx; 3255 - struct perf_counter_context *ctx; 3256 + struct perf_event_context *ctx; 3256 3257 unsigned int size; 3257 3258 char comm[TASK_COMM_LEN]; 3258 3259 ··· 3263 3264 comm_event->comm = comm; 3264 3265 comm_event->comm_size = size; 3265 3266 3266 - comm_event->event.header.size = sizeof(comm_event->event) + size; 3267 + comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; 3267 3268 3268 3269 cpuctx = &get_cpu_var(perf_cpu_context); 3269 - perf_counter_comm_ctx(&cpuctx->ctx, comm_event); 3270 + perf_event_comm_ctx(&cpuctx->ctx, comm_event); 3270 3271 put_cpu_var(perf_cpu_context); 3271 3272 3272 3273 rcu_read_lock(); ··· 3274 3275 * doesn't really matter which of the child contexts the 3275 3276 * events ends up in. 3276 3277 */ 3277 - ctx = rcu_dereference(current->perf_counter_ctxp); 3278 + ctx = rcu_dereference(current->perf_event_ctxp); 3278 3279 if (ctx) 3279 - perf_counter_comm_ctx(ctx, comm_event); 3280 + perf_event_comm_ctx(ctx, comm_event); 3280 3281 rcu_read_unlock(); 3281 3282 } 3282 3283 3283 - void perf_counter_comm(struct task_struct *task) 3284 + void perf_event_comm(struct task_struct *task) 3284 3285 { 3285 3286 struct perf_comm_event comm_event; 3286 3287 3287 - if (task->perf_counter_ctxp) 3288 - perf_counter_enable_on_exec(task); 3288 + if (task->perf_event_ctxp) 3289 + perf_event_enable_on_exec(task); 3289 3290 3290 - if (!atomic_read(&nr_comm_counters)) 3291 + if (!atomic_read(&nr_comm_events)) 3291 3292 return; 3292 3293 3293 3294 comm_event = (struct perf_comm_event){ 3294 3295 .task = task, 3295 3296 /* .comm */ 3296 3297 /* .comm_size */ 3297 - .event = { 3298 + .event_id = { 3298 3299 .header = { 3299 - .type = PERF_EVENT_COMM, 3300 + .type = PERF_RECORD_COMM, 3300 3301 .misc = 0, 3301 3302 /* .size */ 3302 3303 }, ··· 3305 3306 }, 3306 3307 }; 3307 3308 3308 - perf_counter_comm_event(&comm_event); 3309 + perf_event_comm_event(&comm_event); 3309 3310 } 3310 3311 3311 3312 /* ··· 3326 3327 u64 start; 3327 3328 u64 len; 3328 3329 u64 pgoff; 3329 - } event; 3330 + } event_id; 3330 3331 }; 3331 3332 3332 - static void perf_counter_mmap_output(struct perf_counter *counter, 3333 + static void perf_event_mmap_output(struct perf_event *event, 3333 3334 struct perf_mmap_event *mmap_event) 3334 3335 { 3335 3336 struct perf_output_handle handle; 3336 - int size = mmap_event->event.header.size; 3337 - int ret = perf_output_begin(&handle, counter, size, 0, 0); 3337 + int size = mmap_event->event_id.header.size; 3338 + int ret = perf_output_begin(&handle, event, size, 0, 0); 3338 3339 3339 3340 if (ret) 3340 3341 return; 3341 3342 3342 - mmap_event->event.pid = perf_counter_pid(counter, current); 3343 - 
mmap_event->event.tid = perf_counter_tid(counter, current); 3343 + mmap_event->event_id.pid = perf_event_pid(event, current); 3344 + mmap_event->event_id.tid = perf_event_tid(event, current); 3344 3345 3345 - perf_output_put(&handle, mmap_event->event); 3346 + perf_output_put(&handle, mmap_event->event_id); 3346 3347 perf_output_copy(&handle, mmap_event->file_name, 3347 3348 mmap_event->file_size); 3348 3349 perf_output_end(&handle); 3349 3350 } 3350 3351 3351 - static int perf_counter_mmap_match(struct perf_counter *counter, 3352 + static int perf_event_mmap_match(struct perf_event *event, 3352 3353 struct perf_mmap_event *mmap_event) 3353 3354 { 3354 - if (counter->attr.mmap) 3355 + if (event->attr.mmap) 3355 3356 return 1; 3356 3357 3357 3358 return 0; 3358 3359 } 3359 3360 3360 - static void perf_counter_mmap_ctx(struct perf_counter_context *ctx, 3361 + static void perf_event_mmap_ctx(struct perf_event_context *ctx, 3361 3362 struct perf_mmap_event *mmap_event) 3362 3363 { 3363 - struct perf_counter *counter; 3364 + struct perf_event *event; 3364 3365 3365 3366 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) 3366 3367 return; 3367 3368 3368 3369 rcu_read_lock(); 3369 - list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { 3370 - if (perf_counter_mmap_match(counter, mmap_event)) 3371 - perf_counter_mmap_output(counter, mmap_event); 3370 + list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 3371 + if (perf_event_mmap_match(event, mmap_event)) 3372 + perf_event_mmap_output(event, mmap_event); 3372 3373 } 3373 3374 rcu_read_unlock(); 3374 3375 } 3375 3376 3376 - static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event) 3377 + static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) 3377 3378 { 3378 3379 struct perf_cpu_context *cpuctx; 3379 - struct perf_counter_context *ctx; 3380 + struct perf_event_context *ctx; 3380 3381 struct vm_area_struct *vma = mmap_event->vma; 3381 3382 struct file *file = vma->vm_file; 3382 3383 unsigned int size; ··· 3424 3425 mmap_event->file_name = name; 3425 3426 mmap_event->file_size = size; 3426 3427 3427 - mmap_event->event.header.size = sizeof(mmap_event->event) + size; 3428 + mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; 3428 3429 3429 3430 cpuctx = &get_cpu_var(perf_cpu_context); 3430 - perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event); 3431 + perf_event_mmap_ctx(&cpuctx->ctx, mmap_event); 3431 3432 put_cpu_var(perf_cpu_context); 3432 3433 3433 3434 rcu_read_lock(); ··· 3435 3436 * doesn't really matter which of the child contexts the 3436 3437 * events ends up in. 
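PERF_RECORD_MMAP (like PERF_RECORD_COMM above) is a side-band record: it is only generated for events whose attr set the corresponding bit, which is exactly what perf_event_mmap_match() tests, and it carries pid/tid plus start/len/pgoff and the file name so a profiler can map sampled IPs back to DSOs. A sketch of an attr requesting both record types alongside IP samples; field and constant names follow this patch, the cycles config and period are just example choices:

#include <string.h>
#include <linux/perf_event.h>

/* sketch: ask for COMM and MMAP side-band records next to IP samples */
static struct perf_event_attr profiling_attr(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
	attr.sample_period = 100000;
	attr.comm = 1;		/* PERF_RECORD_COMM on exec()/set-comm */
	attr.mmap = 1;		/* PERF_RECORD_MMAP on executable mmap() */
	return attr;
}

int main(void)
{
	struct perf_event_attr attr = profiling_attr();

	return (attr.mmap && attr.comm) ? 0 : 1;	/* sanity check */
}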
3437 3438 */ 3438 - ctx = rcu_dereference(current->perf_counter_ctxp); 3439 + ctx = rcu_dereference(current->perf_event_ctxp); 3439 3440 if (ctx) 3440 - perf_counter_mmap_ctx(ctx, mmap_event); 3441 + perf_event_mmap_ctx(ctx, mmap_event); 3441 3442 rcu_read_unlock(); 3442 3443 3443 3444 kfree(buf); 3444 3445 } 3445 3446 3446 - void __perf_counter_mmap(struct vm_area_struct *vma) 3447 + void __perf_event_mmap(struct vm_area_struct *vma) 3447 3448 { 3448 3449 struct perf_mmap_event mmap_event; 3449 3450 3450 - if (!atomic_read(&nr_mmap_counters)) 3451 + if (!atomic_read(&nr_mmap_events)) 3451 3452 return; 3452 3453 3453 3454 mmap_event = (struct perf_mmap_event){ 3454 3455 .vma = vma, 3455 3456 /* .file_name */ 3456 3457 /* .file_size */ 3457 - .event = { 3458 + .event_id = { 3458 3459 .header = { 3459 - .type = PERF_EVENT_MMAP, 3460 + .type = PERF_RECORD_MMAP, 3460 3461 .misc = 0, 3461 3462 /* .size */ 3462 3463 }, ··· 3468 3469 }, 3469 3470 }; 3470 3471 3471 - perf_counter_mmap_event(&mmap_event); 3472 + perf_event_mmap_event(&mmap_event); 3472 3473 } 3473 3474 3474 3475 /* 3475 3476 * IRQ throttle logging 3476 3477 */ 3477 3478 3478 - static void perf_log_throttle(struct perf_counter *counter, int enable) 3479 + static void perf_log_throttle(struct perf_event *event, int enable) 3479 3480 { 3480 3481 struct perf_output_handle handle; 3481 3482 int ret; ··· 3487 3488 u64 stream_id; 3488 3489 } throttle_event = { 3489 3490 .header = { 3490 - .type = PERF_EVENT_THROTTLE, 3491 + .type = PERF_RECORD_THROTTLE, 3491 3492 .misc = 0, 3492 3493 .size = sizeof(throttle_event), 3493 3494 }, 3494 3495 .time = perf_clock(), 3495 - .id = primary_counter_id(counter), 3496 - .stream_id = counter->id, 3496 + .id = primary_event_id(event), 3497 + .stream_id = event->id, 3497 3498 }; 3498 3499 3499 3500 if (enable) 3500 - throttle_event.header.type = PERF_EVENT_UNTHROTTLE; 3501 + throttle_event.header.type = PERF_RECORD_UNTHROTTLE; 3501 3502 3502 - ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0); 3503 + ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0); 3503 3504 if (ret) 3504 3505 return; 3505 3506 ··· 3508 3509 } 3509 3510 3510 3511 /* 3511 - * Generic counter overflow handling, sampling. 3512 + * Generic event overflow handling, sampling. 
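The throttle check below compares HZ * hwc->interrupts against sysctl_perf_event_sample_rate, so an event is throttled (and a PERF_RECORD_THROTTLE record logged) once it would exceed the permitted interrupts per second. A small user-space sketch of inspecting that limit; the procfs path assumes the renamed sysctl entry that accompanies this patch:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/perf_event_max_sample_rate", "r");
	long rate = 0;

	if (f && fscanf(f, "%ld", &rate) == 1)
		printf("events above ~%ld interrupts/sec get throttled\n",
		       rate);
	if (f)
		fclose(f);
	return 0;
}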
3512 3513 */ 3513 3514 3514 - static int __perf_counter_overflow(struct perf_counter *counter, int nmi, 3515 + static int __perf_event_overflow(struct perf_event *event, int nmi, 3515 3516 int throttle, struct perf_sample_data *data, 3516 3517 struct pt_regs *regs) 3517 3518 { 3518 - int events = atomic_read(&counter->event_limit); 3519 - struct hw_perf_counter *hwc = &counter->hw; 3519 + int events = atomic_read(&event->event_limit); 3520 + struct hw_perf_event *hwc = &event->hw; 3520 3521 int ret = 0; 3521 3522 3522 - throttle = (throttle && counter->pmu->unthrottle != NULL); 3523 + throttle = (throttle && event->pmu->unthrottle != NULL); 3523 3524 3524 3525 if (!throttle) { 3525 3526 hwc->interrupts++; ··· 3527 3528 if (hwc->interrupts != MAX_INTERRUPTS) { 3528 3529 hwc->interrupts++; 3529 3530 if (HZ * hwc->interrupts > 3530 - (u64)sysctl_perf_counter_sample_rate) { 3531 + (u64)sysctl_perf_event_sample_rate) { 3531 3532 hwc->interrupts = MAX_INTERRUPTS; 3532 - perf_log_throttle(counter, 0); 3533 + perf_log_throttle(event, 0); 3533 3534 ret = 1; 3534 3535 } 3535 3536 } else { 3536 3537 /* 3537 - * Keep re-disabling counters even though on the previous 3538 + * Keep re-disabling events even though on the previous 3538 3539 * pass we disabled it - just in case we raced with a 3539 - * sched-in and the counter got enabled again: 3540 + * sched-in and the event got enabled again: 3540 3541 */ 3541 3542 ret = 1; 3542 3543 } 3543 3544 } 3544 3545 3545 - if (counter->attr.freq) { 3546 + if (event->attr.freq) { 3546 3547 u64 now = perf_clock(); 3547 3548 s64 delta = now - hwc->freq_stamp; 3548 3549 3549 3550 hwc->freq_stamp = now; 3550 3551 3551 3552 if (delta > 0 && delta < TICK_NSEC) 3552 - perf_adjust_period(counter, NSEC_PER_SEC / (int)delta); 3553 + perf_adjust_period(event, NSEC_PER_SEC / (int)delta); 3553 3554 } 3554 3555 3555 3556 /* 3556 3557 * XXX event_limit might not quite work as expected on inherited 3557 - * counters 3558 + * events 3558 3559 */ 3559 3560 3560 - counter->pending_kill = POLL_IN; 3561 - if (events && atomic_dec_and_test(&counter->event_limit)) { 3561 + event->pending_kill = POLL_IN; 3562 + if (events && atomic_dec_and_test(&event->event_limit)) { 3562 3563 ret = 1; 3563 - counter->pending_kill = POLL_HUP; 3564 + event->pending_kill = POLL_HUP; 3564 3565 if (nmi) { 3565 - counter->pending_disable = 1; 3566 - perf_pending_queue(&counter->pending, 3567 - perf_pending_counter); 3566 + event->pending_disable = 1; 3567 + perf_pending_queue(&event->pending, 3568 + perf_pending_event); 3568 3569 } else 3569 - perf_counter_disable(counter); 3570 + perf_event_disable(event); 3570 3571 } 3571 3572 3572 - perf_counter_output(counter, nmi, data, regs); 3573 + perf_event_output(event, nmi, data, regs); 3573 3574 return ret; 3574 3575 } 3575 3576 3576 - int perf_counter_overflow(struct perf_counter *counter, int nmi, 3577 + int perf_event_overflow(struct perf_event *event, int nmi, 3577 3578 struct perf_sample_data *data, 3578 3579 struct pt_regs *regs) 3579 3580 { 3580 - return __perf_counter_overflow(counter, nmi, 1, data, regs); 3581 + return __perf_event_overflow(event, nmi, 1, data, regs); 3581 3582 } 3582 3583 3583 3584 /* 3584 - * Generic software counter infrastructure 3585 + * Generic software event infrastructure 3585 3586 */ 3586 3587 3587 3588 /* 3588 - * We directly increment counter->count and keep a second value in 3589 - * counter->hw.period_left to count intervals. 
This period counter 3589 + * We directly increment event->count and keep a second value in 3590 + * event->hw.period_left to count intervals. This period event 3590 3591 * is kept in the range [-sample_period, 0] so that we can use the 3591 3592 * sign as trigger. 3592 3593 */ 3593 3594 3594 - static u64 perf_swcounter_set_period(struct perf_counter *counter) 3595 + static u64 perf_swevent_set_period(struct perf_event *event) 3595 3596 { 3596 - struct hw_perf_counter *hwc = &counter->hw; 3597 + struct hw_perf_event *hwc = &event->hw; 3597 3598 u64 period = hwc->last_period; 3598 3599 u64 nr, offset; 3599 3600 s64 old, val; ··· 3614 3615 return nr; 3615 3616 } 3616 3617 3617 - static void perf_swcounter_overflow(struct perf_counter *counter, 3618 + static void perf_swevent_overflow(struct perf_event *event, 3618 3619 int nmi, struct perf_sample_data *data, 3619 3620 struct pt_regs *regs) 3620 3621 { 3621 - struct hw_perf_counter *hwc = &counter->hw; 3622 + struct hw_perf_event *hwc = &event->hw; 3622 3623 int throttle = 0; 3623 3624 u64 overflow; 3624 3625 3625 - data->period = counter->hw.last_period; 3626 - overflow = perf_swcounter_set_period(counter); 3626 + data->period = event->hw.last_period; 3627 + overflow = perf_swevent_set_period(event); 3627 3628 3628 3629 if (hwc->interrupts == MAX_INTERRUPTS) 3629 3630 return; 3630 3631 3631 3632 for (; overflow; overflow--) { 3632 - if (__perf_counter_overflow(counter, nmi, throttle, 3633 + if (__perf_event_overflow(event, nmi, throttle, 3633 3634 data, regs)) { 3634 3635 /* 3635 3636 * We inhibit the overflow from happening when ··· 3641 3642 } 3642 3643 } 3643 3644 3644 - static void perf_swcounter_unthrottle(struct perf_counter *counter) 3645 + static void perf_swevent_unthrottle(struct perf_event *event) 3645 3646 { 3646 3647 /* 3647 3648 * Nothing to do, we already reset hwc->interrupts. 3648 3649 */ 3649 3650 } 3650 3651 3651 - static void perf_swcounter_add(struct perf_counter *counter, u64 nr, 3652 + static void perf_swevent_add(struct perf_event *event, u64 nr, 3652 3653 int nmi, struct perf_sample_data *data, 3653 3654 struct pt_regs *regs) 3654 3655 { 3655 - struct hw_perf_counter *hwc = &counter->hw; 3656 + struct hw_perf_event *hwc = &event->hw; 3656 3657 3657 - atomic64_add(nr, &counter->count); 3658 + atomic64_add(nr, &event->count); 3658 3659 3659 3660 if (!hwc->sample_period) 3660 3661 return; ··· 3663 3664 return; 3664 3665 3665 3666 if (!atomic64_add_negative(nr, &hwc->period_left)) 3666 - perf_swcounter_overflow(counter, nmi, data, regs); 3667 + perf_swevent_overflow(event, nmi, data, regs); 3667 3668 } 3668 3669 3669 - static int perf_swcounter_is_counting(struct perf_counter *counter) 3670 + static int perf_swevent_is_counting(struct perf_event *event) 3670 3671 { 3671 3672 /* 3672 - * The counter is active, we're good! 3673 + * The event is active, we're good! 3673 3674 */ 3674 - if (counter->state == PERF_COUNTER_STATE_ACTIVE) 3675 + if (event->state == PERF_EVENT_STATE_ACTIVE) 3675 3676 return 1; 3676 3677 3677 3678 /* 3678 - * The counter is off/error, not counting. 3679 + * The event is off/error, not counting. 3679 3680 */ 3680 - if (counter->state != PERF_COUNTER_STATE_INACTIVE) 3681 + if (event->state != PERF_EVENT_STATE_INACTIVE) 3681 3682 return 0; 3682 3683 3683 3684 /* 3684 - * The counter is inactive, if the context is active 3685 + * The event is inactive, if the context is active 3685 3686 * we're part of a group that didn't make it on the 'pmu', 3686 3687 * not counting. 
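The [-sample_period, 0] convention above restated: hwc->period_left holds minus the distance to the next overflow, so atomic64_add_negative() doubles as the overflow test and perf_swevent_set_period() re-arms by subtracting the period again. A stand-alone model of the arithmetic in plain C, with no kernel types:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t sample_period = 4;
	int64_t period_left = -sample_period;	/* armed */
	uint64_t overflows = 0;

	for (int i = 0; i < 10; i++) {
		period_left += 1;		/* one event observed */
		if (period_left >= 0) {		/* sign flip == overflow */
			overflows++;
			period_left -= sample_period;	/* re-arm */
		}
	}
	printf("10 events, period 4 -> %llu overflows\n",
	       (unsigned long long)overflows);
	return 0;
}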
3687 3688 */ 3688 - if (counter->ctx->is_active) 3689 + if (event->ctx->is_active) 3689 3690 return 0; 3690 3691 3691 3692 /* ··· 3696 3697 return 1; 3697 3698 } 3698 3699 3699 - static int perf_swcounter_match(struct perf_counter *counter, 3700 + static int perf_swevent_match(struct perf_event *event, 3700 3701 enum perf_type_id type, 3701 - u32 event, struct pt_regs *regs) 3702 + u32 event_id, struct pt_regs *regs) 3702 3703 { 3703 - if (!perf_swcounter_is_counting(counter)) 3704 + if (!perf_swevent_is_counting(event)) 3704 3705 return 0; 3705 3706 3706 - if (counter->attr.type != type) 3707 + if (event->attr.type != type) 3707 3708 return 0; 3708 - if (counter->attr.config != event) 3709 + if (event->attr.config != event_id) 3709 3710 return 0; 3710 3711 3711 3712 if (regs) { 3712 - if (counter->attr.exclude_user && user_mode(regs)) 3713 + if (event->attr.exclude_user && user_mode(regs)) 3713 3714 return 0; 3714 3715 3715 - if (counter->attr.exclude_kernel && !user_mode(regs)) 3716 + if (event->attr.exclude_kernel && !user_mode(regs)) 3716 3717 return 0; 3717 3718 } 3718 3719 3719 3720 return 1; 3720 3721 } 3721 3722 3722 - static void perf_swcounter_ctx_event(struct perf_counter_context *ctx, 3723 + static void perf_swevent_ctx_event(struct perf_event_context *ctx, 3723 3724 enum perf_type_id type, 3724 - u32 event, u64 nr, int nmi, 3725 + u32 event_id, u64 nr, int nmi, 3725 3726 struct perf_sample_data *data, 3726 3727 struct pt_regs *regs) 3727 3728 { 3728 - struct perf_counter *counter; 3729 + struct perf_event *event; 3729 3730 3730 3731 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list)) 3731 3732 return; 3732 3733 3733 3734 rcu_read_lock(); 3734 - list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { 3735 - if (perf_swcounter_match(counter, type, event, regs)) 3736 - perf_swcounter_add(counter, nr, nmi, data, regs); 3735 + list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 3736 + if (perf_swevent_match(event, type, event_id, regs)) 3737 + perf_swevent_add(event, nr, nmi, data, regs); 3737 3738 } 3738 3739 rcu_read_unlock(); 3739 3740 } 3740 3741 3741 - static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx) 3742 + static int *perf_swevent_recursion_context(struct perf_cpu_context *cpuctx) 3742 3743 { 3743 3744 if (in_nmi()) 3744 3745 return &cpuctx->recursion[3]; ··· 3752 3753 return &cpuctx->recursion[0]; 3753 3754 } 3754 3755 3755 - static void do_perf_swcounter_event(enum perf_type_id type, u32 event, 3756 + static void do_perf_sw_event(enum perf_type_id type, u32 event_id, 3756 3757 u64 nr, int nmi, 3757 3758 struct perf_sample_data *data, 3758 3759 struct pt_regs *regs) 3759 3760 { 3760 3761 struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context); 3761 - int *recursion = perf_swcounter_recursion_context(cpuctx); 3762 - struct perf_counter_context *ctx; 3762 + int *recursion = perf_swevent_recursion_context(cpuctx); 3763 + struct perf_event_context *ctx; 3763 3764 3764 3765 if (*recursion) 3765 3766 goto out; ··· 3767 3768 (*recursion)++; 3768 3769 barrier(); 3769 3770 3770 - perf_swcounter_ctx_event(&cpuctx->ctx, type, event, 3771 + perf_swevent_ctx_event(&cpuctx->ctx, type, event_id, 3771 3772 nr, nmi, data, regs); 3772 3773 rcu_read_lock(); 3773 3774 /* 3774 3775 * doesn't really matter which of the child contexts the 3775 3776 * events ends up in. 
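do_perf_sw_event() below guards against re-entrance with one recursion counter per context level: a software event raised while another is being processed at the same level is silently dropped, while a deeper level (say, an NMI interrupting task context) still gets through. A user-space model of that guard; the level numbering mirrors perf_swevent_recursion_context() and is otherwise an assumption:

#include <stdio.h>

static int recursion[4];	/* [3]=NMI [2]=hardirq [1]=softirq [0]=task */

static int try_enter(int level)
{
	if (recursion[level])
		return 0;	/* same-level re-entry: drop the event */
	recursion[level]++;
	return 1;
}

static void leave(int level)
{
	recursion[level]--;
}

int main(void)
{
	if (try_enter(0)) {
		printf("task-level event accepted\n");
		if (!try_enter(0))	/* must not recurse at level 0 */
			printf("nested task-level event dropped\n");
		if (try_enter(3)) {	/* but an NMI may still log one */
			printf("NMI-level event accepted\n");
			leave(3);
		}
		leave(0);
	}
	return 0;
}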
3776 3777 */ 3777 - ctx = rcu_dereference(current->perf_counter_ctxp); 3778 + ctx = rcu_dereference(current->perf_event_ctxp); 3778 3779 if (ctx) 3779 - perf_swcounter_ctx_event(ctx, type, event, nr, nmi, data, regs); 3780 + perf_swevent_ctx_event(ctx, type, event_id, nr, nmi, data, regs); 3780 3781 rcu_read_unlock(); 3781 3782 3782 3783 barrier(); ··· 3786 3787 put_cpu_var(perf_cpu_context); 3787 3788 } 3788 3789 3789 - void __perf_swcounter_event(u32 event, u64 nr, int nmi, 3790 + void __perf_sw_event(u32 event_id, u64 nr, int nmi, 3790 3791 struct pt_regs *regs, u64 addr) 3791 3792 { 3792 3793 struct perf_sample_data data = { 3793 3794 .addr = addr, 3794 3795 }; 3795 3796 3796 - do_perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, 3797 + do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, 3797 3798 &data, regs); 3798 3799 } 3799 3800 3800 - static void perf_swcounter_read(struct perf_counter *counter) 3801 + static void perf_swevent_read(struct perf_event *event) 3801 3802 { 3802 3803 } 3803 3804 3804 - static int perf_swcounter_enable(struct perf_counter *counter) 3805 + static int perf_swevent_enable(struct perf_event *event) 3805 3806 { 3806 - struct hw_perf_counter *hwc = &counter->hw; 3807 + struct hw_perf_event *hwc = &event->hw; 3807 3808 3808 3809 if (hwc->sample_period) { 3809 3810 hwc->last_period = hwc->sample_period; 3810 - perf_swcounter_set_period(counter); 3811 + perf_swevent_set_period(event); 3811 3812 } 3812 3813 return 0; 3813 3814 } 3814 3815 3815 - static void perf_swcounter_disable(struct perf_counter *counter) 3816 + static void perf_swevent_disable(struct perf_event *event) 3816 3817 { 3817 3818 } 3818 3819 3819 3820 static const struct pmu perf_ops_generic = { 3820 - .enable = perf_swcounter_enable, 3821 - .disable = perf_swcounter_disable, 3822 - .read = perf_swcounter_read, 3823 - .unthrottle = perf_swcounter_unthrottle, 3821 + .enable = perf_swevent_enable, 3822 + .disable = perf_swevent_disable, 3823 + .read = perf_swevent_read, 3824 + .unthrottle = perf_swevent_unthrottle, 3824 3825 }; 3825 3826 3826 3827 /* 3827 - * hrtimer based swcounter callback 3828 + * hrtimer based swevent callback 3828 3829 */ 3829 3830 3830 - static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) 3831 + static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) 3831 3832 { 3832 3833 enum hrtimer_restart ret = HRTIMER_RESTART; 3833 3834 struct perf_sample_data data; 3834 3835 struct pt_regs *regs; 3835 - struct perf_counter *counter; 3836 + struct perf_event *event; 3836 3837 u64 period; 3837 3838 3838 - counter = container_of(hrtimer, struct perf_counter, hw.hrtimer); 3839 - counter->pmu->read(counter); 3839 + event = container_of(hrtimer, struct perf_event, hw.hrtimer); 3840 + event->pmu->read(event); 3840 3841 3841 3842 data.addr = 0; 3842 3843 regs = get_irq_regs(); ··· 3844 3845 * In case we exclude kernel IPs or are somehow not in interrupt 3845 3846 * context, provide the next best thing, the user IP. 
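Both clock events below drive their sampling off an hrtimer, and the re-arm in perf_swevent_hrtimer() clamps the period to max(10000ns, sample_period), so periods under 10us fire no faster than 10us; with exclude_kernel set, the callback falls back to the user IP as the comment above notes. A user-space sketch opening such an event; the raw syscall and uapi header name are assumptions as before:

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int open_cpu_clock(uint64_t period_ns)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.sample_period = period_ns;	/* clamped to >= 10000ns */
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TIME;
	attr.exclude_kernel = 1;	/* falls back to the user IP */

	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}

int main(void)
{
	int fd = open_cpu_clock(1000);	/* asks for 1us, behaves as 10us */

	if (fd >= 0)
		close(fd);
	return fd < 0;
}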
3846 3847 */ 3847 - if ((counter->attr.exclude_kernel || !regs) && 3848 - !counter->attr.exclude_user) 3848 + if ((event->attr.exclude_kernel || !regs) && 3849 + !event->attr.exclude_user) 3849 3850 regs = task_pt_regs(current); 3850 3851 3851 3852 if (regs) { 3852 - if (perf_counter_overflow(counter, 0, &data, regs)) 3853 + if (perf_event_overflow(event, 0, &data, regs)) 3853 3854 ret = HRTIMER_NORESTART; 3854 3855 } 3855 3856 3856 - period = max_t(u64, 10000, counter->hw.sample_period); 3857 + period = max_t(u64, 10000, event->hw.sample_period); 3857 3858 hrtimer_forward_now(hrtimer, ns_to_ktime(period)); 3858 3859 3859 3860 return ret; 3860 3861 } 3861 3862 3862 3863 /* 3863 - * Software counter: cpu wall time clock 3864 + * Software event: cpu wall time clock 3864 3865 */ 3865 3866 3866 - static void cpu_clock_perf_counter_update(struct perf_counter *counter) 3867 + static void cpu_clock_perf_event_update(struct perf_event *event) 3867 3868 { 3868 3869 int cpu = raw_smp_processor_id(); 3869 3870 s64 prev; 3870 3871 u64 now; 3871 3872 3872 3873 now = cpu_clock(cpu); 3873 - prev = atomic64_read(&counter->hw.prev_count); 3874 - atomic64_set(&counter->hw.prev_count, now); 3875 - atomic64_add(now - prev, &counter->count); 3874 + prev = atomic64_read(&event->hw.prev_count); 3875 + atomic64_set(&event->hw.prev_count, now); 3876 + atomic64_add(now - prev, &event->count); 3876 3877 } 3877 3878 3878 - static int cpu_clock_perf_counter_enable(struct perf_counter *counter) 3879 + static int cpu_clock_perf_event_enable(struct perf_event *event) 3879 3880 { 3880 - struct hw_perf_counter *hwc = &counter->hw; 3881 + struct hw_perf_event *hwc = &event->hw; 3881 3882 int cpu = raw_smp_processor_id(); 3882 3883 3883 3884 atomic64_set(&hwc->prev_count, cpu_clock(cpu)); 3884 3885 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 3885 - hwc->hrtimer.function = perf_swcounter_hrtimer; 3886 + hwc->hrtimer.function = perf_swevent_hrtimer; 3886 3887 if (hwc->sample_period) { 3887 3888 u64 period = max_t(u64, 10000, hwc->sample_period); 3888 3889 __hrtimer_start_range_ns(&hwc->hrtimer, ··· 3893 3894 return 0; 3894 3895 } 3895 3896 3896 - static void cpu_clock_perf_counter_disable(struct perf_counter *counter) 3897 + static void cpu_clock_perf_event_disable(struct perf_event *event) 3897 3898 { 3898 - if (counter->hw.sample_period) 3899 - hrtimer_cancel(&counter->hw.hrtimer); 3900 - cpu_clock_perf_counter_update(counter); 3899 + if (event->hw.sample_period) 3900 + hrtimer_cancel(&event->hw.hrtimer); 3901 + cpu_clock_perf_event_update(event); 3901 3902 } 3902 3903 3903 - static void cpu_clock_perf_counter_read(struct perf_counter *counter) 3904 + static void cpu_clock_perf_event_read(struct perf_event *event) 3904 3905 { 3905 - cpu_clock_perf_counter_update(counter); 3906 + cpu_clock_perf_event_update(event); 3906 3907 } 3907 3908 3908 3909 static const struct pmu perf_ops_cpu_clock = { 3909 - .enable = cpu_clock_perf_counter_enable, 3910 - .disable = cpu_clock_perf_counter_disable, 3911 - .read = cpu_clock_perf_counter_read, 3910 + .enable = cpu_clock_perf_event_enable, 3911 + .disable = cpu_clock_perf_event_disable, 3912 + .read = cpu_clock_perf_event_read, 3912 3913 }; 3913 3914 3914 3915 /* 3915 - * Software counter: task time clock 3916 + * Software event: task time clock 3916 3917 */ 3917 3918 3918 - static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now) 3919 + static void task_clock_perf_event_update(struct perf_event *event, u64 now) 3919 3920 { 3920 3921 u64 prev; 
3921 3922 s64 delta; 3922 3923 3923 - prev = atomic64_xchg(&counter->hw.prev_count, now); 3924 + prev = atomic64_xchg(&event->hw.prev_count, now); 3924 3925 delta = now - prev; 3925 - atomic64_add(delta, &counter->count); 3926 + atomic64_add(delta, &event->count); 3926 3927 } 3927 3928 3928 - static int task_clock_perf_counter_enable(struct perf_counter *counter) 3929 + static int task_clock_perf_event_enable(struct perf_event *event) 3929 3930 { 3930 - struct hw_perf_counter *hwc = &counter->hw; 3931 + struct hw_perf_event *hwc = &event->hw; 3931 3932 u64 now; 3932 3933 3933 - now = counter->ctx->time; 3934 + now = event->ctx->time; 3934 3935 3935 3936 atomic64_set(&hwc->prev_count, now); 3936 3937 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 3937 - hwc->hrtimer.function = perf_swcounter_hrtimer; 3938 + hwc->hrtimer.function = perf_swevent_hrtimer; 3938 3939 if (hwc->sample_period) { 3939 3940 u64 period = max_t(u64, 10000, hwc->sample_period); 3940 3941 __hrtimer_start_range_ns(&hwc->hrtimer, ··· 3945 3946 return 0; 3946 3947 } 3947 3948 3948 - static void task_clock_perf_counter_disable(struct perf_counter *counter) 3949 + static void task_clock_perf_event_disable(struct perf_event *event) 3949 3950 { 3950 - if (counter->hw.sample_period) 3951 - hrtimer_cancel(&counter->hw.hrtimer); 3952 - task_clock_perf_counter_update(counter, counter->ctx->time); 3951 + if (event->hw.sample_period) 3952 + hrtimer_cancel(&event->hw.hrtimer); 3953 + task_clock_perf_event_update(event, event->ctx->time); 3953 3954 3954 3955 } 3955 3956 3956 - static void task_clock_perf_counter_read(struct perf_counter *counter) 3957 + static void task_clock_perf_event_read(struct perf_event *event) 3957 3958 { 3958 3959 u64 time; 3959 3960 3960 3961 if (!in_nmi()) { 3961 - update_context_time(counter->ctx); 3962 - time = counter->ctx->time; 3962 + update_context_time(event->ctx); 3963 + time = event->ctx->time; 3963 3964 } else { 3964 3965 u64 now = perf_clock(); 3965 - u64 delta = now - counter->ctx->timestamp; 3966 - time = counter->ctx->time + delta; 3966 + u64 delta = now - event->ctx->timestamp; 3967 + time = event->ctx->time + delta; 3967 3968 } 3968 3969 3969 - task_clock_perf_counter_update(counter, time); 3970 + task_clock_perf_event_update(event, time); 3970 3971 } 3971 3972 3972 3973 static const struct pmu perf_ops_task_clock = { 3973 - .enable = task_clock_perf_counter_enable, 3974 - .disable = task_clock_perf_counter_disable, 3975 - .read = task_clock_perf_counter_read, 3974 + .enable = task_clock_perf_event_enable, 3975 + .disable = task_clock_perf_event_disable, 3976 + .read = task_clock_perf_event_read, 3976 3977 }; 3977 3978 3978 3979 #ifdef CONFIG_EVENT_PROFILE 3979 - void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record, 3980 + void perf_tp_event(int event_id, u64 addr, u64 count, void *record, 3980 3981 int entry_size) 3981 3982 { 3982 3983 struct perf_raw_record raw = { ··· 3994 3995 if (!regs) 3995 3996 regs = task_pt_regs(current); 3996 3997 3997 - do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, 3998 + do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, 3998 3999 &data, regs); 3999 4000 } 4000 - EXPORT_SYMBOL_GPL(perf_tpcounter_event); 4001 + EXPORT_SYMBOL_GPL(perf_tp_event); 4001 4002 4002 4003 extern int ftrace_profile_enable(int); 4003 4004 extern void ftrace_profile_disable(int); 4004 4005 4005 - static void tp_perf_counter_destroy(struct perf_counter *counter) 4006 + static void tp_perf_event_destroy(struct perf_event *event) 
4006 4007 { 4007 - ftrace_profile_disable(counter->attr.config); 4008 + ftrace_profile_disable(event->attr.config); 4008 4009 } 4009 4010 4010 - static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) 4011 + static const struct pmu *tp_perf_event_init(struct perf_event *event) 4011 4012 { 4012 4013 /* 4013 4014 * Raw tracepoint data is a severe data leak, only allow root to 4014 4015 * have these. 4015 4016 */ 4016 - if ((counter->attr.sample_type & PERF_SAMPLE_RAW) && 4017 + if ((event->attr.sample_type & PERF_SAMPLE_RAW) && 4017 4018 perf_paranoid_tracepoint_raw() && 4018 4019 !capable(CAP_SYS_ADMIN)) 4019 4020 return ERR_PTR(-EPERM); 4020 4021 4021 - if (ftrace_profile_enable(counter->attr.config)) 4022 + if (ftrace_profile_enable(event->attr.config)) 4022 4023 return NULL; 4023 4024 4024 - counter->destroy = tp_perf_counter_destroy; 4025 + event->destroy = tp_perf_event_destroy; 4025 4026 4026 4027 return &perf_ops_generic; 4027 4028 } 4028 4029 #else 4029 - static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) 4030 + static const struct pmu *tp_perf_event_init(struct perf_event *event) 4030 4031 { 4031 4032 return NULL; 4032 4033 } 4033 4034 #endif 4034 4035 4035 - atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX]; 4036 + atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; 4036 4037 4037 - static void sw_perf_counter_destroy(struct perf_counter *counter) 4038 + static void sw_perf_event_destroy(struct perf_event *event) 4038 4039 { 4039 - u64 event = counter->attr.config; 4040 + u64 event_id = event->attr.config; 4040 4041 4041 - WARN_ON(counter->parent); 4042 + WARN_ON(event->parent); 4042 4043 4043 - atomic_dec(&perf_swcounter_enabled[event]); 4044 + atomic_dec(&perf_swevent_enabled[event_id]); 4044 4045 } 4045 4046 4046 - static const struct pmu *sw_perf_counter_init(struct perf_counter *counter) 4047 + static const struct pmu *sw_perf_event_init(struct perf_event *event) 4047 4048 { 4048 4049 const struct pmu *pmu = NULL; 4049 - u64 event = counter->attr.config; 4050 + u64 event_id = event->attr.config; 4050 4051 4051 4052 /* 4052 - * Software counters (currently) can't in general distinguish 4053 + * Software events (currently) can't in general distinguish 4053 4054 * between user, kernel and hypervisor events. 4054 4055 * However, context switches and cpu migrations are considered 4055 4056 * to be kernel events, and page faults are never hypervisor 4056 4057 * events. 4057 4058 */ 4058 - switch (event) { 4059 + switch (event_id) { 4059 4060 case PERF_COUNT_SW_CPU_CLOCK: 4060 4061 pmu = &perf_ops_cpu_clock; 4061 4062 4062 4063 break; 4063 4064 case PERF_COUNT_SW_TASK_CLOCK: 4064 4065 /* 4065 - * If the user instantiates this as a per-cpu counter, 4066 - * use the cpu_clock counter instead. 4066 + * If the user instantiates this as a per-cpu event, 4067 + * use the cpu_clock event instead. 
4067 4068 */ 4068 - if (counter->ctx->task) 4069 + if (event->ctx->task) 4069 4070 pmu = &perf_ops_task_clock; 4070 4071 else 4071 4072 pmu = &perf_ops_cpu_clock; ··· 4076 4077 case PERF_COUNT_SW_PAGE_FAULTS_MAJ: 4077 4078 case PERF_COUNT_SW_CONTEXT_SWITCHES: 4078 4079 case PERF_COUNT_SW_CPU_MIGRATIONS: 4079 - if (!counter->parent) { 4080 - atomic_inc(&perf_swcounter_enabled[event]); 4081 - counter->destroy = sw_perf_counter_destroy; 4080 + if (!event->parent) { 4081 + atomic_inc(&perf_swevent_enabled[event_id]); 4082 + event->destroy = sw_perf_event_destroy; 4082 4083 } 4083 4084 pmu = &perf_ops_generic; 4084 4085 break; ··· 4088 4089 } 4089 4090 4090 4091 /* 4091 - * Allocate and initialize a counter structure 4092 + * Allocate and initialize a event structure 4092 4093 */ 4093 - static struct perf_counter * 4094 - perf_counter_alloc(struct perf_counter_attr *attr, 4094 + static struct perf_event * 4095 + perf_event_alloc(struct perf_event_attr *attr, 4095 4096 int cpu, 4096 - struct perf_counter_context *ctx, 4097 - struct perf_counter *group_leader, 4098 - struct perf_counter *parent_counter, 4097 + struct perf_event_context *ctx, 4098 + struct perf_event *group_leader, 4099 + struct perf_event *parent_event, 4099 4100 gfp_t gfpflags) 4100 4101 { 4101 4102 const struct pmu *pmu; 4102 - struct perf_counter *counter; 4103 - struct hw_perf_counter *hwc; 4103 + struct perf_event *event; 4104 + struct hw_perf_event *hwc; 4104 4105 long err; 4105 4106 4106 - counter = kzalloc(sizeof(*counter), gfpflags); 4107 - if (!counter) 4107 + event = kzalloc(sizeof(*event), gfpflags); 4108 + if (!event) 4108 4109 return ERR_PTR(-ENOMEM); 4109 4110 4110 4111 /* 4111 - * Single counters are their own group leaders, with an 4112 + * Single events are their own group leaders, with an 4112 4113 * empty sibling list: 4113 4114 */ 4114 4115 if (!group_leader) 4115 - group_leader = counter; 4116 + group_leader = event; 4116 4117 4117 - mutex_init(&counter->child_mutex); 4118 - INIT_LIST_HEAD(&counter->child_list); 4118 + mutex_init(&event->child_mutex); 4119 + INIT_LIST_HEAD(&event->child_list); 4119 4120 4120 - INIT_LIST_HEAD(&counter->list_entry); 4121 - INIT_LIST_HEAD(&counter->event_entry); 4122 - INIT_LIST_HEAD(&counter->sibling_list); 4123 - init_waitqueue_head(&counter->waitq); 4121 + INIT_LIST_HEAD(&event->group_entry); 4122 + INIT_LIST_HEAD(&event->event_entry); 4123 + INIT_LIST_HEAD(&event->sibling_list); 4124 + init_waitqueue_head(&event->waitq); 4124 4125 4125 - mutex_init(&counter->mmap_mutex); 4126 + mutex_init(&event->mmap_mutex); 4126 4127 4127 - counter->cpu = cpu; 4128 - counter->attr = *attr; 4129 - counter->group_leader = group_leader; 4130 - counter->pmu = NULL; 4131 - counter->ctx = ctx; 4132 - counter->oncpu = -1; 4128 + event->cpu = cpu; 4129 + event->attr = *attr; 4130 + event->group_leader = group_leader; 4131 + event->pmu = NULL; 4132 + event->ctx = ctx; 4133 + event->oncpu = -1; 4133 4134 4134 - counter->parent = parent_counter; 4135 + event->parent = parent_event; 4135 4136 4136 - counter->ns = get_pid_ns(current->nsproxy->pid_ns); 4137 - counter->id = atomic64_inc_return(&perf_counter_id); 4137 + event->ns = get_pid_ns(current->nsproxy->pid_ns); 4138 + event->id = atomic64_inc_return(&perf_event_id); 4138 4139 4139 - counter->state = PERF_COUNTER_STATE_INACTIVE; 4140 + event->state = PERF_EVENT_STATE_INACTIVE; 4140 4141 4141 4142 if (attr->disabled) 4142 - counter->state = PERF_COUNTER_STATE_OFF; 4143 + event->state = PERF_EVENT_STATE_OFF; 4143 4144 4144 4145 pmu = NULL; 4145 4146 
4146 - hwc = &counter->hw; 4147 + hwc = &event->hw; 4147 4148 hwc->sample_period = attr->sample_period; 4148 4149 if (attr->freq && attr->sample_freq) 4149 4150 hwc->sample_period = 1; ··· 4152 4153 atomic64_set(&hwc->period_left, hwc->sample_period); 4153 4154 4154 4155 /* 4155 - * we currently do not support PERF_FORMAT_GROUP on inherited counters 4156 + * we currently do not support PERF_FORMAT_GROUP on inherited events 4156 4157 */ 4157 4158 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) 4158 4159 goto done; ··· 4161 4162 case PERF_TYPE_RAW: 4162 4163 case PERF_TYPE_HARDWARE: 4163 4164 case PERF_TYPE_HW_CACHE: 4164 - pmu = hw_perf_counter_init(counter); 4165 + pmu = hw_perf_event_init(event); 4165 4166 break; 4166 4167 4167 4168 case PERF_TYPE_SOFTWARE: 4168 - pmu = sw_perf_counter_init(counter); 4169 + pmu = sw_perf_event_init(event); 4169 4170 break; 4170 4171 4171 4172 case PERF_TYPE_TRACEPOINT: 4172 - pmu = tp_perf_counter_init(counter); 4173 + pmu = tp_perf_event_init(event); 4173 4174 break; 4174 4175 4175 4176 default: ··· 4183 4184 err = PTR_ERR(pmu); 4184 4185 4185 4186 if (err) { 4186 - if (counter->ns) 4187 - put_pid_ns(counter->ns); 4188 - kfree(counter); 4187 + if (event->ns) 4188 + put_pid_ns(event->ns); 4189 + kfree(event); 4189 4190 return ERR_PTR(err); 4190 4191 } 4191 4192 4192 - counter->pmu = pmu; 4193 + event->pmu = pmu; 4193 4194 4194 - if (!counter->parent) { 4195 - atomic_inc(&nr_counters); 4196 - if (counter->attr.mmap) 4197 - atomic_inc(&nr_mmap_counters); 4198 - if (counter->attr.comm) 4199 - atomic_inc(&nr_comm_counters); 4200 - if (counter->attr.task) 4201 - atomic_inc(&nr_task_counters); 4195 + if (!event->parent) { 4196 + atomic_inc(&nr_events); 4197 + if (event->attr.mmap) 4198 + atomic_inc(&nr_mmap_events); 4199 + if (event->attr.comm) 4200 + atomic_inc(&nr_comm_events); 4201 + if (event->attr.task) 4202 + atomic_inc(&nr_task_events); 4202 4203 } 4203 4204 4204 - return counter; 4205 + return event; 4205 4206 } 4206 4207 4207 - static int perf_copy_attr(struct perf_counter_attr __user *uattr, 4208 - struct perf_counter_attr *attr) 4208 + static int perf_copy_attr(struct perf_event_attr __user *uattr, 4209 + struct perf_event_attr *attr) 4209 4210 { 4210 4211 u32 size; 4211 4212 int ret; ··· 4284 4285 goto out; 4285 4286 } 4286 4287 4287 - int perf_counter_set_output(struct perf_counter *counter, int output_fd) 4288 + int perf_event_set_output(struct perf_event *event, int output_fd) 4288 4289 { 4289 - struct perf_counter *output_counter = NULL; 4290 + struct perf_event *output_event = NULL; 4290 4291 struct file *output_file = NULL; 4291 - struct perf_counter *old_output; 4292 + struct perf_event *old_output; 4292 4293 int fput_needed = 0; 4293 4294 int ret = -EINVAL; 4294 4295 ··· 4302 4303 if (output_file->f_op != &perf_fops) 4303 4304 goto out; 4304 4305 4305 - output_counter = output_file->private_data; 4306 + output_event = output_file->private_data; 4306 4307 4307 4308 /* Don't chain output fds */ 4308 - if (output_counter->output) 4309 + if (output_event->output) 4309 4310 goto out; 4310 4311 4311 4312 /* Don't set an output fd when we already have an output channel */ 4312 - if (counter->data) 4313 + if (event->data) 4313 4314 goto out; 4314 4315 4315 4316 atomic_long_inc(&output_file->f_count); 4316 4317 4317 4318 set: 4318 - mutex_lock(&counter->mmap_mutex); 4319 - old_output = counter->output; 4320 - rcu_assign_pointer(counter->output, output_counter); 4321 - mutex_unlock(&counter->mmap_mutex); 4319 + 
mutex_lock(&event->mmap_mutex); 4320 + old_output = event->output; 4321 + rcu_assign_pointer(event->output, output_event); 4322 + mutex_unlock(&event->mmap_mutex); 4322 4323 4323 4324 if (old_output) { 4324 4325 /* 4325 4326 * we need to make sure no existing perf_output_*() 4326 - * is still referencing this counter. 4327 + * is still referencing this event. 4327 4328 */ 4328 4329 synchronize_rcu(); 4329 4330 fput(old_output->filp); ··· 4336 4337 } 4337 4338 4338 4339 /** 4339 - * sys_perf_counter_open - open a performance counter, associate it to a task/cpu 4340 + * sys_perf_event_open - open a performance event, associate it to a task/cpu 4340 4341 * 4341 - * @attr_uptr: event type attributes for monitoring/sampling 4342 + * @attr_uptr: event_id type attributes for monitoring/sampling 4342 4343 * @pid: target pid 4343 4344 * @cpu: target cpu 4344 - * @group_fd: group leader counter fd 4345 + * @group_fd: group leader event fd 4345 4346 */ 4346 - SYSCALL_DEFINE5(perf_counter_open, 4347 - struct perf_counter_attr __user *, attr_uptr, 4347 + SYSCALL_DEFINE5(perf_event_open, 4348 + struct perf_event_attr __user *, attr_uptr, 4348 4349 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) 4349 4350 { 4350 - struct perf_counter *counter, *group_leader; 4351 - struct perf_counter_attr attr; 4352 - struct perf_counter_context *ctx; 4353 - struct file *counter_file = NULL; 4351 + struct perf_event *event, *group_leader; 4352 + struct perf_event_attr attr; 4353 + struct perf_event_context *ctx; 4354 + struct file *event_file = NULL; 4354 4355 struct file *group_file = NULL; 4355 4356 int fput_needed = 0; 4356 4357 int fput_needed2 = 0; ··· 4370 4371 } 4371 4372 4372 4373 if (attr.freq) { 4373 - if (attr.sample_freq > sysctl_perf_counter_sample_rate) 4374 + if (attr.sample_freq > sysctl_perf_event_sample_rate) 4374 4375 return -EINVAL; 4375 4376 } 4376 4377 ··· 4382 4383 return PTR_ERR(ctx); 4383 4384 4384 4385 /* 4385 - * Look up the group leader (we will attach this counter to it): 4386 + * Look up the group leader (we will attach this event to it): 4386 4387 */ 4387 4388 group_leader = NULL; 4388 4389 if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) { ··· 4413 4414 goto err_put_context; 4414 4415 } 4415 4416 4416 - counter = perf_counter_alloc(&attr, cpu, ctx, group_leader, 4417 + event = perf_event_alloc(&attr, cpu, ctx, group_leader, 4417 4418 NULL, GFP_KERNEL); 4418 - err = PTR_ERR(counter); 4419 - if (IS_ERR(counter)) 4419 + err = PTR_ERR(event); 4420 + if (IS_ERR(event)) 4420 4421 goto err_put_context; 4421 4422 4422 - err = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0); 4423 + err = anon_inode_getfd("[perf_event]", &perf_fops, event, 0); 4423 4424 if (err < 0) 4424 4425 goto err_free_put_context; 4425 4426 4426 - counter_file = fget_light(err, &fput_needed2); 4427 - if (!counter_file) 4427 + event_file = fget_light(err, &fput_needed2); 4428 + if (!event_file) 4428 4429 goto err_free_put_context; 4429 4430 4430 4431 if (flags & PERF_FLAG_FD_OUTPUT) { 4431 - err = perf_counter_set_output(counter, group_fd); 4432 + err = perf_event_set_output(event, group_fd); 4432 4433 if (err) 4433 4434 goto err_fput_free_put_context; 4434 4435 } 4435 4436 4436 - counter->filp = counter_file; 4437 + event->filp = event_file; 4437 4438 WARN_ON_ONCE(ctx->parent_ctx); 4438 4439 mutex_lock(&ctx->mutex); 4439 - perf_install_in_context(ctx, counter, cpu); 4440 + perf_install_in_context(ctx, event, cpu); 4440 4441 ++ctx->generation; 4441 4442 mutex_unlock(&ctx->mutex); 4442 4443 4443 - 
counter->owner = current; 4444 + event->owner = current; 4444 4445 get_task_struct(current); 4445 - mutex_lock(&current->perf_counter_mutex); 4446 - list_add_tail(&counter->owner_entry, &current->perf_counter_list); 4447 - mutex_unlock(&current->perf_counter_mutex); 4446 + mutex_lock(&current->perf_event_mutex); 4447 + list_add_tail(&event->owner_entry, &current->perf_event_list); 4448 + mutex_unlock(&current->perf_event_mutex); 4448 4449 4449 4450 err_fput_free_put_context: 4450 - fput_light(counter_file, fput_needed2); 4451 + fput_light(event_file, fput_needed2); 4451 4452 4452 4453 err_free_put_context: 4453 4454 if (err < 0) 4454 - kfree(counter); 4455 + kfree(event); 4455 4456 4456 4457 err_put_context: 4457 4458 if (err < 0) ··· 4463 4464 } 4464 4465 4465 4466 /* 4466 - * inherit a counter from parent task to child task: 4467 + * inherit a event from parent task to child task: 4467 4468 */ 4468 - static struct perf_counter * 4469 - inherit_counter(struct perf_counter *parent_counter, 4469 + static struct perf_event * 4470 + inherit_event(struct perf_event *parent_event, 4470 4471 struct task_struct *parent, 4471 - struct perf_counter_context *parent_ctx, 4472 + struct perf_event_context *parent_ctx, 4472 4473 struct task_struct *child, 4473 - struct perf_counter *group_leader, 4474 - struct perf_counter_context *child_ctx) 4474 + struct perf_event *group_leader, 4475 + struct perf_event_context *child_ctx) 4475 4476 { 4476 - struct perf_counter *child_counter; 4477 + struct perf_event *child_event; 4477 4478 4478 4479 /* 4479 - * Instead of creating recursive hierarchies of counters, 4480 - * we link inherited counters back to the original parent, 4480 + * Instead of creating recursive hierarchies of events, 4481 + * we link inherited events back to the original parent, 4481 4482 * which has a filp for sure, which we use as the reference 4482 4483 * count: 4483 4484 */ 4484 - if (parent_counter->parent) 4485 - parent_counter = parent_counter->parent; 4485 + if (parent_event->parent) 4486 + parent_event = parent_event->parent; 4486 4487 4487 - child_counter = perf_counter_alloc(&parent_counter->attr, 4488 - parent_counter->cpu, child_ctx, 4489 - group_leader, parent_counter, 4488 + child_event = perf_event_alloc(&parent_event->attr, 4489 + parent_event->cpu, child_ctx, 4490 + group_leader, parent_event, 4490 4491 GFP_KERNEL); 4491 - if (IS_ERR(child_counter)) 4492 - return child_counter; 4492 + if (IS_ERR(child_event)) 4493 + return child_event; 4493 4494 get_ctx(child_ctx); 4494 4495 4495 4496 /* 4496 - * Make the child state follow the state of the parent counter, 4497 + * Make the child state follow the state of the parent event, 4497 4498 * not its attr.disabled bit. We hold the parent's mutex, 4498 - * so we won't race with perf_counter_{en, dis}able_family. 4499 + * so we won't race with perf_event_{en, dis}able_family. 
4499 4500 */ 4500 - if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE) 4501 - child_counter->state = PERF_COUNTER_STATE_INACTIVE; 4501 + if (parent_event->state >= PERF_EVENT_STATE_INACTIVE) 4502 + child_event->state = PERF_EVENT_STATE_INACTIVE; 4502 4503 else 4503 - child_counter->state = PERF_COUNTER_STATE_OFF; 4504 + child_event->state = PERF_EVENT_STATE_OFF; 4504 4505 4505 - if (parent_counter->attr.freq) 4506 - child_counter->hw.sample_period = parent_counter->hw.sample_period; 4506 + if (parent_event->attr.freq) 4507 + child_event->hw.sample_period = parent_event->hw.sample_period; 4507 4508 4508 4509 /* 4509 4510 * Link it up in the child's context: 4510 4511 */ 4511 - add_counter_to_ctx(child_counter, child_ctx); 4512 + add_event_to_ctx(child_event, child_ctx); 4512 4513 4513 4514 /* 4514 4515 * Get a reference to the parent filp - we will fput it 4515 - * when the child counter exits. This is safe to do because 4516 + * when the child event exits. This is safe to do because 4516 4517 * we are in the parent and we know that the filp still 4517 4518 * exists and has a nonzero count: 4518 4519 */ 4519 - atomic_long_inc(&parent_counter->filp->f_count); 4520 + atomic_long_inc(&parent_event->filp->f_count); 4520 4521 4521 4522 /* 4522 - * Link this into the parent counter's child list 4523 + * Link this into the parent event's child list 4523 4524 */ 4524 - WARN_ON_ONCE(parent_counter->ctx->parent_ctx); 4525 - mutex_lock(&parent_counter->child_mutex); 4526 - list_add_tail(&child_counter->child_list, &parent_counter->child_list); 4527 - mutex_unlock(&parent_counter->child_mutex); 4525 + WARN_ON_ONCE(parent_event->ctx->parent_ctx); 4526 + mutex_lock(&parent_event->child_mutex); 4527 + list_add_tail(&child_event->child_list, &parent_event->child_list); 4528 + mutex_unlock(&parent_event->child_mutex); 4528 4529 4529 - return child_counter; 4530 + return child_event; 4530 4531 } 4531 4532 4532 - static int inherit_group(struct perf_counter *parent_counter, 4533 + static int inherit_group(struct perf_event *parent_event, 4533 4534 struct task_struct *parent, 4534 - struct perf_counter_context *parent_ctx, 4535 + struct perf_event_context *parent_ctx, 4535 4536 struct task_struct *child, 4536 - struct perf_counter_context *child_ctx) 4537 + struct perf_event_context *child_ctx) 4537 4538 { 4538 - struct perf_counter *leader; 4539 - struct perf_counter *sub; 4540 - struct perf_counter *child_ctr; 4539 + struct perf_event *leader; 4540 + struct perf_event *sub; 4541 + struct perf_event *child_ctr; 4541 4542 4542 - leader = inherit_counter(parent_counter, parent, parent_ctx, 4543 + leader = inherit_event(parent_event, parent, parent_ctx, 4543 4544 child, NULL, child_ctx); 4544 4545 if (IS_ERR(leader)) 4545 4546 return PTR_ERR(leader); 4546 - list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) { 4547 - child_ctr = inherit_counter(sub, parent, parent_ctx, 4547 + list_for_each_entry(sub, &parent_event->sibling_list, group_entry) { 4548 + child_ctr = inherit_event(sub, parent, parent_ctx, 4548 4549 child, leader, child_ctx); 4549 4550 if (IS_ERR(child_ctr)) 4550 4551 return PTR_ERR(child_ctr); ··· 4552 4553 return 0; 4553 4554 } 4554 4555 4555 - static void sync_child_counter(struct perf_counter *child_counter, 4556 + static void sync_child_event(struct perf_event *child_event, 4556 4557 struct task_struct *child) 4557 4558 { 4558 - struct perf_counter *parent_counter = child_counter->parent; 4559 + struct perf_event *parent_event = child_event->parent; 4559 4560 u64 child_val; 
4560 4561 4561 - if (child_counter->attr.inherit_stat) 4562 - perf_counter_read_event(child_counter, child); 4562 + if (child_event->attr.inherit_stat) 4563 + perf_event_read_event(child_event, child); 4563 4564 4564 - child_val = atomic64_read(&child_counter->count); 4565 + child_val = atomic64_read(&child_event->count); 4565 4566 4566 4567 /* 4567 4568 * Add back the child's count to the parent's count: 4568 4569 */ 4569 - atomic64_add(child_val, &parent_counter->count); 4570 - atomic64_add(child_counter->total_time_enabled, 4571 - &parent_counter->child_total_time_enabled); 4572 - atomic64_add(child_counter->total_time_running, 4573 - &parent_counter->child_total_time_running); 4570 + atomic64_add(child_val, &parent_event->count); 4571 + atomic64_add(child_event->total_time_enabled, 4572 + &parent_event->child_total_time_enabled); 4573 + atomic64_add(child_event->total_time_running, 4574 + &parent_event->child_total_time_running); 4574 4575 4575 4576 /* 4576 - * Remove this counter from the parent's list 4577 + * Remove this event from the parent's list 4577 4578 */ 4578 - WARN_ON_ONCE(parent_counter->ctx->parent_ctx); 4579 - mutex_lock(&parent_counter->child_mutex); 4580 - list_del_init(&child_counter->child_list); 4581 - mutex_unlock(&parent_counter->child_mutex); 4579 + WARN_ON_ONCE(parent_event->ctx->parent_ctx); 4580 + mutex_lock(&parent_event->child_mutex); 4581 + list_del_init(&child_event->child_list); 4582 + mutex_unlock(&parent_event->child_mutex); 4582 4583 4583 4584 /* 4584 - * Release the parent counter, if this was the last 4585 + * Release the parent event, if this was the last 4585 4586 * reference to it. 4586 4587 */ 4587 - fput(parent_counter->filp); 4588 + fput(parent_event->filp); 4588 4589 } 4589 4590 4590 4591 static void 4591 - __perf_counter_exit_task(struct perf_counter *child_counter, 4592 - struct perf_counter_context *child_ctx, 4592 + __perf_event_exit_task(struct perf_event *child_event, 4593 + struct perf_event_context *child_ctx, 4593 4594 struct task_struct *child) 4594 4595 { 4595 - struct perf_counter *parent_counter; 4596 + struct perf_event *parent_event; 4596 4597 4597 - update_counter_times(child_counter); 4598 - perf_counter_remove_from_context(child_counter); 4598 + update_event_times(child_event); 4599 + perf_event_remove_from_context(child_event); 4599 4600 4600 - parent_counter = child_counter->parent; 4601 + parent_event = child_event->parent; 4601 4602 /* 4602 - * It can happen that parent exits first, and has counters 4603 + * It can happen that parent exits first, and has events 4603 4604 * that are still around due to the child reference. These 4604 - * counters need to be zapped - but otherwise linger. 4605 + * events need to be zapped - but otherwise linger. 4605 4606 */ 4606 - if (parent_counter) { 4607 - sync_child_counter(child_counter, child); 4608 - free_counter(child_counter); 4607 + if (parent_event) { 4608 + sync_child_event(child_event, child); 4609 + free_event(child_event); 4609 4610 } 4610 4611 } 4611 4612 4612 4613 /* 4613 - * When a child task exits, feed back counter values to parent counters. 4614 + * When a child task exits, feed back event values to parent events. 
4614 4615 */ 4615 - void perf_counter_exit_task(struct task_struct *child) 4616 + void perf_event_exit_task(struct task_struct *child) 4616 4617 { 4617 - struct perf_counter *child_counter, *tmp; 4618 - struct perf_counter_context *child_ctx; 4618 + struct perf_event *child_event, *tmp; 4619 + struct perf_event_context *child_ctx; 4619 4620 unsigned long flags; 4620 4621 4621 - if (likely(!child->perf_counter_ctxp)) { 4622 - perf_counter_task(child, NULL, 0); 4622 + if (likely(!child->perf_event_ctxp)) { 4623 + perf_event_task(child, NULL, 0); 4623 4624 return; 4624 4625 } 4625 4626 ··· 4630 4631 * scheduled, so we are now safe from rescheduling changing 4631 4632 * our context. 4632 4633 */ 4633 - child_ctx = child->perf_counter_ctxp; 4634 - __perf_counter_task_sched_out(child_ctx); 4634 + child_ctx = child->perf_event_ctxp; 4635 + __perf_event_task_sched_out(child_ctx); 4635 4636 4636 4637 /* 4637 4638 * Take the context lock here so that if find_get_context is 4638 - * reading child->perf_counter_ctxp, we wait until it has 4639 + * reading child->perf_event_ctxp, we wait until it has 4639 4640 * incremented the context's refcount before we do put_ctx below. 4640 4641 */ 4641 4642 spin_lock(&child_ctx->lock); 4642 - child->perf_counter_ctxp = NULL; 4643 + child->perf_event_ctxp = NULL; 4643 4644 /* 4644 4645 * If this context is a clone; unclone it so it can't get 4645 4646 * swapped to another process while we're removing all 4646 - * the counters from it. 4647 + * the events from it. 4647 4648 */ 4648 4649 unclone_ctx(child_ctx); 4649 4650 spin_unlock_irqrestore(&child_ctx->lock, flags); 4650 4651 4651 4652 /* 4652 - * Report the task dead after unscheduling the counters so that we 4653 - * won't get any samples after PERF_EVENT_EXIT. We can however still 4654 - * get a few PERF_EVENT_READ events. 4653 + * Report the task dead after unscheduling the events so that we 4654 + * won't get any samples after PERF_RECORD_EXIT. We can however still 4655 + * get a few PERF_RECORD_READ events. 4655 4656 */ 4656 - perf_counter_task(child, child_ctx, 0); 4657 + perf_event_task(child, child_ctx, 0); 4657 4658 4658 4659 /* 4659 4660 * We can recurse on the same lock type through: 4660 4661 * 4661 - * __perf_counter_exit_task() 4662 - * sync_child_counter() 4663 - * fput(parent_counter->filp) 4662 + * __perf_event_exit_task() 4663 + * sync_child_event() 4664 + * fput(parent_event->filp) 4664 4665 * perf_release() 4665 4666 * mutex_lock(&ctx->mutex) 4666 4667 * ··· 4669 4670 mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING); 4670 4671 4671 4672 again: 4672 - list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list, 4673 - list_entry) 4674 - __perf_counter_exit_task(child_counter, child_ctx, child); 4673 + list_for_each_entry_safe(child_event, tmp, &child_ctx->group_list, 4674 + group_entry) 4675 + __perf_event_exit_task(child_event, child_ctx, child); 4675 4676 4676 4677 /* 4677 - * If the last counter was a group counter, it will have appended all 4678 + * If the last event was a group event, it will have appended all 4678 4679 * its siblings to the list, but we obtained 'tmp' before that which 4679 4680 * will still point to the list head terminating the iteration. 
4680 4681 */ 4681 - if (!list_empty(&child_ctx->counter_list)) 4682 + if (!list_empty(&child_ctx->group_list)) 4682 4683 goto again; 4683 4684 4684 4685 mutex_unlock(&child_ctx->mutex); ··· 4690 4691 * free an unexposed, unused context as created by inheritance by 4691 4692 * init_task below, used by fork() in case of fail. 4692 4693 */ 4693 - void perf_counter_free_task(struct task_struct *task) 4694 + void perf_event_free_task(struct task_struct *task) 4694 4695 { 4695 - struct perf_counter_context *ctx = task->perf_counter_ctxp; 4696 - struct perf_counter *counter, *tmp; 4696 + struct perf_event_context *ctx = task->perf_event_ctxp; 4697 + struct perf_event *event, *tmp; 4697 4698 4698 4699 if (!ctx) 4699 4700 return; 4700 4701 4701 4702 mutex_lock(&ctx->mutex); 4702 4703 again: 4703 - list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) { 4704 - struct perf_counter *parent = counter->parent; 4704 + list_for_each_entry_safe(event, tmp, &ctx->group_list, group_entry) { 4705 + struct perf_event *parent = event->parent; 4705 4706 4706 4707 if (WARN_ON_ONCE(!parent)) 4707 4708 continue; 4708 4709 4709 4710 mutex_lock(&parent->child_mutex); 4710 - list_del_init(&counter->child_list); 4711 + list_del_init(&event->child_list); 4711 4712 mutex_unlock(&parent->child_mutex); 4712 4713 4713 4714 fput(parent->filp); 4714 4715 4715 - list_del_counter(counter, ctx); 4716 - free_counter(counter); 4716 + list_del_event(event, ctx); 4717 + free_event(event); 4717 4718 } 4718 4719 4719 - if (!list_empty(&ctx->counter_list)) 4720 + if (!list_empty(&ctx->group_list)) 4720 4721 goto again; 4721 4722 4722 4723 mutex_unlock(&ctx->mutex); ··· 4725 4726 } 4726 4727 4727 4728 /* 4728 - * Initialize the perf_counter context in task_struct 4729 + * Initialize the perf_event context in task_struct 4729 4730 */ 4730 - int perf_counter_init_task(struct task_struct *child) 4731 + int perf_event_init_task(struct task_struct *child) 4731 4732 { 4732 - struct perf_counter_context *child_ctx, *parent_ctx; 4733 - struct perf_counter_context *cloned_ctx; 4734 - struct perf_counter *counter; 4733 + struct perf_event_context *child_ctx, *parent_ctx; 4734 + struct perf_event_context *cloned_ctx; 4735 + struct perf_event *event; 4735 4736 struct task_struct *parent = current; 4736 4737 int inherited_all = 1; 4737 4738 int ret = 0; 4738 4739 4739 - child->perf_counter_ctxp = NULL; 4740 + child->perf_event_ctxp = NULL; 4740 4741 4741 - mutex_init(&child->perf_counter_mutex); 4742 - INIT_LIST_HEAD(&child->perf_counter_list); 4742 + mutex_init(&child->perf_event_mutex); 4743 + INIT_LIST_HEAD(&child->perf_event_list); 4743 4744 4744 - if (likely(!parent->perf_counter_ctxp)) 4745 + if (likely(!parent->perf_event_ctxp)) 4745 4746 return 0; 4746 4747 4747 4748 /* 4748 4749 * This is executed from the parent task context, so inherit 4749 - * counters that have been marked for cloning. 4750 + * events that have been marked for cloning. 4750 4751 * First allocate and initialize a context for the child. 
4751 4752 */ 4752 4753 4753 - child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL); 4754 + child_ctx = kmalloc(sizeof(struct perf_event_context), GFP_KERNEL); 4754 4755 if (!child_ctx) 4755 4756 return -ENOMEM; 4756 4757 4757 - __perf_counter_init_context(child_ctx, child); 4758 - child->perf_counter_ctxp = child_ctx; 4758 + __perf_event_init_context(child_ctx, child); 4759 + child->perf_event_ctxp = child_ctx; 4759 4760 get_task_struct(child); 4760 4761 4761 4762 /* ··· 4781 4782 * We dont have to disable NMIs - we are only looking at 4782 4783 * the list, not manipulating it: 4783 4784 */ 4784 - list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) { 4785 - if (counter != counter->group_leader) 4785 + list_for_each_entry_rcu(event, &parent_ctx->event_list, event_entry) { 4786 + if (event != event->group_leader) 4786 4787 continue; 4787 4788 4788 - if (!counter->attr.inherit) { 4789 + if (!event->attr.inherit) { 4789 4790 inherited_all = 0; 4790 4791 continue; 4791 4792 } 4792 4793 4793 - ret = inherit_group(counter, parent, parent_ctx, 4794 + ret = inherit_group(event, parent, parent_ctx, 4794 4795 child, child_ctx); 4795 4796 if (ret) { 4796 4797 inherited_all = 0; ··· 4804 4805 * context, or of whatever the parent is a clone of. 4805 4806 * Note that if the parent is a clone, it could get 4806 4807 * uncloned at any point, but that doesn't matter 4807 - * because the list of counters and the generation 4808 + * because the list of events and the generation 4808 4809 * count can't have changed since we took the mutex. 4809 4810 */ 4810 4811 cloned_ctx = rcu_dereference(parent_ctx->parent_ctx); ··· 4825 4826 return ret; 4826 4827 } 4827 4828 4828 - static void __cpuinit perf_counter_init_cpu(int cpu) 4829 + static void __cpuinit perf_event_init_cpu(int cpu) 4829 4830 { 4830 4831 struct perf_cpu_context *cpuctx; 4831 4832 4832 4833 cpuctx = &per_cpu(perf_cpu_context, cpu); 4833 - __perf_counter_init_context(&cpuctx->ctx, NULL); 4834 + __perf_event_init_context(&cpuctx->ctx, NULL); 4834 4835 4835 4836 spin_lock(&perf_resource_lock); 4836 - cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu; 4837 + cpuctx->max_pertask = perf_max_events - perf_reserved_percpu; 4837 4838 spin_unlock(&perf_resource_lock); 4838 4839 4839 - hw_perf_counter_setup(cpu); 4840 + hw_perf_event_setup(cpu); 4840 4841 } 4841 4842 4842 4843 #ifdef CONFIG_HOTPLUG_CPU 4843 - static void __perf_counter_exit_cpu(void *info) 4844 + static void __perf_event_exit_cpu(void *info) 4844 4845 { 4845 4846 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); 4846 - struct perf_counter_context *ctx = &cpuctx->ctx; 4847 - struct perf_counter *counter, *tmp; 4847 + struct perf_event_context *ctx = &cpuctx->ctx; 4848 + struct perf_event *event, *tmp; 4848 4849 4849 - list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) 4850 - __perf_counter_remove_from_context(counter); 4850 + list_for_each_entry_safe(event, tmp, &ctx->group_list, group_entry) 4851 + __perf_event_remove_from_context(event); 4851 4852 } 4852 - static void perf_counter_exit_cpu(int cpu) 4853 + static void perf_event_exit_cpu(int cpu) 4853 4854 { 4854 4855 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu); 4855 - struct perf_counter_context *ctx = &cpuctx->ctx; 4856 + struct perf_event_context *ctx = &cpuctx->ctx; 4856 4857 4857 4858 mutex_lock(&ctx->mutex); 4858 - smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1); 4859 + smp_call_function_single(cpu, 
__perf_event_exit_cpu, NULL, 1); 4859 4860 mutex_unlock(&ctx->mutex); 4860 4861 } 4861 4862 #else 4862 - static inline void perf_counter_exit_cpu(int cpu) { } 4863 + static inline void perf_event_exit_cpu(int cpu) { } 4863 4864 #endif 4864 4865 4865 4866 static int __cpuinit ··· 4871 4872 4872 4873 case CPU_UP_PREPARE: 4873 4874 case CPU_UP_PREPARE_FROZEN: 4874 - perf_counter_init_cpu(cpu); 4875 + perf_event_init_cpu(cpu); 4875 4876 break; 4876 4877 4877 4878 case CPU_ONLINE: 4878 4879 case CPU_ONLINE_FROZEN: 4879 - hw_perf_counter_setup_online(cpu); 4880 + hw_perf_event_setup_online(cpu); 4880 4881 break; 4881 4882 4882 4883 case CPU_DOWN_PREPARE: 4883 4884 case CPU_DOWN_PREPARE_FROZEN: 4884 - perf_counter_exit_cpu(cpu); 4885 + perf_event_exit_cpu(cpu); 4885 4886 break; 4886 4887 4887 4888 default: ··· 4899 4900 .priority = 20, 4900 4901 }; 4901 4902 4902 - void __init perf_counter_init(void) 4903 + void __init perf_event_init(void) 4903 4904 { 4904 4905 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE, 4905 4906 (void *)(long)smp_processor_id()); ··· 4925 4926 err = strict_strtoul(buf, 10, &val); 4926 4927 if (err) 4927 4928 return err; 4928 - if (val > perf_max_counters) 4929 + if (val > perf_max_events) 4929 4930 return -EINVAL; 4930 4931 4931 4932 spin_lock(&perf_resource_lock); ··· 4933 4934 for_each_online_cpu(cpu) { 4934 4935 cpuctx = &per_cpu(perf_cpu_context, cpu); 4935 4936 spin_lock_irq(&cpuctx->ctx.lock); 4936 - mpt = min(perf_max_counters - cpuctx->ctx.nr_counters, 4937 - perf_max_counters - perf_reserved_percpu); 4937 + mpt = min(perf_max_events - cpuctx->ctx.nr_events, 4938 + perf_max_events - perf_reserved_percpu); 4938 4939 cpuctx->max_pertask = mpt; 4939 4940 spin_unlock_irq(&cpuctx->ctx.lock); 4940 4941 } ··· 4989 4990 4990 4991 static struct attribute_group perfclass_attr_group = { 4991 4992 .attrs = perfclass_attrs, 4992 - .name = "perf_counters", 4993 + .name = "perf_events", 4993 4994 }; 4994 4995 4995 - static int __init perf_counter_sysfs_init(void) 4996 + static int __init perf_event_sysfs_init(void) 4996 4997 { 4997 4998 return sysfs_create_group(&cpu_sysdev_class.kset.kobj, 4998 4999 &perfclass_attr_group); 4999 5000 } 5000 - device_initcall(perf_counter_sysfs_init); 5001 + device_initcall(perf_event_sysfs_init);
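
The user-visible result of the core hunks above is that the syscall is now sys_perf_event_open(), taking a struct perf_event_attr plus a target pid, cpu, group fd and flags, exactly as the SYSCALL_DEFINE5 declares. A minimal userspace sketch of the renamed ABI (not part of this commit; it assumes installed headers that already provide __NR_perf_event_open and the renamed <linux/perf_event.h>), counting task clock across a region and reading back the single u64 that read() returns when no read_format bits are set:

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <string.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            struct perf_event_attr attr;
            uint64_t count = 0;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_SOFTWARE;         /* routed to sw_perf_event_init() */
            attr.config = PERF_COUNT_SW_TASK_CLOCK; /* perf_ops_task_clock above */
            attr.disabled = 1;                      /* starts in PERF_EVENT_STATE_OFF */

            /* pid 0, cpu -1: this task on any CPU; group_fd -1: own group leader */
            fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
            if (fd < 0)
                    return 1;

            ioctl(fd, PERF_EVENT_IOC_ENABLE);
            /* ... workload being measured ... */
            ioctl(fd, PERF_EVENT_IOC_DISABLE);

            if (read(fd, &count, sizeof(count)) == (ssize_t)sizeof(count))
                    printf("task clock: %llu ns\n", (unsigned long long)count);
            close(fd);
            return 0;
    }
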
+7 -7
kernel/sched.c
··· 39 39 #include <linux/completion.h> 40 40 #include <linux/kernel_stat.h> 41 41 #include <linux/debug_locks.h> 42 - #include <linux/perf_counter.h> 42 + #include <linux/perf_event.h> 43 43 #include <linux/security.h> 44 44 #include <linux/notifier.h> 45 45 #include <linux/profile.h> ··· 2053 2053 if (task_hot(p, old_rq->clock, NULL)) 2054 2054 schedstat_inc(p, se.nr_forced2_migrations); 2055 2055 #endif 2056 - perf_swcounter_event(PERF_COUNT_SW_CPU_MIGRATIONS, 2056 + perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 2057 2057 1, 1, NULL, 0); 2058 2058 } 2059 2059 p->se.vruntime -= old_cfsrq->min_vruntime - ··· 2718 2718 */ 2719 2719 prev_state = prev->state; 2720 2720 finish_arch_switch(prev); 2721 - perf_counter_task_sched_in(current, cpu_of(rq)); 2721 + perf_event_task_sched_in(current, cpu_of(rq)); 2722 2722 finish_lock_switch(rq, prev); 2723 2723 2724 2724 fire_sched_in_preempt_notifiers(current); ··· 5193 5193 curr->sched_class->task_tick(rq, curr, 0); 5194 5194 spin_unlock(&rq->lock); 5195 5195 5196 - perf_counter_task_tick(curr, cpu); 5196 + perf_event_task_tick(curr, cpu); 5197 5197 5198 5198 #ifdef CONFIG_SMP 5199 5199 rq->idle_at_tick = idle_cpu(cpu); ··· 5409 5409 5410 5410 if (likely(prev != next)) { 5411 5411 sched_info_switch(prev, next); 5412 - perf_counter_task_sched_out(prev, next, cpu); 5412 + perf_event_task_sched_out(prev, next, cpu); 5413 5413 5414 5414 rq->nr_switches++; 5415 5415 rq->curr = next; ··· 7671 7671 /* 7672 7672 * Register at high priority so that task migration (migrate_all_tasks) 7673 7673 * happens before everything else. This has to be lower priority than 7674 - * the notifier in the perf_counter subsystem, though. 7674 + * the notifier in the perf_event subsystem, though. 7675 7675 */ 7676 7676 static struct notifier_block __cpuinitdata migration_notifier = { 7677 7677 .notifier_call = migration_call, ··· 9528 9528 alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); 9529 9529 #endif /* SMP */ 9530 9530 9531 - perf_counter_init(); 9531 + perf_event_init(); 9532 9532 9533 9533 scheduler_running = 1; 9534 9534 }
+5 -5
kernel/sys.c
··· 14 14 #include <linux/prctl.h> 15 15 #include <linux/highuid.h> 16 16 #include <linux/fs.h> 17 - #include <linux/perf_counter.h> 17 + #include <linux/perf_event.h> 18 18 #include <linux/resource.h> 19 19 #include <linux/kernel.h> 20 20 #include <linux/kexec.h> ··· 1511 1511 case PR_SET_TSC: 1512 1512 error = SET_TSC_CTL(arg2); 1513 1513 break; 1514 - case PR_TASK_PERF_COUNTERS_DISABLE: 1515 - error = perf_counter_task_disable(); 1514 + case PR_TASK_PERF_EVENTS_DISABLE: 1515 + error = perf_event_task_disable(); 1516 1516 break; 1517 - case PR_TASK_PERF_COUNTERS_ENABLE: 1518 - error = perf_counter_task_enable(); 1517 + case PR_TASK_PERF_EVENTS_ENABLE: 1518 + error = perf_event_task_enable(); 1519 1519 break; 1520 1520 case PR_GET_TIMERSLACK: 1521 1521 error = current->timer_slack_ns;
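
The prctl pair renamed here keeps its semantics: it asks the kernel to disable or re-enable counting for the calling task's events via perf_event_task_{disable,enable}(), as the case labels above show. A sketch of bracketing a region that should not be counted; the fallback defines mirror the pre-rename numeric values, which this commit does not change, and are only needed where <sys/prctl.h> pulls in a pre-rename <linux/prctl.h>:

    #include <sys/prctl.h>

    #ifndef PR_TASK_PERF_EVENTS_DISABLE
    #define PR_TASK_PERF_EVENTS_DISABLE 31  /* was PR_TASK_PERF_COUNTERS_DISABLE */
    #define PR_TASK_PERF_EVENTS_ENABLE  32  /* was PR_TASK_PERF_COUNTERS_ENABLE */
    #endif

    void run_uncounted(void (*fn)(void))
    {
            prctl(PR_TASK_PERF_EVENTS_DISABLE, 0, 0, 0, 0);
            fn();           /* not measured by this task's own events */
            prctl(PR_TASK_PERF_EVENTS_ENABLE, 0, 0, 0, 0);
    }
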
+1 -1
kernel/sys_ni.c
··· 177 177 cond_syscall(sys_eventfd2); 178 178 179 179 /* performance counters: */ 180 - cond_syscall(sys_perf_counter_open); 180 + cond_syscall(sys_perf_event_open);
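
Because the syscall stays behind cond_syscall(), a kernel built without CONFIG_PERF_EVENTS still links and simply fails the call with -ENOSYS, which is what the "No CONFIG_PERF_EVENTS=y kernel support configured?" message in builtin-record.c further down keys off. That makes a runtime probe cheap (a sketch, with the same header caveats as the example above):

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>
    #include <errno.h>

    /* 1 if the running kernel implements perf_event_open(), 0 otherwise. */
    static int have_perf_events(void)
    {
            struct perf_event_attr attr;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_SOFTWARE;
            attr.config = PERF_COUNT_SW_CPU_CLOCK;

            fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
            if (fd >= 0) {
                    close(fd);
                    return 1;
            }
            /* EPERM/EACCES and friends still mean the syscall exists */
            return errno != ENOSYS;
    }
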
+11 -11
kernel/sysctl.c
··· 50 50 #include <linux/reboot.h> 51 51 #include <linux/ftrace.h> 52 52 #include <linux/slow-work.h> 53 - #include <linux/perf_counter.h> 53 + #include <linux/perf_event.h> 54 54 55 55 #include <asm/uaccess.h> 56 56 #include <asm/processor.h> ··· 964 964 .child = slow_work_sysctls, 965 965 }, 966 966 #endif 967 - #ifdef CONFIG_PERF_COUNTERS 967 + #ifdef CONFIG_PERF_EVENTS 968 968 { 969 969 .ctl_name = CTL_UNNUMBERED, 970 - .procname = "perf_counter_paranoid", 971 - .data = &sysctl_perf_counter_paranoid, 972 - .maxlen = sizeof(sysctl_perf_counter_paranoid), 970 + .procname = "perf_event_paranoid", 971 + .data = &sysctl_perf_event_paranoid, 972 + .maxlen = sizeof(sysctl_perf_event_paranoid), 973 973 .mode = 0644, 974 974 .proc_handler = &proc_dointvec, 975 975 }, 976 976 { 977 977 .ctl_name = CTL_UNNUMBERED, 978 - .procname = "perf_counter_mlock_kb", 979 - .data = &sysctl_perf_counter_mlock, 980 - .maxlen = sizeof(sysctl_perf_counter_mlock), 978 + .procname = "perf_event_mlock_kb", 979 + .data = &sysctl_perf_event_mlock, 980 + .maxlen = sizeof(sysctl_perf_event_mlock), 981 981 .mode = 0644, 982 982 .proc_handler = &proc_dointvec, 983 983 }, 984 984 { 985 985 .ctl_name = CTL_UNNUMBERED, 986 - .procname = "perf_counter_max_sample_rate", 987 - .data = &sysctl_perf_counter_sample_rate, 988 - .maxlen = sizeof(sysctl_perf_counter_sample_rate), 986 + .procname = "perf_event_max_sample_rate", 987 + .data = &sysctl_perf_event_sample_rate, 988 + .maxlen = sizeof(sysctl_perf_event_sample_rate), 989 989 .mode = 0644, 990 990 .proc_handler = &proc_dointvec, 991 991 },
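
Only the names change in this hunk; the knobs now live at /proc/sys/kernel/perf_event_paranoid, perf_event_mlock_kb and perf_event_max_sample_rate. The paranoia level is the one tools usually consult, since the perf_paranoid_*() helpers gate what an unprivileged caller may open (tp_perf_event_init() in the core hunk above, for instance, refuses raw tracepoint samples to non-root depending on it). A sketch of reading it under the new path:

    #include <stdio.h>

    /* Reads /proc/sys/kernel/perf_event_paranoid (post-rename path).
     * Returns 0 and fills *level on success, -1 on failure (for example
     * on a pre-rename kernel that still exposes perf_counter_paranoid). */
    static int read_perf_paranoia(int *level)
    {
            FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
            int ret = -1;

            if (!f)
                    return -1;
            if (fscanf(f, "%d", level) == 1)
                    ret = 0;
            fclose(f);
            return ret;
    }
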
+2 -2
kernel/timer.c
··· 37 37 #include <linux/delay.h> 38 38 #include <linux/tick.h> 39 39 #include <linux/kallsyms.h> 40 - #include <linux/perf_counter.h> 40 + #include <linux/perf_event.h> 41 41 #include <linux/sched.h> 42 42 43 43 #include <asm/uaccess.h> ··· 1187 1187 { 1188 1188 struct tvec_base *base = __get_cpu_var(tvec_bases); 1189 1189 1190 - perf_counter_do_pending(); 1190 + perf_event_do_pending(); 1191 1191 1192 1192 hrtimer_run_pending(); 1193 1193
+3 -3
kernel/trace/trace_syscalls.c
··· 2 2 #include <trace/events/syscalls.h> 3 3 #include <linux/kernel.h> 4 4 #include <linux/ftrace.h> 5 - #include <linux/perf_counter.h> 5 + #include <linux/perf_event.h> 6 6 #include <asm/syscall.h> 7 7 8 8 #include "trace_output.h" ··· 433 433 rec->nr = syscall_nr; 434 434 syscall_get_arguments(current, regs, 0, sys_data->nb_args, 435 435 (unsigned long *)&rec->args); 436 - perf_tpcounter_event(sys_data->enter_id, 0, 1, rec, size); 436 + perf_tp_event(sys_data->enter_id, 0, 1, rec, size); 437 437 438 438 end: 439 439 local_irq_restore(flags); ··· 532 532 rec->nr = syscall_nr; 533 533 rec->ret = syscall_get_return_value(current, regs); 534 534 535 - perf_tpcounter_event(sys_data->exit_id, 0, 1, rec, size); 535 + perf_tp_event(sys_data->exit_id, 0, 1, rec, size); 536 536 537 537 end: 538 538 local_irq_restore(flags);
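
perf_tp_event(), formerly perf_tpcounter_event(), is how these syscall tracepoints inject their records into the common software-event path. The matching userspace side opens a PERF_TYPE_TRACEPOINT event whose attr.config is the ftrace event id, the same value tp_perf_event_init() hands to ftrace_profile_enable() in the core hunk above. A sketch; the debugfs id-file location is an assumption about this era's tracing layout:

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>
    #include <stdio.h>

    /* e.g. open_tracepoint("syscalls/sys_enter_read") */
    static int open_tracepoint(const char *name)
    {
            struct perf_event_attr attr;
            char path[256];
            FILE *f;
            int id;

            snprintf(path, sizeof(path),
                     "/sys/kernel/debug/tracing/events/%s/id", name);
            f = fopen(path, "r");
            if (!f)
                    return -1;
            if (fscanf(f, "%d", &id) != 1) {
                    fclose(f);
                    return -1;
            }
            fclose(f);

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_TRACEPOINT;
            attr.config = id;       /* ftrace event id from debugfs */

            return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    }
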
+3 -3
mm/mmap.c
··· 28 28 #include <linux/mempolicy.h> 29 29 #include <linux/rmap.h> 30 30 #include <linux/mmu_notifier.h> 31 - #include <linux/perf_counter.h> 31 + #include <linux/perf_event.h> 32 32 33 33 #include <asm/uaccess.h> 34 34 #include <asm/cacheflush.h> ··· 1220 1220 if (correct_wcount) 1221 1221 atomic_inc(&inode->i_writecount); 1222 1222 out: 1223 - perf_counter_mmap(vma); 1223 + perf_event_mmap(vma); 1224 1224 1225 1225 mm->total_vm += len >> PAGE_SHIFT; 1226 1226 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT); ··· 2308 2308 2309 2309 mm->total_vm += len >> PAGE_SHIFT; 2310 2310 2311 - perf_counter_mmap(vma); 2311 + perf_event_mmap(vma); 2312 2312 2313 2313 return 0; 2314 2314 }
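
perf_event_mmap(vma) is the producer of the PERF_RECORD_MMAP records that process_mmap_event() in builtin-annotate.c and builtin-report.c below consumes to map sampled addresses back to files. The record body those tools read through event->mmap.* looks roughly like this (a layout sketch; treat field order and padding as an assumption, the authoritative definition lives in include/linux/perf_event.h):

    #include <linux/perf_event.h>
    #include <linux/types.h>

    struct mmap_record {
            struct perf_event_header header;  /* .type = PERF_RECORD_MMAP */
            __u32 pid, tid;
            __u64 start;                      /* vma start address */
            __u64 len;                        /* vma length */
            __u64 pgoff;                      /* file page offset */
            char  filename[];                 /* NUL-terminated path */
    };
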
+2 -2
mm/mprotect.c
··· 23 23 #include <linux/swapops.h> 24 24 #include <linux/mmu_notifier.h> 25 25 #include <linux/migrate.h> 26 - #include <linux/perf_counter.h> 26 + #include <linux/perf_event.h> 27 27 #include <asm/uaccess.h> 28 28 #include <asm/pgtable.h> 29 29 #include <asm/cacheflush.h> ··· 300 300 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags); 301 301 if (error) 302 302 goto out; 303 - perf_counter_mmap(vma); 303 + perf_event_mmap(vma); 304 304 nstart = tmp; 305 305 306 306 if (nstart < prev->vm_end)
+1 -1
tools/perf/Makefile
··· 318 318 319 319 LIB_FILE=libperf.a 320 320 321 - LIB_H += ../../include/linux/perf_counter.h 321 + LIB_H += ../../include/linux/perf_event.h 322 322 LIB_H += ../../include/linux/rbtree.h 323 323 LIB_H += ../../include/linux/list.h 324 324 LIB_H += util/include/linux/list.h
+14 -14
tools/perf/builtin-annotate.c
··· 505 505 return -1; 506 506 } 507 507 508 - if (event->header.misc & PERF_EVENT_MISC_KERNEL) { 508 + if (event->header.misc & PERF_RECORD_MISC_KERNEL) { 509 509 show = SHOW_KERNEL; 510 510 level = 'k'; 511 511 ··· 513 513 514 514 dump_printf(" ...... dso: %s\n", dso->name); 515 515 516 - } else if (event->header.misc & PERF_EVENT_MISC_USER) { 516 + } else if (event->header.misc & PERF_RECORD_MISC_USER) { 517 517 518 518 show = SHOW_USER; 519 519 level = '.'; ··· 565 565 566 566 thread = threads__findnew(event->mmap.pid, &threads, &last_match); 567 567 568 - dump_printf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n", 568 + dump_printf("%p [%p]: PERF_RECORD_MMAP %d: [%p(%p) @ %p]: %s\n", 569 569 (void *)(offset + head), 570 570 (void *)(long)(event->header.size), 571 571 event->mmap.pid, ··· 575 575 event->mmap.filename); 576 576 577 577 if (thread == NULL || map == NULL) { 578 - dump_printf("problem processing PERF_EVENT_MMAP, skipping event.\n"); 578 + dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n"); 579 579 return 0; 580 580 } 581 581 ··· 591 591 struct thread *thread; 592 592 593 593 thread = threads__findnew(event->comm.pid, &threads, &last_match); 594 - dump_printf("%p [%p]: PERF_EVENT_COMM: %s:%d\n", 594 + dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n", 595 595 (void *)(offset + head), 596 596 (void *)(long)(event->header.size), 597 597 event->comm.comm, event->comm.pid); 598 598 599 599 if (thread == NULL || 600 600 thread__set_comm(thread, event->comm.comm)) { 601 - dump_printf("problem processing PERF_EVENT_COMM, skipping event.\n"); 601 + dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); 602 602 return -1; 603 603 } 604 604 total_comm++; ··· 614 614 615 615 thread = threads__findnew(event->fork.pid, &threads, &last_match); 616 616 parent = threads__findnew(event->fork.ppid, &threads, &last_match); 617 - dump_printf("%p [%p]: PERF_EVENT_FORK: %d:%d\n", 617 + dump_printf("%p [%p]: PERF_RECORD_FORK: %d:%d\n", 618 618 (void *)(offset + head), 619 619 (void *)(long)(event->header.size), 620 620 event->fork.pid, event->fork.ppid); ··· 627 627 return 0; 628 628 629 629 if (!thread || !parent || thread__fork(thread, parent)) { 630 - dump_printf("problem processing PERF_EVENT_FORK, skipping event.\n"); 630 + dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n"); 631 631 return -1; 632 632 } 633 633 total_fork++; ··· 639 639 process_event(event_t *event, unsigned long offset, unsigned long head) 640 640 { 641 641 switch (event->header.type) { 642 - case PERF_EVENT_SAMPLE: 642 + case PERF_RECORD_SAMPLE: 643 643 return process_sample_event(event, offset, head); 644 644 645 - case PERF_EVENT_MMAP: 645 + case PERF_RECORD_MMAP: 646 646 return process_mmap_event(event, offset, head); 647 647 648 - case PERF_EVENT_COMM: 648 + case PERF_RECORD_COMM: 649 649 return process_comm_event(event, offset, head); 650 650 651 - case PERF_EVENT_FORK: 651 + case PERF_RECORD_FORK: 652 652 return process_fork_event(event, offset, head); 653 653 /* 654 654 * We dont process them right now but they are fine: 655 655 */ 656 656 657 - case PERF_EVENT_THROTTLE: 658 - case PERF_EVENT_UNTHROTTLE: 657 + case PERF_RECORD_THROTTLE: 658 + case PERF_RECORD_UNTHROTTLE: 659 659 return 0; 660 660 661 661 default:
+11 -11
tools/perf/builtin-record.c
··· 77 77 78 78 static unsigned long mmap_read_head(struct mmap_data *md) 79 79 { 80 - struct perf_counter_mmap_page *pc = md->base; 80 + struct perf_event_mmap_page *pc = md->base; 81 81 long head; 82 82 83 83 head = pc->data_head; ··· 88 88 89 89 static void mmap_write_tail(struct mmap_data *md, unsigned long tail) 90 90 { 91 - struct perf_counter_mmap_page *pc = md->base; 91 + struct perf_event_mmap_page *pc = md->base; 92 92 93 93 /* 94 94 * ensure all reads are done before we write the tail out. ··· 233 233 } 234 234 } 235 235 236 - comm_ev.header.type = PERF_EVENT_COMM; 236 + comm_ev.header.type = PERF_RECORD_COMM; 237 237 size = ALIGN(size, sizeof(u64)); 238 238 comm_ev.header.size = sizeof(comm_ev) - (sizeof(comm_ev.comm) - size); 239 239 ··· 288 288 while (1) { 289 289 char bf[BUFSIZ], *pbf = bf; 290 290 struct mmap_event mmap_ev = { 291 - .header = { .type = PERF_EVENT_MMAP }, 291 + .header = { .type = PERF_RECORD_MMAP }, 292 292 }; 293 293 int n; 294 294 size_t size; ··· 355 355 356 356 static int group_fd; 357 357 358 - static struct perf_header_attr *get_header_attr(struct perf_counter_attr *a, int nr) 358 + static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int nr) 359 359 { 360 360 struct perf_header_attr *h_attr; 361 361 ··· 371 371 372 372 static void create_counter(int counter, int cpu, pid_t pid) 373 373 { 374 - struct perf_counter_attr *attr = attrs + counter; 374 + struct perf_event_attr *attr = attrs + counter; 375 375 struct perf_header_attr *h_attr; 376 376 int track = !counter; /* only the first counter needs these */ 377 377 struct { ··· 417 417 attr->disabled = 1; 418 418 419 419 try_again: 420 - fd[nr_cpu][counter] = sys_perf_counter_open(attr, pid, cpu, group_fd, 0); 420 + fd[nr_cpu][counter] = sys_perf_event_open(attr, pid, cpu, group_fd, 0); 421 421 422 422 if (fd[nr_cpu][counter] < 0) { 423 423 int err = errno; ··· 444 444 printf("\n"); 445 445 error("perfcounter syscall returned with %d (%s)\n", 446 446 fd[nr_cpu][counter], strerror(err)); 447 - die("No CONFIG_PERF_COUNTERS=y kernel support configured?\n"); 447 + die("No CONFIG_PERF_EVENTS=y kernel support configured?\n"); 448 448 exit(-1); 449 449 } 450 450 ··· 478 478 if (multiplex && fd[nr_cpu][counter] != multiplex_fd) { 479 479 int ret; 480 480 481 - ret = ioctl(fd[nr_cpu][counter], PERF_COUNTER_IOC_SET_OUTPUT, multiplex_fd); 481 + ret = ioctl(fd[nr_cpu][counter], PERF_EVENT_IOC_SET_OUTPUT, multiplex_fd); 482 482 assert(ret != -1); 483 483 } else { 484 484 event_array[nr_poll].fd = fd[nr_cpu][counter]; ··· 496 496 } 497 497 } 498 498 499 - ioctl(fd[nr_cpu][counter], PERF_COUNTER_IOC_ENABLE); 499 + ioctl(fd[nr_cpu][counter], PERF_EVENT_IOC_ENABLE); 500 500 } 501 501 502 502 static void open_counters(int cpu, pid_t pid) ··· 642 642 if (done) { 643 643 for (i = 0; i < nr_cpu; i++) { 644 644 for (counter = 0; counter < nr_counters; counter++) 645 - ioctl(fd[i][counter], PERF_COUNTER_IOC_DISABLE); 645 + ioctl(fd[i][counter], PERF_EVENT_IOC_DISABLE); 646 646 } 647 647 } 648 648 }
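
mmap_read_head() and mmap_write_tail() above are the two ends of the lock-free ring-buffer protocol: the kernel publishes data_head in the struct perf_event_mmap_page occupying the first mmap()ed page, the tool consumes records up to it, then stores data_tail so the kernel may reuse the space. A condensed consumer sketch with the same barrier placement; the real tools use per-arch asm barriers, so __sync_synchronize() is a stand-in here, and records wrapping the buffer edge (which the tools handle by stitching) are assumed away:

    #include <linux/perf_event.h>

    #define barrier_sync() __sync_synchronize()  /* stand-in for rmb()/mb() */

    /* data: the 2^n-page area after the control page; mask: its size - 1 */
    static void consume_ring(struct perf_event_mmap_page *pc,
                             unsigned char *data, unsigned long mask,
                             void (*handle)(struct perf_event_header *))
    {
            __u64 tail = pc->data_tail;
            __u64 head = pc->data_head;

            barrier_sync();         /* read head before touching records */

            while (tail != head) {
                    struct perf_event_header *h =
                            (struct perf_event_header *)&data[tail & mask];

                    handle(h);      /* assumes the record does not wrap */
                    tail += h->size;
            }

            barrier_sync();         /* finish all reads before freeing space */
            pc->data_tail = tail;
    }
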
+24 -24
tools/perf/builtin-report.c
··· 1121 1121 more_data += sizeof(u64); 1122 1122 } 1123 1123 1124 - dump_printf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n", 1124 + dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n", 1125 1125 (void *)(offset + head), 1126 1126 (void *)(long)(event->header.size), 1127 1127 event->header.misc, ··· 1158 1158 if (comm_list && !strlist__has_entry(comm_list, thread->comm)) 1159 1159 return 0; 1160 1160 1161 - cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK; 1161 + cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 1162 1162 1163 - if (cpumode == PERF_EVENT_MISC_KERNEL) { 1163 + if (cpumode == PERF_RECORD_MISC_KERNEL) { 1164 1164 show = SHOW_KERNEL; 1165 1165 level = 'k'; 1166 1166 ··· 1168 1168 1169 1169 dump_printf(" ...... dso: %s\n", dso->name); 1170 1170 1171 - } else if (cpumode == PERF_EVENT_MISC_USER) { 1171 + } else if (cpumode == PERF_RECORD_MISC_USER) { 1172 1172 1173 1173 show = SHOW_USER; 1174 1174 level = '.'; ··· 1210 1210 1211 1211 thread = threads__findnew(event->mmap.pid, &threads, &last_match); 1212 1212 1213 - dump_printf("%p [%p]: PERF_EVENT_MMAP %d/%d: [%p(%p) @ %p]: %s\n", 1213 + dump_printf("%p [%p]: PERF_RECORD_MMAP %d/%d: [%p(%p) @ %p]: %s\n", 1214 1214 (void *)(offset + head), 1215 1215 (void *)(long)(event->header.size), 1216 1216 event->mmap.pid, ··· 1221 1221 event->mmap.filename); 1222 1222 1223 1223 if (thread == NULL || map == NULL) { 1224 - dump_printf("problem processing PERF_EVENT_MMAP, skipping event.\n"); 1224 + dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n"); 1225 1225 return 0; 1226 1226 } 1227 1227 ··· 1238 1238 1239 1239 thread = threads__findnew(event->comm.pid, &threads, &last_match); 1240 1240 1241 - dump_printf("%p [%p]: PERF_EVENT_COMM: %s:%d\n", 1241 + dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n", 1242 1242 (void *)(offset + head), 1243 1243 (void *)(long)(event->header.size), 1244 1244 event->comm.comm, event->comm.pid); 1245 1245 1246 1246 if (thread == NULL || 1247 1247 thread__set_comm_adjust(thread, event->comm.comm)) { 1248 - dump_printf("problem processing PERF_EVENT_COMM, skipping event.\n"); 1248 + dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); 1249 1249 return -1; 1250 1250 } 1251 1251 total_comm++; ··· 1262 1262 thread = threads__findnew(event->fork.pid, &threads, &last_match); 1263 1263 parent = threads__findnew(event->fork.ppid, &threads, &last_match); 1264 1264 1265 - dump_printf("%p [%p]: PERF_EVENT_%s: (%d:%d):(%d:%d)\n", 1265 + dump_printf("%p [%p]: PERF_RECORD_%s: (%d:%d):(%d:%d)\n", 1266 1266 (void *)(offset + head), 1267 1267 (void *)(long)(event->header.size), 1268 - event->header.type == PERF_EVENT_FORK ? "FORK" : "EXIT", 1268 + event->header.type == PERF_RECORD_FORK ? 
"FORK" : "EXIT", 1269 1269 event->fork.pid, event->fork.tid, 1270 1270 event->fork.ppid, event->fork.ptid); 1271 1271 ··· 1276 1276 if (thread == parent) 1277 1277 return 0; 1278 1278 1279 - if (event->header.type == PERF_EVENT_EXIT) 1279 + if (event->header.type == PERF_RECORD_EXIT) 1280 1280 return 0; 1281 1281 1282 1282 if (!thread || !parent || thread__fork(thread, parent)) { 1283 - dump_printf("problem processing PERF_EVENT_FORK, skipping event.\n"); 1283 + dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n"); 1284 1284 return -1; 1285 1285 } 1286 1286 total_fork++; ··· 1291 1291 static int 1292 1292 process_lost_event(event_t *event, unsigned long offset, unsigned long head) 1293 1293 { 1294 - dump_printf("%p [%p]: PERF_EVENT_LOST: id:%Ld: lost:%Ld\n", 1294 + dump_printf("%p [%p]: PERF_RECORD_LOST: id:%Ld: lost:%Ld\n", 1295 1295 (void *)(offset + head), 1296 1296 (void *)(long)(event->header.size), 1297 1297 event->lost.id, ··· 1305 1305 static int 1306 1306 process_read_event(event_t *event, unsigned long offset, unsigned long head) 1307 1307 { 1308 - struct perf_counter_attr *attr; 1308 + struct perf_event_attr *attr; 1309 1309 1310 1310 attr = perf_header__find_attr(event->read.id, header); 1311 1311 ··· 1319 1319 event->read.value); 1320 1320 } 1321 1321 1322 - dump_printf("%p [%p]: PERF_EVENT_READ: %d %d %s %Lu\n", 1322 + dump_printf("%p [%p]: PERF_RECORD_READ: %d %d %s %Lu\n", 1323 1323 (void *)(offset + head), 1324 1324 (void *)(long)(event->header.size), 1325 1325 event->read.pid, ··· 1337 1337 trace_event(event); 1338 1338 1339 1339 switch (event->header.type) { 1340 - case PERF_EVENT_SAMPLE: 1340 + case PERF_RECORD_SAMPLE: 1341 1341 return process_sample_event(event, offset, head); 1342 1342 1343 - case PERF_EVENT_MMAP: 1343 + case PERF_RECORD_MMAP: 1344 1344 return process_mmap_event(event, offset, head); 1345 1345 1346 - case PERF_EVENT_COMM: 1346 + case PERF_RECORD_COMM: 1347 1347 return process_comm_event(event, offset, head); 1348 1348 1349 - case PERF_EVENT_FORK: 1350 - case PERF_EVENT_EXIT: 1349 + case PERF_RECORD_FORK: 1350 + case PERF_RECORD_EXIT: 1351 1351 return process_task_event(event, offset, head); 1352 1352 1353 - case PERF_EVENT_LOST: 1353 + case PERF_RECORD_LOST: 1354 1354 return process_lost_event(event, offset, head); 1355 1355 1356 - case PERF_EVENT_READ: 1356 + case PERF_RECORD_READ: 1357 1357 return process_read_event(event, offset, head); 1358 1358 1359 1359 /* 1360 1360 * We dont process them right now but they are fine: 1361 1361 */ 1362 1362 1363 - case PERF_EVENT_THROTTLE: 1364 - case PERF_EVENT_UNTHROTTLE: 1363 + case PERF_RECORD_THROTTLE: 1364 + case PERF_RECORD_UNTHROTTLE: 1365 1365 return 0; 1366 1366 1367 1367 default:
+10 -10
tools/perf/builtin-sched.c
··· 1573 1573 more_data += sizeof(u64); 1574 1574 } 1575 1575 1576 - dump_printf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n", 1576 + dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n", 1577 1577 (void *)(offset + head), 1578 1578 (void *)(long)(event->header.size), 1579 1579 event->header.misc, ··· 1589 1589 return -1; 1590 1590 } 1591 1591 1592 - cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK; 1592 + cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 1593 1593 1594 - if (cpumode == PERF_EVENT_MISC_KERNEL) { 1594 + if (cpumode == PERF_RECORD_MISC_KERNEL) { 1595 1595 show = SHOW_KERNEL; 1596 1596 level = 'k'; 1597 1597 ··· 1599 1599 1600 1600 dump_printf(" ...... dso: %s\n", dso->name); 1601 1601 1602 - } else if (cpumode == PERF_EVENT_MISC_USER) { 1602 + } else if (cpumode == PERF_RECORD_MISC_USER) { 1603 1603 1604 1604 show = SHOW_USER; 1605 1605 level = '.'; ··· 1626 1626 1627 1627 nr_events++; 1628 1628 switch (event->header.type) { 1629 - case PERF_EVENT_MMAP: 1629 + case PERF_RECORD_MMAP: 1630 1630 return 0; 1631 - case PERF_EVENT_LOST: 1631 + case PERF_RECORD_LOST: 1632 1632 nr_lost_chunks++; 1633 1633 nr_lost_events += event->lost.lost; 1634 1634 return 0; 1635 1635 1636 - case PERF_EVENT_COMM: 1636 + case PERF_RECORD_COMM: 1637 1637 return process_comm_event(event, offset, head); 1638 1638 1639 - case PERF_EVENT_EXIT ... PERF_EVENT_READ: 1639 + case PERF_RECORD_EXIT ... PERF_RECORD_READ: 1640 1640 return 0; 1641 1641 1642 - case PERF_EVENT_SAMPLE: 1642 + case PERF_RECORD_SAMPLE: 1643 1643 return process_sample_event(event, offset, head); 1644 1644 1645 - case PERF_EVENT_MAX: 1645 + case PERF_RECORD_MAX: 1646 1646 default: 1647 1647 return -1; 1648 1648 }
+5 -5
tools/perf/builtin-stat.c
··· 48 48 #include <sys/prctl.h> 49 49 #include <math.h> 50 50 51 - static struct perf_counter_attr default_attrs[] = { 51 + static struct perf_event_attr default_attrs[] = { 52 52 53 53 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, 54 54 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES}, ··· 130 130 attrs[counter].config == PERF_COUNT_##c) 131 131 132 132 #define ERR_PERF_OPEN \ 133 - "Error: counter %d, sys_perf_counter_open() syscall returned with %d (%s)\n" 133 + "Error: counter %d, sys_perf_event_open() syscall returned with %d (%s)\n" 134 134 135 135 static void create_perf_stat_counter(int counter, int pid) 136 136 { 137 - struct perf_counter_attr *attr = attrs + counter; 137 + struct perf_event_attr *attr = attrs + counter; 138 138 139 139 if (scale) 140 140 attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | ··· 144 144 unsigned int cpu; 145 145 146 146 for (cpu = 0; cpu < nr_cpus; cpu++) { 147 - fd[cpu][counter] = sys_perf_counter_open(attr, -1, cpu, -1, 0); 147 + fd[cpu][counter] = sys_perf_event_open(attr, -1, cpu, -1, 0); 148 148 if (fd[cpu][counter] < 0 && verbose) 149 149 fprintf(stderr, ERR_PERF_OPEN, counter, 150 150 fd[cpu][counter], strerror(errno)); ··· 154 154 attr->disabled = 1; 155 155 attr->enable_on_exec = 1; 156 156 157 - fd[0][counter] = sys_perf_counter_open(attr, pid, -1, -1, 0); 157 + fd[0][counter] = sys_perf_event_open(attr, pid, -1, -1, 0); 158 158 if (fd[0][counter] < 0 && verbose) 159 159 fprintf(stderr, ERR_PERF_OPEN, counter, 160 160 fd[0][counter], strerror(errno));
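(Editor's sketch, not part of the patch.) The create_perf_stat_counter() hunk above requests PERF_FORMAT_TOTAL_TIME_ENABLED and PERF_FORMAT_TOTAL_TIME_RUNNING whenever scaling is on, so the tool can compensate for time the counter spent descheduled under PMU multiplexing. A minimal standalone version of that scaled-read pattern follows; it assumes a post-rename kernel that exposes <linux/perf_event.h> and __NR_perf_event_open, and the three-u64 read layout comes from the perf ABI rather than from this diff:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	unsigned long long buf[3];	/* { value, time_enabled, time_running } */
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.size = sizeof(attr);
	/* Same read_format builtin-stat.c sets when 'scale' is on: */
	attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			   PERF_FORMAT_TOTAL_TIME_RUNNING;

	/* pid 0, cpu -1: count this task on any CPU; no group, no flags. */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	/* ... run the code to be measured here ... */

	if (read(fd, buf, sizeof(buf)) == sizeof(buf) && buf[2])
		printf("cycles (scaled): %llu\n", buf[0] * buf[1] / buf[2]);

	close(fd);
	return 0;
}

The reported figure is value * time_enabled / time_running, i.e. the raw count scaled up for the time the counter was not on the PMU, which is what lets multiplexed counters still yield comparable totals.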
+7 -7
tools/perf/builtin-timechart.c
··· 937 937 938 938 switch (event->header.type) { 939 939 940 - case PERF_EVENT_COMM: 940 + case PERF_RECORD_COMM: 941 941 return process_comm_event(event); 942 - case PERF_EVENT_FORK: 942 + case PERF_RECORD_FORK: 943 943 return process_fork_event(event); 944 - case PERF_EVENT_EXIT: 944 + case PERF_RECORD_EXIT: 945 945 return process_exit_event(event); 946 - case PERF_EVENT_SAMPLE: 946 + case PERF_RECORD_SAMPLE: 947 947 return queue_sample_event(event); 948 948 949 949 /* 950 950 * We dont process them right now but they are fine: 951 951 */ 952 - case PERF_EVENT_MMAP: 953 - case PERF_EVENT_THROTTLE: 954 - case PERF_EVENT_UNTHROTTLE: 952 + case PERF_RECORD_MMAP: 953 + case PERF_RECORD_THROTTLE: 954 + case PERF_RECORD_UNTHROTTLE: 955 955 return 0; 956 956 957 957 default:
+6 -6
tools/perf/builtin-top.c
··· 901 901 902 902 static unsigned int mmap_read_head(struct mmap_data *md) 903 903 { 904 - struct perf_counter_mmap_page *pc = md->base; 904 + struct perf_event_mmap_page *pc = md->base; 905 905 int head; 906 906 907 907 head = pc->data_head; ··· 977 977 978 978 old += size; 979 979 980 - if (event->header.type == PERF_EVENT_SAMPLE) { 980 + if (event->header.type == PERF_RECORD_SAMPLE) { 981 981 int user = 982 - (event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK) == PERF_EVENT_MISC_USER; 982 + (event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK) == PERF_RECORD_MISC_USER; 983 983 process_event(event->ip.ip, md->counter, user); 984 984 } 985 985 } ··· 1005 1005 1006 1006 static void start_counter(int i, int counter) 1007 1007 { 1008 - struct perf_counter_attr *attr; 1008 + struct perf_event_attr *attr; 1009 1009 int cpu; 1010 1010 1011 1011 cpu = profile_cpu; ··· 1019 1019 attr->inherit = (cpu < 0) && inherit; 1020 1020 1021 1021 try_again: 1022 - fd[i][counter] = sys_perf_counter_open(attr, target_pid, cpu, group_fd, 0); 1022 + fd[i][counter] = sys_perf_event_open(attr, target_pid, cpu, group_fd, 0); 1023 1023 1024 1024 if (fd[i][counter] < 0) { 1025 1025 int err = errno; ··· 1044 1044 printf("\n"); 1045 1045 error("perfcounter syscall returned with %d (%s)\n", 1046 1046 fd[i][counter], strerror(err)); 1047 - die("No CONFIG_PERF_COUNTERS=y kernel support configured?\n"); 1047 + die("No CONFIG_PERF_EVENTS=y kernel support configured?\n"); 1048 1048 exit(-1); 1049 1049 } 1050 1050 assert(fd[i][counter] >= 0);
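(Editor's sketch, not part of the patch.) mmap_read_head() above is the reader side of the ring buffer documented in design.txt: it loads data_head from the struct perf_event_mmap_page meta-data page and must order that load before reading the records it covers. A hedged equivalent, with __sync_synchronize() standing in for the tool's rmb() helper and 'base' assumed to be the mapping returned by mmap() on a perf fd:

#include <linux/perf_event.h>

static unsigned int ring_read_head(void *base)
{
	struct perf_event_mmap_page *pc = base;
	unsigned int head = pc->data_head;

	/*
	 * Pairs with the kernel's write barrier: data_head must be
	 * read before the event records behind it (see design.txt).
	 */
	__sync_synchronize();

	return head;
}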
+11 -11
tools/perf/builtin-trace.c
··· 35 35 36 36 thread = threads__findnew(event->comm.pid, &threads, &last_match); 37 37 38 - dump_printf("%p [%p]: PERF_EVENT_COMM: %s:%d\n", 38 + dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n", 39 39 (void *)(offset + head), 40 40 (void *)(long)(event->header.size), 41 41 event->comm.comm, event->comm.pid); 42 42 43 43 if (thread == NULL || 44 44 thread__set_comm(thread, event->comm.comm)) { 45 - dump_printf("problem processing PERF_EVENT_COMM, skipping event.\n"); 45 + dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); 46 46 return -1; 47 47 } 48 48 total_comm++; ··· 82 82 more_data += sizeof(u64); 83 83 } 84 84 85 - dump_printf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n", 85 + dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n", 86 86 (void *)(offset + head), 87 87 (void *)(long)(event->header.size), 88 88 event->header.misc, ··· 98 98 return -1; 99 99 } 100 100 101 - cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK; 101 + cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 102 102 103 - if (cpumode == PERF_EVENT_MISC_KERNEL) { 103 + if (cpumode == PERF_RECORD_MISC_KERNEL) { 104 104 show = SHOW_KERNEL; 105 105 level = 'k'; 106 106 ··· 108 108 109 109 dump_printf(" ...... dso: %s\n", dso->name); 110 110 111 - } else if (cpumode == PERF_EVENT_MISC_USER) { 111 + } else if (cpumode == PERF_RECORD_MISC_USER) { 112 112 113 113 show = SHOW_USER; 114 114 level = '.'; ··· 146 146 trace_event(event); 147 147 148 148 switch (event->header.type) { 149 - case PERF_EVENT_MMAP ... PERF_EVENT_LOST: 149 + case PERF_RECORD_MMAP ... PERF_RECORD_LOST: 150 150 return 0; 151 151 152 - case PERF_EVENT_COMM: 152 + case PERF_RECORD_COMM: 153 153 return process_comm_event(event, offset, head); 154 154 155 - case PERF_EVENT_EXIT ... PERF_EVENT_READ: 155 + case PERF_RECORD_EXIT ... PERF_RECORD_READ: 156 156 return 0; 157 157 158 - case PERF_EVENT_SAMPLE: 158 + case PERF_RECORD_SAMPLE: 159 159 return process_sample_event(event, offset, head); 160 160 161 - case PERF_EVENT_MAX: 161 + case PERF_RECORD_MAX: 162 162 default: 163 163 return -1; 164 164 }
+29 -29
tools/perf/design.txt
··· 18 18 Performance counters are accessed via special file descriptors. 19 19 There's one file descriptor per virtual counter used. 20 20 21 - The special file descriptor is opened via the perf_counter_open() 21 + The special file descriptor is opened via the perf_event_open() 22 22 system call: 23 23 24 - int sys_perf_counter_open(struct perf_counter_hw_event *hw_event_uptr, 24 + int sys_perf_event_open(struct perf_event_hw_event *hw_event_uptr, 25 25 pid_t pid, int cpu, int group_fd, 26 26 unsigned long flags); 27 27 ··· 32 32 Multiple counters can be kept open at a time, and the counters 33 33 can be poll()ed. 34 34 35 - When creating a new counter fd, 'perf_counter_hw_event' is: 35 + When creating a new counter fd, 'perf_event_hw_event' is: 36 36 37 - struct perf_counter_hw_event { 37 + struct perf_event_hw_event { 38 38 /* 39 39 * The MSB of the config word signifies if the rest contains cpu 40 40 * specific (raw) counter configuration data, if unset, the next ··· 93 93 94 94 /* 95 95 * Generalized performance counter event types, used by the hw_event.event_id 96 - * parameter of the sys_perf_counter_open() syscall: 96 + * parameter of the sys_perf_event_open() syscall: 97 97 */ 98 98 enum hw_event_ids { 99 99 /* ··· 159 159 * reads on the counter should return the indicated quantities, 160 160 * in increasing order of bit value, after the counter value. 161 161 */ 162 - enum perf_counter_read_format { 162 + enum perf_event_read_format { 163 163 PERF_FORMAT_TOTAL_TIME_ENABLED = 1, 164 164 PERF_FORMAT_TOTAL_TIME_RUNNING = 2, 165 165 }; ··· 178 178 * Bits that can be set in hw_event.record_type to request information 179 179 * in the overflow packets. 180 180 */ 181 - enum perf_counter_record_format { 181 + enum perf_event_record_format { 182 182 PERF_RECORD_IP = 1U << 0, 183 183 PERF_RECORD_TID = 1U << 1, 184 184 PERF_RECORD_TIME = 1U << 2, ··· 228 228 The 'comm' bit allows tracking of process comm data on process creation. 229 229 This too is recorded in the ring-buffer (see below). 230 230 231 - The 'pid' parameter to the perf_counter_open() system call allows the 231 + The 'pid' parameter to the perf_event_open() system call allows the 232 232 counter to be specific to a task: 233 233 234 234 pid == 0: if the pid parameter is zero, the counter is attached to the ··· 258 258 259 259 The 'group_fd' parameter allows counter "groups" to be set up. A 260 260 counter group has one counter which is the group "leader". The leader 261 - is created first, with group_fd = -1 in the perf_counter_open call 261 + is created first, with group_fd = -1 in the perf_event_open call 262 262 that creates it. The rest of the group members are created 263 263 subsequently, with group_fd giving the fd of the group leader. 264 264 (A single counter on its own is created with group_fd = -1 and is ··· 277 277 accessed through mmap(). 278 278 279 279 The mmap size should be 1+2^n pages, where the first page is a meta-data page 280 - (struct perf_counter_mmap_page) that contains various bits of information such 280 + (struct perf_event_mmap_page) that contains various bits of information such 281 281 as where the ring-buffer head is. 282 282 283 283 /* 284 284 * Structure of the page that can be mapped via mmap 285 285 */ 286 - struct perf_counter_mmap_page { 286 + struct perf_event_mmap_page { 287 287 __u32 version; /* version number of this structure */ 288 288 __u32 compat_version; /* lowest version this is compat with */ 289 289 ··· 317 317 * Control data for the mmap() data buffer. 
318 318 * 319 319 * User-space reading this value should issue an rmb(), on SMP capable 320 - * platforms, after reading this value -- see perf_counter_wakeup(). 320 + * platforms, after reading this value -- see perf_event_wakeup(). 321 321 */ 322 322 __u32 data_head; /* head in the data section */ 323 323 }; ··· 327 327 328 328 The following 2^n pages are the ring-buffer which contains events of the form: 329 329 330 - #define PERF_EVENT_MISC_KERNEL (1 << 0) 331 - #define PERF_EVENT_MISC_USER (1 << 1) 332 - #define PERF_EVENT_MISC_OVERFLOW (1 << 2) 330 + #define PERF_RECORD_MISC_KERNEL (1 << 0) 331 + #define PERF_RECORD_MISC_USER (1 << 1) 332 + #define PERF_RECORD_MISC_OVERFLOW (1 << 2) 333 333 334 334 struct perf_event_header { 335 335 __u32 type; ··· 353 353 * char filename[]; 354 354 * }; 355 355 */ 356 - PERF_EVENT_MMAP = 1, 357 - PERF_EVENT_MUNMAP = 2, 356 + PERF_RECORD_MMAP = 1, 357 + PERF_RECORD_MUNMAP = 2, 358 358 359 359 /* 360 360 * struct { ··· 364 364 * char comm[]; 365 365 * }; 366 366 */ 367 - PERF_EVENT_COMM = 3, 367 + PERF_RECORD_COMM = 3, 368 368 369 369 /* 370 - * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field 370 + * When header.misc & PERF_RECORD_MISC_OVERFLOW the event_type field 371 371 * will be PERF_RECORD_* 372 372 * 373 373 * struct { ··· 397 397 fcntl() managing signals. 398 398 399 399 Normally a notification is generated for every page filled, however one can 400 - additionally set perf_counter_hw_event.wakeup_events to generate one every 400 + additionally set perf_event_hw_event.wakeup_events to generate one every 401 401 so many counter overflow events. 402 402 403 403 Future work will include a splice() interface to the ring-buffer. ··· 409 409 410 410 An individual counter or counter group can be enabled with 411 411 412 - ioctl(fd, PERF_COUNTER_IOC_ENABLE); 412 + ioctl(fd, PERF_EVENT_IOC_ENABLE); 413 413 414 414 or disabled with 415 415 416 - ioctl(fd, PERF_COUNTER_IOC_DISABLE); 416 + ioctl(fd, PERF_EVENT_IOC_DISABLE); 417 417 418 418 Enabling or disabling the leader of a group enables or disables the 419 419 whole group; that is, while the group leader is disabled, none of the ··· 424 424 425 425 Additionally, non-inherited overflow counters can use 426 426 427 - ioctl(fd, PERF_COUNTER_IOC_REFRESH, nr); 427 + ioctl(fd, PERF_EVENT_IOC_REFRESH, nr); 428 428 429 429 to enable a counter for 'nr' events, after which it gets disabled again. 430 430 431 431 A process can enable or disable all the counter groups that are 432 432 attached to it, using prctl: 433 433 434 - prctl(PR_TASK_PERF_COUNTERS_ENABLE); 434 + prctl(PR_TASK_PERF_EVENTS_ENABLE); 435 435 436 - prctl(PR_TASK_PERF_COUNTERS_DISABLE); 436 + prctl(PR_TASK_PERF_EVENTS_DISABLE); 437 437 438 438 This applies to all counters on the current process, whether created 439 439 by this process or by another, and doesn't affect any counters that ··· 447 447 If your architecture does not have hardware performance metrics, you can 448 448 still use the generic software counters based on hrtimers for sampling. 
449 449 450 - So to start with, in order to add HAVE_PERF_COUNTERS to your Kconfig, you 450 + So to start with, in order to add HAVE_PERF_EVENTS to your Kconfig, you 451 451 will need at least this: 452 - - asm/perf_counter.h - a basic stub will suffice at first 452 + - asm/perf_event.h - a basic stub will suffice at first 453 453 - support for atomic64 types (and associated helper functions) 454 - - set_perf_counter_pending() implemented 454 + - set_perf_event_pending() implemented 455 455 456 456 If your architecture does have hardware capabilities, you can override the 457 - weak stub hw_perf_counter_init() to register hardware counters. 457 + weak stub hw_perf_event_init() to register hardware counters.
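(Editor's sketch, not part of design.txt.) The ioctl section above translates directly into code. The following sketch counts instructions across a workload using the post-rename names; it assumes <linux/perf_event.h> plus a wired-up __NR_perf_event_open, and workload() is a throwaway stand-in for the code being measured:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

static void workload(void)
{
	volatile unsigned long i;

	for (i = 0; i < 10000000; i++)
		;			/* stand-in for the measured code */
}

int main(void)
{
	struct perf_event_attr attr;
	unsigned long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.size = sizeof(attr);
	attr.disabled = 1;		/* created disabled ... */

	/* pid 0, cpu -1: this task, any CPU; no group leader, no flags. */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE);	/* ... enabled explicitly */
	workload();
	ioctl(fd, PERF_EVENT_IOC_DISABLE);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("instructions: %llu\n", count);

	close(fd);
	return 0;
}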
+6 -6
tools/perf/perf.h
··· 52 52 #include <sys/types.h> 53 53 #include <sys/syscall.h> 54 54 55 - #include "../../include/linux/perf_counter.h" 55 + #include "../../include/linux/perf_event.h" 56 56 #include "util/types.h" 57 57 58 58 /* 59 - * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all 59 + * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all 60 60 * counters in the current task. 61 61 */ 62 - #define PR_TASK_PERF_COUNTERS_DISABLE 31 63 - #define PR_TASK_PERF_COUNTERS_ENABLE 32 62 + #define PR_TASK_PERF_EVENTS_DISABLE 31 63 + #define PR_TASK_PERF_EVENTS_ENABLE 32 64 64 65 65 #ifndef NSEC_PER_SEC 66 66 # define NSEC_PER_SEC 1000000000ULL ··· 90 90 _min1 < _min2 ? _min1 : _min2; }) 91 91 92 92 static inline int 93 - sys_perf_counter_open(struct perf_counter_attr *attr, 93 + sys_perf_event_open(struct perf_event_attr *attr, 94 94 pid_t pid, int cpu, int group_fd, 95 95 unsigned long flags) 96 96 { 97 97 attr->size = sizeof(*attr); 98 - return syscall(__NR_perf_counter_open, attr, pid, cpu, 98 + return syscall(__NR_perf_event_open, attr, pid, cpu, 99 99 group_fd, flags); 100 100 } 101 101
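(Hypothetical caller, not in the patch.) All of the tools funnel through the sys_perf_event_open() wrapper above, which stamps attr->size before entering the kernel so the ABI can be sanity-checked. A sketch of a typical call, assuming perf.h is on the include path:

#include <string.h>
#include "perf.h"	/* sys_perf_event_open() and struct perf_event_attr */

/* Count task-clock time for the calling task; returns an fd or -1. */
static int open_task_clock(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
	/* No need to set attr.size; the wrapper fills it in. */

	return sys_perf_event_open(&attr, 0 /* this task */, -1 /* any cpu */,
				   -1 /* no group */, 0 /* flags */);
}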
+2 -2
tools/perf/util/event.h
··· 1 - #ifndef __PERF_EVENT_H 2 - #define __PERF_EVENT_H 1 + #ifndef __PERF_RECORD_H 2 + #define __PERF_RECORD_H 3 3 #include "../perf.h" 4 4 #include "util.h" 5 5 #include <linux/list.h>
+3 -3
tools/perf/util/header.c
··· 9 9 /* 10 10 * Create new perf.data header attribute: 11 11 */ 12 - struct perf_header_attr *perf_header_attr__new(struct perf_counter_attr *attr) 12 + struct perf_header_attr *perf_header_attr__new(struct perf_event_attr *attr) 13 13 { 14 14 struct perf_header_attr *self = malloc(sizeof(*self)); 15 15 ··· 134 134 }; 135 135 136 136 struct perf_file_attr { 137 - struct perf_counter_attr attr; 137 + struct perf_event_attr attr; 138 138 struct perf_file_section ids; 139 139 }; 140 140 ··· 320 320 return type; 321 321 } 322 322 323 - struct perf_counter_attr * 323 + struct perf_event_attr * 324 324 perf_header__find_attr(u64 id, struct perf_header *header) 325 325 { 326 326 int i;
+4 -4
tools/perf/util/header.h
··· 1 1 #ifndef _PERF_HEADER_H 2 2 #define _PERF_HEADER_H 3 3 4 - #include "../../../include/linux/perf_counter.h" 4 + #include "../../../include/linux/perf_event.h" 5 5 #include <sys/types.h> 6 6 #include "types.h" 7 7 8 8 struct perf_header_attr { 9 - struct perf_counter_attr attr; 9 + struct perf_event_attr attr; 10 10 int ids, size; 11 11 u64 *id; 12 12 off_t id_offset; ··· 34 34 35 35 36 36 struct perf_header_attr * 37 - perf_header_attr__new(struct perf_counter_attr *attr); 37 + perf_header_attr__new(struct perf_event_attr *attr); 38 38 void perf_header_attr__add_id(struct perf_header_attr *self, u64 id); 39 39 40 40 u64 perf_header__sample_type(struct perf_header *header); 41 - struct perf_counter_attr * 41 + struct perf_event_attr * 42 42 perf_header__find_attr(u64 id, struct perf_header *header); 43 43 44 44
+16 -16
tools/perf/util/parse-events.c
··· 10 10 11 11 int nr_counters; 12 12 13 - struct perf_counter_attr attrs[MAX_COUNTERS]; 13 + struct perf_event_attr attrs[MAX_COUNTERS]; 14 14 15 15 struct event_symbol { 16 16 u8 type; ··· 48 48 { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" }, 49 49 }; 50 50 51 - #define __PERF_COUNTER_FIELD(config, name) \ 52 - ((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT) 51 + #define __PERF_EVENT_FIELD(config, name) \ 52 + ((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT) 53 53 54 - #define PERF_COUNTER_RAW(config) __PERF_COUNTER_FIELD(config, RAW) 55 - #define PERF_COUNTER_CONFIG(config) __PERF_COUNTER_FIELD(config, CONFIG) 56 - #define PERF_COUNTER_TYPE(config) __PERF_COUNTER_FIELD(config, TYPE) 57 - #define PERF_COUNTER_ID(config) __PERF_COUNTER_FIELD(config, EVENT) 54 + #define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW) 55 + #define PERF_EVENT_CONFIG(config) __PERF_EVENT_FIELD(config, CONFIG) 56 + #define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE) 57 + #define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT) 58 58 59 59 static const char *hw_event_names[] = { 60 60 "cycles", ··· 352 352 } 353 353 354 354 static enum event_result 355 - parse_generic_hw_event(const char **str, struct perf_counter_attr *attr) 355 + parse_generic_hw_event(const char **str, struct perf_event_attr *attr) 356 356 { 357 357 const char *s = *str; 358 358 int cache_type = -1, cache_op = -1, cache_result = -1; ··· 417 417 const char *evt_name, 418 418 unsigned int evt_length, 419 419 char *flags, 420 - struct perf_counter_attr *attr, 420 + struct perf_event_attr *attr, 421 421 const char **strp) 422 422 { 423 423 char evt_path[MAXPATHLEN]; ··· 505 505 506 506 507 507 static enum event_result parse_tracepoint_event(const char **strp, 508 - struct perf_counter_attr *attr) 508 + struct perf_event_attr *attr) 509 509 { 510 510 const char *evt_name; 511 511 char *flags; ··· 563 563 } 564 564 565 565 static enum event_result 566 - parse_symbolic_event(const char **strp, struct perf_counter_attr *attr) 566 + parse_symbolic_event(const char **strp, struct perf_event_attr *attr) 567 567 { 568 568 const char *str = *strp; 569 569 unsigned int i; ··· 582 582 } 583 583 584 584 static enum event_result 585 - parse_raw_event(const char **strp, struct perf_counter_attr *attr) 585 + parse_raw_event(const char **strp, struct perf_event_attr *attr) 586 586 { 587 587 const char *str = *strp; 588 588 u64 config; ··· 601 601 } 602 602 603 603 static enum event_result 604 - parse_numeric_event(const char **strp, struct perf_counter_attr *attr) 604 + parse_numeric_event(const char **strp, struct perf_event_attr *attr) 605 605 { 606 606 const char *str = *strp; 607 607 char *endp; ··· 623 623 } 624 624 625 625 static enum event_result 626 - parse_event_modifier(const char **strp, struct perf_counter_attr *attr) 626 + parse_event_modifier(const char **strp, struct perf_event_attr *attr) 627 627 { 628 628 const char *str = *strp; 629 629 int eu = 1, ek = 1, eh = 1; ··· 656 656 * Symbolic names are (almost) exactly matched. 
657 657 */ 658 658 static enum event_result 659 - parse_event_symbols(const char **str, struct perf_counter_attr *attr) 659 + parse_event_symbols(const char **str, struct perf_event_attr *attr) 660 660 { 661 661 enum event_result ret; 662 662 ··· 711 711 712 712 int parse_events(const struct option *opt __used, const char *str, int unset __used) 713 713 { 714 - struct perf_counter_attr attr; 714 + struct perf_event_attr attr; 715 715 enum event_result ret; 716 716 717 717 if (strchr(str, ':'))
+1 -1
tools/perf/util/parse-events.h
··· 16 16 17 17 extern int nr_counters; 18 18 19 - extern struct perf_counter_attr attrs[MAX_COUNTERS]; 19 + extern struct perf_event_attr attrs[MAX_COUNTERS]; 20 20 21 21 extern const char *event_name(int ctr); 22 22 extern const char *__event_name(int type, u64 config);
+4 -4
tools/perf/util/trace-event-info.c
··· 480 480 } 481 481 482 482 static struct tracepoint_path * 483 - get_tracepoints_path(struct perf_counter_attr *pattrs, int nb_counters) 483 + get_tracepoints_path(struct perf_event_attr *pattrs, int nb_events) 484 484 { 485 485 struct tracepoint_path path, *ppath = &path; 486 486 int i; 487 487 488 - for (i = 0; i < nb_counters; i++) { 488 + for (i = 0; i < nb_events; i++) { 489 489 if (pattrs[i].type != PERF_TYPE_TRACEPOINT) 490 490 continue; 491 491 ppath->next = tracepoint_id_to_path(pattrs[i].config); ··· 496 496 497 497 return path.next; 498 498 } 499 - void read_tracing_data(struct perf_counter_attr *pattrs, int nb_counters) 499 + void read_tracing_data(struct perf_event_attr *pattrs, int nb_events) 500 500 { 501 501 char buf[BUFSIZ]; 502 502 struct tracepoint_path *tps; ··· 530 530 page_size = getpagesize(); 531 531 write_or_die(&page_size, 4); 532 532 533 - tps = get_tracepoints_path(pattrs, nb_counters); 533 + tps = get_tracepoints_path(pattrs, nb_events); 534 534 535 535 read_header_files(); 536 536 read_ftrace_files(tps);
+1 -1
tools/perf/util/trace-event.h
··· 240 240 raw_field_value(struct event *event, const char *name, void *data); 241 241 void *raw_field_ptr(struct event *event, const char *name, void *data); 242 242 243 - void read_tracing_data(struct perf_counter_attr *pattrs, int nb_counters); 243 + void read_tracing_data(struct perf_event_attr *pattrs, int nb_events); 244 244 245 245 #endif /* _TRACE_EVENTS_H */