Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

* Convert the trace builtins to use the growing evsel/evlist
tracepoint infrastructure, removing several open coded constructs
like switch-like series of strcmp calls to dispatch events, etc.
Basically what had already been showcased in 'perf sched'.

* Add evsel constructor for tracepoints, that uses libtraceevent
just to parse the /format events file, use it in a new 'perf test'
to make sure the libtraceevent format parsing regressions can
be more readily caught.

* Some strange errors were happening in some builds, but not on the
next, reported by several people, problem was some parser related
files, generated during the build, didn't have proper make deps,
fix from Eric Sandeen.

* Fix some compiling errors on 32-bit, from Feng Tang.

* Don't use sscanf extension %as, not available on bionic, reimplementation
by Irina Tirdea.

* Fix bfd.h/libbfd detection with recent binutils, from Markus Trippelsdorf.

* Introduce struct and cache information about the environment where a
perf.data file was captured, from Namhyung Kim.

* Fix several error paths in libtraceevent, from Namhyung Kim.

* Print event causing perf_event_open() to fail in 'perf record',
from Stephane Eranian.

* New 'kvm' analysis tool, from Xiao Guangrong.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

+2280 -808
+16
arch/x86/include/asm/kvm.h
··· 9 9 #include <linux/types.h> 10 10 #include <linux/ioctl.h> 11 11 12 + #define DE_VECTOR 0 13 + #define DB_VECTOR 1 14 + #define BP_VECTOR 3 15 + #define OF_VECTOR 4 16 + #define BR_VECTOR 5 17 + #define UD_VECTOR 6 18 + #define NM_VECTOR 7 19 + #define DF_VECTOR 8 20 + #define TS_VECTOR 10 21 + #define NP_VECTOR 11 22 + #define SS_VECTOR 12 23 + #define GP_VECTOR 13 24 + #define PF_VECTOR 14 25 + #define MF_VECTOR 16 26 + #define MC_VECTOR 18 27 + 12 28 /* Select x86 specific features in <linux/kvm.h> */ 13 29 #define __KVM_HAVE_PIT 14 30 #define __KVM_HAVE_IOAPIC
-16
arch/x86/include/asm/kvm_host.h
··· 75 75 #define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1)) 76 76 #define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE) 77 77 78 - #define DE_VECTOR 0 79 - #define DB_VECTOR 1 80 - #define BP_VECTOR 3 81 - #define OF_VECTOR 4 82 - #define BR_VECTOR 5 83 - #define UD_VECTOR 6 84 - #define NM_VECTOR 7 85 - #define DF_VECTOR 8 86 - #define TS_VECTOR 10 87 - #define NP_VECTOR 11 88 - #define SS_VECTOR 12 89 - #define GP_VECTOR 13 90 - #define PF_VECTOR 14 91 - #define MF_VECTOR 16 92 - #define MC_VECTOR 18 93 - 94 78 #define SELECTOR_TI_MASK (1 << 2) 95 79 #define SELECTOR_RPL_MASK 0x03 96 80
+130 -75
arch/x86/include/asm/svm.h
··· 1 1 #ifndef __SVM_H 2 2 #define __SVM_H 3 3 4 + #define SVM_EXIT_READ_CR0 0x000 5 + #define SVM_EXIT_READ_CR3 0x003 6 + #define SVM_EXIT_READ_CR4 0x004 7 + #define SVM_EXIT_READ_CR8 0x008 8 + #define SVM_EXIT_WRITE_CR0 0x010 9 + #define SVM_EXIT_WRITE_CR3 0x013 10 + #define SVM_EXIT_WRITE_CR4 0x014 11 + #define SVM_EXIT_WRITE_CR8 0x018 12 + #define SVM_EXIT_READ_DR0 0x020 13 + #define SVM_EXIT_READ_DR1 0x021 14 + #define SVM_EXIT_READ_DR2 0x022 15 + #define SVM_EXIT_READ_DR3 0x023 16 + #define SVM_EXIT_READ_DR4 0x024 17 + #define SVM_EXIT_READ_DR5 0x025 18 + #define SVM_EXIT_READ_DR6 0x026 19 + #define SVM_EXIT_READ_DR7 0x027 20 + #define SVM_EXIT_WRITE_DR0 0x030 21 + #define SVM_EXIT_WRITE_DR1 0x031 22 + #define SVM_EXIT_WRITE_DR2 0x032 23 + #define SVM_EXIT_WRITE_DR3 0x033 24 + #define SVM_EXIT_WRITE_DR4 0x034 25 + #define SVM_EXIT_WRITE_DR5 0x035 26 + #define SVM_EXIT_WRITE_DR6 0x036 27 + #define SVM_EXIT_WRITE_DR7 0x037 28 + #define SVM_EXIT_EXCP_BASE 0x040 29 + #define SVM_EXIT_INTR 0x060 30 + #define SVM_EXIT_NMI 0x061 31 + #define SVM_EXIT_SMI 0x062 32 + #define SVM_EXIT_INIT 0x063 33 + #define SVM_EXIT_VINTR 0x064 34 + #define SVM_EXIT_CR0_SEL_WRITE 0x065 35 + #define SVM_EXIT_IDTR_READ 0x066 36 + #define SVM_EXIT_GDTR_READ 0x067 37 + #define SVM_EXIT_LDTR_READ 0x068 38 + #define SVM_EXIT_TR_READ 0x069 39 + #define SVM_EXIT_IDTR_WRITE 0x06a 40 + #define SVM_EXIT_GDTR_WRITE 0x06b 41 + #define SVM_EXIT_LDTR_WRITE 0x06c 42 + #define SVM_EXIT_TR_WRITE 0x06d 43 + #define SVM_EXIT_RDTSC 0x06e 44 + #define SVM_EXIT_RDPMC 0x06f 45 + #define SVM_EXIT_PUSHF 0x070 46 + #define SVM_EXIT_POPF 0x071 47 + #define SVM_EXIT_CPUID 0x072 48 + #define SVM_EXIT_RSM 0x073 49 + #define SVM_EXIT_IRET 0x074 50 + #define SVM_EXIT_SWINT 0x075 51 + #define SVM_EXIT_INVD 0x076 52 + #define SVM_EXIT_PAUSE 0x077 53 + #define SVM_EXIT_HLT 0x078 54 + #define SVM_EXIT_INVLPG 0x079 55 + #define SVM_EXIT_INVLPGA 0x07a 56 + #define SVM_EXIT_IOIO 0x07b 57 + #define SVM_EXIT_MSR 0x07c 58 + 
#define SVM_EXIT_TASK_SWITCH 0x07d 59 + #define SVM_EXIT_FERR_FREEZE 0x07e 60 + #define SVM_EXIT_SHUTDOWN 0x07f 61 + #define SVM_EXIT_VMRUN 0x080 62 + #define SVM_EXIT_VMMCALL 0x081 63 + #define SVM_EXIT_VMLOAD 0x082 64 + #define SVM_EXIT_VMSAVE 0x083 65 + #define SVM_EXIT_STGI 0x084 66 + #define SVM_EXIT_CLGI 0x085 67 + #define SVM_EXIT_SKINIT 0x086 68 + #define SVM_EXIT_RDTSCP 0x087 69 + #define SVM_EXIT_ICEBP 0x088 70 + #define SVM_EXIT_WBINVD 0x089 71 + #define SVM_EXIT_MONITOR 0x08a 72 + #define SVM_EXIT_MWAIT 0x08b 73 + #define SVM_EXIT_MWAIT_COND 0x08c 74 + #define SVM_EXIT_XSETBV 0x08d 75 + #define SVM_EXIT_NPF 0x400 76 + 77 + #define SVM_EXIT_ERR -1 78 + 79 + #define SVM_EXIT_REASONS \ 80 + { SVM_EXIT_READ_CR0, "read_cr0" }, \ 81 + { SVM_EXIT_READ_CR3, "read_cr3" }, \ 82 + { SVM_EXIT_READ_CR4, "read_cr4" }, \ 83 + { SVM_EXIT_READ_CR8, "read_cr8" }, \ 84 + { SVM_EXIT_WRITE_CR0, "write_cr0" }, \ 85 + { SVM_EXIT_WRITE_CR3, "write_cr3" }, \ 86 + { SVM_EXIT_WRITE_CR4, "write_cr4" }, \ 87 + { SVM_EXIT_WRITE_CR8, "write_cr8" }, \ 88 + { SVM_EXIT_READ_DR0, "read_dr0" }, \ 89 + { SVM_EXIT_READ_DR1, "read_dr1" }, \ 90 + { SVM_EXIT_READ_DR2, "read_dr2" }, \ 91 + { SVM_EXIT_READ_DR3, "read_dr3" }, \ 92 + { SVM_EXIT_WRITE_DR0, "write_dr0" }, \ 93 + { SVM_EXIT_WRITE_DR1, "write_dr1" }, \ 94 + { SVM_EXIT_WRITE_DR2, "write_dr2" }, \ 95 + { SVM_EXIT_WRITE_DR3, "write_dr3" }, \ 96 + { SVM_EXIT_WRITE_DR5, "write_dr5" }, \ 97 + { SVM_EXIT_WRITE_DR7, "write_dr7" }, \ 98 + { SVM_EXIT_EXCP_BASE + DB_VECTOR, "DB excp" }, \ 99 + { SVM_EXIT_EXCP_BASE + BP_VECTOR, "BP excp" }, \ 100 + { SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" }, \ 101 + { SVM_EXIT_EXCP_BASE + PF_VECTOR, "PF excp" }, \ 102 + { SVM_EXIT_EXCP_BASE + NM_VECTOR, "NM excp" }, \ 103 + { SVM_EXIT_EXCP_BASE + MC_VECTOR, "MC excp" }, \ 104 + { SVM_EXIT_INTR, "interrupt" }, \ 105 + { SVM_EXIT_NMI, "nmi" }, \ 106 + { SVM_EXIT_SMI, "smi" }, \ 107 + { SVM_EXIT_INIT, "init" }, \ 108 + { SVM_EXIT_VINTR, "vintr" }, \ 109 + { 
SVM_EXIT_CPUID, "cpuid" }, \ 110 + { SVM_EXIT_INVD, "invd" }, \ 111 + { SVM_EXIT_HLT, "hlt" }, \ 112 + { SVM_EXIT_INVLPG, "invlpg" }, \ 113 + { SVM_EXIT_INVLPGA, "invlpga" }, \ 114 + { SVM_EXIT_IOIO, "io" }, \ 115 + { SVM_EXIT_MSR, "msr" }, \ 116 + { SVM_EXIT_TASK_SWITCH, "task_switch" }, \ 117 + { SVM_EXIT_SHUTDOWN, "shutdown" }, \ 118 + { SVM_EXIT_VMRUN, "vmrun" }, \ 119 + { SVM_EXIT_VMMCALL, "hypercall" }, \ 120 + { SVM_EXIT_VMLOAD, "vmload" }, \ 121 + { SVM_EXIT_VMSAVE, "vmsave" }, \ 122 + { SVM_EXIT_STGI, "stgi" }, \ 123 + { SVM_EXIT_CLGI, "clgi" }, \ 124 + { SVM_EXIT_SKINIT, "skinit" }, \ 125 + { SVM_EXIT_WBINVD, "wbinvd" }, \ 126 + { SVM_EXIT_MONITOR, "monitor" }, \ 127 + { SVM_EXIT_MWAIT, "mwait" }, \ 128 + { SVM_EXIT_XSETBV, "xsetbv" }, \ 129 + { SVM_EXIT_NPF, "npf" } 130 + 131 + #ifdef __KERNEL__ 132 + 4 133 enum { 5 134 INTERCEPT_INTR, 6 135 INTERCEPT_NMI, ··· 393 264 394 265 #define SVM_EXITINFO_REG_MASK 0x0F 395 266 396 - #define SVM_EXIT_READ_CR0 0x000 397 - #define SVM_EXIT_READ_CR3 0x003 398 - #define SVM_EXIT_READ_CR4 0x004 399 - #define SVM_EXIT_READ_CR8 0x008 400 - #define SVM_EXIT_WRITE_CR0 0x010 401 - #define SVM_EXIT_WRITE_CR3 0x013 402 - #define SVM_EXIT_WRITE_CR4 0x014 403 - #define SVM_EXIT_WRITE_CR8 0x018 404 - #define SVM_EXIT_READ_DR0 0x020 405 - #define SVM_EXIT_READ_DR1 0x021 406 - #define SVM_EXIT_READ_DR2 0x022 407 - #define SVM_EXIT_READ_DR3 0x023 408 - #define SVM_EXIT_READ_DR4 0x024 409 - #define SVM_EXIT_READ_DR5 0x025 410 - #define SVM_EXIT_READ_DR6 0x026 411 - #define SVM_EXIT_READ_DR7 0x027 412 - #define SVM_EXIT_WRITE_DR0 0x030 413 - #define SVM_EXIT_WRITE_DR1 0x031 414 - #define SVM_EXIT_WRITE_DR2 0x032 415 - #define SVM_EXIT_WRITE_DR3 0x033 416 - #define SVM_EXIT_WRITE_DR4 0x034 417 - #define SVM_EXIT_WRITE_DR5 0x035 418 - #define SVM_EXIT_WRITE_DR6 0x036 419 - #define SVM_EXIT_WRITE_DR7 0x037 420 - #define SVM_EXIT_EXCP_BASE 0x040 421 - #define SVM_EXIT_INTR 0x060 422 - #define SVM_EXIT_NMI 0x061 423 - #define SVM_EXIT_SMI 
0x062 424 - #define SVM_EXIT_INIT 0x063 425 - #define SVM_EXIT_VINTR 0x064 426 - #define SVM_EXIT_CR0_SEL_WRITE 0x065 427 - #define SVM_EXIT_IDTR_READ 0x066 428 - #define SVM_EXIT_GDTR_READ 0x067 429 - #define SVM_EXIT_LDTR_READ 0x068 430 - #define SVM_EXIT_TR_READ 0x069 431 - #define SVM_EXIT_IDTR_WRITE 0x06a 432 - #define SVM_EXIT_GDTR_WRITE 0x06b 433 - #define SVM_EXIT_LDTR_WRITE 0x06c 434 - #define SVM_EXIT_TR_WRITE 0x06d 435 - #define SVM_EXIT_RDTSC 0x06e 436 - #define SVM_EXIT_RDPMC 0x06f 437 - #define SVM_EXIT_PUSHF 0x070 438 - #define SVM_EXIT_POPF 0x071 439 - #define SVM_EXIT_CPUID 0x072 440 - #define SVM_EXIT_RSM 0x073 441 - #define SVM_EXIT_IRET 0x074 442 - #define SVM_EXIT_SWINT 0x075 443 - #define SVM_EXIT_INVD 0x076 444 - #define SVM_EXIT_PAUSE 0x077 445 - #define SVM_EXIT_HLT 0x078 446 - #define SVM_EXIT_INVLPG 0x079 447 - #define SVM_EXIT_INVLPGA 0x07a 448 - #define SVM_EXIT_IOIO 0x07b 449 - #define SVM_EXIT_MSR 0x07c 450 - #define SVM_EXIT_TASK_SWITCH 0x07d 451 - #define SVM_EXIT_FERR_FREEZE 0x07e 452 - #define SVM_EXIT_SHUTDOWN 0x07f 453 - #define SVM_EXIT_VMRUN 0x080 454 - #define SVM_EXIT_VMMCALL 0x081 455 - #define SVM_EXIT_VMLOAD 0x082 456 - #define SVM_EXIT_VMSAVE 0x083 457 - #define SVM_EXIT_STGI 0x084 458 - #define SVM_EXIT_CLGI 0x085 459 - #define SVM_EXIT_SKINIT 0x086 460 - #define SVM_EXIT_RDTSCP 0x087 461 - #define SVM_EXIT_ICEBP 0x088 462 - #define SVM_EXIT_WBINVD 0x089 463 - #define SVM_EXIT_MONITOR 0x08a 464 - #define SVM_EXIT_MWAIT 0x08b 465 - #define SVM_EXIT_MWAIT_COND 0x08c 466 - #define SVM_EXIT_XSETBV 0x08d 467 - #define SVM_EXIT_NPF 0x400 468 - 469 - #define SVM_EXIT_ERR -1 470 - 471 267 #define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP) 472 268 473 269 #define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda" ··· 404 350 405 351 #endif 406 352 353 + #endif
+84 -43
arch/x86/include/asm/vmx.h
··· 25 25 * 26 26 */ 27 27 28 + #define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000 29 + 30 + #define EXIT_REASON_EXCEPTION_NMI 0 31 + #define EXIT_REASON_EXTERNAL_INTERRUPT 1 32 + #define EXIT_REASON_TRIPLE_FAULT 2 33 + 34 + #define EXIT_REASON_PENDING_INTERRUPT 7 35 + #define EXIT_REASON_NMI_WINDOW 8 36 + #define EXIT_REASON_TASK_SWITCH 9 37 + #define EXIT_REASON_CPUID 10 38 + #define EXIT_REASON_HLT 12 39 + #define EXIT_REASON_INVD 13 40 + #define EXIT_REASON_INVLPG 14 41 + #define EXIT_REASON_RDPMC 15 42 + #define EXIT_REASON_RDTSC 16 43 + #define EXIT_REASON_VMCALL 18 44 + #define EXIT_REASON_VMCLEAR 19 45 + #define EXIT_REASON_VMLAUNCH 20 46 + #define EXIT_REASON_VMPTRLD 21 47 + #define EXIT_REASON_VMPTRST 22 48 + #define EXIT_REASON_VMREAD 23 49 + #define EXIT_REASON_VMRESUME 24 50 + #define EXIT_REASON_VMWRITE 25 51 + #define EXIT_REASON_VMOFF 26 52 + #define EXIT_REASON_VMON 27 53 + #define EXIT_REASON_CR_ACCESS 28 54 + #define EXIT_REASON_DR_ACCESS 29 55 + #define EXIT_REASON_IO_INSTRUCTION 30 56 + #define EXIT_REASON_MSR_READ 31 57 + #define EXIT_REASON_MSR_WRITE 32 58 + #define EXIT_REASON_INVALID_STATE 33 59 + #define EXIT_REASON_MWAIT_INSTRUCTION 36 60 + #define EXIT_REASON_MONITOR_INSTRUCTION 39 61 + #define EXIT_REASON_PAUSE_INSTRUCTION 40 62 + #define EXIT_REASON_MCE_DURING_VMENTRY 41 63 + #define EXIT_REASON_TPR_BELOW_THRESHOLD 43 64 + #define EXIT_REASON_APIC_ACCESS 44 65 + #define EXIT_REASON_EPT_VIOLATION 48 66 + #define EXIT_REASON_EPT_MISCONFIG 49 67 + #define EXIT_REASON_WBINVD 54 68 + #define EXIT_REASON_XSETBV 55 69 + #define EXIT_REASON_INVPCID 58 70 + 71 + #define VMX_EXIT_REASONS \ 72 + { EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \ 73 + { EXIT_REASON_EXTERNAL_INTERRUPT, "EXTERNAL_INTERRUPT" }, \ 74 + { EXIT_REASON_TRIPLE_FAULT, "TRIPLE_FAULT" }, \ 75 + { EXIT_REASON_PENDING_INTERRUPT, "PENDING_INTERRUPT" }, \ 76 + { EXIT_REASON_NMI_WINDOW, "NMI_WINDOW" }, \ 77 + { EXIT_REASON_TASK_SWITCH, "TASK_SWITCH" }, \ 78 + { EXIT_REASON_CPUID, 
"CPUID" }, \ 79 + { EXIT_REASON_HLT, "HLT" }, \ 80 + { EXIT_REASON_INVLPG, "INVLPG" }, \ 81 + { EXIT_REASON_RDPMC, "RDPMC" }, \ 82 + { EXIT_REASON_RDTSC, "RDTSC" }, \ 83 + { EXIT_REASON_VMCALL, "VMCALL" }, \ 84 + { EXIT_REASON_VMCLEAR, "VMCLEAR" }, \ 85 + { EXIT_REASON_VMLAUNCH, "VMLAUNCH" }, \ 86 + { EXIT_REASON_VMPTRLD, "VMPTRLD" }, \ 87 + { EXIT_REASON_VMPTRST, "VMPTRST" }, \ 88 + { EXIT_REASON_VMREAD, "VMREAD" }, \ 89 + { EXIT_REASON_VMRESUME, "VMRESUME" }, \ 90 + { EXIT_REASON_VMWRITE, "VMWRITE" }, \ 91 + { EXIT_REASON_VMOFF, "VMOFF" }, \ 92 + { EXIT_REASON_VMON, "VMON" }, \ 93 + { EXIT_REASON_CR_ACCESS, "CR_ACCESS" }, \ 94 + { EXIT_REASON_DR_ACCESS, "DR_ACCESS" }, \ 95 + { EXIT_REASON_IO_INSTRUCTION, "IO_INSTRUCTION" }, \ 96 + { EXIT_REASON_MSR_READ, "MSR_READ" }, \ 97 + { EXIT_REASON_MSR_WRITE, "MSR_WRITE" }, \ 98 + { EXIT_REASON_MWAIT_INSTRUCTION, "MWAIT_INSTRUCTION" }, \ 99 + { EXIT_REASON_MONITOR_INSTRUCTION, "MONITOR_INSTRUCTION" }, \ 100 + { EXIT_REASON_PAUSE_INSTRUCTION, "PAUSE_INSTRUCTION" }, \ 101 + { EXIT_REASON_MCE_DURING_VMENTRY, "MCE_DURING_VMENTRY" }, \ 102 + { EXIT_REASON_TPR_BELOW_THRESHOLD, "TPR_BELOW_THRESHOLD" }, \ 103 + { EXIT_REASON_APIC_ACCESS, "APIC_ACCESS" }, \ 104 + { EXIT_REASON_EPT_VIOLATION, "EPT_VIOLATION" }, \ 105 + { EXIT_REASON_EPT_MISCONFIG, "EPT_MISCONFIG" }, \ 106 + { EXIT_REASON_WBINVD, "WBINVD" } 107 + 108 + #ifdef __KERNEL__ 109 + 28 110 #include <linux/types.h> 29 111 30 112 /* ··· 323 241 HOST_RIP = 0x00006c16, 324 242 }; 325 243 326 - #define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000 327 - 328 - #define EXIT_REASON_EXCEPTION_NMI 0 329 - #define EXIT_REASON_EXTERNAL_INTERRUPT 1 330 - #define EXIT_REASON_TRIPLE_FAULT 2 331 - 332 - #define EXIT_REASON_PENDING_INTERRUPT 7 333 - #define EXIT_REASON_NMI_WINDOW 8 334 - #define EXIT_REASON_TASK_SWITCH 9 335 - #define EXIT_REASON_CPUID 10 336 - #define EXIT_REASON_HLT 12 337 - #define EXIT_REASON_INVD 13 338 - #define EXIT_REASON_INVLPG 14 339 - #define EXIT_REASON_RDPMC 15 
340 - #define EXIT_REASON_RDTSC 16 341 - #define EXIT_REASON_VMCALL 18 342 - #define EXIT_REASON_VMCLEAR 19 343 - #define EXIT_REASON_VMLAUNCH 20 344 - #define EXIT_REASON_VMPTRLD 21 345 - #define EXIT_REASON_VMPTRST 22 346 - #define EXIT_REASON_VMREAD 23 347 - #define EXIT_REASON_VMRESUME 24 348 - #define EXIT_REASON_VMWRITE 25 349 - #define EXIT_REASON_VMOFF 26 350 - #define EXIT_REASON_VMON 27 351 - #define EXIT_REASON_CR_ACCESS 28 352 - #define EXIT_REASON_DR_ACCESS 29 353 - #define EXIT_REASON_IO_INSTRUCTION 30 354 - #define EXIT_REASON_MSR_READ 31 355 - #define EXIT_REASON_MSR_WRITE 32 356 - #define EXIT_REASON_INVALID_STATE 33 357 - #define EXIT_REASON_MWAIT_INSTRUCTION 36 358 - #define EXIT_REASON_MONITOR_INSTRUCTION 39 359 - #define EXIT_REASON_PAUSE_INSTRUCTION 40 360 - #define EXIT_REASON_MCE_DURING_VMENTRY 41 361 - #define EXIT_REASON_TPR_BELOW_THRESHOLD 43 362 - #define EXIT_REASON_APIC_ACCESS 44 363 - #define EXIT_REASON_EPT_VIOLATION 48 364 - #define EXIT_REASON_EPT_MISCONFIG 49 365 - #define EXIT_REASON_WBINVD 54 366 - #define EXIT_REASON_XSETBV 55 367 - #define EXIT_REASON_INVPCID 58 368 - 369 244 /* 370 245 * Interruption-information format 371 246 */ ··· 525 486 VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS = 26, 526 487 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID = 28, 527 488 }; 489 + 490 + #endif 528 491 529 492 #endif
-89
arch/x86/kvm/trace.h
··· 183 183 #define KVM_ISA_VMX 1 184 184 #define KVM_ISA_SVM 2 185 185 186 - #define VMX_EXIT_REASONS \ 187 - { EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \ 188 - { EXIT_REASON_EXTERNAL_INTERRUPT, "EXTERNAL_INTERRUPT" }, \ 189 - { EXIT_REASON_TRIPLE_FAULT, "TRIPLE_FAULT" }, \ 190 - { EXIT_REASON_PENDING_INTERRUPT, "PENDING_INTERRUPT" }, \ 191 - { EXIT_REASON_NMI_WINDOW, "NMI_WINDOW" }, \ 192 - { EXIT_REASON_TASK_SWITCH, "TASK_SWITCH" }, \ 193 - { EXIT_REASON_CPUID, "CPUID" }, \ 194 - { EXIT_REASON_HLT, "HLT" }, \ 195 - { EXIT_REASON_INVLPG, "INVLPG" }, \ 196 - { EXIT_REASON_RDPMC, "RDPMC" }, \ 197 - { EXIT_REASON_RDTSC, "RDTSC" }, \ 198 - { EXIT_REASON_VMCALL, "VMCALL" }, \ 199 - { EXIT_REASON_VMCLEAR, "VMCLEAR" }, \ 200 - { EXIT_REASON_VMLAUNCH, "VMLAUNCH" }, \ 201 - { EXIT_REASON_VMPTRLD, "VMPTRLD" }, \ 202 - { EXIT_REASON_VMPTRST, "VMPTRST" }, \ 203 - { EXIT_REASON_VMREAD, "VMREAD" }, \ 204 - { EXIT_REASON_VMRESUME, "VMRESUME" }, \ 205 - { EXIT_REASON_VMWRITE, "VMWRITE" }, \ 206 - { EXIT_REASON_VMOFF, "VMOFF" }, \ 207 - { EXIT_REASON_VMON, "VMON" }, \ 208 - { EXIT_REASON_CR_ACCESS, "CR_ACCESS" }, \ 209 - { EXIT_REASON_DR_ACCESS, "DR_ACCESS" }, \ 210 - { EXIT_REASON_IO_INSTRUCTION, "IO_INSTRUCTION" }, \ 211 - { EXIT_REASON_MSR_READ, "MSR_READ" }, \ 212 - { EXIT_REASON_MSR_WRITE, "MSR_WRITE" }, \ 213 - { EXIT_REASON_MWAIT_INSTRUCTION, "MWAIT_INSTRUCTION" }, \ 214 - { EXIT_REASON_MONITOR_INSTRUCTION, "MONITOR_INSTRUCTION" }, \ 215 - { EXIT_REASON_PAUSE_INSTRUCTION, "PAUSE_INSTRUCTION" }, \ 216 - { EXIT_REASON_MCE_DURING_VMENTRY, "MCE_DURING_VMENTRY" }, \ 217 - { EXIT_REASON_TPR_BELOW_THRESHOLD, "TPR_BELOW_THRESHOLD" }, \ 218 - { EXIT_REASON_APIC_ACCESS, "APIC_ACCESS" }, \ 219 - { EXIT_REASON_EPT_VIOLATION, "EPT_VIOLATION" }, \ 220 - { EXIT_REASON_EPT_MISCONFIG, "EPT_MISCONFIG" }, \ 221 - { EXIT_REASON_WBINVD, "WBINVD" } 222 - 223 - #define SVM_EXIT_REASONS \ 224 - { SVM_EXIT_READ_CR0, "read_cr0" }, \ 225 - { SVM_EXIT_READ_CR3, "read_cr3" }, \ 226 - { 
SVM_EXIT_READ_CR4, "read_cr4" }, \ 227 - { SVM_EXIT_READ_CR8, "read_cr8" }, \ 228 - { SVM_EXIT_WRITE_CR0, "write_cr0" }, \ 229 - { SVM_EXIT_WRITE_CR3, "write_cr3" }, \ 230 - { SVM_EXIT_WRITE_CR4, "write_cr4" }, \ 231 - { SVM_EXIT_WRITE_CR8, "write_cr8" }, \ 232 - { SVM_EXIT_READ_DR0, "read_dr0" }, \ 233 - { SVM_EXIT_READ_DR1, "read_dr1" }, \ 234 - { SVM_EXIT_READ_DR2, "read_dr2" }, \ 235 - { SVM_EXIT_READ_DR3, "read_dr3" }, \ 236 - { SVM_EXIT_WRITE_DR0, "write_dr0" }, \ 237 - { SVM_EXIT_WRITE_DR1, "write_dr1" }, \ 238 - { SVM_EXIT_WRITE_DR2, "write_dr2" }, \ 239 - { SVM_EXIT_WRITE_DR3, "write_dr3" }, \ 240 - { SVM_EXIT_WRITE_DR5, "write_dr5" }, \ 241 - { SVM_EXIT_WRITE_DR7, "write_dr7" }, \ 242 - { SVM_EXIT_EXCP_BASE + DB_VECTOR, "DB excp" }, \ 243 - { SVM_EXIT_EXCP_BASE + BP_VECTOR, "BP excp" }, \ 244 - { SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" }, \ 245 - { SVM_EXIT_EXCP_BASE + PF_VECTOR, "PF excp" }, \ 246 - { SVM_EXIT_EXCP_BASE + NM_VECTOR, "NM excp" }, \ 247 - { SVM_EXIT_EXCP_BASE + MC_VECTOR, "MC excp" }, \ 248 - { SVM_EXIT_INTR, "interrupt" }, \ 249 - { SVM_EXIT_NMI, "nmi" }, \ 250 - { SVM_EXIT_SMI, "smi" }, \ 251 - { SVM_EXIT_INIT, "init" }, \ 252 - { SVM_EXIT_VINTR, "vintr" }, \ 253 - { SVM_EXIT_CPUID, "cpuid" }, \ 254 - { SVM_EXIT_INVD, "invd" }, \ 255 - { SVM_EXIT_HLT, "hlt" }, \ 256 - { SVM_EXIT_INVLPG, "invlpg" }, \ 257 - { SVM_EXIT_INVLPGA, "invlpga" }, \ 258 - { SVM_EXIT_IOIO, "io" }, \ 259 - { SVM_EXIT_MSR, "msr" }, \ 260 - { SVM_EXIT_TASK_SWITCH, "task_switch" }, \ 261 - { SVM_EXIT_SHUTDOWN, "shutdown" }, \ 262 - { SVM_EXIT_VMRUN, "vmrun" }, \ 263 - { SVM_EXIT_VMMCALL, "hypercall" }, \ 264 - { SVM_EXIT_VMLOAD, "vmload" }, \ 265 - { SVM_EXIT_VMSAVE, "vmsave" }, \ 266 - { SVM_EXIT_STGI, "stgi" }, \ 267 - { SVM_EXIT_CLGI, "clgi" }, \ 268 - { SVM_EXIT_SKINIT, "skinit" }, \ 269 - { SVM_EXIT_WBINVD, "wbinvd" }, \ 270 - { SVM_EXIT_MONITOR, "monitor" }, \ 271 - { SVM_EXIT_MWAIT, "mwait" }, \ 272 - { SVM_EXIT_XSETBV, "xsetbv" }, \ 273 - { SVM_EXIT_NPF, 
"npf" } 274 - 275 186 /* 276 187 * Tracepoint for kvm guest exit: 277 188 */
+405 -167
tools/lib/traceevent/event-parse.c
··· 31 31 #include <ctype.h> 32 32 #include <errno.h> 33 33 #include <stdint.h> 34 + #include <limits.h> 34 35 35 36 #include "event-parse.h" 36 37 #include "event-utils.h" ··· 118 117 119 118 struct print_arg *alloc_arg(void) 120 119 { 121 - struct print_arg *arg; 122 - 123 - arg = malloc_or_die(sizeof(*arg)); 124 - if (!arg) 125 - return NULL; 126 - memset(arg, 0, sizeof(*arg)); 127 - 128 - return arg; 120 + return calloc(1, sizeof(struct print_arg)); 129 121 } 130 122 131 123 struct cmdline { ··· 152 158 struct cmdline *cmdlines; 153 159 int i; 154 160 155 - cmdlines = malloc_or_die(sizeof(*cmdlines) * pevent->cmdline_count); 161 + cmdlines = malloc(sizeof(*cmdlines) * pevent->cmdline_count); 162 + if (!cmdlines) 163 + return -1; 156 164 157 165 i = 0; 158 166 while (cmdlist) { ··· 182 186 if (!pid) 183 187 return "<idle>"; 184 188 185 - if (!pevent->cmdlines) 186 - cmdline_init(pevent); 189 + if (!pevent->cmdlines && cmdline_init(pevent)) 190 + return "<not enough memory for cmdlines!>"; 187 191 188 192 key.pid = pid; 189 193 ··· 211 215 if (!pid) 212 216 return 1; 213 217 214 - if (!pevent->cmdlines) 215 - cmdline_init(pevent); 218 + if (!pevent->cmdlines && cmdline_init(pevent)) 219 + return 0; 216 220 217 221 key.pid = pid; 218 222 ··· 254 258 return -1; 255 259 } 256 260 257 - cmdlines[pevent->cmdline_count].pid = pid; 258 261 cmdlines[pevent->cmdline_count].comm = strdup(comm); 259 - if (!cmdlines[pevent->cmdline_count].comm) 260 - die("malloc comm"); 262 + if (!cmdlines[pevent->cmdline_count].comm) { 263 + free(cmdlines); 264 + errno = ENOMEM; 265 + return -1; 266 + } 267 + 268 + cmdlines[pevent->cmdline_count].pid = pid; 261 269 262 270 if (cmdlines[pevent->cmdline_count].comm) 263 271 pevent->cmdline_count++; ··· 288 288 if (pevent->cmdlines) 289 289 return add_new_comm(pevent, comm, pid); 290 290 291 - item = malloc_or_die(sizeof(*item)); 291 + item = malloc(sizeof(*item)); 292 + if (!item) 293 + return -1; 294 + 292 295 item->comm = strdup(comm); 293 
- if (!item->comm) 294 - die("malloc comm"); 296 + if (!item->comm) { 297 + free(item); 298 + return -1; 299 + } 295 300 item->pid = pid; 296 301 item->next = pevent->cmdlist; 297 302 ··· 360 355 struct func_map *func_map; 361 356 int i; 362 357 363 - func_map = malloc_or_die(sizeof(*func_map) * (pevent->func_count + 1)); 358 + func_map = malloc(sizeof(*func_map) * (pevent->func_count + 1)); 359 + if (!func_map) 360 + return -1; 361 + 364 362 funclist = pevent->funclist; 365 363 366 364 i = 0; ··· 463 455 int pevent_register_function(struct pevent *pevent, char *func, 464 456 unsigned long long addr, char *mod) 465 457 { 466 - struct func_list *item; 458 + struct func_list *item = malloc(sizeof(*item)); 467 459 468 - item = malloc_or_die(sizeof(*item)); 460 + if (!item) 461 + return -1; 469 462 470 463 item->next = pevent->funclist; 471 464 item->func = strdup(func); 472 - if (mod) 465 + if (!item->func) 466 + goto out_free; 467 + 468 + if (mod) { 473 469 item->mod = strdup(mod); 474 - else 470 + if (!item->mod) 471 + goto out_free_func; 472 + } else 475 473 item->mod = NULL; 476 474 item->addr = addr; 477 - 478 - if (!item->func || (mod && !item->mod)) 479 - die("malloc func"); 480 475 481 476 pevent->funclist = item; 482 477 pevent->func_count++; 483 478 484 479 return 0; 480 + 481 + out_free_func: 482 + free(item->func); 483 + item->func = NULL; 484 + out_free: 485 + free(item); 486 + errno = ENOMEM; 487 + return -1; 485 488 } 486 489 487 490 /** ··· 543 524 return 0; 544 525 } 545 526 546 - static void printk_map_init(struct pevent *pevent) 527 + static int printk_map_init(struct pevent *pevent) 547 528 { 548 529 struct printk_list *printklist; 549 530 struct printk_list *item; 550 531 struct printk_map *printk_map; 551 532 int i; 552 533 553 - printk_map = malloc_or_die(sizeof(*printk_map) * (pevent->printk_count + 1)); 534 + printk_map = malloc(sizeof(*printk_map) * (pevent->printk_count + 1)); 535 + if (!printk_map) 536 + return -1; 554 537 555 538 
printklist = pevent->printklist; 556 539 ··· 570 549 571 550 pevent->printk_map = printk_map; 572 551 pevent->printklist = NULL; 552 + 553 + return 0; 573 554 } 574 555 575 556 static struct printk_map * ··· 580 557 struct printk_map *printk; 581 558 struct printk_map key; 582 559 583 - if (!pevent->printk_map) 584 - printk_map_init(pevent); 560 + if (!pevent->printk_map && printk_map_init(pevent)) 561 + return NULL; 585 562 586 563 key.addr = addr; 587 564 ··· 603 580 int pevent_register_print_string(struct pevent *pevent, char *fmt, 604 581 unsigned long long addr) 605 582 { 606 - struct printk_list *item; 583 + struct printk_list *item = malloc(sizeof(*item)); 607 584 608 - item = malloc_or_die(sizeof(*item)); 585 + if (!item) 586 + return -1; 609 587 610 588 item->next = pevent->printklist; 611 - item->printk = strdup(fmt); 612 589 item->addr = addr; 613 590 591 + item->printk = strdup(fmt); 614 592 if (!item->printk) 615 - die("malloc fmt"); 593 + goto out_free; 616 594 617 595 pevent->printklist = item; 618 596 pevent->printk_count++; 619 597 620 598 return 0; 599 + 600 + out_free: 601 + free(item); 602 + errno = ENOMEM; 603 + return -1; 621 604 } 622 605 623 606 /** ··· 648 619 649 620 static struct event_format *alloc_event(void) 650 621 { 651 - struct event_format *event; 652 - 653 - event = malloc(sizeof(*event)); 654 - if (!event) 655 - return NULL; 656 - memset(event, 0, sizeof(*event)); 657 - 658 - return event; 622 + return calloc(1, sizeof(struct event_format)); 659 623 } 660 624 661 - static void add_event(struct pevent *pevent, struct event_format *event) 625 + static int add_event(struct pevent *pevent, struct event_format *event) 662 626 { 663 627 int i; 628 + struct event_format **events = realloc(pevent->events, sizeof(event) * 629 + (pevent->nr_events + 1)); 630 + if (!events) 631 + return -1; 664 632 665 - pevent->events = realloc(pevent->events, sizeof(event) * 666 - (pevent->nr_events + 1)); 667 - if (!pevent->events) 668 - die("Can not 
allocate events"); 633 + pevent->events = events; 669 634 670 635 for (i = 0; i < pevent->nr_events; i++) { 671 636 if (pevent->events[i]->id > event->id) ··· 674 651 pevent->nr_events++; 675 652 676 653 event->pevent = pevent; 654 + 655 + return 0; 677 656 } 678 657 679 658 static int event_item_type(enum event_type type) ··· 852 827 switch (type) { 853 828 case EVENT_NEWLINE: 854 829 case EVENT_DELIM: 855 - *tok = malloc_or_die(2); 856 - (*tok)[0] = ch; 857 - (*tok)[1] = 0; 830 + if (asprintf(tok, "%c", ch) < 0) 831 + return EVENT_ERROR; 832 + 858 833 return type; 859 834 860 835 case EVENT_OP: ··· 1265 1240 1266 1241 last_token = token; 1267 1242 1268 - field = malloc_or_die(sizeof(*field)); 1269 - memset(field, 0, sizeof(*field)); 1243 + field = calloc(1, sizeof(*field)); 1244 + if (!field) 1245 + goto fail; 1246 + 1270 1247 field->event = event; 1271 1248 1272 1249 /* read the rest of the type */ ··· 1309 1282 } 1310 1283 1311 1284 if (!field->type) { 1312 - die("no type found"); 1285 + do_warning("%s: no type found", __func__); 1313 1286 goto fail; 1314 1287 } 1315 1288 field->name = last_token; ··· 1356 1329 free_token(token); 1357 1330 type = read_token(&token); 1358 1331 if (type == EVENT_NONE) { 1359 - die("failed to find token"); 1332 + do_warning("failed to find token"); 1360 1333 goto fail; 1361 1334 } 1362 1335 } ··· 1565 1538 left = alloc_arg(); 1566 1539 right = alloc_arg(); 1567 1540 1541 + if (!arg || !left || !right) { 1542 + do_warning("%s: not enough memory!", __func__); 1543 + /* arg will be freed at out_free */ 1544 + free_arg(left); 1545 + free_arg(right); 1546 + goto out_free; 1547 + } 1548 + 1568 1549 arg->type = PRINT_OP; 1569 1550 arg->op.left = left; 1570 1551 arg->op.right = right; ··· 1615 1580 char *token = NULL; 1616 1581 1617 1582 arg = alloc_arg(); 1583 + if (!arg) { 1584 + do_warning("%s: not enough memory!", __func__); 1585 + /* '*tok' is set to top->op.op. No need to free. 
*/ 1586 + *tok = NULL; 1587 + return EVENT_ERROR; 1588 + } 1618 1589 1619 1590 *tok = NULL; 1620 1591 type = process_arg(event, arg, &token); ··· 1636 1595 return type; 1637 1596 1638 1597 out_free: 1639 - free_token(*tok); 1640 - *tok = NULL; 1598 + free_token(token); 1641 1599 free_arg(arg); 1642 1600 return EVENT_ERROR; 1643 1601 } ··· 1722 1682 if (arg->type == PRINT_OP && !arg->op.left) { 1723 1683 /* handle single op */ 1724 1684 if (token[1]) { 1725 - die("bad op token %s", token); 1685 + do_warning("bad op token %s", token); 1726 1686 goto out_free; 1727 1687 } 1728 1688 switch (token[0]) { ··· 1739 1699 1740 1700 /* make an empty left */ 1741 1701 left = alloc_arg(); 1702 + if (!left) 1703 + goto out_warn_free; 1704 + 1742 1705 left->type = PRINT_NULL; 1743 1706 arg->op.left = left; 1744 1707 1745 1708 right = alloc_arg(); 1709 + if (!right) 1710 + goto out_warn_free; 1711 + 1746 1712 arg->op.right = right; 1747 1713 1748 1714 /* do not free the token, it belongs to an op */ ··· 1758 1712 } else if (strcmp(token, "?") == 0) { 1759 1713 1760 1714 left = alloc_arg(); 1715 + if (!left) 1716 + goto out_warn_free; 1717 + 1761 1718 /* copy the top arg to the left */ 1762 1719 *left = *arg; 1763 1720 ··· 1769 1720 arg->op.left = left; 1770 1721 arg->op.prio = 0; 1771 1722 1723 + /* it will set arg->op.right */ 1772 1724 type = process_cond(event, arg, tok); 1773 1725 1774 1726 } else if (strcmp(token, ">>") == 0 || ··· 1789 1739 strcmp(token, "!=") == 0) { 1790 1740 1791 1741 left = alloc_arg(); 1742 + if (!left) 1743 + goto out_warn_free; 1792 1744 1793 1745 /* copy the top arg to the left */ 1794 1746 *left = *arg; ··· 1798 1746 arg->type = PRINT_OP; 1799 1747 arg->op.op = token; 1800 1748 arg->op.left = left; 1749 + arg->op.right = NULL; 1801 1750 1802 1751 if (set_op_prio(arg) == -1) { 1803 1752 event->flags |= EVENT_FL_FAILED; ··· 1815 1762 type == EVENT_DELIM && (strcmp(token, ")") == 0)) { 1816 1763 char *new_atom; 1817 1764 1818 - if (left->type != 
PRINT_ATOM) 1819 - die("bad pointer type"); 1765 + if (left->type != PRINT_ATOM) { 1766 + do_warning("bad pointer type"); 1767 + goto out_free; 1768 + } 1820 1769 new_atom = realloc(left->atom.atom, 1821 1770 strlen(left->atom.atom) + 3); 1822 1771 if (!new_atom) 1823 - goto out_free; 1772 + goto out_warn_free; 1824 1773 1825 1774 left->atom.atom = new_atom; 1826 1775 strcat(left->atom.atom, " *"); ··· 1834 1779 } 1835 1780 1836 1781 right = alloc_arg(); 1782 + if (!right) 1783 + goto out_warn_free; 1784 + 1837 1785 type = process_arg_token(event, right, tok, type); 1838 1786 arg->op.right = right; 1839 1787 1840 1788 } else if (strcmp(token, "[") == 0) { 1841 1789 1842 1790 left = alloc_arg(); 1791 + if (!left) 1792 + goto out_warn_free; 1793 + 1843 1794 *left = *arg; 1844 1795 1845 1796 arg->type = PRINT_OP; ··· 1854 1793 1855 1794 arg->op.prio = 0; 1856 1795 1796 + /* it will set arg->op.right */ 1857 1797 type = process_array(event, arg, tok); 1858 1798 1859 1799 } else { ··· 1878 1816 1879 1817 return type; 1880 1818 1881 - out_free: 1819 + out_warn_free: 1820 + do_warning("%s: not enough memory!", __func__); 1821 + out_free: 1882 1822 free_token(token); 1883 1823 *tok = NULL; 1884 1824 return EVENT_ERROR; ··· 1944 1880 return val; 1945 1881 } 1946 1882 1947 - ref = malloc_or_die(len); 1883 + ref = malloc(len); 1884 + if (!ref) { 1885 + do_warning("%s: not enough memory!", __func__); 1886 + return val; 1887 + } 1948 1888 memcpy(ref, type, len); 1949 1889 1950 1890 /* chop off the " *" */ ··· 2025 1957 static unsigned long long 2026 1958 eval_type(unsigned long long val, struct print_arg *arg, int pointer) 2027 1959 { 2028 - if (arg->type != PRINT_TYPE) 2029 - die("expected type argument"); 1960 + if (arg->type != PRINT_TYPE) { 1961 + do_warning("expected type argument"); 1962 + return 0; 1963 + } 2030 1964 2031 1965 return eval_type_str(val, arg->typecast.type, pointer); 2032 1966 } ··· 2213 2143 case PRINT_STRING: 2214 2144 case PRINT_BSTRING: 2215 2145 
default: 2216 - die("invalid eval type %d", arg->type); 2146 + do_warning("invalid eval type %d", arg->type); 2217 2147 break; 2218 2148 } 2219 2149 ··· 2236 2166 break; 2237 2167 2238 2168 arg = alloc_arg(); 2169 + if (!arg) 2170 + goto out_free; 2239 2171 2240 2172 free_token(token); 2241 2173 type = process_arg(event, arg, &token); ··· 2251 2179 if (test_type_token(type, token, EVENT_DELIM, ",")) 2252 2180 goto out_free; 2253 2181 2254 - field = malloc_or_die(sizeof(*field)); 2255 - memset(field, 0, sizeof(*field)); 2182 + field = calloc(1, sizeof(*field)); 2183 + if (!field) 2184 + goto out_free; 2256 2185 2257 2186 value = arg_eval(arg); 2258 2187 if (value == NULL) 2259 - goto out_free; 2188 + goto out_free_field; 2260 2189 field->value = strdup(value); 2261 2190 if (field->value == NULL) 2262 - goto out_free; 2191 + goto out_free_field; 2263 2192 2264 2193 free_arg(arg); 2265 2194 arg = alloc_arg(); 2195 + if (!arg) 2196 + goto out_free; 2266 2197 2267 2198 free_token(token); 2268 2199 type = process_arg(event, arg, &token); 2269 2200 if (test_type_token(type, token, EVENT_OP, "}")) 2270 - goto out_free; 2201 + goto out_free_field; 2271 2202 2272 2203 value = arg_eval(arg); 2273 2204 if (value == NULL) 2274 - goto out_free; 2205 + goto out_free_field; 2275 2206 field->str = strdup(value); 2276 2207 if (field->str == NULL) 2277 - goto out_free; 2208 + goto out_free_field; 2278 2209 free_arg(arg); 2279 2210 arg = NULL; 2280 2211 ··· 2291 2216 *tok = token; 2292 2217 return type; 2293 2218 2219 + out_free_field: 2220 + free_flag_sym(field); 2294 2221 out_free: 2295 2222 free_arg(arg); 2296 2223 free_token(token); ··· 2312 2235 arg->type = PRINT_FLAGS; 2313 2236 2314 2237 field = alloc_arg(); 2238 + if (!field) { 2239 + do_warning("%s: not enough memory!", __func__); 2240 + goto out_free; 2241 + } 2315 2242 2316 2243 type = process_arg(event, field, &token); 2317 2244 ··· 2324 2243 type = process_op(event, field, &token); 2325 2244 2326 2245 if 
(test_type_token(type, token, EVENT_DELIM, ",")) 2327 - goto out_free; 2246 + goto out_free_field; 2328 2247 free_token(token); 2329 2248 2330 2249 arg->flags.field = field; ··· 2346 2265 type = read_token_item(tok); 2347 2266 return type; 2348 2267 2349 - out_free: 2268 + out_free_field: 2269 + free_arg(field); 2270 + out_free: 2350 2271 free_token(token); 2351 2272 *tok = NULL; 2352 2273 return EVENT_ERROR; ··· 2365 2282 arg->type = PRINT_SYMBOL; 2366 2283 2367 2284 field = alloc_arg(); 2285 + if (!field) { 2286 + do_warning("%s: not enough memory!", __func__); 2287 + goto out_free; 2288 + } 2368 2289 2369 2290 type = process_arg(event, field, &token); 2370 2291 if (test_type_token(type, token, EVENT_DELIM, ",")) 2371 - goto out_free; 2292 + goto out_free_field; 2372 2293 2373 2294 arg->symbol.field = field; 2374 2295 ··· 2384 2297 type = read_token_item(tok); 2385 2298 return type; 2386 2299 2387 - out_free: 2300 + out_free_field: 2301 + free_arg(field); 2302 + out_free: 2388 2303 free_token(token); 2389 2304 *tok = NULL; 2390 2305 return EVENT_ERROR; ··· 2403 2314 arg->type = PRINT_HEX; 2404 2315 2405 2316 field = alloc_arg(); 2317 + if (!field) { 2318 + do_warning("%s: not enough memory!", __func__); 2319 + goto out_free; 2320 + } 2321 + 2406 2322 type = process_arg(event, field, &token); 2407 2323 2408 2324 if (test_type_token(type, token, EVENT_DELIM, ",")) ··· 2418 2324 free_token(token); 2419 2325 2420 2326 field = alloc_arg(); 2327 + if (!field) { 2328 + do_warning("%s: not enough memory!", __func__); 2329 + *tok = NULL; 2330 + return EVENT_ERROR; 2331 + } 2332 + 2421 2333 type = process_arg(event, field, &token); 2422 2334 2423 2335 if (test_type_token(type, token, EVENT_DELIM, ")")) ··· 2481 2381 2482 2382 free_token(token); 2483 2383 arg = alloc_arg(); 2384 + if (!field) { 2385 + do_warning("%s: not enough memory!", __func__); 2386 + *tok = NULL; 2387 + return EVENT_ERROR; 2388 + } 2389 + 2484 2390 type = process_arg(event, arg, &token); 2485 2391 if 
(type == EVENT_ERROR) 2486 2392 goto out_free_arg; ··· 2540 2434 /* make this a typecast and contine */ 2541 2435 2542 2436 /* prevous must be an atom */ 2543 - if (arg->type != PRINT_ATOM) 2544 - die("previous needed to be PRINT_ATOM"); 2437 + if (arg->type != PRINT_ATOM) { 2438 + do_warning("previous needed to be PRINT_ATOM"); 2439 + goto out_free; 2440 + } 2545 2441 2546 2442 item_arg = alloc_arg(); 2443 + if (!item_arg) { 2444 + do_warning("%s: not enough memory!", __func__); 2445 + goto out_free; 2446 + } 2547 2447 2548 2448 arg->type = PRINT_TYPE; 2549 2449 arg->typecast.type = arg->atom.atom; ··· 2645 2533 next_arg = &(arg->func.args); 2646 2534 for (i = 0; i < func->nr_args; i++) { 2647 2535 farg = alloc_arg(); 2536 + if (!farg) { 2537 + do_warning("%s: not enough memory!", __func__); 2538 + return EVENT_ERROR; 2539 + } 2540 + 2648 2541 type = process_arg(event, farg, &token); 2649 2542 if (i < (func->nr_args - 1)) 2650 2543 test = ","; ··· 2794 2677 2795 2678 case EVENT_ERROR ... EVENT_NEWLINE: 2796 2679 default: 2797 - die("unexpected type %d", type); 2680 + do_warning("unexpected type %d", type); 2681 + return EVENT_ERROR; 2798 2682 } 2799 2683 *tok = token; 2800 2684 ··· 2816 2698 } 2817 2699 2818 2700 arg = alloc_arg(); 2701 + if (!arg) { 2702 + do_warning("%s: not enough memory!", __func__); 2703 + return -1; 2704 + } 2819 2705 2820 2706 type = process_arg(event, arg, &token); 2821 2707 ··· 2891 2769 if (type == EVENT_DQUOTE) { 2892 2770 char *cat; 2893 2771 2894 - cat = malloc_or_die(strlen(event->print_fmt.format) + 2895 - strlen(token) + 1); 2896 - strcpy(cat, event->print_fmt.format); 2897 - strcat(cat, token); 2772 + if (asprintf(&cat, "%s%s", event->print_fmt.format, token) < 0) 2773 + goto fail; 2898 2774 free_token(token); 2899 2775 free_token(event->print_fmt.format); 2900 2776 event->print_fmt.format = NULL; ··· 3046 2926 * All events should have the same common elements. 
3047 2927 * Pick any event to find where the type is; 3048 2928 */ 3049 - if (!pevent->events) 3050 - die("no event_list!"); 2929 + if (!pevent->events) { 2930 + do_warning("no event_list!"); 2931 + return -1; 2932 + } 3051 2933 3052 2934 event = pevent->events[0]; 3053 2935 field = pevent_find_common_field(event, type); ··· 3207 3085 if (!arg->field.field) { 3208 3086 arg->field.field = pevent_find_any_field(event, arg->field.name); 3209 3087 if (!arg->field.field) 3210 - die("field %s not found", arg->field.name); 3088 + goto out_warning_field; 3089 + 3211 3090 } 3212 3091 /* must be a number */ 3213 3092 val = pevent_read_number(pevent, data + arg->field.field->offset, ··· 3269 3146 if (!larg->field.field) { 3270 3147 larg->field.field = 3271 3148 pevent_find_any_field(event, larg->field.name); 3272 - if (!larg->field.field) 3273 - die("field %s not found", larg->field.name); 3149 + if (!larg->field.field) { 3150 + arg = larg; 3151 + goto out_warning_field; 3152 + } 3274 3153 } 3275 3154 field_size = larg->field.field->elementsize; 3276 3155 offset = larg->field.field->offset + ··· 3308 3183 val = left != right; 3309 3184 break; 3310 3185 default: 3311 - die("unknown op '%s'", arg->op.op); 3186 + goto out_warning_op; 3312 3187 } 3313 3188 break; 3314 3189 case '~': ··· 3338 3213 val = left <= right; 3339 3214 break; 3340 3215 default: 3341 - die("unknown op '%s'", arg->op.op); 3216 + goto out_warning_op; 3342 3217 } 3343 3218 break; 3344 3219 case '>': ··· 3353 3228 val = left >= right; 3354 3229 break; 3355 3230 default: 3356 - die("unknown op '%s'", arg->op.op); 3231 + goto out_warning_op; 3357 3232 } 3358 3233 break; 3359 3234 case '=': 3360 3235 if (arg->op.op[1] != '=') 3361 - die("unknown op '%s'", arg->op.op); 3236 + goto out_warning_op; 3237 + 3362 3238 val = left == right; 3363 3239 break; 3364 3240 case '-': ··· 3375 3249 val = left * right; 3376 3250 break; 3377 3251 default: 3378 - die("unknown op '%s'", arg->op.op); 3252 + goto out_warning_op; 3379 
3253 } 3380 3254 break; 3381 3255 default: /* not sure what to do there */ 3382 3256 return 0; 3383 3257 } 3384 3258 return val; 3259 + 3260 + out_warning_op: 3261 + do_warning("%s: unknown op '%s'", __func__, arg->op.op); 3262 + return 0; 3263 + 3264 + out_warning_field: 3265 + do_warning("%s: field %s not found", __func__, arg->field.name); 3266 + return 0; 3385 3267 } 3386 3268 3387 3269 struct flag { ··· 3466 3332 field = arg->field.field; 3467 3333 if (!field) { 3468 3334 field = pevent_find_any_field(event, arg->field.name); 3469 - if (!field) 3470 - die("field %s not found", arg->field.name); 3335 + if (!field) { 3336 + str = arg->field.name; 3337 + goto out_warning_field; 3338 + } 3471 3339 arg->field.field = field; 3472 3340 } 3473 3341 /* Zero sized fields, mean the rest of the data */ ··· 3486 3350 trace_seq_printf(s, "%lx", addr); 3487 3351 break; 3488 3352 } 3489 - str = malloc_or_die(len + 1); 3353 + str = malloc(len + 1); 3354 + if (!str) { 3355 + do_warning("%s: not enough memory!", __func__); 3356 + return; 3357 + } 3490 3358 memcpy(str, data + field->offset, len); 3491 3359 str[len] = 0; 3492 3360 print_str_to_seq(s, format, len_arg, str); ··· 3530 3390 str = arg->hex.field->field.name; 3531 3391 field = pevent_find_any_field(event, str); 3532 3392 if (!field) 3533 - die("field %s not found", str); 3393 + goto out_warning_field; 3534 3394 arg->hex.field->field.field = field; 3535 3395 } 3536 3396 hex = data + field->offset; ··· 3582 3442 /* well... 
*/ 3583 3443 break; 3584 3444 } 3445 + 3446 + return; 3447 + 3448 + out_warning_field: 3449 + do_warning("%s: field %s not found", __func__, arg->field.name); 3585 3450 } 3586 3451 3587 3452 static unsigned long long ··· 3613 3468 farg = arg->func.args; 3614 3469 param = func_handle->params; 3615 3470 3616 - args = malloc_or_die(sizeof(*args) * func_handle->nr_args); 3471 + ret = ULLONG_MAX; 3472 + args = malloc(sizeof(*args) * func_handle->nr_args); 3473 + if (!args) 3474 + goto out; 3475 + 3617 3476 for (i = 0; i < func_handle->nr_args; i++) { 3618 3477 switch (param->type) { 3619 3478 case PEVENT_FUNC_ARG_INT: ··· 3629 3480 trace_seq_init(&str); 3630 3481 print_str_arg(&str, data, size, event, "%s", -1, farg); 3631 3482 trace_seq_terminate(&str); 3632 - string = malloc_or_die(sizeof(*string)); 3483 + string = malloc(sizeof(*string)); 3484 + if (!string) { 3485 + do_warning("%s(%d): malloc str", __func__, __LINE__); 3486 + goto out_free; 3487 + } 3633 3488 string->next = strings; 3634 3489 string->str = strdup(str.buffer); 3635 - if (!string->str) 3636 - die("malloc str"); 3637 - 3490 + if (!string->str) { 3491 + free(string); 3492 + do_warning("%s(%d): malloc str", __func__, __LINE__); 3493 + goto out_free; 3494 + } 3638 3495 args[i] = (uintptr_t)string->str; 3639 3496 strings = string; 3640 3497 trace_seq_destroy(&str); ··· 3650 3495 * Something went totally wrong, this is not 3651 3496 * an input error, something in this code broke. 
3652 3497 */ 3653 - die("Unexpected end of arguments\n"); 3654 - break; 3498 + do_warning("Unexpected end of arguments\n"); 3499 + goto out_free; 3655 3500 } 3656 3501 farg = farg->next; 3657 3502 param = param->next; 3658 3503 } 3659 3504 3660 3505 ret = (*func_handle->func)(s, args); 3506 + out_free: 3661 3507 free(args); 3662 3508 while (strings) { 3663 3509 string = strings; ··· 3670 3514 out: 3671 3515 /* TBD : handle return type here */ 3672 3516 return ret; 3517 + } 3518 + 3519 + static void free_args(struct print_arg *args) 3520 + { 3521 + struct print_arg *next; 3522 + 3523 + while (args) { 3524 + next = args->next; 3525 + 3526 + free_arg(args); 3527 + args = next; 3528 + } 3673 3529 } 3674 3530 3675 3531 static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struct event_format *event) ··· 3699 3531 3700 3532 if (!field) { 3701 3533 field = pevent_find_field(event, "buf"); 3702 - if (!field) 3703 - die("can't find buffer field for binary printk"); 3534 + if (!field) { 3535 + do_warning("can't find buffer field for binary printk"); 3536 + return NULL; 3537 + } 3704 3538 ip_field = pevent_find_field(event, "ip"); 3705 - if (!ip_field) 3706 - die("can't find ip field for binary printk"); 3539 + if (!ip_field) { 3540 + do_warning("can't find ip field for binary printk"); 3541 + return NULL; 3542 + } 3707 3543 pevent->bprint_buf_field = field; 3708 3544 pevent->bprint_ip_field = ip_field; 3709 3545 } ··· 3718 3546 * The first arg is the IP pointer. 
3719 3547 */ 3720 3548 args = alloc_arg(); 3549 + if (!args) { 3550 + do_warning("%s(%d): not enough memory!", __func__, __LINE__); 3551 + return NULL; 3552 + } 3721 3553 arg = args; 3722 3554 arg->next = NULL; 3723 3555 next = &arg->next; 3724 3556 3725 3557 arg->type = PRINT_ATOM; 3726 - arg->atom.atom = malloc_or_die(32); 3727 - sprintf(arg->atom.atom, "%lld", ip); 3558 + 3559 + if (asprintf(&arg->atom.atom, "%lld", ip) < 0) 3560 + goto out_free; 3728 3561 3729 3562 /* skip the first "%pf : " */ 3730 3563 for (ptr = fmt + 6, bptr = data + field->offset; ··· 3784 3607 val = pevent_read_number(pevent, bptr, vsize); 3785 3608 bptr += vsize; 3786 3609 arg = alloc_arg(); 3610 + if (!arg) { 3611 + do_warning("%s(%d): not enough memory!", 3612 + __func__, __LINE__); 3613 + goto out_free; 3614 + } 3787 3615 arg->next = NULL; 3788 3616 arg->type = PRINT_ATOM; 3789 - arg->atom.atom = malloc_or_die(32); 3790 - sprintf(arg->atom.atom, "%lld", val); 3617 + if (asprintf(&arg->atom.atom, "%lld", val) < 0) { 3618 + free(arg); 3619 + goto out_free; 3620 + } 3791 3621 *next = arg; 3792 3622 next = &arg->next; 3793 3623 /* ··· 3807 3623 break; 3808 3624 case 's': 3809 3625 arg = alloc_arg(); 3626 + if (!arg) { 3627 + do_warning("%s(%d): not enough memory!", 3628 + __func__, __LINE__); 3629 + goto out_free; 3630 + } 3810 3631 arg->next = NULL; 3811 3632 arg->type = PRINT_BSTRING; 3812 3633 arg->string.string = strdup(bptr); 3813 3634 if (!arg->string.string) 3814 - break; 3635 + goto out_free; 3815 3636 bptr += strlen(bptr) + 1; 3816 3637 *next = arg; 3817 3638 next = &arg->next; ··· 3827 3638 } 3828 3639 3829 3640 return args; 3830 - } 3831 3641 3832 - static void free_args(struct print_arg *args) 3833 - { 3834 - struct print_arg *next; 3835 - 3836 - while (args) { 3837 - next = args->next; 3838 - 3839 - free_arg(args); 3840 - args = next; 3841 - } 3642 + out_free: 3643 + free_args(args); 3644 + return NULL; 3842 3645 } 3843 3646 3844 3647 static char * ··· 3848 3667 3849 3668 if 
(!field) { 3850 3669 field = pevent_find_field(event, "fmt"); 3851 - if (!field) 3852 - die("can't find format field for binary printk"); 3670 + if (!field) { 3671 + do_warning("can't find format field for binary printk"); 3672 + return NULL; 3673 + } 3853 3674 pevent->bprint_fmt_field = field; 3854 3675 } 3855 3676 ··· 3859 3676 3860 3677 printk = find_printk(pevent, addr); 3861 3678 if (!printk) { 3862 - format = malloc_or_die(45); 3863 - sprintf(format, "%%pf : (NO FORMAT FOUND at %llx)\n", 3864 - addr); 3679 + if (asprintf(&format, "%%pf : (NO FORMAT FOUND at %llx)\n", addr) < 0) 3680 + return NULL; 3865 3681 return format; 3866 3682 } 3867 3683 ··· 3868 3686 /* Remove any quotes. */ 3869 3687 if (*p == '"') 3870 3688 p++; 3871 - format = malloc_or_die(strlen(p) + 10); 3872 - sprintf(format, "%s : %s", "%pf", p); 3689 + if (asprintf(&format, "%s : %s", "%pf", p) < 0) 3690 + return NULL; 3873 3691 /* remove ending quotes and new line since we will add one too */ 3874 3692 p = format + strlen(format) - 1; 3875 3693 if (*p == '"') ··· 3904 3722 if (!arg->field.field) { 3905 3723 arg->field.field = 3906 3724 pevent_find_any_field(event, arg->field.name); 3907 - if (!arg->field.field) 3908 - die("field %s not found", arg->field.name); 3725 + if (!arg->field.field) { 3726 + do_warning("%s: field %s not found", 3727 + __func__, arg->field.name); 3728 + return; 3729 + } 3909 3730 } 3910 3731 if (arg->field.field->size != 6) { 3911 3732 trace_seq_printf(s, "INVALIDMAC"); ··· 4564 4379 struct format_field *field; 4565 4380 int i = 0; 4566 4381 4567 - fields = malloc_or_die(sizeof(*fields) * (count + 1)); 4382 + fields = malloc(sizeof(*fields) * (count + 1)); 4383 + if (!fields) 4384 + return NULL; 4385 + 4568 4386 for (field = list; field; field = field->next) { 4569 4387 fields[i++] = field; 4570 4388 if (i == count + 1) { ··· 4883 4695 } 4884 4696 4885 4697 /** 4886 - * pevent_parse_event - parse the event format 4887 - * @pevent: the handle to the pevent 4698 + * 
__pevent_parse_format - parse the event format 4888 4699 * @buf: the buffer storing the event format string 4889 4700 * @size: the size of @buf 4890 4701 * @sys: the system the event belongs to ··· 4895 4708 * 4896 4709 * /sys/kernel/debug/tracing/events/.../.../format 4897 4710 */ 4898 - enum pevent_errno pevent_parse_event(struct pevent *pevent, const char *buf, 4899 - unsigned long size, const char *sys) 4711 + enum pevent_errno __pevent_parse_format(struct event_format **eventp, 4712 + struct pevent *pevent, const char *buf, 4713 + unsigned long size, const char *sys) 4900 4714 { 4901 4715 struct event_format *event; 4902 4716 int ret; 4903 4717 4904 4718 init_input_buf(buf, size); 4905 4719 4906 - event = alloc_event(); 4720 + *eventp = event = alloc_event(); 4907 4721 if (!event) 4908 4722 return PEVENT_ERRNO__MEM_ALLOC_FAILED; 4909 4723 ··· 4938 4750 goto event_alloc_failed; 4939 4751 } 4940 4752 4941 - /* Add pevent to event so that it can be referenced */ 4942 - event->pevent = pevent; 4943 - 4944 4753 ret = event_read_format(event); 4945 4754 if (ret < 0) { 4946 4755 ret = PEVENT_ERRNO__READ_FORMAT_FAILED; ··· 4948 4763 * If the event has an override, don't print warnings if the event 4949 4764 * print format fails to parse. 
4950 4765 */ 4951 - if (find_event_handle(pevent, event)) 4766 + if (pevent && find_event_handle(pevent, event)) 4952 4767 show_warning = 0; 4953 4768 4954 4769 ret = event_read_print(event); 4770 + show_warning = 1; 4771 + 4955 4772 if (ret < 0) { 4956 - show_warning = 1; 4957 4773 ret = PEVENT_ERRNO__READ_PRINT_FAILED; 4958 4774 goto event_parse_failed; 4959 4775 } 4960 - show_warning = 1; 4961 - 4962 - add_event(pevent, event); 4963 4776 4964 4777 if (!ret && (event->flags & EVENT_FL_ISFTRACE)) { 4965 4778 struct format_field *field; ··· 4967 4784 list = &event->print_fmt.args; 4968 4785 for (field = event->format.fields; field; field = field->next) { 4969 4786 arg = alloc_arg(); 4787 + if (!arg) { 4788 + event->flags |= EVENT_FL_FAILED; 4789 + return PEVENT_ERRNO__OLD_FTRACE_ARG_FAILED; 4790 + } 4970 4791 arg->type = PRINT_FIELD; 4971 4792 arg->field.name = strdup(field->name); 4972 4793 if (!arg->field.name) { ··· 4985 4798 return 0; 4986 4799 } 4987 4800 4801 + return 0; 4802 + 4803 + event_parse_failed: 4804 + event->flags |= EVENT_FL_FAILED; 4805 + return ret; 4806 + 4807 + event_alloc_failed: 4808 + free(event->system); 4809 + free(event->name); 4810 + free(event); 4811 + *eventp = NULL; 4812 + return ret; 4813 + } 4814 + 4815 + /** 4816 + * pevent_parse_format - parse the event format 4817 + * @buf: the buffer storing the event format string 4818 + * @size: the size of @buf 4819 + * @sys: the system the event belongs to 4820 + * 4821 + * This parses the event format and creates an event structure 4822 + * to quickly parse raw data for a given event. 
4823 + * 4824 + * These files currently come from: 4825 + * 4826 + * /sys/kernel/debug/tracing/events/.../.../format 4827 + */ 4828 + enum pevent_errno pevent_parse_format(struct event_format **eventp, const char *buf, 4829 + unsigned long size, const char *sys) 4830 + { 4831 + return __pevent_parse_format(eventp, NULL, buf, size, sys); 4832 + } 4833 + 4834 + /** 4835 + * pevent_parse_event - parse the event format 4836 + * @pevent: the handle to the pevent 4837 + * @buf: the buffer storing the event format string 4838 + * @size: the size of @buf 4839 + * @sys: the system the event belongs to 4840 + * 4841 + * This parses the event format and creates an event structure 4842 + * to quickly parse raw data for a given event. 4843 + * 4844 + * These files currently come from: 4845 + * 4846 + * /sys/kernel/debug/tracing/events/.../.../format 4847 + */ 4848 + enum pevent_errno pevent_parse_event(struct pevent *pevent, const char *buf, 4849 + unsigned long size, const char *sys) 4850 + { 4851 + struct event_format *event = NULL; 4852 + int ret = __pevent_parse_format(&event, pevent, buf, size, sys); 4853 + 4854 + if (event == NULL) 4855 + return ret; 4856 + 4857 + /* Add pevent to event so that it can be referenced */ 4858 + event->pevent = pevent; 4859 + 4860 + if (add_event(pevent, event)) 4861 + goto event_add_failed; 4862 + 4988 4863 #define PRINT_ARGS 0 4989 4864 if (PRINT_ARGS && event->print_fmt.args) 4990 4865 print_args(event->print_fmt.args); 4991 4866 4992 4867 return 0; 4993 4868 4994 - event_parse_failed: 4995 - event->flags |= EVENT_FL_FAILED; 4996 - /* still add it even if it failed */ 4997 - add_event(pevent, event); 4998 - return ret; 4999 - 5000 - event_alloc_failed: 4869 + event_add_failed: 5001 4870 free(event->system); 5002 4871 free(event->name); 5003 4872 free(event); ··· 5337 5094 remove_func_handler(pevent, name); 5338 5095 } 5339 5096 5340 - func_handle = malloc(sizeof(*func_handle)); 5097 + func_handle = calloc(1, sizeof(*func_handle)); 5341 
5098 if (!func_handle) { 5342 5099 do_warning("Failed to allocate function handler"); 5343 5100 return PEVENT_ERRNO__MEM_ALLOC_FAILED; 5344 5101 } 5345 - memset(func_handle, 0, sizeof(*func_handle)); 5346 5102 5347 5103 func_handle->ret_type = ret_type; 5348 5104 func_handle->name = strdup(name); ··· 5440 5198 5441 5199 not_found: 5442 5200 /* Save for later use. */ 5443 - handle = malloc(sizeof(*handle)); 5201 + handle = calloc(1, sizeof(*handle)); 5444 5202 if (!handle) { 5445 5203 do_warning("Failed to allocate event handler"); 5446 5204 return PEVENT_ERRNO__MEM_ALLOC_FAILED; 5447 5205 } 5448 5206 5449 - memset(handle, 0, sizeof(*handle)); 5450 5207 handle->id = id; 5451 5208 if (event_name) 5452 5209 handle->event_name = strdup(event_name); ··· 5474 5233 */ 5475 5234 struct pevent *pevent_alloc(void) 5476 5235 { 5477 - struct pevent *pevent; 5236 + struct pevent *pevent = calloc(1, sizeof(*pevent)); 5478 5237 5479 - pevent = malloc(sizeof(*pevent)); 5480 - if (!pevent) 5481 - return NULL; 5482 - memset(pevent, 0, sizeof(*pevent)); 5483 - pevent->ref_count = 1; 5238 + if (pevent) 5239 + pevent->ref_count = 1; 5484 5240 5485 5241 return pevent; 5486 5242 } ··· 5506 5268 free_format_fields(format->fields); 5507 5269 } 5508 5270 5509 - static void free_event(struct event_format *event) 5271 + void pevent_free_format(struct event_format *event) 5510 5272 { 5511 5273 free(event->name); 5512 5274 free(event->system); ··· 5592 5354 } 5593 5355 5594 5356 for (i = 0; i < pevent->nr_events; i++) 5595 - free_event(pevent->events[i]); 5357 + pevent_free_format(pevent->events[i]); 5596 5358 5597 5359 while (pevent->handlers) { 5598 5360 handle = pevent->handlers;
+3
tools/lib/traceevent/event-parse.h
··· 540 540 541 541 enum pevent_errno pevent_parse_event(struct pevent *pevent, const char *buf, 542 542 unsigned long size, const char *sys); 543 + enum pevent_errno pevent_parse_format(struct event_format **eventp, const char *buf, 544 + unsigned long size, const char *sys); 545 + void pevent_free_format(struct event_format *event); 543 546 544 547 void *pevent_get_field_raw(struct trace_seq *s, struct event_format *event, 545 548 const char *name, struct pevent_record *record,
+28 -2
tools/perf/Documentation/perf-kvm.txt
··· 12 12 [--guestkallsyms=<path> --guestmodules=<path> | --guestvmlinux=<path>]] 13 13 {top|record|report|diff|buildid-list} 14 14 'perf kvm' [--host] [--guest] [--guestkallsyms=<path> --guestmodules=<path> 15 - | --guestvmlinux=<path>] {top|record|report|diff|buildid-list} 15 + | --guestvmlinux=<path>] {top|record|report|diff|buildid-list|stat} 16 16 17 17 DESCRIPTION 18 18 ----------- ··· 37 37 'perf kvm buildid-list' to display the buildids found in a perf data file, 38 38 so that other tools can be used to fetch packages with matching symbol tables 39 39 for use by perf report. 40 + 41 + 'perf kvm stat <command>' to run a command and gather performance counter 42 + statistics. 43 + Especially, perf 'kvm stat record/report' generates a statistical analysis 44 + of KVM events. Currently, vmexit, mmio and ioport events are supported. 45 + 'perf kvm stat record <command>' records kvm events and the events between 46 + start and end <command>. 47 + And this command produces a file which contains tracing results of kvm 48 + events. 49 + 50 + 'perf kvm stat report' reports statistical data which includes events 51 + handled time, samples, and so on. 40 52 41 53 OPTIONS 42 54 ------- ··· 80 68 --guestvmlinux=<path>:: 81 69 Guest os kernel vmlinux. 82 70 71 + STAT REPORT OPTIONS 72 + ------------------- 73 + --vcpu=<value>:: 74 + analyze events which occures on this vcpu. (default: all vcpus) 75 + 76 + --events=<value>:: 77 + events to be analyzed. Possible values: vmexit, mmio, ioport. 78 + (default: vmexit) 79 + -k:: 80 + --key=<value>:: 81 + Sorting key. Possible values: sample (default, sort by samples 82 + number), time (sort by average time). 83 + 83 84 SEE ALSO 84 85 -------- 85 86 linkperf:perf-top[1], linkperf:perf-record[1], linkperf:perf-report[1], 86 - linkperf:perf-diff[1], linkperf:perf-buildid-list[1] 87 + linkperf:perf-diff[1], linkperf:perf-buildid-list[1], 88 + linkperf:perf-stat[1]
+3
tools/perf/MANIFEST
··· 16 16 include/linux/poison.h 17 17 include/linux/magic.h 18 18 include/linux/hw_breakpoint.h 19 + arch/x86/include/asm/svm.h 20 + arch/x86/include/asm/vmx.h 21 + arch/x86/include/asm/kvm_host.h
+3 -3
tools/perf/Makefile
··· 233 233 FLEX = flex 234 234 BISON= bison 235 235 236 - $(OUTPUT)util/parse-events-flex.c: util/parse-events.l 236 + $(OUTPUT)util/parse-events-flex.c: util/parse-events.l $(OUTPUT)util/parse-events-bison.c 237 237 $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/parse-events-flex.h $(PARSER_DEBUG_FLEX) -t util/parse-events.l > $(OUTPUT)util/parse-events-flex.c 238 238 239 239 $(OUTPUT)util/parse-events-bison.c: util/parse-events.y 240 240 $(QUIET_BISON)$(BISON) -v util/parse-events.y -d $(PARSER_DEBUG_BISON) -o $(OUTPUT)util/parse-events-bison.c 241 241 242 - $(OUTPUT)util/pmu-flex.c: util/pmu.l 242 + $(OUTPUT)util/pmu-flex.c: util/pmu.l $(OUTPUT)util/pmu-bison.c 243 243 $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/pmu-flex.h -t util/pmu.l > $(OUTPUT)util/pmu-flex.c 244 244 245 245 $(OUTPUT)util/pmu-bison.c: util/pmu.y ··· 715 715 EXTLIBS += -liberty 716 716 BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE 717 717 else 718 - FLAGS_BFD=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) -lbfd 718 + FLAGS_BFD=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) -DPACKAGE='perf' -lbfd 719 719 has_bfd := $(call try-cc,$(SOURCE_BFD),$(FLAGS_BFD)) 720 720 ifeq ($(has_bfd),y) 721 721 EXTLIBS += -lbfd
+44 -44
tools/perf/builtin-kmem.c
··· 1 1 #include "builtin.h" 2 2 #include "perf.h" 3 3 4 + #include "util/evlist.h" 4 5 #include "util/evsel.h" 5 6 #include "util/util.h" 6 7 #include "util/cache.h" ··· 213 212 } 214 213 215 214 static int perf_evsel__process_alloc_event(struct perf_evsel *evsel, 216 - struct perf_sample *sample, int node) 215 + struct perf_sample *sample) 217 216 { 218 - struct event_format *event = evsel->tp_format; 219 - void *data = sample->raw_data; 220 - unsigned long call_site; 221 - unsigned long ptr; 222 - int bytes_req, cpu = sample->cpu; 223 - int bytes_alloc; 224 - int node1, node2; 217 + unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"), 218 + call_site = perf_evsel__intval(evsel, sample, "call_site"); 219 + int bytes_req = perf_evsel__intval(evsel, sample, "bytes_req"), 220 + bytes_alloc = perf_evsel__intval(evsel, sample, "bytes_alloc"); 225 221 226 - ptr = raw_field_value(event, "ptr", data); 227 - call_site = raw_field_value(event, "call_site", data); 228 - bytes_req = raw_field_value(event, "bytes_req", data); 229 - bytes_alloc = raw_field_value(event, "bytes_alloc", data); 230 - 231 - if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, cpu) || 222 + if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) || 232 223 insert_caller_stat(call_site, bytes_req, bytes_alloc)) 233 224 return -1; 234 225 235 226 total_requested += bytes_req; 236 227 total_allocated += bytes_alloc; 237 228 238 - if (node) { 239 - node1 = cpunode_map[cpu]; 240 - node2 = raw_field_value(event, "node", data); 229 + nr_allocs++; 230 + return 0; 231 + } 232 + 233 + static int perf_evsel__process_alloc_node_event(struct perf_evsel *evsel, 234 + struct perf_sample *sample) 235 + { 236 + int ret = perf_evsel__process_alloc_event(evsel, sample); 237 + 238 + if (!ret) { 239 + int node1 = cpunode_map[sample->cpu], 240 + node2 = perf_evsel__intval(evsel, sample, "node"); 241 + 241 242 if (node1 != node2) 242 243 nr_cross_allocs++; 243 244 } 244 - nr_allocs++; 
245 - return 0; 245 + 246 + return ret; 246 247 } 247 248 248 249 static int ptr_cmp(struct alloc_stat *, struct alloc_stat *); ··· 278 275 static int perf_evsel__process_free_event(struct perf_evsel *evsel, 279 276 struct perf_sample *sample) 280 277 { 281 - unsigned long ptr = raw_field_value(evsel->tp_format, "ptr", 282 - sample->raw_data); 278 + unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"); 283 279 struct alloc_stat *s_alloc, *s_caller; 284 280 285 281 s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp); ··· 299 297 return 0; 300 298 } 301 299 302 - static int perf_evsel__process_kmem_event(struct perf_evsel *evsel, 303 - struct perf_sample *sample) 304 - { 305 - struct event_format *event = evsel->tp_format; 306 - 307 - if (!strcmp(event->name, "kmalloc") || 308 - !strcmp(event->name, "kmem_cache_alloc")) { 309 - return perf_evsel__process_alloc_event(evsel, sample, 0); 310 - } 311 - 312 - if (!strcmp(event->name, "kmalloc_node") || 313 - !strcmp(event->name, "kmem_cache_alloc_node")) { 314 - return perf_evsel__process_alloc_event(evsel, sample, 1); 315 - } 316 - 317 - if (!strcmp(event->name, "kfree") || 318 - !strcmp(event->name, "kmem_cache_free")) { 319 - return perf_evsel__process_free_event(evsel, sample); 320 - } 321 - 322 - return 0; 323 - } 300 + typedef int (*tracepoint_handler)(struct perf_evsel *evsel, 301 + struct perf_sample *sample); 324 302 325 303 static int process_sample_event(struct perf_tool *tool __maybe_unused, 326 304 union perf_event *event, ··· 318 336 319 337 dump_printf(" ... 
thread: %s:%d\n", thread->comm, thread->pid); 320 338 321 - return perf_evsel__process_kmem_event(evsel, sample); 339 + if (evsel->handler.func != NULL) { 340 + tracepoint_handler f = evsel->handler.func; 341 + return f(evsel, sample); 342 + } 343 + 344 + return 0; 322 345 } 323 346 324 347 static struct perf_tool perf_kmem = { ··· 485 498 { 486 499 int err = -EINVAL; 487 500 struct perf_session *session; 501 + const struct perf_evsel_str_handler kmem_tracepoints[] = { 502 + { "kmem:kmalloc", perf_evsel__process_alloc_event, }, 503 + { "kmem:kmem_cache_alloc", perf_evsel__process_alloc_event, }, 504 + { "kmem:kmalloc_node", perf_evsel__process_alloc_node_event, }, 505 + { "kmem:kmem_cache_alloc_node", perf_evsel__process_alloc_node_event, }, 506 + { "kmem:kfree", perf_evsel__process_free_event, }, 507 + { "kmem:kmem_cache_free", perf_evsel__process_free_event, }, 508 + }; 488 509 489 510 session = perf_session__new(input_name, O_RDONLY, 0, false, &perf_kmem); 490 511 if (session == NULL) ··· 503 508 504 509 if (!perf_session__has_traces(session, "kmem record")) 505 510 goto out_delete; 511 + 512 + if (perf_session__set_tracepoints_handlers(session, kmem_tracepoints)) { 513 + pr_err("Initializing perf session tracepoint handlers failed\n"); 514 + return -1; 515 + } 506 516 507 517 setup_pager(); 508 518 err = perf_session__process_events(session, &perf_kmem);
+833 -3
tools/perf/builtin-kvm.c
··· 1 1 #include "builtin.h" 2 2 #include "perf.h" 3 3 4 + #include "util/evsel.h" 4 5 #include "util/util.h" 5 6 #include "util/cache.h" 6 7 #include "util/symbol.h" ··· 11 10 12 11 #include "util/parse-options.h" 13 12 #include "util/trace-event.h" 14 - 15 13 #include "util/debug.h" 14 + #include "util/debugfs.h" 15 + #include "util/tool.h" 16 + #include "util/stat.h" 16 17 17 18 #include <sys/prctl.h> 18 19 ··· 22 19 #include <pthread.h> 23 20 #include <math.h> 24 21 25 - static const char *file_name; 22 + #include "../../arch/x86/include/asm/svm.h" 23 + #include "../../arch/x86/include/asm/vmx.h" 24 + #include "../../arch/x86/include/asm/kvm.h" 25 + 26 + struct event_key { 27 + #define INVALID_KEY (~0ULL) 28 + u64 key; 29 + int info; 30 + }; 31 + 32 + struct kvm_events_ops { 33 + bool (*is_begin_event)(struct perf_evsel *evsel, 34 + struct perf_sample *sample, 35 + struct event_key *key); 36 + bool (*is_end_event)(struct perf_evsel *evsel, 37 + struct perf_sample *sample, struct event_key *key); 38 + void (*decode_key)(struct event_key *key, char decode[20]); 39 + const char *name; 40 + }; 41 + 42 + static void exit_event_get_key(struct perf_evsel *evsel, 43 + struct perf_sample *sample, 44 + struct event_key *key) 45 + { 46 + key->info = 0; 47 + key->key = perf_evsel__intval(evsel, sample, "exit_reason"); 48 + } 49 + 50 + static bool kvm_exit_event(struct perf_evsel *evsel) 51 + { 52 + return !strcmp(evsel->name, "kvm:kvm_exit"); 53 + } 54 + 55 + static bool exit_event_begin(struct perf_evsel *evsel, 56 + struct perf_sample *sample, struct event_key *key) 57 + { 58 + if (kvm_exit_event(evsel)) { 59 + exit_event_get_key(evsel, sample, key); 60 + return true; 61 + } 62 + 63 + return false; 64 + } 65 + 66 + static bool kvm_entry_event(struct perf_evsel *evsel) 67 + { 68 + return !strcmp(evsel->name, "kvm:kvm_entry"); 69 + } 70 + 71 + static bool exit_event_end(struct perf_evsel *evsel, 72 + struct perf_sample *sample __maybe_unused, 73 + struct event_key *key 
__maybe_unused) 74 + { 75 + return kvm_entry_event(evsel); 76 + } 77 + 78 + struct exit_reasons_table { 79 + unsigned long exit_code; 80 + const char *reason; 81 + }; 82 + 83 + struct exit_reasons_table vmx_exit_reasons[] = { 84 + VMX_EXIT_REASONS 85 + }; 86 + 87 + struct exit_reasons_table svm_exit_reasons[] = { 88 + SVM_EXIT_REASONS 89 + }; 90 + 91 + static int cpu_isa; 92 + 93 + static const char *get_exit_reason(u64 exit_code) 94 + { 95 + int table_size = ARRAY_SIZE(svm_exit_reasons); 96 + struct exit_reasons_table *table = svm_exit_reasons; 97 + 98 + if (cpu_isa == 1) { 99 + table = vmx_exit_reasons; 100 + table_size = ARRAY_SIZE(vmx_exit_reasons); 101 + } 102 + 103 + while (table_size--) { 104 + if (table->exit_code == exit_code) 105 + return table->reason; 106 + table++; 107 + } 108 + 109 + pr_err("unknown kvm exit code:%lld on %s\n", 110 + (unsigned long long)exit_code, cpu_isa ? "VMX" : "SVM"); 111 + return "UNKNOWN"; 112 + } 113 + 114 + static void exit_event_decode_key(struct event_key *key, char decode[20]) 115 + { 116 + const char *exit_reason = get_exit_reason(key->key); 117 + 118 + scnprintf(decode, 20, "%s", exit_reason); 119 + } 120 + 121 + static struct kvm_events_ops exit_events = { 122 + .is_begin_event = exit_event_begin, 123 + .is_end_event = exit_event_end, 124 + .decode_key = exit_event_decode_key, 125 + .name = "VM-EXIT" 126 + }; 127 + 128 + /* 129 + * For the mmio events, we treat: 130 + * the time of MMIO write: kvm_mmio(KVM_TRACE_MMIO_WRITE...) -> kvm_entry 131 + * the time of MMIO read: kvm_exit -> kvm_mmio(KVM_TRACE_MMIO_READ...). 
132 + */ 133 + static void mmio_event_get_key(struct perf_evsel *evsel, struct perf_sample *sample, 134 + struct event_key *key) 135 + { 136 + key->key = perf_evsel__intval(evsel, sample, "gpa"); 137 + key->info = perf_evsel__intval(evsel, sample, "type"); 138 + } 139 + 140 + #define KVM_TRACE_MMIO_READ_UNSATISFIED 0 141 + #define KVM_TRACE_MMIO_READ 1 142 + #define KVM_TRACE_MMIO_WRITE 2 143 + 144 + static bool mmio_event_begin(struct perf_evsel *evsel, 145 + struct perf_sample *sample, struct event_key *key) 146 + { 147 + /* MMIO read begin event in kernel. */ 148 + if (kvm_exit_event(evsel)) 149 + return true; 150 + 151 + /* MMIO write begin event in kernel. */ 152 + if (!strcmp(evsel->name, "kvm:kvm_mmio") && 153 + perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_WRITE) { 154 + mmio_event_get_key(evsel, sample, key); 155 + return true; 156 + } 157 + 158 + return false; 159 + } 160 + 161 + static bool mmio_event_end(struct perf_evsel *evsel, struct perf_sample *sample, 162 + struct event_key *key) 163 + { 164 + /* MMIO write end event in kernel. */ 165 + if (kvm_entry_event(evsel)) 166 + return true; 167 + 168 + /* MMIO read end event in kernel.*/ 169 + if (!strcmp(evsel->name, "kvm:kvm_mmio") && 170 + perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_READ) { 171 + mmio_event_get_key(evsel, sample, key); 172 + return true; 173 + } 174 + 175 + return false; 176 + } 177 + 178 + static void mmio_event_decode_key(struct event_key *key, char decode[20]) 179 + { 180 + scnprintf(decode, 20, "%#lx:%s", (unsigned long)key->key, 181 + key->info == KVM_TRACE_MMIO_WRITE ? "W" : "R"); 182 + } 183 + 184 + static struct kvm_events_ops mmio_events = { 185 + .is_begin_event = mmio_event_begin, 186 + .is_end_event = mmio_event_end, 187 + .decode_key = mmio_event_decode_key, 188 + .name = "MMIO Access" 189 + }; 190 + 191 + /* The time of emulation pio access is from kvm_pio to kvm_entry. 
*/ 192 + static void ioport_event_get_key(struct perf_evsel *evsel, 193 + struct perf_sample *sample, 194 + struct event_key *key) 195 + { 196 + key->key = perf_evsel__intval(evsel, sample, "port"); 197 + key->info = perf_evsel__intval(evsel, sample, "rw"); 198 + } 199 + 200 + static bool ioport_event_begin(struct perf_evsel *evsel, 201 + struct perf_sample *sample, 202 + struct event_key *key) 203 + { 204 + if (!strcmp(evsel->name, "kvm:kvm_pio")) { 205 + ioport_event_get_key(evsel, sample, key); 206 + return true; 207 + } 208 + 209 + return false; 210 + } 211 + 212 + static bool ioport_event_end(struct perf_evsel *evsel, 213 + struct perf_sample *sample __maybe_unused, 214 + struct event_key *key __maybe_unused) 215 + { 216 + return kvm_entry_event(evsel); 217 + } 218 + 219 + static void ioport_event_decode_key(struct event_key *key, char decode[20]) 220 + { 221 + scnprintf(decode, 20, "%#llx:%s", (unsigned long long)key->key, 222 + key->info ? "POUT" : "PIN"); 223 + } 224 + 225 + static struct kvm_events_ops ioport_events = { 226 + .is_begin_event = ioport_event_begin, 227 + .is_end_event = ioport_event_end, 228 + .decode_key = ioport_event_decode_key, 229 + .name = "IO Port Access" 230 + }; 231 + 232 + static const char *report_event = "vmexit"; 233 + struct kvm_events_ops *events_ops; 234 + 235 + static bool register_kvm_events_ops(void) 236 + { 237 + bool ret = true; 238 + 239 + if (!strcmp(report_event, "vmexit")) 240 + events_ops = &exit_events; 241 + else if (!strcmp(report_event, "mmio")) 242 + events_ops = &mmio_events; 243 + else if (!strcmp(report_event, "ioport")) 244 + events_ops = &ioport_events; 245 + else { 246 + pr_err("Unknown report event:%s\n", report_event); 247 + ret = false; 248 + } 249 + 250 + return ret; 251 + } 252 + 253 + struct kvm_event_stats { 254 + u64 time; 255 + struct stats stats; 256 + }; 257 + 258 + struct kvm_event { 259 + struct list_head hash_entry; 260 + struct rb_node rb; 261 + 262 + struct event_key key; 263 + 264 + 
struct kvm_event_stats total; 265 + 266 + #define DEFAULT_VCPU_NUM 8 267 + int max_vcpu; 268 + struct kvm_event_stats *vcpu; 269 + }; 270 + 271 + struct vcpu_event_record { 272 + int vcpu_id; 273 + u64 start_time; 274 + struct kvm_event *last_event; 275 + }; 276 + 277 + #define EVENTS_BITS 12 278 + #define EVENTS_CACHE_SIZE (1UL << EVENTS_BITS) 279 + 280 + static u64 total_time; 281 + static u64 total_count; 282 + static struct list_head kvm_events_cache[EVENTS_CACHE_SIZE]; 283 + 284 + static void init_kvm_event_record(void) 285 + { 286 + int i; 287 + 288 + for (i = 0; i < (int)EVENTS_CACHE_SIZE; i++) 289 + INIT_LIST_HEAD(&kvm_events_cache[i]); 290 + } 291 + 292 + static int kvm_events_hash_fn(u64 key) 293 + { 294 + return key & (EVENTS_CACHE_SIZE - 1); 295 + } 296 + 297 + static bool kvm_event_expand(struct kvm_event *event, int vcpu_id) 298 + { 299 + int old_max_vcpu = event->max_vcpu; 300 + 301 + if (vcpu_id < event->max_vcpu) 302 + return true; 303 + 304 + while (event->max_vcpu <= vcpu_id) 305 + event->max_vcpu += DEFAULT_VCPU_NUM; 306 + 307 + event->vcpu = realloc(event->vcpu, 308 + event->max_vcpu * sizeof(*event->vcpu)); 309 + if (!event->vcpu) { 310 + pr_err("Not enough memory\n"); 311 + return false; 312 + } 313 + 314 + memset(event->vcpu + old_max_vcpu, 0, 315 + (event->max_vcpu - old_max_vcpu) * sizeof(*event->vcpu)); 316 + return true; 317 + } 318 + 319 + static struct kvm_event *kvm_alloc_init_event(struct event_key *key) 320 + { 321 + struct kvm_event *event; 322 + 323 + event = zalloc(sizeof(*event)); 324 + if (!event) { 325 + pr_err("Not enough memory\n"); 326 + return NULL; 327 + } 328 + 329 + event->key = *key; 330 + return event; 331 + } 332 + 333 + static struct kvm_event *find_create_kvm_event(struct event_key *key) 334 + { 335 + struct kvm_event *event; 336 + struct list_head *head; 337 + 338 + BUG_ON(key->key == INVALID_KEY); 339 + 340 + head = &kvm_events_cache[kvm_events_hash_fn(key->key)]; 341 + list_for_each_entry(event, head, 
hash_entry) 342 + if (event->key.key == key->key && event->key.info == key->info) 343 + return event; 344 + 345 + event = kvm_alloc_init_event(key); 346 + if (!event) 347 + return NULL; 348 + 349 + list_add(&event->hash_entry, head); 350 + return event; 351 + } 352 + 353 + static bool handle_begin_event(struct vcpu_event_record *vcpu_record, 354 + struct event_key *key, u64 timestamp) 355 + { 356 + struct kvm_event *event = NULL; 357 + 358 + if (key->key != INVALID_KEY) 359 + event = find_create_kvm_event(key); 360 + 361 + vcpu_record->last_event = event; 362 + vcpu_record->start_time = timestamp; 363 + return true; 364 + } 365 + 366 + static void 367 + kvm_update_event_stats(struct kvm_event_stats *kvm_stats, u64 time_diff) 368 + { 369 + kvm_stats->time += time_diff; 370 + update_stats(&kvm_stats->stats, time_diff); 371 + } 372 + 373 + static double kvm_event_rel_stddev(int vcpu_id, struct kvm_event *event) 374 + { 375 + struct kvm_event_stats *kvm_stats = &event->total; 376 + 377 + if (vcpu_id != -1) 378 + kvm_stats = &event->vcpu[vcpu_id]; 379 + 380 + return rel_stddev_stats(stddev_stats(&kvm_stats->stats), 381 + avg_stats(&kvm_stats->stats)); 382 + } 383 + 384 + static bool update_kvm_event(struct kvm_event *event, int vcpu_id, 385 + u64 time_diff) 386 + { 387 + kvm_update_event_stats(&event->total, time_diff); 388 + 389 + if (!kvm_event_expand(event, vcpu_id)) 390 + return false; 391 + 392 + kvm_update_event_stats(&event->vcpu[vcpu_id], time_diff); 393 + return true; 394 + } 395 + 396 + static bool handle_end_event(struct vcpu_event_record *vcpu_record, 397 + struct event_key *key, u64 timestamp) 398 + { 399 + struct kvm_event *event; 400 + u64 time_begin, time_diff; 401 + 402 + event = vcpu_record->last_event; 403 + time_begin = vcpu_record->start_time; 404 + 405 + /* The begin event is not caught. 
*/ 406 + if (!time_begin) 407 + return true; 408 + 409 + /* 410 + * In some case, the 'begin event' only records the start timestamp, 411 + * the actual event is recognized in the 'end event' (e.g. mmio-event). 412 + */ 413 + 414 + /* Both begin and end events did not get the key. */ 415 + if (!event && key->key == INVALID_KEY) 416 + return true; 417 + 418 + if (!event) 419 + event = find_create_kvm_event(key); 420 + 421 + if (!event) 422 + return false; 423 + 424 + vcpu_record->last_event = NULL; 425 + vcpu_record->start_time = 0; 426 + 427 + BUG_ON(timestamp < time_begin); 428 + 429 + time_diff = timestamp - time_begin; 430 + return update_kvm_event(event, vcpu_record->vcpu_id, time_diff); 431 + } 432 + 433 + static 434 + struct vcpu_event_record *per_vcpu_record(struct thread *thread, 435 + struct perf_evsel *evsel, 436 + struct perf_sample *sample) 437 + { 438 + /* Only kvm_entry records vcpu id. */ 439 + if (!thread->priv && kvm_entry_event(evsel)) { 440 + struct vcpu_event_record *vcpu_record; 441 + 442 + vcpu_record = zalloc(sizeof(*vcpu_record)); 443 + if (!vcpu_record) { 444 + pr_err("%s: Not enough memory\n", __func__); 445 + return NULL; 446 + } 447 + 448 + vcpu_record->vcpu_id = perf_evsel__intval(evsel, sample, "vcpu_id"); 449 + thread->priv = vcpu_record; 450 + } 451 + 452 + return thread->priv; 453 + } 454 + 455 + static bool handle_kvm_event(struct thread *thread, struct perf_evsel *evsel, 456 + struct perf_sample *sample) 457 + { 458 + struct vcpu_event_record *vcpu_record; 459 + struct event_key key = {.key = INVALID_KEY}; 460 + 461 + vcpu_record = per_vcpu_record(thread, evsel, sample); 462 + if (!vcpu_record) 463 + return true; 464 + 465 + if (events_ops->is_begin_event(evsel, sample, &key)) 466 + return handle_begin_event(vcpu_record, &key, sample->time); 467 + 468 + if (events_ops->is_end_event(evsel, sample, &key)) 469 + return handle_end_event(vcpu_record, &key, sample->time); 470 + 471 + return true; 472 + } 473 + 474 + typedef int 
(*key_cmp_fun)(struct kvm_event*, struct kvm_event*, int); 475 + struct kvm_event_key { 476 + const char *name; 477 + key_cmp_fun key; 478 + }; 479 + 480 + static int trace_vcpu = -1; 481 + #define GET_EVENT_KEY(func, field) \ 482 + static u64 get_event_ ##func(struct kvm_event *event, int vcpu) \ 483 + { \ 484 + if (vcpu == -1) \ 485 + return event->total.field; \ 486 + \ 487 + if (vcpu >= event->max_vcpu) \ 488 + return 0; \ 489 + \ 490 + return event->vcpu[vcpu].field; \ 491 + } 492 + 493 + #define COMPARE_EVENT_KEY(func, field) \ 494 + GET_EVENT_KEY(func, field) \ 495 + static int compare_kvm_event_ ## func(struct kvm_event *one, \ 496 + struct kvm_event *two, int vcpu)\ 497 + { \ 498 + return get_event_ ##func(one, vcpu) > \ 499 + get_event_ ##func(two, vcpu); \ 500 + } 501 + 502 + GET_EVENT_KEY(time, time); 503 + COMPARE_EVENT_KEY(count, stats.n); 504 + COMPARE_EVENT_KEY(mean, stats.mean); 505 + 506 + #define DEF_SORT_NAME_KEY(name, compare_key) \ 507 + { #name, compare_kvm_event_ ## compare_key } 508 + 509 + static struct kvm_event_key keys[] = { 510 + DEF_SORT_NAME_KEY(sample, count), 511 + DEF_SORT_NAME_KEY(time, mean), 512 + { NULL, NULL } 513 + }; 514 + 515 + static const char *sort_key = "sample"; 516 + static key_cmp_fun compare; 517 + 518 + static bool select_key(void) 519 + { 520 + int i; 521 + 522 + for (i = 0; keys[i].name; i++) { 523 + if (!strcmp(keys[i].name, sort_key)) { 524 + compare = keys[i].key; 525 + return true; 526 + } 527 + } 528 + 529 + pr_err("Unknown compare key:%s\n", sort_key); 530 + return false; 531 + } 532 + 533 + static struct rb_root result; 534 + static void insert_to_result(struct kvm_event *event, key_cmp_fun bigger, 535 + int vcpu) 536 + { 537 + struct rb_node **rb = &result.rb_node; 538 + struct rb_node *parent = NULL; 539 + struct kvm_event *p; 540 + 541 + while (*rb) { 542 + p = container_of(*rb, struct kvm_event, rb); 543 + parent = *rb; 544 + 545 + if (bigger(event, p, vcpu)) 546 + rb = &(*rb)->rb_left; 547 + else 548 
+ rb = &(*rb)->rb_right; 549 + } 550 + 551 + rb_link_node(&event->rb, parent, rb); 552 + rb_insert_color(&event->rb, &result); 553 + } 554 + 555 + static void update_total_count(struct kvm_event *event, int vcpu) 556 + { 557 + total_count += get_event_count(event, vcpu); 558 + total_time += get_event_time(event, vcpu); 559 + } 560 + 561 + static bool event_is_valid(struct kvm_event *event, int vcpu) 562 + { 563 + return !!get_event_count(event, vcpu); 564 + } 565 + 566 + static void sort_result(int vcpu) 567 + { 568 + unsigned int i; 569 + struct kvm_event *event; 570 + 571 + for (i = 0; i < EVENTS_CACHE_SIZE; i++) 572 + list_for_each_entry(event, &kvm_events_cache[i], hash_entry) 573 + if (event_is_valid(event, vcpu)) { 574 + update_total_count(event, vcpu); 575 + insert_to_result(event, compare, vcpu); 576 + } 577 + } 578 + 579 + /* returns left most element of result, and erase it */ 580 + static struct kvm_event *pop_from_result(void) 581 + { 582 + struct rb_node *node = rb_first(&result); 583 + 584 + if (!node) 585 + return NULL; 586 + 587 + rb_erase(node, &result); 588 + return container_of(node, struct kvm_event, rb); 589 + } 590 + 591 + static void print_vcpu_info(int vcpu) 592 + { 593 + pr_info("Analyze events for "); 594 + 595 + if (vcpu == -1) 596 + pr_info("all VCPUs:\n\n"); 597 + else 598 + pr_info("VCPU %d:\n\n", vcpu); 599 + } 600 + 601 + static void print_result(int vcpu) 602 + { 603 + char decode[20]; 604 + struct kvm_event *event; 605 + 606 + pr_info("\n\n"); 607 + print_vcpu_info(vcpu); 608 + pr_info("%20s ", events_ops->name); 609 + pr_info("%10s ", "Samples"); 610 + pr_info("%9s ", "Samples%"); 611 + 612 + pr_info("%9s ", "Time%"); 613 + pr_info("%16s ", "Avg time"); 614 + pr_info("\n\n"); 615 + 616 + while ((event = pop_from_result())) { 617 + u64 ecount, etime; 618 + 619 + ecount = get_event_count(event, vcpu); 620 + etime = get_event_time(event, vcpu); 621 + 622 + events_ops->decode_key(&event->key, decode); 623 + pr_info("%20s ", decode); 
624 + pr_info("%10llu ", (unsigned long long)ecount); 625 + pr_info("%8.2f%% ", (double)ecount / total_count * 100); 626 + pr_info("%8.2f%% ", (double)etime / total_time * 100); 627 + pr_info("%9.2fus ( +-%7.2f%% )", (double)etime / ecount/1e3, 628 + kvm_event_rel_stddev(vcpu, event)); 629 + pr_info("\n"); 630 + } 631 + 632 + pr_info("\nTotal Samples:%lld, Total events handled time:%.2fus.\n\n", 633 + (unsigned long long)total_count, total_time / 1e3); 634 + } 635 + 636 + static int process_sample_event(struct perf_tool *tool __maybe_unused, 637 + union perf_event *event, 638 + struct perf_sample *sample, 639 + struct perf_evsel *evsel, 640 + struct machine *machine) 641 + { 642 + struct thread *thread = machine__findnew_thread(machine, sample->tid); 643 + 644 + if (thread == NULL) { 645 + pr_debug("problem processing %d event, skipping it.\n", 646 + event->header.type); 647 + return -1; 648 + } 649 + 650 + if (!handle_kvm_event(thread, evsel, sample)) 651 + return -1; 652 + 653 + return 0; 654 + } 655 + 656 + static struct perf_tool eops = { 657 + .sample = process_sample_event, 658 + .comm = perf_event__process_comm, 659 + .ordered_samples = true, 660 + }; 661 + 662 + static int get_cpu_isa(struct perf_session *session) 663 + { 664 + char *cpuid = session->header.env.cpuid; 665 + int isa; 666 + 667 + if (strstr(cpuid, "Intel")) 668 + isa = 1; 669 + else if (strstr(cpuid, "AMD")) 670 + isa = 0; 671 + else { 672 + pr_err("CPU %s is not supported.\n", cpuid); 673 + isa = -ENOTSUP; 674 + } 675 + 676 + return isa; 677 + } 678 + 679 + static const char *file_name; 680 + 681 + static int read_events(void) 682 + { 683 + struct perf_session *kvm_session; 684 + int ret; 685 + 686 + kvm_session = perf_session__new(file_name, O_RDONLY, 0, false, &eops); 687 + if (!kvm_session) { 688 + pr_err("Initializing perf session failed\n"); 689 + return -EINVAL; 690 + } 691 + 692 + if (!perf_session__has_traces(kvm_session, "kvm record")) 693 + return -EINVAL; 694 + 695 + /* 696 + * Do 
not use 'isa' recorded in kvm_exit tracepoint since it is not 697 + * traced in the old kernel. 698 + */ 699 + ret = get_cpu_isa(kvm_session); 700 + 701 + if (ret < 0) 702 + return ret; 703 + 704 + cpu_isa = ret; 705 + 706 + return perf_session__process_events(kvm_session, &eops); 707 + } 708 + 709 + static bool verify_vcpu(int vcpu) 710 + { 711 + if (vcpu != -1 && vcpu < 0) { 712 + pr_err("Invalid vcpu:%d.\n", vcpu); 713 + return false; 714 + } 715 + 716 + return true; 717 + } 718 + 719 + static int kvm_events_report_vcpu(int vcpu) 720 + { 721 + int ret = -EINVAL; 722 + 723 + if (!verify_vcpu(vcpu)) 724 + goto exit; 725 + 726 + if (!select_key()) 727 + goto exit; 728 + 729 + if (!register_kvm_events_ops()) 730 + goto exit; 731 + 732 + init_kvm_event_record(); 733 + setup_pager(); 734 + 735 + ret = read_events(); 736 + if (ret) 737 + goto exit; 738 + 739 + sort_result(vcpu); 740 + print_result(vcpu); 741 + exit: 742 + return ret; 743 + } 744 + 745 + static const char * const record_args[] = { 746 + "record", 747 + "-R", 748 + "-f", 749 + "-m", "1024", 750 + "-c", "1", 751 + "-e", "kvm:kvm_entry", 752 + "-e", "kvm:kvm_exit", 753 + "-e", "kvm:kvm_mmio", 754 + "-e", "kvm:kvm_pio", 755 + }; 756 + 757 + #define STRDUP_FAIL_EXIT(s) \ 758 + ({ char *_p; \ 759 + _p = strdup(s); \ 760 + if (!_p) \ 761 + return -ENOMEM; \ 762 + _p; \ 763 + }) 764 + 765 + static int kvm_events_record(int argc, const char **argv) 766 + { 767 + unsigned int rec_argc, i, j; 768 + const char **rec_argv; 769 + 770 + rec_argc = ARRAY_SIZE(record_args) + argc + 2; 771 + rec_argv = calloc(rec_argc + 1, sizeof(char *)); 772 + 773 + if (rec_argv == NULL) 774 + return -ENOMEM; 775 + 776 + for (i = 0; i < ARRAY_SIZE(record_args); i++) 777 + rec_argv[i] = STRDUP_FAIL_EXIT(record_args[i]); 778 + 779 + rec_argv[i++] = STRDUP_FAIL_EXIT("-o"); 780 + rec_argv[i++] = STRDUP_FAIL_EXIT(file_name); 781 + 782 + for (j = 1; j < (unsigned int)argc; j++, i++) 783 + rec_argv[i] = argv[j]; 784 + 785 + return 
cmd_record(i, rec_argv, NULL); 786 + } 787 + 788 + static const char * const kvm_events_report_usage[] = { 789 + "perf kvm stat report [<options>]", 790 + NULL 791 + }; 792 + 793 + static const struct option kvm_events_report_options[] = { 794 + OPT_STRING(0, "event", &report_event, "report event", 795 + "event for reporting: vmexit, mmio, ioport"), 796 + OPT_INTEGER(0, "vcpu", &trace_vcpu, 797 + "vcpu id to report"), 798 + OPT_STRING('k', "key", &sort_key, "sort-key", 799 + "key for sorting: sample(sort by samples number)" 800 + " time (sort by avg time)"), 801 + OPT_END() 802 + }; 803 + 804 + static int kvm_events_report(int argc, const char **argv) 805 + { 806 + symbol__init(); 807 + 808 + if (argc) { 809 + argc = parse_options(argc, argv, 810 + kvm_events_report_options, 811 + kvm_events_report_usage, 0); 812 + if (argc) 813 + usage_with_options(kvm_events_report_usage, 814 + kvm_events_report_options); 815 + } 816 + 817 + return kvm_events_report_vcpu(trace_vcpu); 818 + } 819 + 820 + static void print_kvm_stat_usage(void) 821 + { 822 + printf("Usage: perf kvm stat <command>\n\n"); 823 + 824 + printf("# Available commands:\n"); 825 + printf("\trecord: record kvm events\n"); 826 + printf("\treport: report statistical data of kvm events\n"); 827 + 828 + printf("\nOtherwise, it is the alias of 'perf stat':\n"); 829 + } 830 + 831 + static int kvm_cmd_stat(int argc, const char **argv) 832 + { 833 + if (argc == 1) { 834 + print_kvm_stat_usage(); 835 + goto perf_stat; 836 + } 837 + 838 + if (!strncmp(argv[1], "rec", 3)) 839 + return kvm_events_record(argc - 1, argv + 1); 840 + 841 + if (!strncmp(argv[1], "rep", 3)) 842 + return kvm_events_report(argc - 1 , argv + 1); 843 + 844 + perf_stat: 845 + return cmd_stat(argc, argv, NULL); 846 + } 847 + 26 848 static char name_buffer[256]; 27 849 28 850 static const char * const kvm_usage[] = { 29 - "perf kvm [<options>] {top|record|report|diff|buildid-list}", 851 + "perf kvm [<options>] 
{top|record|report|diff|buildid-list|stat}", 30 852 NULL 31 853 }; 32 854 ··· 963 135 return cmd_top(argc, argv, NULL); 964 136 else if (!strncmp(argv[0], "buildid-list", 12)) 965 137 return __cmd_buildid_list(argc, argv); 138 + else if (!strncmp(argv[0], "stat", 4)) 139 + return kvm_cmd_stat(argc, argv); 966 140 else 967 141 usage_with_options(kvm_usage, kvm_options); 968 142
+87 -146
tools/perf/builtin-lock.c
··· 1 1 #include "builtin.h" 2 2 #include "perf.h" 3 3 4 + #include "util/evlist.h" 4 5 #include "util/evsel.h" 5 6 #include "util/util.h" 6 7 #include "util/cache.h" ··· 42 41 struct rb_node rb; /* used for sorting */ 43 42 44 43 /* 45 - * FIXME: raw_field_value() returns unsigned long long, 44 + * FIXME: perf_evsel__intval() returns u64, 46 45 * so address of lockdep_map should be dealed as 64bit. 47 46 * Is there more better solution? 48 47 */ ··· 337 336 338 337 static const char *input_name; 339 338 340 - struct raw_event_sample { 341 - u32 size; 342 - char data[0]; 343 - }; 344 - 345 - struct trace_acquire_event { 346 - void *addr; 347 - const char *name; 348 - int flag; 349 - }; 350 - 351 - struct trace_acquired_event { 352 - void *addr; 353 - const char *name; 354 - }; 355 - 356 - struct trace_contended_event { 357 - void *addr; 358 - const char *name; 359 - }; 360 - 361 - struct trace_release_event { 362 - void *addr; 363 - const char *name; 364 - }; 365 - 366 339 struct trace_lock_handler { 367 - int (*acquire_event)(struct trace_acquire_event *, 368 - const struct perf_sample *sample); 340 + int (*acquire_event)(struct perf_evsel *evsel, 341 + struct perf_sample *sample); 369 342 370 - int (*acquired_event)(struct trace_acquired_event *, 371 - const struct perf_sample *sample); 343 + int (*acquired_event)(struct perf_evsel *evsel, 344 + struct perf_sample *sample); 372 345 373 - int (*contended_event)(struct trace_contended_event *, 374 - const struct perf_sample *sample); 346 + int (*contended_event)(struct perf_evsel *evsel, 347 + struct perf_sample *sample); 375 348 376 - int (*release_event)(struct trace_release_event *, 377 - const struct perf_sample *sample); 349 + int (*release_event)(struct perf_evsel *evsel, 350 + struct perf_sample *sample); 378 351 }; 379 352 380 353 static struct lock_seq_stat *get_seq(struct thread_stat *ts, void *addr) ··· 387 412 READ_LOCK = 2, 388 413 }; 389 414 390 - static int 391 - report_lock_acquire_event(struct 
trace_acquire_event *acquire_event, 392 - const struct perf_sample *sample) 415 + static int report_lock_acquire_event(struct perf_evsel *evsel, 416 + struct perf_sample *sample) 393 417 { 418 + void *addr; 394 419 struct lock_stat *ls; 395 420 struct thread_stat *ts; 396 421 struct lock_seq_stat *seq; 422 + const char *name = perf_evsel__strval(evsel, sample, "name"); 423 + u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr"); 424 + int flag = perf_evsel__intval(evsel, sample, "flag"); 397 425 398 - ls = lock_stat_findnew(acquire_event->addr, acquire_event->name); 426 + memcpy(&addr, &tmp, sizeof(void *)); 427 + 428 + ls = lock_stat_findnew(addr, name); 399 429 if (!ls) 400 430 return -1; 401 431 if (ls->discard) ··· 410 430 if (!ts) 411 431 return -1; 412 432 413 - seq = get_seq(ts, acquire_event->addr); 433 + seq = get_seq(ts, addr); 414 434 if (!seq) 415 435 return -1; 416 436 417 437 switch (seq->state) { 418 438 case SEQ_STATE_UNINITIALIZED: 419 439 case SEQ_STATE_RELEASED: 420 - if (!acquire_event->flag) { 440 + if (!flag) { 421 441 seq->state = SEQ_STATE_ACQUIRING; 422 442 } else { 423 - if (acquire_event->flag & TRY_LOCK) 443 + if (flag & TRY_LOCK) 424 444 ls->nr_trylock++; 425 - if (acquire_event->flag & READ_LOCK) 445 + if (flag & READ_LOCK) 426 446 ls->nr_readlock++; 427 447 seq->state = SEQ_STATE_READ_ACQUIRED; 428 448 seq->read_count = 1; ··· 430 450 } 431 451 break; 432 452 case SEQ_STATE_READ_ACQUIRED: 433 - if (acquire_event->flag & READ_LOCK) { 453 + if (flag & READ_LOCK) { 434 454 seq->read_count++; 435 455 ls->nr_acquired++; 436 456 goto end; ··· 460 480 return 0; 461 481 } 462 482 463 - static int 464 - report_lock_acquired_event(struct trace_acquired_event *acquired_event, 465 - const struct perf_sample *sample) 483 + static int report_lock_acquired_event(struct perf_evsel *evsel, 484 + struct perf_sample *sample) 466 485 { 467 - u64 timestamp = sample->time; 486 + void *addr; 468 487 struct lock_stat *ls; 469 488 struct thread_stat 
*ts; 470 489 struct lock_seq_stat *seq; 471 490 u64 contended_term; 491 + const char *name = perf_evsel__strval(evsel, sample, "name"); 492 + u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr"); 472 493 473 - ls = lock_stat_findnew(acquired_event->addr, acquired_event->name); 494 + memcpy(&addr, &tmp, sizeof(void *)); 495 + 496 + ls = lock_stat_findnew(addr, name); 474 497 if (!ls) 475 498 return -1; 476 499 if (ls->discard) ··· 483 500 if (!ts) 484 501 return -1; 485 502 486 - seq = get_seq(ts, acquired_event->addr); 503 + seq = get_seq(ts, addr); 487 504 if (!seq) 488 505 return -1; 489 506 ··· 494 511 case SEQ_STATE_ACQUIRING: 495 512 break; 496 513 case SEQ_STATE_CONTENDED: 497 - contended_term = timestamp - seq->prev_event_time; 514 + contended_term = sample->time - seq->prev_event_time; 498 515 ls->wait_time_total += contended_term; 499 516 if (contended_term < ls->wait_time_min) 500 517 ls->wait_time_min = contended_term; ··· 519 536 520 537 seq->state = SEQ_STATE_ACQUIRED; 521 538 ls->nr_acquired++; 522 - seq->prev_event_time = timestamp; 539 + seq->prev_event_time = sample->time; 523 540 end: 524 541 return 0; 525 542 } 526 543 527 - static int 528 - report_lock_contended_event(struct trace_contended_event *contended_event, 529 - const struct perf_sample *sample) 544 + static int report_lock_contended_event(struct perf_evsel *evsel, 545 + struct perf_sample *sample) 530 546 { 547 + void *addr; 531 548 struct lock_stat *ls; 532 549 struct thread_stat *ts; 533 550 struct lock_seq_stat *seq; 551 + const char *name = perf_evsel__strval(evsel, sample, "name"); 552 + u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr"); 534 553 535 - ls = lock_stat_findnew(contended_event->addr, contended_event->name); 554 + memcpy(&addr, &tmp, sizeof(void *)); 555 + 556 + ls = lock_stat_findnew(addr, name); 536 557 if (!ls) 537 558 return -1; 538 559 if (ls->discard) ··· 546 559 if (!ts) 547 560 return -1; 548 561 549 - seq = get_seq(ts, contended_event->addr); 
562 + seq = get_seq(ts, addr); 550 563 if (!seq) 551 564 return -1; 552 565 ··· 579 592 return 0; 580 593 } 581 594 582 - static int 583 - report_lock_release_event(struct trace_release_event *release_event, 584 - const struct perf_sample *sample) 595 + static int report_lock_release_event(struct perf_evsel *evsel, 596 + struct perf_sample *sample) 585 597 { 598 + void *addr; 586 599 struct lock_stat *ls; 587 600 struct thread_stat *ts; 588 601 struct lock_seq_stat *seq; 602 + const char *name = perf_evsel__strval(evsel, sample, "name"); 603 + u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr"); 589 604 590 - ls = lock_stat_findnew(release_event->addr, release_event->name); 605 + memcpy(&addr, &tmp, sizeof(void *)); 606 + 607 + ls = lock_stat_findnew(addr, name); 591 608 if (!ls) 592 609 return -1; 593 610 if (ls->discard) ··· 601 610 if (!ts) 602 611 return -1; 603 612 604 - seq = get_seq(ts, release_event->addr); 613 + seq = get_seq(ts, addr); 605 614 if (!seq) 606 615 return -1; 607 616 ··· 654 663 static int perf_evsel__process_lock_acquire(struct perf_evsel *evsel, 655 664 struct perf_sample *sample) 656 665 { 657 - struct trace_acquire_event acquire_event; 658 - struct event_format *event = evsel->tp_format; 659 - void *data = sample->raw_data; 660 - u64 tmp; /* this is required for casting... 
*/ 661 - int rc = 0; 662 - 663 - tmp = raw_field_value(event, "lockdep_addr", data); 664 - memcpy(&acquire_event.addr, &tmp, sizeof(void *)); 665 - acquire_event.name = (char *)raw_field_ptr(event, "name", data); 666 - acquire_event.flag = (int)raw_field_value(event, "flag", data); 667 - 668 666 if (trace_handler->acquire_event) 669 - rc = trace_handler->acquire_event(&acquire_event, sample); 670 - 671 - return rc; 667 + return trace_handler->acquire_event(evsel, sample); 668 + return 0; 672 669 } 673 670 674 671 static int perf_evsel__process_lock_acquired(struct perf_evsel *evsel, 675 672 struct perf_sample *sample) 676 673 { 677 - struct trace_acquired_event acquired_event; 678 - struct event_format *event = evsel->tp_format; 679 - void *data = sample->raw_data; 680 - u64 tmp; /* this is required for casting... */ 681 - int rc = 0; 682 - 683 - tmp = raw_field_value(event, "lockdep_addr", data); 684 - memcpy(&acquired_event.addr, &tmp, sizeof(void *)); 685 - acquired_event.name = (char *)raw_field_ptr(event, "name", data); 686 - 687 674 if (trace_handler->acquired_event) 688 - rc = trace_handler->acquired_event(&acquired_event, sample); 689 - 690 - return rc; 675 + return trace_handler->acquired_event(evsel, sample); 676 + return 0; 691 677 } 692 678 693 679 static int perf_evsel__process_lock_contended(struct perf_evsel *evsel, 694 - struct perf_sample *sample) 680 + struct perf_sample *sample) 695 681 { 696 - struct trace_contended_event contended_event; 697 - struct event_format *event = evsel->tp_format; 698 - void *data = sample->raw_data; 699 - u64 tmp; /* this is required for casting... 
*/ 700 - int rc = 0; 701 - 702 - tmp = raw_field_value(event, "lockdep_addr", data); 703 - memcpy(&contended_event.addr, &tmp, sizeof(void *)); 704 - contended_event.name = (char *)raw_field_ptr(event, "name", data); 705 - 706 682 if (trace_handler->contended_event) 707 - rc = trace_handler->contended_event(&contended_event, sample); 708 - 709 - return rc; 683 + return trace_handler->contended_event(evsel, sample); 684 + return 0; 710 685 } 711 686 712 687 static int perf_evsel__process_lock_release(struct perf_evsel *evsel, 713 - struct perf_sample *sample) 688 + struct perf_sample *sample) 714 689 { 715 - struct trace_release_event release_event; 716 - struct event_format *event = evsel->tp_format; 717 - void *data = sample->raw_data; 718 - u64 tmp; /* this is required for casting... */ 719 - int rc = 0; 720 - 721 - tmp = raw_field_value(event, "lockdep_addr", data); 722 - memcpy(&release_event.addr, &tmp, sizeof(void *)); 723 - release_event.name = (char *)raw_field_ptr(event, "name", data); 724 - 725 690 if (trace_handler->release_event) 726 - rc = trace_handler->release_event(&release_event, sample); 727 - 728 - return rc; 729 - } 730 - 731 - static int perf_evsel__process_lock_event(struct perf_evsel *evsel, 732 - struct perf_sample *sample) 733 - { 734 - struct event_format *event = evsel->tp_format; 735 - int rc = 0; 736 - 737 - if (!strcmp(event->name, "lock_acquire")) 738 - rc = perf_evsel__process_lock_acquire(evsel, sample); 739 - if (!strcmp(event->name, "lock_acquired")) 740 - rc = perf_evsel__process_lock_acquired(evsel, sample); 741 - if (!strcmp(event->name, "lock_contended")) 742 - rc = perf_evsel__process_lock_contended(evsel, sample); 743 - if (!strcmp(event->name, "lock_release")) 744 - rc = perf_evsel__process_lock_release(evsel, sample); 745 - 746 - return rc; 691 + return trace_handler->release_event(evsel, sample); 692 + return 0; 747 693 } 748 694 749 695 static void print_bad_events(int bad, int total) ··· 798 870 return rc; 799 871 } 800 
872 873 + typedef int (*tracepoint_handler)(struct perf_evsel *evsel, 874 + struct perf_sample *sample); 875 + 801 876 static int process_sample_event(struct perf_tool *tool __maybe_unused, 802 877 union perf_event *event, 803 878 struct perf_sample *sample, ··· 815 884 return -1; 816 885 } 817 886 818 - return perf_evsel__process_lock_event(evsel, sample); 887 + if (evsel->handler.func != NULL) { 888 + tracepoint_handler f = evsel->handler.func; 889 + return f(evsel, sample); 890 + } 891 + 892 + return 0; 819 893 } 820 894 821 895 static struct perf_tool eops = { ··· 829 893 .ordered_samples = true, 830 894 }; 831 895 896 + static const struct perf_evsel_str_handler lock_tracepoints[] = { 897 + { "lock:lock_acquire", perf_evsel__process_lock_acquire, }, /* CONFIG_LOCKDEP */ 898 + { "lock:lock_acquired", perf_evsel__process_lock_acquired, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */ 899 + { "lock:lock_contended", perf_evsel__process_lock_contended, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */ 900 + { "lock:lock_release", perf_evsel__process_lock_release, }, /* CONFIG_LOCKDEP */ 901 + }; 902 + 832 903 static int read_events(void) 833 904 { 834 905 session = perf_session__new(input_name, O_RDONLY, 0, false, &eops); 835 906 if (!session) { 836 907 pr_err("Initializing perf session failed\n"); 908 + return -1; 909 + } 910 + 911 + if (perf_session__set_tracepoints_handlers(session, lock_tracepoints)) { 912 + pr_err("Initializing perf session tracepoint handlers failed\n"); 837 913 return -1; 838 914 } 839 915 ··· 915 967 OPT_END() 916 968 }; 917 969 918 - static const char * const lock_tracepoints[] = { 919 - "lock:lock_acquire", /* CONFIG_LOCKDEP */ 920 - "lock:lock_acquired", /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */ 921 - "lock:lock_contended", /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */ 922 - "lock:lock_release", /* CONFIG_LOCKDEP */ 923 - }; 924 - 925 970 static const char *record_args[] = { 926 971 "record", 927 972 "-R", ··· 929 988 const char **rec_argv; 930 989 931 990 for 
(i = 0; i < ARRAY_SIZE(lock_tracepoints); i++) { 932 - if (!is_valid_tracepoint(lock_tracepoints[i])) { 991 + if (!is_valid_tracepoint(lock_tracepoints[i].name)) { 933 992 pr_err("tracepoint %s is not enabled. " 934 993 "Are CONFIG_LOCKDEP and CONFIG_LOCK_STAT enabled?\n", 935 - lock_tracepoints[i]); 994 + lock_tracepoints[i].name); 936 995 return 1; 937 996 } 938 997 } ··· 950 1009 951 1010 for (j = 0; j < ARRAY_SIZE(lock_tracepoints); j++) { 952 1011 rec_argv[i++] = "-e"; 953 - rec_argv[i++] = strdup(lock_tracepoints[j]); 1012 + rec_argv[i++] = strdup(lock_tracepoints[j].name); 954 1013 } 955 1014 956 1015 for (j = 1; j < (unsigned int)argc; j++, i++)
+4 -2
tools/perf/builtin-record.c
··· 297 297 } 298 298 299 299 printf("\n"); 300 - error("sys_perf_event_open() syscall returned with %d (%s). /bin/dmesg may provide additional information.\n", 301 - err, strerror(err)); 300 + error("sys_perf_event_open() syscall returned with %d " 301 + "(%s) for event %s. /bin/dmesg may provide " 302 + "additional information.\n", 303 + err, strerror(err), perf_evsel__name(pos)); 302 304 303 305 #if defined(__i386__) || defined(__x86_64__) 304 306 if (attr->type == PERF_TYPE_HARDWARE &&
+86
tools/perf/builtin-test.c
··· 14 14 #include "util/symbol.h" 15 15 #include "util/thread_map.h" 16 16 #include "util/pmu.h" 17 + #include "event-parse.h" 17 18 #include "../../include/linux/hw_breakpoint.h" 18 19 19 20 #include <sys/mman.h> ··· 1208 1207 return ret; 1209 1208 } 1210 1209 1210 + static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name, 1211 + int size, bool should_be_signed) 1212 + { 1213 + struct format_field *field = perf_evsel__field(evsel, name); 1214 + int is_signed; 1215 + int ret = 0; 1216 + 1217 + if (field == NULL) { 1218 + pr_debug("%s: \"%s\" field not found!\n", evsel->name, name); 1219 + return -1; 1220 + } 1221 + 1222 + is_signed = !!(field->flags | FIELD_IS_SIGNED); 1223 + if (should_be_signed && !is_signed) { 1224 + pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n", 1225 + evsel->name, name, is_signed, should_be_signed); 1226 + ret = -1; 1227 + } 1228 + 1229 + if (field->size != size) { 1230 + pr_debug("%s: \"%s\" size (%d) should be %d!\n", 1231 + evsel->name, name, field->size, size); 1232 + ret = -1; 1233 + } 1234 + 1235 + return 0; 1236 + } 1237 + 1238 + static int perf_evsel__tp_sched_test(void) 1239 + { 1240 + struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch", 0); 1241 + int ret = 0; 1242 + 1243 + if (evsel == NULL) { 1244 + pr_debug("perf_evsel__new\n"); 1245 + return -1; 1246 + } 1247 + 1248 + if (perf_evsel__test_field(evsel, "prev_comm", 16, true)) 1249 + ret = -1; 1250 + 1251 + if (perf_evsel__test_field(evsel, "prev_pid", 4, true)) 1252 + ret = -1; 1253 + 1254 + if (perf_evsel__test_field(evsel, "prev_prio", 4, true)) 1255 + ret = -1; 1256 + 1257 + if (perf_evsel__test_field(evsel, "prev_state", 8, true)) 1258 + ret = -1; 1259 + 1260 + if (perf_evsel__test_field(evsel, "next_comm", 16, true)) 1261 + ret = -1; 1262 + 1263 + if (perf_evsel__test_field(evsel, "next_pid", 4, true)) 1264 + ret = -1; 1265 + 1266 + if (perf_evsel__test_field(evsel, "next_prio", 4, true)) 1267 + ret = -1; 1268 + 1269 + 
perf_evsel__delete(evsel); 1270 + 1271 + evsel = perf_evsel__newtp("sched", "sched_wakeup", 0); 1272 + 1273 + if (perf_evsel__test_field(evsel, "comm", 16, true)) 1274 + ret = -1; 1275 + 1276 + if (perf_evsel__test_field(evsel, "pid", 4, true)) 1277 + ret = -1; 1278 + 1279 + if (perf_evsel__test_field(evsel, "prio", 4, true)) 1280 + ret = -1; 1281 + 1282 + if (perf_evsel__test_field(evsel, "success", 4, true)) 1283 + ret = -1; 1284 + 1285 + if (perf_evsel__test_field(evsel, "target_cpu", 4, true)) 1286 + ret = -1; 1287 + 1288 + return 0; 1289 + } 1290 + 1211 1291 static struct test { 1212 1292 const char *desc; 1213 1293 int (*func)(void); ··· 1334 1252 { 1335 1253 .desc = "roundtrip evsel->name check", 1336 1254 .func = perf_evsel__roundtrip_name_test, 1255 + }, 1256 + { 1257 + .desc = "Check parsing of sched tracepoints fields", 1258 + .func = perf_evsel__tp_sched_test, 1337 1259 }, 1338 1260 { 1339 1261 .func = NULL,
+15 -27
tools/perf/builtin-timechart.c
··· 168 168 return cursor; 169 169 cursor = cursor->next; 170 170 } 171 - cursor = malloc(sizeof(struct per_pid)); 171 + cursor = zalloc(sizeof(*cursor)); 172 172 assert(cursor != NULL); 173 - memset(cursor, 0, sizeof(struct per_pid)); 174 173 cursor->pid = pid; 175 174 cursor->next = all_data; 176 175 all_data = cursor; ··· 194 195 } 195 196 c = c->next; 196 197 } 197 - c = malloc(sizeof(struct per_pidcomm)); 198 + c = zalloc(sizeof(*c)); 198 199 assert(c != NULL); 199 - memset(c, 0, sizeof(struct per_pidcomm)); 200 200 c->comm = strdup(comm); 201 201 p->current = c; 202 202 c->next = p->all; ··· 237 239 p = find_create_pid(pid); 238 240 c = p->current; 239 241 if (!c) { 240 - c = malloc(sizeof(struct per_pidcomm)); 242 + c = zalloc(sizeof(*c)); 241 243 assert(c != NULL); 242 - memset(c, 0, sizeof(struct per_pidcomm)); 243 244 p->current = c; 244 245 c->next = p->all; 245 246 p->all = c; 246 247 } 247 248 248 - sample = malloc(sizeof(struct cpu_sample)); 249 + sample = zalloc(sizeof(*sample)); 249 250 assert(sample != NULL); 250 - memset(sample, 0, sizeof(struct cpu_sample)); 251 251 sample->start_time = start; 252 252 sample->end_time = end; 253 253 sample->type = type; ··· 369 373 370 374 static void c_state_end(int cpu, u64 timestamp) 371 375 { 372 - struct power_event *pwr; 373 - pwr = malloc(sizeof(struct power_event)); 376 + struct power_event *pwr = zalloc(sizeof(*pwr)); 377 + 374 378 if (!pwr) 375 379 return; 376 - memset(pwr, 0, sizeof(struct power_event)); 377 380 378 381 pwr->state = cpus_cstate_state[cpu]; 379 382 pwr->start_time = cpus_cstate_start_times[cpu]; ··· 387 392 static void p_state_change(int cpu, u64 timestamp, u64 new_freq) 388 393 { 389 394 struct power_event *pwr; 390 - pwr = malloc(sizeof(struct power_event)); 391 395 392 396 if (new_freq > 8000000) /* detect invalid data */ 393 397 return; 394 398 399 + pwr = zalloc(sizeof(*pwr)); 395 400 if (!pwr) 396 401 return; 397 - memset(pwr, 0, sizeof(struct power_event)); 398 402 399 403 
pwr->state = cpus_pstate_state[cpu]; 400 404 pwr->start_time = cpus_pstate_start_times[cpu]; ··· 423 429 static void 424 430 sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te) 425 431 { 426 - struct wake_event *we; 427 432 struct per_pid *p; 428 433 struct wakeup_entry *wake = (void *)te; 434 + struct wake_event *we = zalloc(sizeof(*we)); 429 435 430 - we = malloc(sizeof(struct wake_event)); 431 436 if (!we) 432 437 return; 433 438 434 - memset(we, 0, sizeof(struct wake_event)); 435 439 we->time = timestamp; 436 440 we->waker = pid; 437 441 ··· 571 579 struct power_event *pwr; 572 580 573 581 for (cpu = 0; cpu <= numcpus; cpu++) { 574 - pwr = malloc(sizeof(struct power_event)); 575 - if (!pwr) 576 - return; 577 - memset(pwr, 0, sizeof(struct power_event)); 578 - 579 582 /* C state */ 580 583 #if 0 584 + pwr = zalloc(sizeof(*pwr)); 585 + if (!pwr) 586 + return; 587 + 581 588 pwr->state = cpus_cstate_state[cpu]; 582 589 pwr->start_time = cpus_cstate_start_times[cpu]; 583 590 pwr->end_time = last_time; ··· 588 597 #endif 589 598 /* P state */ 590 599 591 - pwr = malloc(sizeof(struct power_event)); 600 + pwr = zalloc(sizeof(*pwr)); 592 601 if (!pwr) 593 602 return; 594 - memset(pwr, 0, sizeof(struct power_event)); 595 603 596 604 pwr->state = cpus_pstate_state[cpu]; 597 605 pwr->start_time = cpus_pstate_start_times[cpu]; ··· 820 830 821 831 static void add_process_filter(const char *string) 822 832 { 823 - struct process_filter *filt; 824 - int pid; 833 + int pid = strtoull(string, NULL, 10); 834 + struct process_filter *filt = malloc(sizeof(*filt)); 825 835 826 - pid = strtoull(string, NULL, 10); 827 - filt = malloc(sizeof(struct process_filter)); 828 836 if (!filt) 829 837 return; 830 838
+82 -6
tools/perf/util/evsel.c
··· 10 10 #include <byteswap.h> 11 11 #include <linux/bitops.h> 12 12 #include "asm/bug.h" 13 + #include "debugfs.h" 13 14 #include "event-parse.h" 14 15 #include "evsel.h" 15 16 #include "evlist.h" ··· 68 67 perf_evsel__init(evsel, attr, idx); 69 68 70 69 return evsel; 70 + } 71 + 72 + static struct event_format *event_format__new(const char *sys, const char *name) 73 + { 74 + int fd, n; 75 + char *filename; 76 + void *bf = NULL, *nbf; 77 + size_t size = 0, alloc_size = 0; 78 + struct event_format *format = NULL; 79 + 80 + if (asprintf(&filename, "%s/%s/%s/format", tracing_events_path, sys, name) < 0) 81 + goto out; 82 + 83 + fd = open(filename, O_RDONLY); 84 + if (fd < 0) 85 + goto out_free_filename; 86 + 87 + do { 88 + if (size == alloc_size) { 89 + alloc_size += BUFSIZ; 90 + nbf = realloc(bf, alloc_size); 91 + if (nbf == NULL) 92 + goto out_free_bf; 93 + bf = nbf; 94 + } 95 + 96 + n = read(fd, bf + size, BUFSIZ); 97 + if (n < 0) 98 + goto out_free_bf; 99 + size += n; 100 + } while (n > 0); 101 + 102 + pevent_parse_format(&format, bf, size, sys); 103 + 104 + out_free_bf: 105 + free(bf); 106 + close(fd); 107 + out_free_filename: 108 + free(filename); 109 + out: 110 + return format; 111 + } 112 + 113 + struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx) 114 + { 115 + struct perf_evsel *evsel = zalloc(sizeof(*evsel)); 116 + 117 + if (evsel != NULL) { 118 + struct perf_event_attr attr = { 119 + .type = PERF_TYPE_TRACEPOINT, 120 + }; 121 + 122 + evsel->tp_format = event_format__new(sys, name); 123 + if (evsel->tp_format == NULL) 124 + goto out_free; 125 + 126 + attr.config = evsel->tp_format->id; 127 + perf_evsel__init(evsel, &attr, idx); 128 + evsel->name = evsel->tp_format->name; 129 + } 130 + 131 + return evsel; 132 + 133 + out_free: 134 + free(evsel); 135 + return NULL; 71 136 } 72 137 73 138 const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = { ··· 562 495 perf_evsel__exit(evsel); 563 496 close_cgroup(evsel->cgrp); 564 497 
free(evsel->group_name); 498 + if (evsel->tp_format && evsel->name == evsel->tp_format->name) { 499 + evsel->name = NULL; 500 + pevent_free_format(evsel->tp_format); 501 + } 565 502 free(evsel->name); 566 503 free(evsel); 567 504 } ··· 1073 1002 return 0; 1074 1003 } 1075 1004 1005 + struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name) 1006 + { 1007 + return pevent_find_field(evsel->tp_format, name); 1008 + } 1009 + 1076 1010 char *perf_evsel__strval(struct perf_evsel *evsel, struct perf_sample *sample, 1077 1011 const char *name) 1078 1012 { 1079 - struct format_field *field = pevent_find_field(evsel->tp_format, name); 1013 + struct format_field *field = perf_evsel__field(evsel, name); 1080 1014 int offset; 1081 1015 1082 - if (!field) 1083 - return NULL; 1016 + if (!field) 1017 + return NULL; 1084 1018 1085 1019 offset = field->offset; 1086 1020 ··· 1100 1024 u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample, 1101 1025 const char *name) 1102 1026 { 1103 - struct format_field *field = pevent_find_field(evsel->tp_format, name); 1027 + struct format_field *field = perf_evsel__field(evsel, name); 1104 1028 u64 val; 1105 1029 1106 - if (!field) 1107 - return 0; 1030 + if (!field) 1031 + return 0; 1108 1032 1109 1033 val = pevent_read_number(evsel->tp_format->pevent, 1110 1034 sample->raw_data + field->offset, field->size);
+5
tools/perf/util/evsel.h
··· 81 81 struct perf_record_opts; 82 82 83 83 struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx); 84 + struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx); 84 85 void perf_evsel__init(struct perf_evsel *evsel, 85 86 struct perf_event_attr *attr, int idx); 86 87 void perf_evsel__exit(struct perf_evsel *evsel); ··· 128 127 const char *name); 129 128 u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample, 130 129 const char *name); 130 + 131 + struct format_field; 132 + 133 + struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name); 131 134 132 135 #define perf_evsel__match(evsel, t, c) \ 133 136 (evsel->attr.type == PERF_TYPE_##t && \
+385 -164
tools/perf/util/header.c
··· 22 22 #include "cpumap.h" 23 23 #include "pmu.h" 24 24 #include "vdso.h" 25 + #include "strbuf.h" 25 26 26 27 static bool no_buildid_cache = false; 27 28 ··· 1103 1102 return 0; 1104 1103 } 1105 1104 1106 - static void print_hostname(struct perf_header *ph, int fd, FILE *fp) 1105 + static void print_hostname(struct perf_header *ph, int fd __maybe_unused, 1106 + FILE *fp) 1107 1107 { 1108 - char *str = do_read_string(fd, ph); 1109 - fprintf(fp, "# hostname : %s\n", str); 1110 - free(str); 1108 + fprintf(fp, "# hostname : %s\n", ph->env.hostname); 1111 1109 } 1112 1110 1113 - static void print_osrelease(struct perf_header *ph, int fd, FILE *fp) 1111 + static void print_osrelease(struct perf_header *ph, int fd __maybe_unused, 1112 + FILE *fp) 1114 1113 { 1115 - char *str = do_read_string(fd, ph); 1116 - fprintf(fp, "# os release : %s\n", str); 1117 - free(str); 1114 + fprintf(fp, "# os release : %s\n", ph->env.os_release); 1118 1115 } 1119 1116 1120 - static void print_arch(struct perf_header *ph, int fd, FILE *fp) 1117 + static void print_arch(struct perf_header *ph, int fd __maybe_unused, FILE *fp) 1121 1118 { 1122 - char *str = do_read_string(fd, ph); 1123 - fprintf(fp, "# arch : %s\n", str); 1124 - free(str); 1119 + fprintf(fp, "# arch : %s\n", ph->env.arch); 1125 1120 } 1126 1121 1127 - static void print_cpudesc(struct perf_header *ph, int fd, FILE *fp) 1122 + static void print_cpudesc(struct perf_header *ph, int fd __maybe_unused, 1123 + FILE *fp) 1128 1124 { 1129 - char *str = do_read_string(fd, ph); 1130 - fprintf(fp, "# cpudesc : %s\n", str); 1131 - free(str); 1125 + fprintf(fp, "# cpudesc : %s\n", ph->env.cpu_desc); 1132 1126 } 1133 1127 1134 - static void print_nrcpus(struct perf_header *ph, int fd, FILE *fp) 1128 + static void print_nrcpus(struct perf_header *ph, int fd __maybe_unused, 1129 + FILE *fp) 1135 1130 { 1136 - ssize_t ret; 1137 - u32 nr; 1138 - 1139 - ret = read(fd, &nr, sizeof(nr)); 1140 - if (ret != (ssize_t)sizeof(nr)) 1141 - nr = -1; /* 
interpreted as error */ 1142 - 1143 - if (ph->needs_swap) 1144 - nr = bswap_32(nr); 1145 - 1146 - fprintf(fp, "# nrcpus online : %u\n", nr); 1147 - 1148 - ret = read(fd, &nr, sizeof(nr)); 1149 - if (ret != (ssize_t)sizeof(nr)) 1150 - nr = -1; /* interpreted as error */ 1151 - 1152 - if (ph->needs_swap) 1153 - nr = bswap_32(nr); 1154 - 1155 - fprintf(fp, "# nrcpus avail : %u\n", nr); 1131 + fprintf(fp, "# nrcpus online : %u\n", ph->env.nr_cpus_online); 1132 + fprintf(fp, "# nrcpus avail : %u\n", ph->env.nr_cpus_avail); 1156 1133 } 1157 1134 1158 - static void print_version(struct perf_header *ph, int fd, FILE *fp) 1135 + static void print_version(struct perf_header *ph, int fd __maybe_unused, 1136 + FILE *fp) 1159 1137 { 1160 - char *str = do_read_string(fd, ph); 1161 - fprintf(fp, "# perf version : %s\n", str); 1162 - free(str); 1138 + fprintf(fp, "# perf version : %s\n", ph->env.version); 1163 1139 } 1164 1140 1165 - static void print_cmdline(struct perf_header *ph, int fd, FILE *fp) 1141 + static void print_cmdline(struct perf_header *ph, int fd __maybe_unused, 1142 + FILE *fp) 1166 1143 { 1167 - ssize_t ret; 1144 + int nr, i; 1168 1145 char *str; 1169 - u32 nr, i; 1170 1146 1171 - ret = read(fd, &nr, sizeof(nr)); 1172 - if (ret != (ssize_t)sizeof(nr)) 1173 - return; 1174 - 1175 - if (ph->needs_swap) 1176 - nr = bswap_32(nr); 1147 + nr = ph->env.nr_cmdline; 1148 + str = ph->env.cmdline; 1177 1149 1178 1150 fprintf(fp, "# cmdline : "); 1179 1151 1180 1152 for (i = 0; i < nr; i++) { 1181 - str = do_read_string(fd, ph); 1182 1153 fprintf(fp, "%s ", str); 1183 - free(str); 1154 + str += strlen(str) + 1; 1184 1155 } 1185 1156 fputc('\n', fp); 1186 1157 } 1187 1158 1188 - static void print_cpu_topology(struct perf_header *ph, int fd, FILE *fp) 1159 + static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused, 1160 + FILE *fp) 1189 1161 { 1190 - ssize_t ret; 1191 - u32 nr, i; 1162 + int nr, i; 1192 1163 char *str; 1193 1164 1194 - ret = read(fd, &nr, 
sizeof(nr)); 1195 - if (ret != (ssize_t)sizeof(nr)) 1196 - return; 1197 - 1198 - if (ph->needs_swap) 1199 - nr = bswap_32(nr); 1165 + nr = ph->env.nr_sibling_cores; 1166 + str = ph->env.sibling_cores; 1200 1167 1201 1168 for (i = 0; i < nr; i++) { 1202 - str = do_read_string(fd, ph); 1203 1169 fprintf(fp, "# sibling cores : %s\n", str); 1204 - free(str); 1170 + str += strlen(str) + 1; 1205 1171 } 1206 1172 1207 - ret = read(fd, &nr, sizeof(nr)); 1208 - if (ret != (ssize_t)sizeof(nr)) 1209 - return; 1210 - 1211 - if (ph->needs_swap) 1212 - nr = bswap_32(nr); 1173 + nr = ph->env.nr_sibling_threads; 1174 + str = ph->env.sibling_threads; 1213 1175 1214 1176 for (i = 0; i < nr; i++) { 1215 - str = do_read_string(fd, ph); 1216 1177 fprintf(fp, "# sibling threads : %s\n", str); 1217 - free(str); 1178 + str += strlen(str) + 1; 1218 1179 } 1219 1180 } 1220 1181 ··· 1337 1374 free_event_desc(events); 1338 1375 } 1339 1376 1340 - static void print_total_mem(struct perf_header *h __maybe_unused, int fd, 1377 + static void print_total_mem(struct perf_header *ph, int fd __maybe_unused, 1341 1378 FILE *fp) 1342 1379 { 1343 - uint64_t mem; 1344 - ssize_t ret; 1345 - 1346 - ret = read(fd, &mem, sizeof(mem)); 1347 - if (ret != sizeof(mem)) 1348 - goto error; 1349 - 1350 - if (h->needs_swap) 1351 - mem = bswap_64(mem); 1352 - 1353 - fprintf(fp, "# total memory : %"PRIu64" kB\n", mem); 1354 - return; 1355 - error: 1356 - fprintf(fp, "# total memory : unknown\n"); 1380 + fprintf(fp, "# total memory : %Lu kB\n", ph->env.total_mem); 1357 1381 } 1358 1382 1359 - static void print_numa_topology(struct perf_header *h __maybe_unused, int fd, 1383 + static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused, 1360 1384 FILE *fp) 1361 1385 { 1362 - ssize_t ret; 1363 1386 u32 nr, c, i; 1364 - char *str; 1387 + char *str, *tmp; 1365 1388 uint64_t mem_total, mem_free; 1366 1389 1367 1390 /* nr nodes */ 1368 - ret = read(fd, &nr, sizeof(nr)); 1369 - if (ret != 
(ssize_t)sizeof(nr)) 1370 - goto error; 1371 - 1372 - if (h->needs_swap) 1373 - nr = bswap_32(nr); 1391 + nr = ph->env.nr_numa_nodes; 1392 + str = ph->env.numa_nodes; 1374 1393 1375 1394 for (i = 0; i < nr; i++) { 1376 - 1377 1395 /* node number */ 1378 - ret = read(fd, &c, sizeof(c)); 1379 - if (ret != (ssize_t)sizeof(c)) 1396 + c = strtoul(str, &tmp, 0); 1397 + if (*tmp != ':') 1380 1398 goto error; 1381 1399 1382 - if (h->needs_swap) 1383 - c = bswap_32(c); 1384 - 1385 - ret = read(fd, &mem_total, sizeof(u64)); 1386 - if (ret != sizeof(u64)) 1400 + str = tmp + 1; 1401 + mem_total = strtoull(str, &tmp, 0); 1402 + if (*tmp != ':') 1387 1403 goto error; 1388 1404 1389 - ret = read(fd, &mem_free, sizeof(u64)); 1390 - if (ret != sizeof(u64)) 1405 + str = tmp + 1; 1406 + mem_free = strtoull(str, &tmp, 0); 1407 + if (*tmp != ':') 1391 1408 goto error; 1392 - 1393 - if (h->needs_swap) { 1394 - mem_total = bswap_64(mem_total); 1395 - mem_free = bswap_64(mem_free); 1396 - } 1397 1409 1398 1410 fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB," 1399 1411 " free = %"PRIu64" kB\n", 1400 - c, 1401 - mem_total, 1402 - mem_free); 1412 + c, mem_total, mem_free); 1403 1413 1404 - str = do_read_string(fd, h); 1414 + str = tmp + 1; 1405 1415 fprintf(fp, "# node%u cpu list : %s\n", c, str); 1406 - free(str); 1407 1416 } 1408 1417 return; 1409 1418 error: 1410 1419 fprintf(fp, "# numa topology : not available\n"); 1411 1420 } 1412 1421 1413 - static void print_cpuid(struct perf_header *ph, int fd, FILE *fp) 1422 + static void print_cpuid(struct perf_header *ph, int fd __maybe_unused, FILE *fp) 1414 1423 { 1415 - char *str = do_read_string(fd, ph); 1416 - fprintf(fp, "# cpuid : %s\n", str); 1417 - free(str); 1424 + fprintf(fp, "# cpuid : %s\n", ph->env.cpuid); 1418 1425 } 1419 1426 1420 1427 static void print_branch_stack(struct perf_header *ph __maybe_unused, 1421 - int fd __maybe_unused, 1422 - FILE *fp) 1428 + int fd __maybe_unused, FILE *fp) 1423 1429 { 1424 1430 fprintf(fp, 
"# contains samples with branch stack\n"); 1425 1431 } 1426 1432 1427 - static void print_pmu_mappings(struct perf_header *ph, int fd, FILE *fp) 1433 + static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused, 1434 + FILE *fp) 1428 1435 { 1429 1436 const char *delimiter = "# pmu mappings: "; 1430 - char *name; 1431 - int ret; 1437 + char *str, *tmp; 1432 1438 u32 pmu_num; 1433 1439 u32 type; 1434 1440 1435 - ret = read(fd, &pmu_num, sizeof(pmu_num)); 1436 - if (ret != sizeof(pmu_num)) 1437 - goto error; 1438 - 1439 - if (ph->needs_swap) 1440 - pmu_num = bswap_32(pmu_num); 1441 - 1441 + pmu_num = ph->env.nr_pmu_mappings; 1442 1442 if (!pmu_num) { 1443 1443 fprintf(fp, "# pmu mappings: not available\n"); 1444 1444 return; 1445 1445 } 1446 1446 1447 - while (pmu_num) { 1448 - if (read(fd, &type, sizeof(type)) != sizeof(type)) 1449 - break; 1450 - if (ph->needs_swap) 1451 - type = bswap_32(type); 1447 + str = ph->env.pmu_mappings; 1452 1448 1453 - name = do_read_string(fd, ph); 1454 - if (!name) 1455 - break; 1456 - pmu_num--; 1457 - fprintf(fp, "%s%s = %" PRIu32, delimiter, name, type); 1458 - free(name); 1449 + while (pmu_num) { 1450 + type = strtoul(str, &tmp, 0); 1451 + if (*tmp != ':') 1452 + goto error; 1453 + 1454 + str = tmp + 1; 1455 + fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type); 1456 + 1459 1457 delimiter = ", "; 1458 + str += strlen(str) + 1; 1459 + pmu_num--; 1460 1460 } 1461 1461 1462 1462 fprintf(fp, "\n"); ··· 1580 1654 return err; 1581 1655 } 1582 1656 1583 - static int process_tracing_data(struct perf_file_section *section 1584 - __maybe_unused, 1585 - struct perf_header *ph __maybe_unused, 1586 - int feat __maybe_unused, int fd, void *data) 1657 + static int process_tracing_data(struct perf_file_section *section __maybe_unused, 1658 + struct perf_header *ph __maybe_unused, 1659 + int fd, void *data) 1587 1660 { 1588 1661 trace_report(fd, data, false); 1589 1662 return 0; 1590 1663 } 1591 1664 1592 1665 static int 
process_build_id(struct perf_file_section *section, 1593 - struct perf_header *ph, 1594 - int feat __maybe_unused, int fd, 1666 + struct perf_header *ph, int fd, 1595 1667 void *data __maybe_unused) 1596 1668 { 1597 1669 if (perf_header__read_build_ids(ph, fd, section->offset, section->size)) 1598 1670 pr_debug("Failed to read buildids, continuing...\n"); 1671 + return 0; 1672 + } 1673 + 1674 + static int process_hostname(struct perf_file_section *section __maybe_unused, 1675 + struct perf_header *ph, int fd, 1676 + void *data __maybe_unused) 1677 + { 1678 + ph->env.hostname = do_read_string(fd, ph); 1679 + return ph->env.hostname ? 0 : -ENOMEM; 1680 + } 1681 + 1682 + static int process_osrelease(struct perf_file_section *section __maybe_unused, 1683 + struct perf_header *ph, int fd, 1684 + void *data __maybe_unused) 1685 + { 1686 + ph->env.os_release = do_read_string(fd, ph); 1687 + return ph->env.os_release ? 0 : -ENOMEM; 1688 + } 1689 + 1690 + static int process_version(struct perf_file_section *section __maybe_unused, 1691 + struct perf_header *ph, int fd, 1692 + void *data __maybe_unused) 1693 + { 1694 + ph->env.version = do_read_string(fd, ph); 1695 + return ph->env.version ? 0 : -ENOMEM; 1696 + } 1697 + 1698 + static int process_arch(struct perf_file_section *section __maybe_unused, 1699 + struct perf_header *ph, int fd, 1700 + void *data __maybe_unused) 1701 + { 1702 + ph->env.arch = do_read_string(fd, ph); 1703 + return ph->env.arch ? 
0 : -ENOMEM; 1704 + } 1705 + 1706 + static int process_nrcpus(struct perf_file_section *section __maybe_unused, 1707 + struct perf_header *ph, int fd, 1708 + void *data __maybe_unused) 1709 + { 1710 + size_t ret; 1711 + u32 nr; 1712 + 1713 + ret = read(fd, &nr, sizeof(nr)); 1714 + if (ret != sizeof(nr)) 1715 + return -1; 1716 + 1717 + if (ph->needs_swap) 1718 + nr = bswap_32(nr); 1719 + 1720 + ph->env.nr_cpus_online = nr; 1721 + 1722 + ret = read(fd, &nr, sizeof(nr)); 1723 + if (ret != sizeof(nr)) 1724 + return -1; 1725 + 1726 + if (ph->needs_swap) 1727 + nr = bswap_32(nr); 1728 + 1729 + ph->env.nr_cpus_avail = nr; 1730 + return 0; 1731 + } 1732 + 1733 + static int process_cpudesc(struct perf_file_section *section __maybe_unused, 1734 + struct perf_header *ph, int fd, 1735 + void *data __maybe_unused) 1736 + { 1737 + ph->env.cpu_desc = do_read_string(fd, ph); 1738 + return ph->env.cpu_desc ? 0 : -ENOMEM; 1739 + } 1740 + 1741 + static int process_cpuid(struct perf_file_section *section __maybe_unused, 1742 + struct perf_header *ph, int fd, 1743 + void *data __maybe_unused) 1744 + { 1745 + ph->env.cpuid = do_read_string(fd, ph); 1746 + return ph->env.cpuid ? 
0 : -ENOMEM; 1747 + } 1748 + 1749 + static int process_total_mem(struct perf_file_section *section __maybe_unused, 1750 + struct perf_header *ph, int fd, 1751 + void *data __maybe_unused) 1752 + { 1753 + uint64_t mem; 1754 + size_t ret; 1755 + 1756 + ret = read(fd, &mem, sizeof(mem)); 1757 + if (ret != sizeof(mem)) 1758 + return -1; 1759 + 1760 + if (ph->needs_swap) 1761 + mem = bswap_64(mem); 1762 + 1763 + ph->env.total_mem = mem; 1599 1764 return 0; 1600 1765 } 1601 1766 ··· 1704 1687 } 1705 1688 1706 1689 static void 1707 - perf_evlist__set_event_name(struct perf_evlist *evlist, struct perf_evsel *event) 1690 + perf_evlist__set_event_name(struct perf_evlist *evlist, 1691 + struct perf_evsel *event) 1708 1692 { 1709 1693 struct perf_evsel *evsel; 1710 1694 ··· 1724 1706 1725 1707 static int 1726 1708 process_event_desc(struct perf_file_section *section __maybe_unused, 1727 - struct perf_header *header, int feat __maybe_unused, int fd, 1709 + struct perf_header *header, int fd, 1728 1710 void *data __maybe_unused) 1729 1711 { 1730 - struct perf_session *session = container_of(header, struct perf_session, header); 1712 + struct perf_session *session; 1731 1713 struct perf_evsel *evsel, *events = read_event_desc(header, fd); 1732 1714 1733 1715 if (!events) 1734 1716 return 0; 1735 1717 1718 + session = container_of(header, struct perf_session, header); 1736 1719 for (evsel = events; evsel->attr.size; evsel++) 1737 1720 perf_evlist__set_event_name(session->evlist, evsel); 1738 1721 ··· 1742 1723 return 0; 1743 1724 } 1744 1725 1726 + static int process_cmdline(struct perf_file_section *section __maybe_unused, 1727 + struct perf_header *ph, int fd, 1728 + void *data __maybe_unused) 1729 + { 1730 + size_t ret; 1731 + char *str; 1732 + u32 nr, i; 1733 + struct strbuf sb; 1734 + 1735 + ret = read(fd, &nr, sizeof(nr)); 1736 + if (ret != sizeof(nr)) 1737 + return -1; 1738 + 1739 + if (ph->needs_swap) 1740 + nr = bswap_32(nr); 1741 + 1742 + ph->env.nr_cmdline = nr; 1743 + 
strbuf_init(&sb, 128); 1744 + 1745 + for (i = 0; i < nr; i++) { 1746 + str = do_read_string(fd, ph); 1747 + if (!str) 1748 + goto error; 1749 + 1750 + /* include a NULL character at the end */ 1751 + strbuf_add(&sb, str, strlen(str) + 1); 1752 + free(str); 1753 + } 1754 + ph->env.cmdline = strbuf_detach(&sb, NULL); 1755 + return 0; 1756 + 1757 + error: 1758 + strbuf_release(&sb); 1759 + return -1; 1760 + } 1761 + 1762 + static int process_cpu_topology(struct perf_file_section *section __maybe_unused, 1763 + struct perf_header *ph, int fd, 1764 + void *data __maybe_unused) 1765 + { 1766 + size_t ret; 1767 + u32 nr, i; 1768 + char *str; 1769 + struct strbuf sb; 1770 + 1771 + ret = read(fd, &nr, sizeof(nr)); 1772 + if (ret != sizeof(nr)) 1773 + return -1; 1774 + 1775 + if (ph->needs_swap) 1776 + nr = bswap_32(nr); 1777 + 1778 + ph->env.nr_sibling_cores = nr; 1779 + strbuf_init(&sb, 128); 1780 + 1781 + for (i = 0; i < nr; i++) { 1782 + str = do_read_string(fd, ph); 1783 + if (!str) 1784 + goto error; 1785 + 1786 + /* include a NULL character at the end */ 1787 + strbuf_add(&sb, str, strlen(str) + 1); 1788 + free(str); 1789 + } 1790 + ph->env.sibling_cores = strbuf_detach(&sb, NULL); 1791 + 1792 + ret = read(fd, &nr, sizeof(nr)); 1793 + if (ret != sizeof(nr)) 1794 + return -1; 1795 + 1796 + if (ph->needs_swap) 1797 + nr = bswap_32(nr); 1798 + 1799 + ph->env.nr_sibling_threads = nr; 1800 + 1801 + for (i = 0; i < nr; i++) { 1802 + str = do_read_string(fd, ph); 1803 + if (!str) 1804 + goto error; 1805 + 1806 + /* include a NULL character at the end */ 1807 + strbuf_add(&sb, str, strlen(str) + 1); 1808 + free(str); 1809 + } 1810 + ph->env.sibling_threads = strbuf_detach(&sb, NULL); 1811 + return 0; 1812 + 1813 + error: 1814 + strbuf_release(&sb); 1815 + return -1; 1816 + } 1817 + 1818 + static int process_numa_topology(struct perf_file_section *section __maybe_unused, 1819 + struct perf_header *ph, int fd, 1820 + void *data __maybe_unused) 1821 + { 1822 + size_t ret; 1823 + 
u32 nr, node, i; 1824 + char *str; 1825 + uint64_t mem_total, mem_free; 1826 + struct strbuf sb; 1827 + 1828 + /* nr nodes */ 1829 + ret = read(fd, &nr, sizeof(nr)); 1830 + if (ret != sizeof(nr)) 1831 + goto error; 1832 + 1833 + if (ph->needs_swap) 1834 + nr = bswap_32(nr); 1835 + 1836 + ph->env.nr_numa_nodes = nr; 1837 + strbuf_init(&sb, 256); 1838 + 1839 + for (i = 0; i < nr; i++) { 1840 + /* node number */ 1841 + ret = read(fd, &node, sizeof(node)); 1842 + if (ret != sizeof(node)) 1843 + goto error; 1844 + 1845 + ret = read(fd, &mem_total, sizeof(u64)); 1846 + if (ret != sizeof(u64)) 1847 + goto error; 1848 + 1849 + ret = read(fd, &mem_free, sizeof(u64)); 1850 + if (ret != sizeof(u64)) 1851 + goto error; 1852 + 1853 + if (ph->needs_swap) { 1854 + node = bswap_32(node); 1855 + mem_total = bswap_64(mem_total); 1856 + mem_free = bswap_64(mem_free); 1857 + } 1858 + 1859 + strbuf_addf(&sb, "%u:%"PRIu64":%"PRIu64":", 1860 + node, mem_total, mem_free); 1861 + 1862 + str = do_read_string(fd, ph); 1863 + if (!str) 1864 + goto error; 1865 + 1866 + /* include a NULL character at the end */ 1867 + strbuf_add(&sb, str, strlen(str) + 1); 1868 + free(str); 1869 + } 1870 + ph->env.numa_nodes = strbuf_detach(&sb, NULL); 1871 + return 0; 1872 + 1873 + error: 1874 + strbuf_release(&sb); 1875 + return -1; 1876 + } 1877 + 1878 + static int process_pmu_mappings(struct perf_file_section *section __maybe_unused, 1879 + struct perf_header *ph, int fd, 1880 + void *data __maybe_unused) 1881 + { 1882 + size_t ret; 1883 + char *name; 1884 + u32 pmu_num; 1885 + u32 type; 1886 + struct strbuf sb; 1887 + 1888 + ret = read(fd, &pmu_num, sizeof(pmu_num)); 1889 + if (ret != sizeof(pmu_num)) 1890 + return -1; 1891 + 1892 + if (ph->needs_swap) 1893 + pmu_num = bswap_32(pmu_num); 1894 + 1895 + if (!pmu_num) { 1896 + pr_debug("pmu mappings not available\n"); 1897 + return 0; 1898 + } 1899 + 1900 + ph->env.nr_pmu_mappings = pmu_num; 1901 + strbuf_init(&sb, 128); 1902 + 1903 + while (pmu_num) { 1904 + 
if (read(fd, &type, sizeof(type)) != sizeof(type)) 1905 + goto error; 1906 + if (ph->needs_swap) 1907 + type = bswap_32(type); 1908 + 1909 + name = do_read_string(fd, ph); 1910 + if (!name) 1911 + goto error; 1912 + 1913 + strbuf_addf(&sb, "%u:%s", type, name); 1914 + /* include a NULL character at the end */ 1915 + strbuf_add(&sb, "", 1); 1916 + 1917 + free(name); 1918 + pmu_num--; 1919 + } 1920 + ph->env.pmu_mappings = strbuf_detach(&sb, NULL); 1921 + return 0; 1922 + 1923 + error: 1924 + strbuf_release(&sb); 1925 + return -1; 1926 + } 1927 + 1745 1928 struct feature_ops { 1746 1929 int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist); 1747 1930 void (*print)(struct perf_header *h, int fd, FILE *fp); 1748 1931 int (*process)(struct perf_file_section *section, 1749 - struct perf_header *h, int feat, int fd, void *data); 1932 + struct perf_header *h, int fd, void *data); 1750 1933 const char *name; 1751 1934 bool full_only; 1752 1935 }; ··· 1960 1739 .process = process_##func } 1961 1740 #define FEAT_OPF(n, func) \ 1962 1741 [n] = { .name = #n, .write = write_##func, .print = print_##func, \ 1963 - .full_only = true } 1742 + .process = process_##func, .full_only = true } 1964 1743 1965 1744 /* feature_ops not implemented: */ 1966 1745 #define print_tracing_data NULL ··· 1969 1748 static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = { 1970 1749 FEAT_OPP(HEADER_TRACING_DATA, tracing_data), 1971 1750 FEAT_OPP(HEADER_BUILD_ID, build_id), 1972 - FEAT_OPA(HEADER_HOSTNAME, hostname), 1973 - FEAT_OPA(HEADER_OSRELEASE, osrelease), 1974 - FEAT_OPA(HEADER_VERSION, version), 1975 - FEAT_OPA(HEADER_ARCH, arch), 1976 - FEAT_OPA(HEADER_NRCPUS, nrcpus), 1977 - FEAT_OPA(HEADER_CPUDESC, cpudesc), 1978 - FEAT_OPA(HEADER_CPUID, cpuid), 1979 - FEAT_OPA(HEADER_TOTAL_MEM, total_mem), 1751 + FEAT_OPP(HEADER_HOSTNAME, hostname), 1752 + FEAT_OPP(HEADER_OSRELEASE, osrelease), 1753 + FEAT_OPP(HEADER_VERSION, version), 1754 + FEAT_OPP(HEADER_ARCH, arch), 1755 + 
FEAT_OPP(HEADER_NRCPUS, nrcpus), 1756 + FEAT_OPP(HEADER_CPUDESC, cpudesc), 1757 + FEAT_OPP(HEADER_CPUID, cpuid), 1758 + FEAT_OPP(HEADER_TOTAL_MEM, total_mem), 1980 1759 FEAT_OPP(HEADER_EVENT_DESC, event_desc), 1981 - FEAT_OPA(HEADER_CMDLINE, cmdline), 1760 + FEAT_OPP(HEADER_CMDLINE, cmdline), 1982 1761 FEAT_OPF(HEADER_CPU_TOPOLOGY, cpu_topology), 1983 1762 FEAT_OPF(HEADER_NUMA_TOPOLOGY, numa_topology), 1984 1763 FEAT_OPA(HEADER_BRANCH_STACK, branch_stack), 1985 - FEAT_OPA(HEADER_PMU_MAPPINGS, pmu_mappings), 1764 + FEAT_OPP(HEADER_PMU_MAPPINGS, pmu_mappings), 1986 1765 }; 1987 1766 1988 1767 struct header_print_data { ··· 2462 2241 if (!feat_ops[feat].process) 2463 2242 return 0; 2464 2243 2465 - return feat_ops[feat].process(section, ph, feat, fd, data); 2244 + return feat_ops[feat].process(section, ph, fd, data); 2466 2245 } 2467 2246 2468 2247 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
+24
tools/perf/util/header.h
··· 58 58 int perf_file_header__read(struct perf_file_header *header, 59 59 struct perf_header *ph, int fd); 60 60 61 + struct perf_session_env { 62 + char *hostname; 63 + char *os_release; 64 + char *version; 65 + char *arch; 66 + int nr_cpus_online; 67 + int nr_cpus_avail; 68 + char *cpu_desc; 69 + char *cpuid; 70 + unsigned long long total_mem; 71 + 72 + int nr_cmdline; 73 + char *cmdline; 74 + int nr_sibling_cores; 75 + char *sibling_cores; 76 + int nr_sibling_threads; 77 + char *sibling_threads; 78 + int nr_numa_nodes; 79 + char *numa_nodes; 80 + int nr_pmu_mappings; 81 + char *pmu_mappings; 82 + }; 83 + 61 84 struct perf_header { 62 85 int frozen; 63 86 bool needs_swap; ··· 90 67 u64 event_offset; 91 68 u64 event_size; 92 69 DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS); 70 + struct perf_session_env env; 93 71 }; 94 72 95 73 struct perf_evlist;
+2 -3
tools/perf/util/map.c
··· 243 243 244 244 size_t map__fprintf_dsoname(struct map *map, FILE *fp) 245 245 { 246 - const char *dsoname; 246 + const char *dsoname = "[unknown]"; 247 247 248 248 if (map && map->dso && (map->dso->name || map->dso->long_name)) { 249 249 if (symbol_conf.show_kernel_path && map->dso->long_name) 250 250 dsoname = map->dso->long_name; 251 251 else if (map->dso->name) 252 252 dsoname = map->dso->name; 253 - } else 254 - dsoname = "[unknown]"; 253 + } 255 254 256 255 return fprintf(fp, "%s", dsoname); 257 256 }
+29 -7
tools/perf/util/probe-event.c
··· 1100 1100 struct probe_trace_point *tp = &tev->point; 1101 1101 char pr; 1102 1102 char *p; 1103 + char *argv0_str = NULL, *fmt, *fmt1_str, *fmt2_str, *fmt3_str; 1103 1104 int ret, i, argc; 1104 1105 char **argv; 1105 1106 ··· 1117 1116 } 1118 1117 1119 1118 /* Scan event and group name. */ 1120 - ret = sscanf(argv[0], "%c:%a[^/ \t]/%a[^ \t]", 1121 - &pr, (float *)(void *)&tev->group, 1122 - (float *)(void *)&tev->event); 1123 - if (ret != 3) { 1119 + argv0_str = strdup(argv[0]); 1120 + if (argv0_str == NULL) { 1121 + ret = -ENOMEM; 1122 + goto out; 1123 + } 1124 + fmt1_str = strtok_r(argv0_str, ":", &fmt); 1125 + fmt2_str = strtok_r(NULL, "/", &fmt); 1126 + fmt3_str = strtok_r(NULL, " \t", &fmt); 1127 + if (fmt1_str == NULL || strlen(fmt1_str) != 1 || fmt2_str == NULL 1128 + || fmt3_str == NULL) { 1124 1129 semantic_error("Failed to parse event name: %s\n", argv[0]); 1125 1130 ret = -EINVAL; 1131 + goto out; 1132 + } 1133 + pr = fmt1_str[0]; 1134 + tev->group = strdup(fmt2_str); 1135 + tev->event = strdup(fmt3_str); 1136 + if (tev->group == NULL || tev->event == NULL) { 1137 + ret = -ENOMEM; 1126 1138 goto out; 1127 1139 } 1128 1140 pr_debug("Group:%s Event:%s probe:%c\n", tev->group, tev->event, pr); ··· 1149 1135 p++; 1150 1136 } else 1151 1137 p = argv[1]; 1152 - ret = sscanf(p, "%a[^+]+%lu", (float *)(void *)&tp->symbol, 1153 - &tp->offset); 1154 - if (ret == 1) 1138 + fmt1_str = strtok_r(p, "+", &fmt); 1139 + tp->symbol = strdup(fmt1_str); 1140 + if (tp->symbol == NULL) { 1141 + ret = -ENOMEM; 1142 + goto out; 1143 + } 1144 + fmt2_str = strtok_r(NULL, "", &fmt); 1145 + if (fmt2_str == NULL) 1155 1146 tp->offset = 0; 1147 + else 1148 + tp->offset = strtoul(fmt2_str, NULL, 10); 1156 1149 1157 1150 tev->nargs = argc - 2; 1158 1151 tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs); ··· 1183 1162 } 1184 1163 ret = 0; 1185 1164 out: 1165 + free(argv0_str); 1186 1166 argv_free(argv); 1187 1167 return ret; 1188 1168 }
+1 -1
tools/perf/util/scripting-engines/trace-event-perl.c
··· 282 282 283 283 event = find_cache_event(evsel); 284 284 if (!event) 285 - die("ug! no event found for type %d", evsel->attr.config); 285 + die("ug! no event found for type %" PRIu64, evsel->attr.config); 286 286 287 287 pid = raw_field_value(event, "common_pid", data); 288 288
+1
tools/perf/util/symbol.h
··· 34 34 return NULL; 35 35 } 36 36 #else 37 + #define PACKAGE 'perf' 37 38 #include <bfd.h> 38 39 #endif 39 40 #endif
+2
tools/perf/util/thread.h
··· 16 16 bool comm_set; 17 17 char *comm; 18 18 int comm_len; 19 + 20 + void *priv; 19 21 }; 20 22 21 23 struct machine;
+8 -10
tools/perf/util/trace-event-parse.c
··· 229 229 char *next = NULL; 230 230 char *addr_str; 231 231 char *mod; 232 - char ch; 232 + char *fmt; 233 233 234 234 line = strtok_r(file, "\n", &next); 235 235 while (line) { 236 236 mod = NULL; 237 - sscanf(line, "%as %c %as\t[%as", 238 - (float *)(void *)&addr_str, /* workaround gcc warning */ 239 - &ch, (float *)(void *)&func, (float *)(void *)&mod); 237 + addr_str = strtok_r(line, " ", &fmt); 240 238 addr = strtoull(addr_str, NULL, 16); 241 - free(addr_str); 242 - 243 - /* truncate the extra ']' */ 239 + /* skip character */ 240 + strtok_r(NULL, " ", &fmt); 241 + func = strtok_r(NULL, "\t", &fmt); 242 + mod = strtok_r(NULL, "]", &fmt); 243 + /* truncate the extra '[' */ 244 244 if (mod) 245 - mod[strlen(mod) - 1] = 0; 245 + mod = mod + 1; 246 246 247 247 pevent_register_function(pevent, func, addr, mod); 248 - free(func); 249 - free(mod); 250 248 251 249 line = strtok_r(NULL, "\n", &next); 252 250 }