Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull more perf updates from Ingo Molnar:
"The only kernel changes are comment typo fixes.

The rest is mostly tooling fixes, but also new vendor event additions
and updates, a bigger libperf/libtraceevent library and a header files
reorganization that came in a bit late"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (108 commits)
perf unwind: Fix libunwind build failure on i386 systems
perf parser: Remove needless include directives
perf build: Add detection of java-11-openjdk-devel package
perf jvmti: Include JVMTI support for s390
perf vendor events: Remove P8 HW events which are not supported
perf evlist: Fix access of freed id arrays
perf stat: Fix free memory access / memory leaks in metrics
perf tools: Replace needless mmap.h with what is needed, event.h
perf evsel: Move config terms to a separate header
perf evlist: Remove unused perf_evlist__fprintf() method
perf evsel: Introduce evsel_fprintf.h
perf evsel: Remove need for symbol_conf in evsel_fprintf.c
perf copyfile: Move copyfile routines to separate files
libperf: Add perf_evlist__poll() function
libperf: Add perf_evlist__add_pollfd() function
libperf: Add perf_evlist__alloc_pollfd() function
libperf: Add libperf_init() call to the tests
libperf: Merge libperf_set_print() into libperf_init()
libperf: Add libperf dependency for tests targets
libperf: Use sys/types.h to get ssize_t, not unistd.h
...

+4801 -3617
+3 -3
kernel/events/core.c
··· 2239 2239 * 2240 2240 * If event->ctx is a cloned context, callers must make sure that 2241 2241 * every task struct that event->ctx->task could possibly point to 2242 - * remains valid. This condition is satisifed when called through 2242 + * remains valid. This condition is satisfied when called through 2243 2243 * perf_event_for_each_child or perf_event_for_each because they 2244 2244 * hold the top-level event's child_mutex, so any descendant that 2245 2245 * goes to exit will block in perf_event_exit_event(). ··· 6054 6054 * Get remaining task size from user stack pointer. 6055 6055 * 6056 6056 * It'd be better to take stack vma map and limit this more 6057 - * precisly, but there's no way to get it safely under interrupt, 6057 + * precisely, but there's no way to get it safely under interrupt, 6058 6058 * so using TASK_SIZE as limit. 6059 6059 */ 6060 6060 static u64 perf_ustack_task_size(struct pt_regs *regs) ··· 6616 6616 6617 6617 if (sample_type & PERF_SAMPLE_STACK_USER) { 6618 6618 /* 6619 - * Either we need PERF_SAMPLE_STACK_USER bit to be allways 6619 + * Either we need PERF_SAMPLE_STACK_USER bit to be always 6620 6620 * processed as the last one or have additional check added 6621 6621 * in case new sample type is added, because we could eat 6622 6622 * up the rest of the sample size.
+3
tools/arch/x86/include/asm/cpufeatures.h
··· 231 231 #define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer VMMCALL to VMCALL */ 232 232 #define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */ 233 233 #define X86_FEATURE_EPT_AD ( 8*32+17) /* Intel Extended Page Table access-dirty bit */ 234 + #define X86_FEATURE_VMCALL ( 8*32+18) /* "" Hypervisor supports the VMCALL instruction */ 235 + #define X86_FEATURE_VMW_VMMCALL ( 8*32+19) /* "" VMware prefers VMMCALL hypercall instruction */ 234 236 235 237 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */ 236 238 #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/ ··· 356 354 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */ 357 355 #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ 358 356 #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ 357 + #define X86_FEATURE_AVX512_VP2INTERSECT (18*32+ 8) /* AVX-512 Intersect for D/Q */ 359 358 #define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */ 360 359 #define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */ 361 360 #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
+1 -1
tools/arch/x86/include/uapi/asm/unistd.h
··· 3 3 #define _UAPI_ASM_X86_UNISTD_H 4 4 5 5 /* x32 syscall flag bit */ 6 - #define __X32_SYSCALL_BIT 0x40000000 6 + #define __X32_SYSCALL_BIT 0x40000000UL 7 7 8 8 #ifndef __KERNEL__ 9 9 # ifdef __i386__
+1
tools/include/asm/bug.h
··· 3 3 #define _TOOLS_ASM_BUG_H 4 4 5 5 #include <linux/compiler.h> 6 + #include <stdio.h> 6 7 7 8 #define __WARN_printf(arg...) do { fprintf(stderr, arg); } while (0) 8 9
+1 -1
tools/include/uapi/asm-generic/unistd.h
··· 569 569 __SC_COMP(__NR_semctl, sys_semctl, compat_sys_semctl) 570 570 #if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32 571 571 #define __NR_semtimedop 192 572 - __SC_COMP(__NR_semtimedop, sys_semtimedop, sys_semtimedop_time32) 572 + __SC_3264(__NR_semtimedop, sys_semtimedop_time32, sys_semtimedop) 573 573 #endif 574 574 #define __NR_semop 193 575 575 __SYSCALL(__NR_semop, sys_semop)
+6 -1
tools/include/uapi/linux/prctl.h
··· 181 181 #define PR_GET_THP_DISABLE 42 182 182 183 183 /* 184 - * Tell the kernel to start/stop helping userspace manage bounds tables. 184 + * No longer implemented, but left here to ensure the numbers stay reserved: 185 185 */ 186 186 #define PR_MPX_ENABLE_MANAGEMENT 43 187 187 #define PR_MPX_DISABLE_MANAGEMENT 44 ··· 228 228 # define PR_PAC_APDAKEY (1UL << 2) 229 229 # define PR_PAC_APDBKEY (1UL << 3) 230 230 # define PR_PAC_APGAKEY (1UL << 4) 231 + 232 + /* Tagged user address controls for arm64 */ 233 + #define PR_SET_TAGGED_ADDR_CTRL 55 234 + #define PR_GET_TAGGED_ADDR_CTRL 56 235 + # define PR_TAGGED_ADDR_ENABLE (1UL << 0) 231 236 232 237 #endif /* _LINUX_PRCTL_H */
-11
tools/lib/traceevent/Build
··· 6 6 libtraceevent-y += kbuffer-parse.o 7 7 libtraceevent-y += tep_strerror.o 8 8 libtraceevent-y += event-parse-api.o 9 - 10 - plugin_jbd2-y += plugin_jbd2.o 11 - plugin_hrtimer-y += plugin_hrtimer.o 12 - plugin_kmem-y += plugin_kmem.o 13 - plugin_kvm-y += plugin_kvm.o 14 - plugin_mac80211-y += plugin_mac80211.o 15 - plugin_sched_switch-y += plugin_sched_switch.o 16 - plugin_function-y += plugin_function.o 17 - plugin_xen-y += plugin_xen.o 18 - plugin_scsi-y += plugin_scsi.o 19 - plugin_cfg80211-y += plugin_cfg80211.o
+130
tools/lib/traceevent/Documentation/libtraceevent-event_print.txt
··· 1 + libtraceevent(3) 2 + ================ 3 + 4 + NAME 5 + ---- 6 + tep_print_event - Writes event information into a trace sequence. 7 + 8 + SYNOPSIS 9 + -------- 10 + [verse] 11 + -- 12 + *#include <event-parse.h>* 13 + *#include <trace-seq.h>* 14 + 15 + void *tep_print_event*(struct tep_handle pass:[*]_tep_, struct trace_seqpass:[*]_s_, struct tep_record pass:[*]_record_, const char pass:[*]_fmt_, _..._) 16 + -- 17 + 18 + DESCRIPTION 19 + ----------- 20 + 21 + The _tep_print_event()_ function parses the event information of the given 22 + _record_ and writes it into the trace sequence _s_, according to the format 23 + string _fmt_. The desired information is specified after the format string. 24 + The _fmt_ is printf-like format string, following arguments are supported: 25 + [verse] 26 + -- 27 + TEP_PRINT_PID, "%d" - PID of the event. 28 + TEP_PRINT_CPU, "%d" - Event CPU. 29 + TEP_PRINT_COMM, "%s" - Event command string. 30 + TEP_PRINT_NAME, "%s" - Event name. 31 + TEP_PRINT_LATENCY, "%s" - Latency of the event. It prints 4 or more 32 + fields - interrupt state, scheduling state, 33 + current context, and preemption count. 34 + Field 1 is the interrupt enabled state: 35 + d : Interrupts are disabled 36 + . : Interrupts are enabled 37 + X : The architecture does not support this 38 + information 39 + Field 2 is the "need resched" state. 40 + N : The task is set to call the scheduler when 41 + possible, as another higher priority task 42 + may need to be scheduled in. 43 + . : The task is not set to call the scheduler. 44 + Field 3 is the context state. 45 + . : Normal context 46 + s : Soft interrupt context 47 + h : Hard interrupt context 48 + H : Hard interrupt context which triggered 49 + during soft interrupt context. 50 + z : NMI context 51 + Z : NMI context which triggered during hard 52 + interrupt context 53 + Field 4 is the preemption count. 54 + . : The preempt count is zero. 
55 + On preemptible kernels (where the task can be scheduled 56 + out in arbitrary locations while in kernel context), the 57 + preempt count, when non zero, will prevent the kernel 58 + from scheduling out the current task. The preempt count 59 + number is displayed when it is not zero. 60 + Depending on the kernel, it may show other fields 61 + (lock depth, or migration disabled, which are unique to 62 + specialized kernels). 63 + TEP_PRINT_TIME, %d - event time stamp. A divisor and precision can be 64 + specified as part of this format string: 65 + "%precision.divisord". Example: 66 + "%3.1000d" - divide the time by 1000 and print the first 67 + 3 digits before the dot. Thus, the time stamp 68 + "123456000" will be printed as "123.456" 69 + TEP_PRINT_INFO, "%s" - event information. 70 + TEP_PRINT_INFO_RAW, "%s" - event information, in raw format. 71 + 72 + -- 73 + EXAMPLE 74 + ------- 75 + [source,c] 76 + -- 77 + #include <event-parse.h> 78 + #include <trace-seq.h> 79 + ... 80 + struct trace_seq seq; 81 + trace_seq_init(&seq); 82 + struct tep_handle *tep = tep_alloc(); 83 + ... 84 + void print_my_event(struct tep_record *record) 85 + { 86 + trace_seq_reset(&seq); 87 + tep_print_event(tep, s, record, "%16s-%-5d [%03d] %s %6.1000d %s %s", 88 + TEP_PRINT_COMM, TEP_PRINT_PID, TEP_PRINT_CPU, 89 + TEP_PRINT_LATENCY, TEP_PRINT_TIME, TEP_PRINT_NAME, 90 + TEP_PRINT_INFO); 91 + } 92 + ... 93 + -- 94 + 95 + FILES 96 + ----- 97 + [verse] 98 + -- 99 + *event-parse.h* 100 + Header file to include in order to have access to the library APIs. 101 + *trace-seq.h* 102 + Header file to include in order to have access to trace sequences related APIs. 103 + Trace sequences are used to allow a function to call several other functions 104 + to create a string of data to use. 105 + *-ltraceevent* 106 + Linker switch to add when building a program that uses the library. 
107 + -- 108 + 109 + SEE ALSO 110 + -------- 111 + _libtraceevent(3)_, _trace-cmd(1)_ 112 + 113 + AUTHOR 114 + ------ 115 + [verse] 116 + -- 117 + *Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*. 118 + *Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page. 119 + -- 120 + REPORTING BUGS 121 + -------------- 122 + Report bugs to <linux-trace-devel@vger.kernel.org> 123 + 124 + LICENSE 125 + ------- 126 + libtraceevent is Free Software licensed under the GNU LGPL 2.1 127 + 128 + RESOURCES 129 + --------- 130 + https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
+5 -5
tools/lib/traceevent/Documentation/libtraceevent-func_apis.txt
··· 59 59 60 60 The _tep_register_function()_ function registers a function name mapped to an 61 61 address and (optional) module. This mapping is used in case the function tracer 62 - or events have "%pF" or "%pS" parameter in its format string. It is common to 63 - pass in the kallsyms function names with their corresponding addresses with this 62 + or events have "%pS" parameter in its format string. It is common to pass in 63 + the kallsyms function names with their corresponding addresses with this 64 64 function. The _tep_ argument is the trace event parser context. The _name_ is 65 - the name of the function, the string is copied internally. The _addr_ is 66 - the start address of the function. The _mod_ is the kernel module 67 - the function may be in (NULL for none). 65 + the name of the function, the string is copied internally. The _addr_ is the 66 + start address of the function. The _mod_ is the kernel module the function may 67 + be in (NULL for none). 68 68 69 69 The _tep_register_print_string()_ function registers a string by the address 70 70 it was stored in the kernel. Some strings internal to the kernel with static
+4 -4
tools/lib/traceevent/Documentation/libtraceevent-handle.txt
··· 3 3 4 4 NAME 5 5 ---- 6 - tep_alloc, tep_free,tep_ref, tep_unref,tep_ref_get - Create, destroy, manage 6 + tep_alloc, tep_free,tep_ref, tep_unref,tep_get_ref - Create, destroy, manage 7 7 references of trace event parser context. 8 8 9 9 SYNOPSIS ··· 16 16 void *tep_free*(struct tep_handle pass:[*]_tep_); 17 17 void *tep_ref*(struct tep_handle pass:[*]_tep_); 18 18 void *tep_unref*(struct tep_handle pass:[*]_tep_); 19 - int *tep_ref_get*(struct tep_handle pass:[*]_tep_); 19 + int *tep_get_ref*(struct tep_handle pass:[*]_tep_); 20 20 -- 21 21 22 22 DESCRIPTION ··· 57 57 ... 58 58 struct tep_handle *tep = tep_alloc(); 59 59 ... 60 - int ref = tep_ref_get(tep); 60 + int ref = tep_get_ref(tep); 61 61 tep_ref(tep); 62 - if ( (ref+1) != tep_ref_get(tep)) { 62 + if ( (ref+1) != tep_get_ref(tep)) { 63 63 /* Something wrong happened, the counter is not incremented by 1 */ 64 64 } 65 65 tep_unref(tep);
+99
tools/lib/traceevent/Documentation/libtraceevent-plugins.txt
··· 1 + libtraceevent(3) 2 + ================ 3 + 4 + NAME 5 + ---- 6 + tep_load_plugins, tep_unload_plugins - Load / unload traceevent plugins. 7 + 8 + SYNOPSIS 9 + -------- 10 + [verse] 11 + -- 12 + *#include <event-parse.h>* 13 + 14 + struct tep_plugin_list pass:[*]*tep_load_plugins*(struct tep_handle pass:[*]_tep_); 15 + void *tep_unload_plugins*(struct tep_plugin_list pass:[*]_plugin_list_, struct tep_handle pass:[*]_tep_); 16 + -- 17 + 18 + DESCRIPTION 19 + ----------- 20 + The _tep_load_plugins()_ function loads all plugins, located in the plugin 21 + directories. The _tep_ argument is trace event parser context. 22 + The plugin directories are : 23 + [verse] 24 + -- 25 + - System's plugin directory, defined at the library compile time. It 26 + depends on the library installation prefix and usually is 27 + _(install_preffix)/lib/traceevent/plugins_ 28 + - Directory, defined by the environment variable _TRACEEVENT_PLUGIN_DIR_ 29 + - User's plugin directory, located at _~/.local/lib/traceevent/plugins_ 30 + -- 31 + Loading of plugins can be controlled by the _tep_flags_, using the 32 + _tep_set_flag()_ API: 33 + [verse] 34 + -- 35 + _TEP_DISABLE_SYS_PLUGINS_ - do not load plugins, located in 36 + the system's plugin directory. 37 + _TEP_DISABLE_PLUGINS_ - do not load any plugins. 38 + -- 39 + The _tep_set_flag()_ API needs to be called before _tep_load_plugins()_, if 40 + loading of all plugins is not the desired case. 41 + 42 + The _tep_unload_plugins()_ function unloads the plugins, previously loaded by 43 + _tep_load_plugins()_. The _tep_ argument is trace event parser context. The 44 + _plugin_list_ is the list of loaded plugins, returned by 45 + the _tep_load_plugins()_ function. 46 + 47 + RETURN VALUE 48 + ------------ 49 + The _tep_load_plugins()_ function returns a list of successfully loaded plugins, 50 + or NULL in case no plugins are loaded. 51 + 52 + EXAMPLE 53 + ------- 54 + [source,c] 55 + -- 56 + #include <event-parse.h> 57 + ... 
58 + struct tep_handle *tep = tep_alloc(); 59 + ... 60 + struct tep_plugin_list *plugins = tep_load_plugins(tep); 61 + if (plugins == NULL) { 62 + /* no plugins are loaded */ 63 + } 64 + ... 65 + tep_unload_plugins(plugins, tep); 66 + -- 67 + 68 + FILES 69 + ----- 70 + [verse] 71 + -- 72 + *event-parse.h* 73 + Header file to include in order to have access to the library APIs. 74 + *-ltraceevent* 75 + Linker switch to add when building a program that uses the library. 76 + -- 77 + 78 + SEE ALSO 79 + -------- 80 + _libtraceevent(3)_, _trace-cmd(1)_, _tep_set_flag(3)_ 81 + 82 + AUTHOR 83 + ------ 84 + [verse] 85 + -- 86 + *Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*. 87 + *Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page. 88 + -- 89 + REPORTING BUGS 90 + -------------- 91 + Report bugs to <linux-trace-devel@vger.kernel.org> 92 + 93 + LICENSE 94 + ------- 95 + libtraceevent is Free Software licensed under the GNU LGPL 2.1 96 + 97 + RESOURCES 98 + --------- 99 + https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
+2 -13
tools/lib/traceevent/Documentation/libtraceevent.txt
··· 16 16 void *tep_free*(struct tep_handle pass:[*]_tep_); 17 17 void *tep_ref*(struct tep_handle pass:[*]_tep_); 18 18 void *tep_unref*(struct tep_handle pass:[*]_tep_); 19 - int *tep_ref_get*(struct tep_handle pass:[*]_tep_); 19 + int *tep_get_ref*(struct tep_handle pass:[*]_tep_); 20 20 void *tep_set_flag*(struct tep_handle pass:[*]_tep_, enum tep_flag _flag_); 21 21 void *tep_clear_flag*(struct tep_handle pass:[*]_tep_, enum tep_flag _flag_); 22 22 bool *tep_test_flag*(struct tep_handle pass:[*]_tep_, enum tep_flag _flags_); ··· 26 26 void *tep_set_long_size*(struct tep_handle pass:[*]_tep_, int _long_size_); 27 27 int *tep_get_page_size*(struct tep_handle pass:[*]_tep_); 28 28 void *tep_set_page_size*(struct tep_handle pass:[*]_tep_, int _page_size_); 29 - bool *tep_is_latency_format*(struct tep_handle pass:[*]_tep_); 30 - void *tep_set_latency_format*(struct tep_handle pass:[*]_tep_, int _lat_); 31 29 int *tep_get_header_page_size*(struct tep_handle pass:[*]_tep_); 32 30 int *tep_get_header_timestamp_size*(struct tep_handle pass:[*]_tep_); 33 31 bool *tep_is_old_format*(struct tep_handle pass:[*]_tep_); 34 32 int *tep_strerror*(struct tep_handle pass:[*]_tep_, enum tep_errno _errnum_, char pass:[*]_buf_, size_t _buflen_); 35 33 36 34 Register / unregister APIs: 37 - int *tep_register_trace_clock*(struct tep_handle pass:[*]_tep_, const char pass:[*]_trace_clock_); 38 35 int *tep_register_function*(struct tep_handle pass:[*]_tep_, char pass:[*]_name_, unsigned long long _addr_, char pass:[*]_mod_); 39 36 int *tep_register_event_handler*(struct tep_handle pass:[*]_tep_, int _id_, const char pass:[*]_sys_name_, const char pass:[*]_event_name_, tep_event_handler_func _func_, void pass:[*]_context_); 40 37 int *tep_unregister_event_handler*(struct tep_handle pass:[*]tep, int id, const char pass:[*]sys_name, const char pass:[*]event_name, tep_event_handler_func func, void pass:[*]_context_); ··· 54 57 int *tep_get_events_count*(struct tep_handle pass:[*]_tep_); 55 
58 struct tep_event pass:[*]pass:[*]*tep_list_events*(struct tep_handle pass:[*]_tep_, enum tep_event_sort_type _sort_type_); 56 59 struct tep_event pass:[*]pass:[*]*tep_list_events_copy*(struct tep_handle pass:[*]_tep_, enum tep_event_sort_type _sort_type_); 57 - 58 - Event printing: 59 - void *tep_print_event*(struct tep_handle pass:[*]_tep_, struct trace_seq pass:[*]_s_, struct tep_record pass:[*]_record_, bool _use_trace_clock_); 60 - void *tep_print_event_data*(struct tep_handle pass:[*]_tep_, struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, struct tep_record pass:[*]_record_); 61 - void *tep_event_info*(struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, struct tep_record pass:[*]_record_); 62 - void *tep_print_event_task*(struct tep_handle pass:[*]_tep_, struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, struct tep_record pass:[*]_record_); 63 - void *tep_print_event_time*(struct tep_handle pass:[*]_tep_, struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, struct tep_record pass:[*]record, bool _use_trace_clock_); 64 - void *tep_set_print_raw*(struct tep_handle pass:[*]_tep_, int _print_raw_); 60 + void *tep_print_event*(struct tep_handle pass:[*]_tep_, struct trace_seq pass:[*]_s_, struct tep_record pass:[*]_record_, const char pass:[*]_fmt_, _..._); 65 61 66 62 Event finding: 67 63 struct tep_event pass:[*]*tep_find_event*(struct tep_handle pass:[*]_tep_, int _id_); ··· 106 116 int *tep_filter_compare*(struct tep_event_filter pass:[*]_filter1_, struct tep_event_filter pass:[*]_filter2_); 107 117 108 118 Parsing various data from the records: 109 - void *tep_data_latency_format*(struct tep_handle pass:[*]_tep_, struct trace_seq pass:[*]_s_, struct tep_record pass:[*]_record_); 110 119 int *tep_data_type*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_rec_); 111 120 int *tep_data_pid*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_rec_); 112 121 int *tep_data_preempt_count*(struct 
tep_handle pass:[*]_tep_, struct tep_record pass:[*]_rec_);
+16 -78
tools/lib/traceevent/Makefile
··· 58 58 export DESTDIR DESTDIR_SQ 59 59 export EVENT_PARSE_VERSION 60 60 61 - set_plugin_dir := 1 62 - 63 - # Set plugin_dir to preffered global plugin location 64 - # If we install under $HOME directory we go under 65 - # $(HOME)/.local/lib/traceevent/plugins 66 - # 67 - # We dont set PLUGIN_DIR in case we install under $HOME 68 - # directory, because by default the code looks under: 69 - # $(HOME)/.local/lib/traceevent/plugins by default. 70 - # 71 - ifeq ($(plugin_dir),) 72 - ifeq ($(prefix),$(HOME)) 73 - override plugin_dir = $(HOME)/.local/lib/traceevent/plugins 74 - set_plugin_dir := 0 75 - else 76 - override plugin_dir = $(libdir)/traceevent/plugins 77 - endif 78 - endif 79 - 80 - ifeq ($(set_plugin_dir),1) 81 - PLUGIN_DIR = -DPLUGIN_DIR="$(plugin_dir)" 82 - PLUGIN_DIR_SQ = '$(subst ','\'',$(PLUGIN_DIR))' 83 - endif 84 - 85 61 include ../../scripts/Makefile.include 86 62 87 63 # copy a bit from Linux kbuild ··· 81 105 # Shell quotes 82 106 libdir_SQ = $(subst ','\'',$(libdir)) 83 107 libdir_relative_SQ = $(subst ','\'',$(libdir_relative)) 84 - plugin_dir_SQ = $(subst ','\'',$(plugin_dir)) 85 108 86 109 CONFIG_INCLUDES = 87 110 CONFIG_LIBS = ··· 126 151 export srctree OUTPUT CC LD CFLAGS V 127 152 build := -f $(srctree)/tools/build/Makefile.build dir=. 
obj 128 153 129 - PLUGINS = plugin_jbd2.so 130 - PLUGINS += plugin_hrtimer.so 131 - PLUGINS += plugin_kmem.so 132 - PLUGINS += plugin_kvm.so 133 - PLUGINS += plugin_mac80211.so 134 - PLUGINS += plugin_sched_switch.so 135 - PLUGINS += plugin_function.so 136 - PLUGINS += plugin_xen.so 137 - PLUGINS += plugin_scsi.so 138 - PLUGINS += plugin_cfg80211.so 139 - 140 - PLUGINS := $(addprefix $(OUTPUT),$(PLUGINS)) 141 - PLUGINS_IN := $(PLUGINS:.so=-in.o) 142 - 143 154 TE_IN := $(OUTPUT)libtraceevent-in.o 144 155 LIB_TARGET := $(addprefix $(OUTPUT),$(LIB_TARGET)) 145 - DYNAMIC_LIST_FILE := $(OUTPUT)libtraceevent-dynamic-list 146 156 147 - CMD_TARGETS = $(LIB_TARGET) $(PLUGINS) $(DYNAMIC_LIST_FILE) 157 + CMD_TARGETS = $(LIB_TARGET) 148 158 149 159 TARGETS = $(CMD_TARGETS) 150 160 151 - all: all_cmd 161 + all: all_cmd plugins 152 162 153 163 all_cmd: $(CMD_TARGETS) 154 164 ··· 147 187 148 188 $(OUTPUT)libtraceevent.a: $(TE_IN) 149 189 $(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^ 150 - 151 - $(OUTPUT)libtraceevent-dynamic-list: $(PLUGINS) 152 - $(QUIET_GEN)$(call do_generate_dynamic_list_file, $(PLUGINS), $@) 153 - 154 - plugins: $(PLUGINS) 155 - 156 - __plugin_obj = $(notdir $@) 157 - plugin_obj = $(__plugin_obj:-in.o=) 158 - 159 - $(PLUGINS_IN): force 160 - $(Q)$(MAKE) $(build)=$(plugin_obj) 161 190 162 191 $(OUTPUT)%.so: $(OUTPUT)%-in.o 163 192 $(QUIET_LINK)$(CC) $(CFLAGS) -shared $(LDFLAGS) -nostartfiles -o $@ $^ ··· 207 258 $(INSTALL) $(if $3,-m $3,) $1 '$(DESTDIR_SQ)$2' 208 259 endef 209 260 210 - define do_install_plugins 211 - for plugin in $1; do \ 212 - $(call do_install,$$plugin,$(plugin_dir_SQ)); \ 213 - done 214 - endef 215 - 216 - define do_generate_dynamic_list_file 217 - symbol_type=`$(NM) -u -D $1 | awk 'NF>1 {print $$1}' | \ 218 - xargs echo "U w W" | tr 'w ' 'W\n' | sort -u | xargs echo`;\ 219 - if [ "$$symbol_type" = "U W" ];then \ 220 - (echo '{'; \ 221 - $(NM) -u -D $1 | awk 'NF>1 {print "\t"$$2";"}' | sort -u;\ 222 - echo '};'; \ 223 - ) > $2; \ 224 - else \ 
225 - (echo Either missing one of [$1] or bad version of $(NM)) 1>&2;\ 226 - fi 227 - endef 228 - 229 261 PKG_CONFIG_FILE = libtraceevent.pc 230 262 define do_install_pkgconfig_file 231 263 if [ -n "${pkgconfig_dir}" ]; then \ ··· 226 296 $(call do_install_mkdir,$(libdir_SQ)); \ 227 297 cp -fpR $(LIB_INSTALL) $(DESTDIR)$(libdir_SQ) 228 298 229 - install_plugins: $(PLUGINS) 230 - $(call QUIET_INSTALL, trace_plugins) \ 231 - $(call do_install_plugins, $(PLUGINS)) 232 - 233 299 install_pkgconfig: 234 300 $(call QUIET_INSTALL, $(PKG_CONFIG_FILE)) \ 235 301 $(call do_install_pkgconfig_file,$(prefix)) ··· 239 313 240 314 install: install_lib 241 315 242 - clean: 316 + clean: clean_plugins 243 317 $(call QUIET_CLEAN, libtraceevent) \ 244 318 $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d .*.cmd; \ 245 319 $(RM) TRACEEVENT-CFLAGS tags TAGS; \ ··· 277 351 @echo ' doc-install - install the man pages' 278 352 @echo ' doc-uninstall - uninstall the man pages' 279 353 @echo'' 280 - PHONY += force plugins 354 + 355 + PHONY += plugins 356 + plugins: 357 + $(call descend,plugins) 358 + 359 + PHONY += install_plugins 360 + install_plugins: 361 + $(call descend,plugins,install) 362 + 363 + PHONY += clean_plugins 364 + clean_plugins: 365 + $(call descend,plugins,clean) 366 + 281 367 force: 282 368 283 369 # Declare the contents of the .PHONY variable as phony. We keep that
+17 -5
tools/lib/traceevent/event-parse.c
··· 4367 4367 switch (*ptr) { 4368 4368 case 's': 4369 4369 case 'S': 4370 - case 'f': 4371 - case 'F': 4372 4370 case 'x': 4373 4371 break; 4372 + case 'f': 4373 + case 'F': 4374 + /* 4375 + * Pre-5.5 kernels use %pf and 4376 + * %pF for printing symbols 4377 + * while kernels since 5.5 use 4378 + * %pfw for fwnodes. So check 4379 + * %p[fF] isn't followed by 'w'. 4380 + */ 4381 + if (ptr[1] != 'w') 4382 + break; 4383 + /* fall through */ 4374 4384 default: 4375 4385 /* 4376 4386 * Older kernels do not process ··· 4497 4487 4498 4488 printk = find_printk(tep, addr); 4499 4489 if (!printk) { 4500 - if (asprintf(&format, "%%pf: (NO FORMAT FOUND at %llx)\n", addr) < 0) 4490 + if (asprintf(&format, "%%ps: (NO FORMAT FOUND at %llx)\n", addr) < 0) 4501 4491 return NULL; 4502 4492 return format; 4503 4493 } 4504 4494 4505 - if (asprintf(&format, "%s: %s", "%pf", printk->printk) < 0) 4495 + if (asprintf(&format, "%s: %s", "%ps", printk->printk) < 0) 4506 4496 return NULL; 4507 4497 4508 4498 return format; ··· 5527 5517 if (divstr && isdigit(*(divstr + 1))) 5528 5518 div = atoi(divstr + 1); 5529 5519 time = record->ts; 5530 - if (div) 5520 + if (div) { 5521 + time += div / 2; 5531 5522 time /= div; 5523 + } 5532 5524 pr = prec; 5533 5525 while (pr--) 5534 5526 p10 *= 10;
+2
tools/lib/traceevent/event-parse.h
··· 441 441 unsigned long long addr); 442 442 bool tep_is_pid_registered(struct tep_handle *tep, int pid); 443 443 444 + struct tep_event *tep_get_event(struct tep_handle *tep, int index); 445 + 444 446 #define TEP_PRINT_INFO "INFO" 445 447 #define TEP_PRINT_INFO_RAW "INFO_RAW" 446 448 #define TEP_PRINT_COMM "COMM"
tools/lib/traceevent/plugin_cfg80211.c tools/lib/traceevent/plugins/plugin_cfg80211.c
tools/lib/traceevent/plugin_function.c tools/lib/traceevent/plugins/plugin_function.c
tools/lib/traceevent/plugin_hrtimer.c tools/lib/traceevent/plugins/plugin_hrtimer.c
tools/lib/traceevent/plugin_jbd2.c tools/lib/traceevent/plugins/plugin_jbd2.c
tools/lib/traceevent/plugin_kmem.c tools/lib/traceevent/plugins/plugin_kmem.c
tools/lib/traceevent/plugin_kvm.c tools/lib/traceevent/plugins/plugin_kvm.c
tools/lib/traceevent/plugin_mac80211.c tools/lib/traceevent/plugins/plugin_mac80211.c
tools/lib/traceevent/plugin_sched_switch.c tools/lib/traceevent/plugins/plugin_sched_switch.c
tools/lib/traceevent/plugin_scsi.c tools/lib/traceevent/plugins/plugin_scsi.c
tools/lib/traceevent/plugin_xen.c tools/lib/traceevent/plugins/plugin_xen.c
+10
tools/lib/traceevent/plugins/Build
··· 1 + plugin_jbd2-y += plugin_jbd2.o 2 + plugin_hrtimer-y += plugin_hrtimer.o 3 + plugin_kmem-y += plugin_kmem.o 4 + plugin_kvm-y += plugin_kvm.o 5 + plugin_mac80211-y += plugin_mac80211.o 6 + plugin_sched_switch-y += plugin_sched_switch.o 7 + plugin_function-y += plugin_function.o 8 + plugin_xen-y += plugin_xen.o 9 + plugin_scsi-y += plugin_scsi.o 10 + plugin_cfg80211-y += plugin_cfg80211.o
+222
tools/lib/traceevent/plugins/Makefile
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + 3 + #MAKEFLAGS += --no-print-directory 4 + 5 + 6 + # Makefiles suck: This macro sets a default value of $(2) for the 7 + # variable named by $(1), unless the variable has been set by 8 + # environment or command line. This is necessary for CC and AR 9 + # because make sets default values, so the simpler ?= approach 10 + # won't work as expected. 11 + define allow-override 12 + $(if $(or $(findstring environment,$(origin $(1))),\ 13 + $(findstring command line,$(origin $(1)))),,\ 14 + $(eval $(1) = $(2))) 15 + endef 16 + 17 + # Allow setting CC and AR, or setting CROSS_COMPILE as a prefix. 18 + $(call allow-override,CC,$(CROSS_COMPILE)gcc) 19 + $(call allow-override,AR,$(CROSS_COMPILE)ar) 20 + $(call allow-override,NM,$(CROSS_COMPILE)nm) 21 + $(call allow-override,PKG_CONFIG,pkg-config) 22 + 23 + EXT = -std=gnu99 24 + INSTALL = install 25 + 26 + # Use DESTDIR for installing into a different root directory. 27 + # This is useful for building a package. The program will be 28 + # installed in this directory as if it was the root directory. 29 + # Then the build tool can move it later. 30 + DESTDIR ?= 31 + DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))' 32 + 33 + LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1) 34 + ifeq ($(LP64), 1) 35 + libdir_relative = lib64 36 + else 37 + libdir_relative = lib 38 + endif 39 + 40 + prefix ?= /usr/local 41 + libdir = $(prefix)/$(libdir_relative) 42 + 43 + set_plugin_dir := 1 44 + 45 + # Set plugin_dir to preffered global plugin location 46 + # If we install under $HOME directory we go under 47 + # $(HOME)/.local/lib/traceevent/plugins 48 + # 49 + # We dont set PLUGIN_DIR in case we install under $HOME 50 + # directory, because by default the code looks under: 51 + # $(HOME)/.local/lib/traceevent/plugins by default. 
52 + # 53 + ifeq ($(plugin_dir),) 54 + ifeq ($(prefix),$(HOME)) 55 + override plugin_dir = $(HOME)/.local/lib/traceevent/plugins 56 + set_plugin_dir := 0 57 + else 58 + override plugin_dir = $(libdir)/traceevent/plugins 59 + endif 60 + endif 61 + 62 + ifeq ($(set_plugin_dir),1) 63 + PLUGIN_DIR = -DPLUGIN_DIR="$(plugin_dir)" 64 + PLUGIN_DIR_SQ = '$(subst ','\'',$(PLUGIN_DIR))' 65 + endif 66 + 67 + include ../../../scripts/Makefile.include 68 + 69 + # copy a bit from Linux kbuild 70 + 71 + ifeq ("$(origin V)", "command line") 72 + VERBOSE = $(V) 73 + endif 74 + ifndef VERBOSE 75 + VERBOSE = 0 76 + endif 77 + 78 + ifeq ($(srctree),) 79 + srctree := $(patsubst %/,%,$(dir $(CURDIR))) 80 + srctree := $(patsubst %/,%,$(dir $(srctree))) 81 + srctree := $(patsubst %/,%,$(dir $(srctree))) 82 + srctree := $(patsubst %/,%,$(dir $(srctree))) 83 + #$(info Determined 'srctree' to be $(srctree)) 84 + endif 85 + 86 + export prefix libdir src obj 87 + 88 + # Shell quotes 89 + plugin_dir_SQ = $(subst ','\'',$(plugin_dir)) 90 + 91 + CONFIG_INCLUDES = 92 + CONFIG_LIBS = 93 + CONFIG_FLAGS = 94 + 95 + OBJ = $@ 96 + N = 97 + 98 + INCLUDES = -I. -I.. -I $(srctree)/tools/include $(CONFIG_INCLUDES) 99 + 100 + # Set compile option CFLAGS 101 + ifdef EXTRA_CFLAGS 102 + CFLAGS := $(EXTRA_CFLAGS) 103 + else 104 + CFLAGS := -g -Wall 105 + endif 106 + 107 + # Append required CFLAGS 108 + override CFLAGS += -fPIC 109 + override CFLAGS += $(CONFIG_FLAGS) $(INCLUDES) $(PLUGIN_DIR_SQ) 110 + override CFLAGS += $(udis86-flags) -D_GNU_SOURCE 111 + 112 + ifeq ($(VERBOSE),1) 113 + Q = 114 + else 115 + Q = @ 116 + endif 117 + 118 + # Disable command line variables (CFLAGS) override from top 119 + # level Makefile (perf), otherwise build Makefile will get 120 + # the same command line setup. 121 + MAKEOVERRIDES= 122 + 123 + export srctree OUTPUT CC LD CFLAGS V 124 + 125 + build := -f $(srctree)/tools/build/Makefile.build dir=. 
obj 126 + 127 + DYNAMIC_LIST_FILE := $(OUTPUT)libtraceevent-dynamic-list 128 + 129 + PLUGINS = plugin_jbd2.so 130 + PLUGINS += plugin_hrtimer.so 131 + PLUGINS += plugin_kmem.so 132 + PLUGINS += plugin_kvm.so 133 + PLUGINS += plugin_mac80211.so 134 + PLUGINS += plugin_sched_switch.so 135 + PLUGINS += plugin_function.so 136 + PLUGINS += plugin_xen.so 137 + PLUGINS += plugin_scsi.so 138 + PLUGINS += plugin_cfg80211.so 139 + 140 + PLUGINS := $(addprefix $(OUTPUT),$(PLUGINS)) 141 + PLUGINS_IN := $(PLUGINS:.so=-in.o) 142 + 143 + plugins: $(PLUGINS) $(DYNAMIC_LIST_FILE) 144 + 145 + __plugin_obj = $(notdir $@) 146 + plugin_obj = $(__plugin_obj:-in.o=) 147 + 148 + $(PLUGINS_IN): force 149 + $(Q)$(MAKE) $(build)=$(plugin_obj) 150 + 151 + $(OUTPUT)libtraceevent-dynamic-list: $(PLUGINS) 152 + $(QUIET_GEN)$(call do_generate_dynamic_list_file, $(PLUGINS), $@) 153 + 154 + $(OUTPUT)%.so: $(OUTPUT)%-in.o 155 + $(QUIET_LINK)$(CC) $(CFLAGS) -shared $(LDFLAGS) -nostartfiles -o $@ $^ 156 + 157 + define update_dir 158 + (echo $1 > $@.tmp; \ 159 + if [ -r $@ ] && cmp -s $@ $@.tmp; then \ 160 + rm -f $@.tmp; \ 161 + else \ 162 + echo ' UPDATE $@'; \ 163 + mv -f $@.tmp $@; \ 164 + fi); 165 + endef 166 + 167 + tags: force 168 + $(RM) tags 169 + find . -name '*.[ch]' | xargs ctags --extra=+f --c-kinds=+px \ 170 + --regex-c++='/_PE\(([^,)]*).*/TEP_ERRNO__\1/' 171 + 172 + TAGS: force 173 + $(RM) TAGS 174 + find . -name '*.[ch]' | xargs etags \ 175 + --regex='/_PE(\([^,)]*\).*/TEP_ERRNO__\1/' 176 + 177 + define do_install_mkdir 178 + if [ ! 
-d '$(DESTDIR_SQ)$1' ]; then \ 179 + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$1'; \ 180 + fi 181 + endef 182 + 183 + define do_install 184 + $(call do_install_mkdir,$2); \ 185 + $(INSTALL) $(if $3,-m $3,) $1 '$(DESTDIR_SQ)$2' 186 + endef 187 + 188 + define do_install_plugins 189 + for plugin in $1; do \ 190 + $(call do_install,$$plugin,$(plugin_dir_SQ)); \ 191 + done 192 + endef 193 + 194 + define do_generate_dynamic_list_file 195 + symbol_type=`$(NM) -u -D $1 | awk 'NF>1 {print $$1}' | \ 196 + xargs echo "U w W" | tr 'w ' 'W\n' | sort -u | xargs echo`;\ 197 + if [ "$$symbol_type" = "U W" ];then \ 198 + (echo '{'; \ 199 + $(NM) -u -D $1 | awk 'NF>1 {print "\t"$$2";"}' | sort -u;\ 200 + echo '};'; \ 201 + ) > $2; \ 202 + else \ 203 + (echo Either missing one of [$1] or bad version of $(NM)) 1>&2;\ 204 + fi 205 + endef 206 + 207 + install: $(PLUGINS) 208 + $(call QUIET_INSTALL, trace_plugins) \ 209 + $(call do_install_plugins, $(PLUGINS)) 210 + 211 + clean: 212 + $(call QUIET_CLEAN, trace_plugins) \ 213 + $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d .*.cmd; \ 214 + $(RM) $(OUTPUT)libtraceevent-dynamic-list \ 215 + $(RM) TRACEEVENT-CFLAGS tags TAGS; 216 + 217 + PHONY += force plugins 218 + force: 219 + 220 + # Declare the contents of the .PHONY variable as phony. We keep that 221 + # information in a variable so we can use it in if_changed and friends. 222 + .PHONY: $(PHONY)
+1 -1
tools/perf/Makefile.config
··· 924 924 JDIR=$(shell /usr/sbin/update-java-alternatives -l | head -1 | awk '{print $$3}') 925 925 else 926 926 ifneq (,$(wildcard /usr/sbin/alternatives)) 927 - JDIR=$(shell /usr/sbin/alternatives --display java | tail -1 | cut -d' ' -f 5 | sed 's%/jre/bin/java.%%g') 927 + JDIR=$(shell /usr/sbin/alternatives --display java | tail -1 | cut -d' ' -f 5 | sed -e 's%/jre/bin/java.%%g' -e 's%/bin/java.%%g') 928 928 endif 929 929 endif 930 930 ifndef JDIR
+3 -3
tools/perf/Makefile.perf
··· 292 292 LIBTRACEEVENT = $(TE_PATH)libtraceevent.a 293 293 export LIBTRACEEVENT 294 294 295 - LIBTRACEEVENT_DYNAMIC_LIST = $(TE_PATH)libtraceevent-dynamic-list 295 + LIBTRACEEVENT_DYNAMIC_LIST = $(TE_PATH)plugins/libtraceevent-dynamic-list 296 296 297 297 # 298 298 # The static build has no dynsym table, so this does not work for ··· 567 567 # Create python binding output directory if not already present 568 568 _dummy := $(shell [ -d '$(OUTPUT)python' ] || mkdir -p '$(OUTPUT)python') 569 569 570 - $(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) $(LIBTRACEEVENT_DYNAMIC_LIST) 570 + $(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) $(LIBTRACEEVENT_DYNAMIC_LIST) $(LIBPERF) 571 571 $(QUIET_GEN)LDSHARED="$(CC) -pthread -shared" \ 572 572 CFLAGS='$(CFLAGS)' LDFLAGS='$(LDFLAGS) $(LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS)' \ 573 573 $(PYTHON_WORD) util/setup.py \ ··· 737 737 $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) plugins 738 738 739 739 $(LIBTRACEEVENT_DYNAMIC_LIST): libtraceevent_plugins 740 - $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) $(OUTPUT)libtraceevent-dynamic-list 740 + $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) $(OUTPUT)plugins/libtraceevent-dynamic-list 741 741 742 742 $(LIBTRACEEVENT)-clean: 743 743 $(call QUIET_CLEAN, libtraceevent)
+4 -3
tools/perf/arch/arm/util/cs-etm.c
··· 23 23 #include "../../util/event.h" 24 24 #include "../../util/evlist.h" 25 25 #include "../../util/evsel.h" 26 + #include "../../util/evsel_config.h" 26 27 #include "../../util/pmu.h" 27 28 #include "../../util/cs-etm.h" 28 - #include "../../util/util.h" 29 + #include <internal/lib.h> // page_size 29 30 #include "../../util/session.h" 30 31 31 32 #include <errno.h> ··· 417 416 if (err) 418 417 goto out; 419 418 420 - tracking_evsel = perf_evlist__last(evlist); 419 + tracking_evsel = evlist__last(evlist); 421 420 perf_evlist__set_tracking_event(evlist, tracking_evsel); 422 421 423 422 tracking_evsel->core.attr.freq = 0; ··· 649 648 if (priv_size != cs_etm_info_priv_size(itr, session->evlist)) 650 649 return -EINVAL; 651 650 652 - if (!session->evlist->nr_mmaps) 651 + if (!session->evlist->core.nr_mmaps) 653 652 return -EINVAL; 654 653 655 654 /* If the cpu_map is empty all online CPUs are involved */
+3 -3
tools/perf/arch/arm64/util/arm-spe.c
··· 16 16 #include "../../util/evsel.h" 17 17 #include "../../util/evlist.h" 18 18 #include "../../util/session.h" 19 - #include "../../util/util.h" 19 + #include <internal/lib.h> // page_size 20 20 #include "../../util/pmu.h" 21 21 #include "../../util/debug.h" 22 22 #include "../../util/auxtrace.h" ··· 51 51 if (priv_size != ARM_SPE_AUXTRACE_PRIV_SIZE) 52 52 return -EINVAL; 53 53 54 - if (!session->evlist->nr_mmaps) 54 + if (!session->evlist->core.nr_mmaps) 55 55 return -EINVAL; 56 56 57 57 auxtrace_info->type = PERF_AUXTRACE_ARM_SPE; ··· 129 129 if (err) 130 130 return err; 131 131 132 - tracking_evsel = perf_evlist__last(evlist); 132 + tracking_evsel = evlist__last(evlist); 133 133 perf_evlist__set_tracking_event(evlist, tracking_evsel); 134 134 135 135 tracking_evsel->core.attr.freq = 0;
-1
tools/perf/arch/arm64/util/dwarf-regs.c
··· 11 11 #include <dwarf-regs.h> 12 12 #include <linux/ptrace.h> /* for struct user_pt_regs */ 13 13 #include <linux/stringify.h> 14 - #include "util.h" 15 14 16 15 struct pt_regs_dwarfnum { 17 16 const char *name;
+3 -1
tools/perf/arch/arm64/util/header.c
··· 1 1 #include <stdio.h> 2 2 #include <stdlib.h> 3 + #include <perf/cpumap.h> 4 + #include <internal/cpumap.h> 3 5 #include <api/fs/fs.h> 4 6 #include "debug.h" 5 7 #include "header.h" ··· 31 29 32 30 /* read midr from list of cpus mapped to this pmu */ 33 31 cpus = perf_cpu_map__get(pmu->cpus); 34 - for (cpu = 0; cpu < cpus->nr; cpu++) { 32 + for (cpu = 0; cpu < perf_cpu_map__nr(cpus); cpu++) { 35 33 scnprintf(path, PATH_MAX, "%s/devices/system/cpu/cpu%d"MIDR, 36 34 sysfs, cpus->map[cpu]); 37 35
+1 -1
tools/perf/arch/arm64/util/unwind-libunwind.c
··· 5 5 #include <libunwind.h> 6 6 #include "perf_regs.h" 7 7 #include "../../util/unwind.h" 8 - #include "../../util/debug.h" 9 8 #endif 9 + #include "../../util/debug.h" 10 10 11 11 int LIBUNWIND__ARCH_REG_ID(int regnum) 12 12 {
-1
tools/perf/arch/powerpc/util/dwarf-regs.c
··· 12 12 #include <linux/ptrace.h> 13 13 #include <linux/kernel.h> 14 14 #include <linux/stringify.h> 15 - #include "util.h" 16 15 17 16 struct pt_regs_dwarfnum { 18 17 const char *name;
-1
tools/perf/arch/powerpc/util/header.c
··· 6 6 #include <string.h> 7 7 #include <linux/stringify.h> 8 8 #include "header.h" 9 - #include "util.h" 10 9 11 10 #define mfspr(rn) ({unsigned long rval; \ 12 11 asm volatile("mfspr %0," __stringify(rn) \
+45
tools/perf/arch/powerpc/util/kvm-stat.c
··· 5 5 #include "util/debug.h" 6 6 #include "util/evsel.h" 7 7 #include "util/evlist.h" 8 + #include "util/pmu.h" 8 9 9 10 #include "book3s_hv_exits.h" 10 11 #include "book3s_hcalls.h" 12 + #include <subcmd/parse-options.h> 11 13 12 14 #define NR_TPS 4 13 15 ··· 173 171 } 174 172 175 173 return ret; 174 + } 175 + 176 + /* 177 + * Incase of powerpc architecture, pmu registers are programmable 178 + * by guest kernel. So monitoring guest via host may not provide 179 + * valid samples with default 'cycles' event. It is better to use 180 + * 'trace_imc/trace_cycles' event for guest profiling, since it 181 + * can track the guest instruction pointer in the trace-record. 182 + * 183 + * Function to parse the arguments and return appropriate values. 184 + */ 185 + int kvm_add_default_arch_event(int *argc, const char **argv) 186 + { 187 + const char **tmp; 188 + bool event = false; 189 + int i, j = *argc; 190 + 191 + const struct option event_options[] = { 192 + OPT_BOOLEAN('e', "event", &event, NULL), 193 + OPT_END() 194 + }; 195 + 196 + tmp = calloc(j + 1, sizeof(char *)); 197 + if (!tmp) 198 + return -EINVAL; 199 + 200 + for (i = 0; i < j; i++) 201 + tmp[i] = argv[i]; 202 + 203 + parse_options(j, tmp, event_options, NULL, PARSE_OPT_KEEP_UNKNOWN); 204 + if (!event) { 205 + if (pmu_have_event("trace_imc", "trace_cycles")) { 206 + argv[j++] = strdup("-e"); 207 + argv[j++] = strdup("trace_imc/trace_cycles/"); 208 + *argc += 2; 209 + } else { 210 + free(tmp); 211 + return -EINVAL; 212 + } 213 + } 214 + 215 + free(tmp); 216 + return 0; 176 217 }
+1
tools/perf/arch/powerpc/util/skip-callchain-idx.c
··· 13 13 #include "util/callchain.h" 14 14 #include "util/debug.h" 15 15 #include "util/dso.h" 16 + #include "util/event.h" // struct ip_callchain 16 17 #include "util/map.h" 17 18 #include "util/symbol.h" 18 19
-1
tools/perf/arch/powerpc/util/sym-handling.c
··· 4 4 * Copyright (C) 2015 Naveen N. Rao, IBM Corporation 5 5 */ 6 6 7 - #include "debug.h" 8 7 #include "dso.h" 9 8 #include "symbol.h" 10 9 #include "map.h"
+1
tools/perf/arch/s390/Makefile
··· 4 4 endif 5 5 HAVE_KVM_STAT_SUPPORT := 1 6 6 PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1 7 + PERF_HAVE_JITDUMP := 1 7 8 8 9 # 9 10 # Syscall table generation for perf
+1
tools/perf/arch/s390/util/auxtrace.c
··· 1 1 #include <stdbool.h> 2 + #include <stdlib.h> 2 3 #include <linux/kernel.h> 3 4 #include <linux/types.h> 4 5 #include <linux/bitops.h>
+1 -1
tools/perf/arch/s390/util/machine.c
··· 2 2 #include <unistd.h> 3 3 #include <stdio.h> 4 4 #include <string.h> 5 - #include "util.h" 5 + #include <internal/lib.h> // page_size 6 6 #include "machine.h" 7 7 #include "api/fs/fs.h" 8 8 #include "debug.h"
+3 -3
tools/perf/arch/x86/tests/intel-cqm.c
··· 5 5 #include "evlist.h" 6 6 #include "evsel.h" 7 7 #include "arch-tests.h" 8 - #include "util.h" 8 + #include <internal/lib.h> // page_size 9 9 10 10 #include <signal.h> 11 11 #include <sys/mman.h> ··· 63 63 goto out; 64 64 } 65 65 66 - evsel = perf_evlist__first(evlist); 66 + evsel = evlist__first(evlist); 67 67 if (!evsel) { 68 - pr_debug("perf_evlist__first failed\n"); 68 + pr_debug("evlist__first failed\n"); 69 69 goto out; 70 70 } 71 71
+6 -6
tools/perf/arch/x86/tests/perf-time-to-tsc.c
··· 15 15 #include "evlist.h" 16 16 #include "evsel.h" 17 17 #include "thread_map.h" 18 - #include "cpumap.h" 19 18 #include "record.h" 20 19 #include "tsc.h" 20 + #include "util/mmap.h" 21 21 #include "tests/tests.h" 22 22 23 23 #include "arch-tests.h" ··· 66 66 union perf_event *event; 67 67 u64 test_tsc, comm1_tsc, comm2_tsc; 68 68 u64 test_time, comm1_time = 0, comm2_time = 0; 69 - struct perf_mmap *md; 69 + struct mmap *md; 70 70 71 71 threads = thread_map__new(-1, getpid(), UINT_MAX); 72 72 CHECK_NOT_NULL__(threads); ··· 83 83 84 84 perf_evlist__config(evlist, &opts, NULL); 85 85 86 - evsel = perf_evlist__first(evlist); 86 + evsel = evlist__first(evlist); 87 87 88 88 evsel->core.attr.comm = 1; 89 89 evsel->core.attr.disabled = 1; ··· 91 91 92 92 CHECK__(evlist__open(evlist)); 93 93 94 - CHECK__(perf_evlist__mmap(evlist, UINT_MAX)); 94 + CHECK__(evlist__mmap(evlist, UINT_MAX)); 95 95 96 - pc = evlist->mmap[0].base; 96 + pc = evlist->mmap[0].core.base; 97 97 ret = perf_read_tsc_conversion(pc, &tc); 98 98 if (ret) { 99 99 if (ret == -EOPNOTSUPP) { ··· 115 115 116 116 evlist__disable(evlist); 117 117 118 - for (i = 0; i < evlist->nr_mmaps; i++) { 118 + for (i = 0; i < evlist->core.nr_mmaps; i++) { 119 119 md = &evlist->mmap[i]; 120 120 if (perf_mmap__read_init(md) < 0) 121 121 continue;
+1 -1
tools/perf/arch/x86/tests/rdpmc.c
··· 13 13 #include "tests/tests.h" 14 14 #include "cloexec.h" 15 15 #include "event.h" 16 - #include "util.h" 16 + #include <internal/lib.h> // page_size 17 17 #include "arch-tests.h" 18 18 19 19 static u64 rdpmc(unsigned int counter)
+1
tools/perf/arch/x86/util/archinsn.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 #include "../../../../arch/x86/include/asm/insn.h" 3 3 #include "archinsn.h" 4 + #include "event.h" 4 5 #include "machine.h" 5 6 #include "thread.h" 6 7 #include "symbol.h"
+2
tools/perf/arch/x86/util/event.c
··· 3 3 #include <linux/string.h> 4 4 #include <linux/zalloc.h> 5 5 6 + #include "../../util/event.h" 7 + #include "../../util/synthetic-events.h" 6 8 #include "../../util/machine.h" 7 9 #include "../../util/tool.h" 8 10 #include "../../util/map.h"
+5 -4
tools/perf/arch/x86/util/intel-bts.c
··· 15 15 #include "../../util/event.h" 16 16 #include "../../util/evsel.h" 17 17 #include "../../util/evlist.h" 18 + #include "../../util/mmap.h" 18 19 #include "../../util/session.h" 19 20 #include "../../util/pmu.h" 20 21 #include "../../util/debug.h" ··· 23 22 #include "../../util/tsc.h" 24 23 #include "../../util/auxtrace.h" 25 24 #include "../../util/intel-bts.h" 26 - #include "../../util/util.h" 25 + #include <internal/lib.h> // page_size 27 26 28 27 #define KiB(x) ((x) * 1024) 29 28 #define MiB(x) ((x) * 1024 * 1024) ··· 75 74 if (priv_size != INTEL_BTS_AUXTRACE_PRIV_SIZE) 76 75 return -EINVAL; 77 76 78 - if (!session->evlist->nr_mmaps) 77 + if (!session->evlist->core.nr_mmaps) 79 78 return -EINVAL; 80 79 81 - pc = session->evlist->mmap[0].base; 80 + pc = session->evlist->mmap[0].core.base; 82 81 if (pc) { 83 82 err = perf_read_tsc_conversion(pc, &tc); 84 83 if (err) { ··· 231 230 if (err) 232 231 return err; 233 232 234 - tracking_evsel = perf_evlist__last(evlist); 233 + tracking_evsel = evlist__last(evlist); 235 234 236 235 perf_evlist__set_tracking_event(evlist, tracking_evsel); 237 236
+9 -8
tools/perf/arch/x86/util/intel-pt.c
··· 18 18 #include "../../util/evlist.h" 19 19 #include "../../util/evsel.h" 20 20 #include "../../util/cpumap.h" 21 + #include "../../util/mmap.h" 21 22 #include <subcmd/parse-options.h> 22 23 #include "../../util/parse-events.h" 23 24 #include "../../util/pmu.h" ··· 27 26 #include "../../util/record.h" 28 27 #include "../../util/target.h" 29 28 #include "../../util/tsc.h" 30 - #include "../../util/util.h" 29 + #include <internal/lib.h> // page_size 31 30 #include "../../util/intel-pt.h" 32 31 33 32 #define KiB(x) ((x) * 1024) ··· 352 351 filter = intel_pt_find_filter(session->evlist, ptr->intel_pt_pmu); 353 352 filter_str_len = filter ? strlen(filter) : 0; 354 353 355 - if (!session->evlist->nr_mmaps) 354 + if (!session->evlist->core.nr_mmaps) 356 355 return -EINVAL; 357 356 358 - pc = session->evlist->mmap[0].base; 357 + pc = session->evlist->mmap[0].core.base; 359 358 if (pc) { 360 359 err = perf_read_tsc_conversion(pc, &tc); 361 360 if (err) { ··· 417 416 return err; 418 417 } 419 418 420 - evsel = perf_evlist__last(evlist); 419 + evsel = evlist__last(evlist); 421 420 422 421 perf_evsel__set_sample_bit(evsel, CPU); 423 422 perf_evsel__set_sample_bit(evsel, TIME); 424 423 425 - evsel->system_wide = true; 424 + evsel->core.system_wide = true; 426 425 evsel->no_aux_samples = true; 427 426 evsel->immediate = true; 428 427 ··· 717 716 if (err) 718 717 return err; 719 718 720 - switch_evsel = perf_evlist__last(evlist); 719 + switch_evsel = evlist__last(evlist); 721 720 722 721 switch_evsel->core.attr.freq = 0; 723 722 switch_evsel->core.attr.sample_period = 1; 724 723 switch_evsel->core.attr.context_switch = 1; 725 724 726 - switch_evsel->system_wide = true; 725 + switch_evsel->core.system_wide = true; 727 726 switch_evsel->no_aux_samples = true; 728 727 switch_evsel->immediate = true; 729 728 ··· 775 774 if (err) 776 775 return err; 777 776 778 - tracking_evsel = perf_evlist__last(evlist); 777 + tracking_evsel = evlist__last(evlist); 779 778 780 779 
perf_evlist__set_tracking_event(evlist, tracking_evsel); 781 780
+2 -1
tools/perf/arch/x86/util/machine.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 #include <linux/types.h> 3 3 #include <linux/string.h> 4 + #include <limits.h> 4 5 #include <stdlib.h> 5 6 6 - #include "../../util/util.h" 7 + #include <internal/lib.h> // page_size 7 8 #include "../../util/machine.h" 8 9 #include "../../util/map.h" 9 10 #include "../../util/symbol.h"
+2
tools/perf/arch/x86/util/tsc.c
··· 8 8 #include <linux/types.h> 9 9 #include <asm/barrier.h> 10 10 #include "../../../util/debug.h" 11 + #include "../../../util/event.h" 12 + #include "../../../util/synthetic-events.h" 11 13 #include "../../../util/tsc.h" 12 14 13 15 int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
+1 -1
tools/perf/arch/x86/util/unwind-libunwind.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 3 3 #include <errno.h> 4 + #include "../../util/debug.h" 4 5 #ifndef REMOTE_UNWIND_LIBUNWIND 5 6 #include <libunwind.h> 6 7 #include "perf_regs.h" 7 8 #include "../../util/unwind.h" 8 - #include "../../util/debug.h" 9 9 #endif 10 10 11 11 #ifdef HAVE_ARCH_X86_64_SUPPORT
+1 -1
tools/perf/bench/epoll-ctl.c
··· 21 21 #include <sys/resource.h> 22 22 #include <sys/epoll.h> 23 23 #include <sys/eventfd.h> 24 + #include <internal/cpumap.h> 24 25 #include <perf/cpumap.h> 25 26 26 27 #include "../util/stat.h" 27 28 #include <subcmd/parse-options.h> 28 29 #include "bench.h" 29 - #include "cpumap.h" 30 30 31 31 #include <err.h> 32 32
+1 -1
tools/perf/bench/epoll-wait.c
··· 76 76 #include <sys/epoll.h> 77 77 #include <sys/eventfd.h> 78 78 #include <sys/types.h> 79 + #include <internal/cpumap.h> 79 80 #include <perf/cpumap.h> 80 81 81 82 #include "../util/stat.h" 82 83 #include <subcmd/parse-options.h> 83 84 #include "bench.h" 84 - #include "cpumap.h" 85 85 86 86 #include <err.h> 87 87
+1 -1
tools/perf/bench/futex-hash.c
··· 20 20 #include <linux/kernel.h> 21 21 #include <linux/zalloc.h> 22 22 #include <sys/time.h> 23 + #include <internal/cpumap.h> 23 24 #include <perf/cpumap.h> 24 25 25 26 #include "../util/stat.h" 26 27 #include <subcmd/parse-options.h> 27 28 #include "bench.h" 28 29 #include "futex.h" 29 - #include "cpumap.h" 30 30 31 31 #include <err.h> 32 32
+1 -1
tools/perf/bench/futex-lock-pi.c
··· 14 14 #include <linux/kernel.h> 15 15 #include <linux/zalloc.h> 16 16 #include <errno.h> 17 + #include <internal/cpumap.h> 17 18 #include <perf/cpumap.h> 18 19 #include "bench.h" 19 20 #include "futex.h" 20 - #include "cpumap.h" 21 21 22 22 #include <err.h> 23 23 #include <stdlib.h>
+1 -1
tools/perf/bench/futex-requeue.c
··· 20 20 #include <linux/kernel.h> 21 21 #include <linux/time64.h> 22 22 #include <errno.h> 23 + #include <internal/cpumap.h> 23 24 #include <perf/cpumap.h> 24 25 #include "bench.h" 25 26 #include "futex.h" 26 - #include "cpumap.h" 27 27 28 28 #include <err.h> 29 29 #include <stdlib.h>
+2 -1
tools/perf/bench/futex-wake-parallel.c
··· 29 29 #include <linux/time64.h> 30 30 #include <errno.h> 31 31 #include "futex.h" 32 - #include "cpumap.h" 32 + #include <internal/cpumap.h> 33 + #include <perf/cpumap.h> 33 34 34 35 #include <err.h> 35 36 #include <stdlib.h>
+1 -1
tools/perf/bench/futex-wake.c
··· 20 20 #include <linux/kernel.h> 21 21 #include <linux/time64.h> 22 22 #include <errno.h> 23 + #include <internal/cpumap.h> 23 24 #include <perf/cpumap.h> 24 25 #include "bench.h" 25 26 #include "futex.h" 26 - #include "cpumap.h" 27 27 28 28 #include <err.h> 29 29 #include <stdlib.h>
-1
tools/perf/bench/numa.c
··· 9 9 /* For the CLR_() macros */ 10 10 #include <pthread.h> 11 11 12 - #include "../builtin.h" 13 12 #include <subcmd/parse-options.h> 14 13 #include "../util/cloexec.h" 15 14
-2
tools/perf/bench/sched-messaging.c
··· 10 10 * 11 11 */ 12 12 13 - #include "../util/util.h" 14 13 #include <subcmd/parse-options.h> 15 - #include "../builtin.h" 16 14 #include "bench.h" 17 15 18 16 /* Test groups of 20 processes spraying to 20 receivers */
-2
tools/perf/bench/sched-pipe.c
··· 9 9 * http://people.redhat.com/mingo/cfs-scheduler/tools/pipe-test-1m.c 10 10 * Ported to perf by Hitoshi Mitake <mitake@dcl.info.waseda.ac.jp> 11 11 */ 12 - #include "../util/util.h" 13 12 #include <subcmd/parse-options.h> 14 - #include "../builtin.h" 15 13 #include "bench.h" 16 14 17 15 #include <unistd.h>
+4 -2
tools/perf/builtin-annotate.c
··· 27 27 #include "util/sort.h" 28 28 #include "util/hist.h" 29 29 #include "util/dso.h" 30 + #include "util/machine.h" 30 31 #include "util/map.h" 31 32 #include "util/session.h" 32 33 #include "util/tool.h" ··· 40 39 #include <dlfcn.h> 41 40 #include <errno.h> 42 41 #include <linux/bitmap.h> 42 + #include <linux/err.h> 43 43 44 44 struct perf_annotate { 45 45 struct perf_tool tool; ··· 585 583 data.path = input_name; 586 584 587 585 annotate.session = perf_session__new(&data, false, &annotate.tool); 588 - if (annotate.session == NULL) 589 - return -1; 586 + if (IS_ERR(annotate.session)) 587 + return PTR_ERR(annotate.session); 590 588 591 589 annotate.has_br_stack = perf_header__has_feat(&annotate.session->header, 592 590 HEADER_BRANCH_STACK);
+3 -2
tools/perf/builtin-buildid-cache.c
··· 28 28 #include "util/util.h" 29 29 #include "util/probe-file.h" 30 30 #include <linux/string.h> 31 + #include <linux/err.h> 31 32 32 33 static int build_id_cache__kcore_buildid(const char *proc_dir, char *sbuildid) 33 34 { ··· 423 422 data.force = force; 424 423 425 424 session = perf_session__new(&data, false, NULL); 426 - if (session == NULL) 427 - return -1; 425 + if (IS_ERR(session)) 426 + return PTR_ERR(session); 428 427 } 429 428 430 429 if (symbol__init(session ? &session->header.env : NULL) < 0)
+3 -2
tools/perf/builtin-buildid-list.c
··· 18 18 #include "util/symbol.h" 19 19 #include "util/data.h" 20 20 #include <errno.h> 21 + #include <linux/err.h> 21 22 22 23 static int sysfs__fprintf_build_id(FILE *fp) 23 24 { ··· 66 65 goto out; 67 66 68 67 session = perf_session__new(&data, false, &build_id__mark_dso_hit_ops); 69 - if (session == NULL) 70 - return -1; 68 + if (IS_ERR(session)) 69 + return PTR_ERR(session); 71 70 72 71 /* 73 72 * We take all buildids when the file contains AUX area tracing data
+5 -2
tools/perf/builtin-c2c.c
··· 13 13 #include <errno.h> 14 14 #include <inttypes.h> 15 15 #include <linux/compiler.h> 16 + #include <linux/err.h> 16 17 #include <linux/kernel.h> 17 18 #include <linux/stringify.h> 18 19 #include <linux/zalloc.h> ··· 21 20 #include <sys/param.h> 22 21 #include "debug.h" 23 22 #include "builtin.h" 23 + #include <perf/cpumap.h> 24 24 #include <subcmd/pager.h> 25 25 #include <subcmd/parse-options.h> 26 26 #include "map_symbol.h" ··· 2782 2780 } 2783 2781 2784 2782 session = perf_session__new(&data, 0, &c2c.tool); 2785 - if (session == NULL) { 2786 - pr_debug("No memory for session\n"); 2783 + if (IS_ERR(session)) { 2784 + err = PTR_ERR(session); 2785 + pr_debug("Error creating perf session\n"); 2787 2786 goto out; 2788 2787 } 2789 2788
-1
tools/perf/builtin-config.c
··· 9 9 10 10 #include "util/cache.h" 11 11 #include <subcmd/parse-options.h> 12 - #include "util/util.h" 13 12 #include "util/debug.h" 14 13 #include "util/config.h" 15 14 #include <linux/string.h>
+5 -4
tools/perf/builtin-diff.c
··· 23 23 #include "util/time-utils.h" 24 24 #include "util/annotate.h" 25 25 #include "util/map.h" 26 + #include <linux/err.h> 26 27 #include <linux/zalloc.h> 27 28 #include <subcmd/pager.h> 28 29 #include <subcmd/parse-options.h> ··· 1154 1153 1155 1154 data__for_each_file(i, d) { 1156 1155 d->session = perf_session__new(&d->data, false, &pdiff.tool); 1157 - if (!d->session) { 1156 + if (IS_ERR(d->session)) { 1158 1157 pr_err("Failed to open %s\n", d->data.path); 1159 - return -1; 1158 + return PTR_ERR(d->session); 1160 1159 } 1161 1160 1162 1161 has_br_stack = perf_header__has_feat(&d->session->header, ··· 1186 1185 1187 1186 data__for_each_file(i, d) { 1188 1187 d->session = perf_session__new(&d->data, false, &pdiff.tool); 1189 - if (!d->session) { 1188 + if (IS_ERR(d->session)) { 1189 + ret = PTR_ERR(d->session); 1190 1190 pr_err("Failed to open %s\n", d->data.path); 1191 - ret = -1; 1192 1191 goto out_delete; 1193 1192 } 1194 1193
+4 -4
tools/perf/builtin-evlist.c
··· 5 5 */ 6 6 #include "builtin.h" 7 7 8 - #include "util/util.h" 9 - 10 8 #include <linux/list.h> 11 9 12 10 #include "perf.h" 13 11 #include "util/evlist.h" 14 12 #include "util/evsel.h" 13 + #include "util/evsel_fprintf.h" 15 14 #include "util/parse-events.h" 16 15 #include <subcmd/parse-options.h> 17 16 #include "util/session.h" 18 17 #include "util/data.h" 19 18 #include "util/debug.h" 19 + #include <linux/err.h> 20 20 21 21 static int __cmd_evlist(const char *file_name, struct perf_attr_details *details) 22 22 { ··· 30 30 bool has_tracepoint = false; 31 31 32 32 session = perf_session__new(&data, 0, NULL); 33 - if (session == NULL) 34 - return -1; 33 + if (IS_ERR(session)) 34 + return PTR_ERR(session); 35 35 36 36 evlist__for_each_entry(session->evlist, pos) { 37 37 perf_evsel__fprintf(pos, details, stdout);
+4 -2
tools/perf/builtin-inject.c
··· 21 21 #include "util/auxtrace.h" 22 22 #include "util/jit.h" 23 23 #include "util/symbol.h" 24 + #include "util/synthetic-events.h" 24 25 #include "util/thread.h" 26 + #include <linux/err.h> 25 27 26 28 #include <subcmd/parse-options.h> 27 29 ··· 836 834 837 835 data.path = inject.input_name; 838 836 inject.session = perf_session__new(&data, true, &inject.tool); 839 - if (inject.session == NULL) 840 - return -1; 837 + if (IS_ERR(inject.session)) 838 + return PTR_ERR(inject.session); 841 839 842 840 if (zstd_init(&(inject.session->zstd_data), 0) < 0) 843 841 pr_warning("Decompression initialization failed.\n");
+3 -2
tools/perf/builtin-kmem.c
··· 14 14 #include "util/tool.h" 15 15 #include "util/callchain.h" 16 16 #include "util/time-utils.h" 17 + #include <linux/err.h> 17 18 18 19 #include <subcmd/pager.h> 19 20 #include <subcmd/parse-options.h> ··· 1957 1956 data.path = input_name; 1958 1957 1959 1958 kmem_session = session = perf_session__new(&data, false, &perf_kmem); 1960 - if (session == NULL) 1961 - return -1; 1959 + if (IS_ERR(session)) 1960 + return PTR_ERR(session); 1962 1961 1963 1962 ret = -1; 1964 1963
+25 -12
tools/perf/builtin-kvm.c
··· 5 5 #include "util/build-id.h" 6 6 #include "util/evsel.h" 7 7 #include "util/evlist.h" 8 + #include "util/mmap.h" 8 9 #include "util/term.h" 9 10 #include "util/symbol.h" 10 11 #include "util/thread.h" ··· 18 17 #include "util/debug.h" 19 18 #include "util/tool.h" 20 19 #include "util/stat.h" 20 + #include "util/synthetic-events.h" 21 21 #include "util/top.h" 22 22 #include "util/data.h" 23 23 #include "util/ordered-events.h" 24 + #include "util/kvm-stat.h" 24 25 #include "ui/ui.h" 25 26 26 27 #include <sys/prctl.h> ··· 34 31 #include <sys/stat.h> 35 32 #include <fcntl.h> 36 33 34 + #include <linux/err.h> 37 35 #include <linux/kernel.h> 38 36 #include <linux/string.h> 39 37 #include <linux/time64.h> ··· 62 58 } 63 59 64 60 #ifdef HAVE_KVM_STAT_SUPPORT 65 - #include "util/kvm-stat.h" 66 61 67 62 void exit_event_get_key(struct evsel *evsel, 68 63 struct perf_sample *sample, ··· 751 748 { 752 749 struct evlist *evlist = kvm->evlist; 753 750 union perf_event *event; 754 - struct perf_mmap *md; 751 + struct mmap *md; 755 752 u64 timestamp; 756 753 s64 n = 0; 757 754 int err; ··· 802 799 s64 n, ntotal = 0; 803 800 u64 flush_time = ULLONG_MAX, mmap_time; 804 801 805 - for (i = 0; i < kvm->evlist->nr_mmaps; i++) { 802 + for (i = 0; i < kvm->evlist->core.nr_mmaps; i++) { 806 803 n = perf_kvm__mmap_read_idx(kvm, i, &mmap_time); 807 804 if (n < 0) 808 805 return -1; ··· 967 964 goto out; 968 965 } 969 966 970 - if (perf_evlist__add_pollfd(kvm->evlist, kvm->timerfd) < 0) 967 + if (evlist__add_pollfd(kvm->evlist, kvm->timerfd) < 0) 971 968 goto out; 972 969 973 - nr_stdin = perf_evlist__add_pollfd(kvm->evlist, fileno(stdin)); 970 + nr_stdin = evlist__add_pollfd(kvm->evlist, fileno(stdin)); 974 971 if (nr_stdin < 0) 975 972 goto out; 976 973 ··· 981 978 evlist__enable(kvm->evlist); 982 979 983 980 while (!done) { 984 - struct fdarray *fda = &kvm->evlist->pollfd; 981 + struct fdarray *fda = &kvm->evlist->core.pollfd; 985 982 int rc; 986 983 987 984 rc = 
perf_kvm__mmap_read(kvm); ··· 1061 1058 goto out; 1062 1059 } 1063 1060 1064 - if (perf_evlist__mmap(evlist, kvm->opts.mmap_pages) < 0) { 1061 + if (evlist__mmap(evlist, kvm->opts.mmap_pages) < 0) { 1065 1062 ui__error("Failed to mmap the events: %s\n", 1066 1063 str_error_r(errno, sbuf, sizeof(sbuf))); 1067 1064 evlist__close(evlist); ··· 1093 1090 1094 1091 kvm->tool = eops; 1095 1092 kvm->session = perf_session__new(&file, false, &kvm->tool); 1096 - if (!kvm->session) { 1093 + if (IS_ERR(kvm->session)) { 1097 1094 pr_err("Initializing perf session failed\n"); 1098 - return -1; 1095 + return PTR_ERR(kvm->session); 1099 1096 } 1100 1097 1101 1098 symbol__init(&kvm->session->header.env); ··· 1448 1445 * perf session 1449 1446 */ 1450 1447 kvm->session = perf_session__new(&data, false, &kvm->tool); 1451 - if (kvm->session == NULL) { 1452 - err = -1; 1448 + if (IS_ERR(kvm->session)) { 1449 + err = PTR_ERR(kvm->session); 1453 1450 goto out; 1454 1451 } 1455 1452 kvm->session->evlist = kvm->evlist; ··· 1516 1513 } 1517 1514 #endif /* HAVE_KVM_STAT_SUPPORT */ 1518 1515 1516 + int __weak kvm_add_default_arch_event(int *argc __maybe_unused, 1517 + const char **argv __maybe_unused) 1518 + { 1519 + return 0; 1520 + } 1521 + 1519 1522 static int __cmd_record(const char *file_name, int argc, const char **argv) 1520 1523 { 1521 - int rec_argc, i = 0, j; 1524 + int rec_argc, i = 0, j, ret; 1522 1525 const char **rec_argv; 1526 + 1527 + ret = kvm_add_default_arch_event(&argc, argv); 1528 + if (ret) 1529 + return -EINVAL; 1523 1530 1524 1531 rec_argc = argc + 2; 1525 1532 rec_argv = calloc(rec_argc + 1, sizeof(char *));
+2 -2
tools/perf/builtin-list.c
··· 81 81 long_desc_flag, details_flag); 82 82 else if (strcmp(argv[i], "sdt") == 0) 83 83 print_sdt_events(NULL, NULL, raw_dump); 84 - else if (strcmp(argv[i], "metric") == 0) 84 + else if (strcmp(argv[i], "metric") == 0 || strcmp(argv[i], "metrics") == 0) 85 85 metricgroup__print(true, false, NULL, raw_dump, details_flag); 86 - else if (strcmp(argv[i], "metricgroup") == 0) 86 + else if (strcmp(argv[i], "metricgroup") == 0 || strcmp(argv[i], "metricgroups") == 0) 87 87 metricgroup__print(false, true, NULL, raw_dump, details_flag); 88 88 else if ((sep = strchr(argv[i], ':')) != NULL) { 89 89 int sep_idx;
+3 -2
tools/perf/builtin-lock.c
··· 30 30 #include <linux/hash.h> 31 31 #include <linux/kernel.h> 32 32 #include <linux/zalloc.h> 33 + #include <linux/err.h> 33 34 34 35 static struct perf_session *session; 35 36 ··· 873 872 }; 874 873 875 874 session = perf_session__new(&data, false, &eops); 876 - if (!session) { 875 + if (IS_ERR(session)) { 877 876 pr_err("Initializing perf session failed\n"); 878 - return -1; 877 + return PTR_ERR(session); 879 878 } 880 879 881 880 symbol__init(&session->header.env);
+3 -2
tools/perf/builtin-mem.c
··· 17 17 #include "util/dso.h" 18 18 #include "util/map.h" 19 19 #include "util/symbol.h" 20 + #include <linux/err.h> 20 21 21 22 #define MEM_OPERATION_LOAD 0x1 22 23 #define MEM_OPERATION_STORE 0x2 ··· 250 249 struct perf_session *session = perf_session__new(&data, false, 251 250 &mem->tool); 252 251 253 - if (session == NULL) 254 - return -1; 252 + if (IS_ERR(session)) 253 + return PTR_ERR(session); 255 254 256 255 if (mem->cpu_list) { 257 256 ret = perf_session__cpu_bitmap(session, mem->cpu_list,
+56 -61
tools/perf/builtin-record.c
··· 20 20 #include "util/evlist.h" 21 21 #include "util/evsel.h" 22 22 #include "util/debug.h" 23 + #include "util/mmap.h" 23 24 #include "util/target.h" 24 25 #include "util/session.h" 25 26 #include "util/tool.h" ··· 39 38 #include "util/trigger.h" 40 39 #include "util/perf-hooks.h" 41 40 #include "util/cpu-set-sched.h" 41 + #include "util/synthetic-events.h" 42 42 #include "util/time-utils.h" 43 43 #include "util/units.h" 44 44 #include "util/bpf-event.h" ··· 55 53 #include <signal.h> 56 54 #include <sys/mman.h> 57 55 #include <sys/wait.h> 56 + #include <linux/err.h> 58 57 #include <linux/string.h> 59 58 #include <linux/time64.h> 60 59 #include <linux/zalloc.h> ··· 120 117 trigger_is_ready(&switch_output_trigger); 121 118 } 122 119 123 - static int record__write(struct record *rec, struct perf_mmap *map __maybe_unused, 120 + static int record__write(struct record *rec, struct mmap *map __maybe_unused, 124 121 void *bf, size_t size) 125 122 { 126 123 struct perf_data_file *file = &rec->session->data->file; ··· 169 166 return rc; 170 167 } 171 168 172 - static int record__aio_complete(struct perf_mmap *md, struct aiocb *cblock) 169 + static int record__aio_complete(struct mmap *md, struct aiocb *cblock) 173 170 { 174 171 void *rem_buf; 175 172 off_t rem_off; ··· 215 212 return rc; 216 213 } 217 214 218 - static int record__aio_sync(struct perf_mmap *md, bool sync_all) 215 + static int record__aio_sync(struct mmap *md, bool sync_all) 219 216 { 220 217 struct aiocb **aiocb = md->aio.aiocb; 221 218 struct aiocb *cblocks = md->aio.cblocks; ··· 256 253 size_t size; 257 254 }; 258 255 259 - static int record__aio_pushfn(struct perf_mmap *map, void *to, void *buf, size_t size) 256 + static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size) 260 257 { 261 258 struct record_aio *aio = to; 262 259 263 260 /* 264 - * map->base data pointed by buf is copied into free map->aio.data[] buffer 261 + * map->core.base data pointed by buf is copied into free 
map->aio.data[] buffer 265 262 * to release space in the kernel buffer as fast as possible, calling 266 263 * perf_mmap__consume() from perf_mmap__push() function. 267 264 * ··· 301 298 return size; 302 299 } 303 300 304 - static int record__aio_push(struct record *rec, struct perf_mmap *map, off_t *off) 301 + static int record__aio_push(struct record *rec, struct mmap *map, off_t *off) 305 302 { 306 303 int ret, idx; 307 304 int trace_fd = rec->session->data->file.fd; ··· 352 349 { 353 350 int i; 354 351 struct evlist *evlist = rec->evlist; 355 - struct perf_mmap *maps = evlist->mmap; 352 + struct mmap *maps = evlist->mmap; 356 353 357 354 if (!record__aio_enabled(rec)) 358 355 return; 359 356 360 - for (i = 0; i < evlist->nr_mmaps; i++) { 361 - struct perf_mmap *map = &maps[i]; 357 + for (i = 0; i < evlist->core.nr_mmaps; i++) { 358 + struct mmap *map = &maps[i]; 362 359 363 - if (map->base) 360 + if (map->core.base) 364 361 record__aio_sync(map, true); 365 362 } 366 363 } ··· 388 385 #else /* HAVE_AIO_SUPPORT */ 389 386 static int nr_cblocks_max = 0; 390 387 391 - static int record__aio_push(struct record *rec __maybe_unused, struct perf_mmap *map __maybe_unused, 388 + static int record__aio_push(struct record *rec __maybe_unused, struct mmap *map __maybe_unused, 392 389 off_t *off __maybe_unused) 393 390 { 394 391 return -1; ··· 440 437 if (!opts->mmap_flush) 441 438 opts->mmap_flush = MMAP_FLUSH_DEFAULT; 442 439 443 - flush_max = perf_evlist__mmap_size(opts->mmap_pages); 440 + flush_max = evlist__mmap_size(opts->mmap_pages); 444 441 flush_max /= 4; 445 442 if (opts->mmap_flush > flush_max) 446 443 opts->mmap_flush = flush_max; ··· 483 480 return record__write(rec, NULL, event, event->header.size); 484 481 } 485 482 486 - static int record__pushfn(struct perf_mmap *map, void *to, void *bf, size_t size) 483 + static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size) 487 484 { 488 485 struct record *rec = to; 489 486 ··· 528 525 #ifdef 
HAVE_AUXTRACE_SUPPORT 529 526 530 527 static int record__process_auxtrace(struct perf_tool *tool, 531 - struct perf_mmap *map, 528 + struct mmap *map, 532 529 union perf_event *event, void *data1, 533 530 size_t len1, void *data2, size_t len2) 534 531 { ··· 566 563 } 567 564 568 565 static int record__auxtrace_mmap_read(struct record *rec, 569 - struct perf_mmap *map) 566 + struct mmap *map) 570 567 { 571 568 int ret; 572 569 ··· 582 579 } 583 580 584 581 static int record__auxtrace_mmap_read_snapshot(struct record *rec, 585 - struct perf_mmap *map) 582 + struct mmap *map) 586 583 { 587 584 int ret; 588 585 ··· 603 600 int i; 604 601 int rc = 0; 605 602 606 - for (i = 0; i < rec->evlist->nr_mmaps; i++) { 607 - struct perf_mmap *map = &rec->evlist->mmap[i]; 603 + for (i = 0; i < rec->evlist->core.nr_mmaps; i++) { 604 + struct mmap *map = &rec->evlist->mmap[i]; 608 605 609 606 if (!map->auxtrace_mmap.base) 610 607 continue; ··· 669 666 670 667 static inline 671 668 int record__auxtrace_mmap_read(struct record *rec __maybe_unused, 672 - struct perf_mmap *map __maybe_unused) 669 + struct mmap *map __maybe_unused) 673 670 { 674 671 return 0; 675 672 } ··· 708 705 if (opts->affinity != PERF_AFFINITY_SYS) 709 706 cpu__setup_cpunode_map(); 710 707 711 - if (perf_evlist__mmap_ex(evlist, opts->mmap_pages, 708 + if (evlist__mmap_ex(evlist, opts->mmap_pages, 712 709 opts->auxtrace_mmap_pages, 713 710 opts->auxtrace_snapshot_mode, 714 711 opts->nr_cblocks, opts->affinity, ··· 756 753 if (perf_evlist__add_dummy(evlist)) 757 754 return -ENOMEM; 758 755 759 - pos = perf_evlist__first(evlist); 756 + pos = evlist__first(evlist); 760 757 pos->tracking = 0; 761 - pos = perf_evlist__last(evlist); 758 + pos = evlist__last(evlist); 762 759 pos->tracking = 1; 763 760 pos->core.attr.enable_on_exec = 1; 764 761 } ··· 787 784 } 788 785 789 786 pos->supported = true; 787 + } 788 + 789 + if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(evlist)) { 790 + pr_warning( 791 + "WARNING: 
Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n" 792 + "check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n" 793 + "Samples in kernel functions may not be resolved if a suitable vmlinux\n" 794 + "file is not found in the buildid cache or in the vmlinux path.\n\n" 795 + "Samples in kernel modules won't be resolved at all.\n\n" 796 + "If some relocation was applied (e.g. kexec) symbols may be misresolved\n" 797 + "even with a suitable vmlinux or kallsyms file.\n\n"); 790 798 } 791 799 792 800 if (perf_evlist__apply_filters(evlist, &pos)) { ··· 902 888 .type = PERF_RECORD_FINISHED_ROUND, 903 889 }; 904 890 905 - static void record__adjust_affinity(struct record *rec, struct perf_mmap *map) 891 + static void record__adjust_affinity(struct record *rec, struct mmap *map) 906 892 { 907 893 if (rec->opts.affinity != PERF_AFFINITY_SYS && 908 894 !CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) { ··· 949 935 u64 bytes_written = rec->bytes_written; 950 936 int i; 951 937 int rc = 0; 952 - struct perf_mmap *maps; 938 + struct mmap *maps; 953 939 int trace_fd = rec->data.file.fd; 954 940 off_t off = 0; 955 941 ··· 966 952 if (record__aio_enabled(rec)) 967 953 off = record__aio_get_pos(trace_fd); 968 954 969 - for (i = 0; i < evlist->nr_mmaps; i++) { 955 + for (i = 0; i < evlist->core.nr_mmaps; i++) { 970 956 u64 flush = 0; 971 - struct perf_mmap *map = &maps[i]; 957 + struct mmap *map = &maps[i]; 972 958 973 - if (map->base) { 959 + if (map->core.base) { 974 960 record__adjust_affinity(rec, map); 975 961 if (synch) { 976 - flush = map->flush; 977 - map->flush = 1; 962 + flush = map->core.flush; 963 + map->core.flush = 1; 978 964 } 979 965 if (!record__aio_enabled(rec)) { 980 966 if (perf_mmap__push(map, rec, record__pushfn) < 0) { 981 967 if (synch) 982 - map->flush = flush; 968 + map->core.flush = flush; 983 969 rc = -1; 984 970 goto out; 985 971 } ··· 987 973 if (record__aio_push(rec, map, &off) < 0) { 988 974 
record__aio_set_pos(trace_fd, off); 989 975 if (synch) 990 - map->flush = flush; 976 + map->core.flush = flush; 991 977 rc = -1; 992 978 goto out; 993 979 } 994 980 } 995 981 if (synch) 996 - map->flush = flush; 982 + map->core.flush = flush; 997 983 } 998 984 999 985 if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode && ··· 1194 1180 static void snapshot_sig_handler(int sig); 1195 1181 static void alarm_sig_handler(int sig); 1196 1182 1197 - int __weak 1198 - perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused, 1199 - struct perf_tool *tool __maybe_unused, 1200 - perf_event__handler_t process __maybe_unused, 1201 - struct machine *machine __maybe_unused) 1202 - { 1203 - return 0; 1204 - } 1205 - 1206 1183 static const struct perf_event_mmap_page * 1207 1184 perf_evlist__pick_pc(struct evlist *evlist) 1208 1185 { 1209 1186 if (evlist) { 1210 - if (evlist->mmap && evlist->mmap[0].base) 1211 - return evlist->mmap[0].base; 1212 - if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].base) 1213 - return evlist->overwrite_mmap[0].base; 1187 + if (evlist->mmap && evlist->mmap[0].core.base) 1188 + return evlist->mmap[0].core.base; 1189 + if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].core.base) 1190 + return evlist->overwrite_mmap[0].core.base; 1214 1191 } 1215 1192 return NULL; 1216 1193 } ··· 1367 1362 } 1368 1363 1369 1364 session = perf_session__new(data, false, tool); 1370 - if (session == NULL) { 1365 + if (IS_ERR(session)) { 1371 1366 pr_err("Perf session creation failed.\n"); 1372 - return -1; 1367 + return PTR_ERR(session); 1373 1368 } 1374 1369 1375 1370 fd = perf_data__fd(data); ··· 1412 1407 err = -1; 1413 1408 goto out_child; 1414 1409 } 1415 - session->header.env.comp_mmap_len = session->evlist->mmap_len; 1410 + session->header.env.comp_mmap_len = session->evlist->core.mmap_len; 1416 1411 1417 1412 err = bpf__apply_obj_config(); 1418 1413 if (err) { ··· 1615 1610 if (hits == rec->samples) { 1616 1611 if 
(done || draining) 1617 1612 break; 1618 - err = perf_evlist__poll(rec->evlist, -1); 1613 + err = evlist__poll(rec->evlist, -1); 1619 1614 /* 1620 1615 * Propagate error, only if there's any. Ignore positive 1621 1616 * number of returned events and interrupt error. ··· 1624 1619 err = 0; 1625 1620 waking++; 1626 1621 1627 - if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0) 1622 + if (evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0) 1628 1623 draining = true; 1629 1624 } 1630 1625 ··· 1981 1976 1982 1977 static void switch_output_size_warn(struct record *rec) 1983 1978 { 1984 - u64 wakeup_size = perf_evlist__mmap_size(rec->opts.mmap_pages); 1979 + u64 wakeup_size = evlist__mmap_size(rec->opts.mmap_pages); 1985 1980 struct switch_output *s = &rec->switch_output; 1986 1981 1987 1982 wakeup_size /= 2; ··· 2375 2370 } 2376 2371 2377 2372 err = -ENOMEM; 2378 - 2379 - if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist)) 2380 - pr_warning( 2381 - "WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n" 2382 - "check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n" 2383 - "Samples in kernel functions may not be resolved if a suitable vmlinux\n" 2384 - "file is not found in the buildid cache or in the vmlinux path.\n\n" 2385 - "Samples in kernel modules won't be resolved at all.\n\n" 2386 - "If some relocation was applied (e.g. kexec) symbols may be misresolved\n" 2387 - "even with a suitable vmlinux or kallsyms file.\n\n"); 2388 2373 2389 2374 if (rec->no_buildid_cache || rec->no_buildid) { 2390 2375 disable_buildid_cache();
+3 -3
tools/perf/builtin-report.c
··· 48 48 #include "util/auxtrace.h" 49 49 #include "util/units.h" 50 50 #include "util/branch.h" 51 - #include "util/util.h" 51 + #include "util/util.h" // perf_tip() 52 52 #include "ui/ui.h" 53 53 #include "ui/progress.h" 54 54 ··· 1269 1269 1270 1270 repeat: 1271 1271 session = perf_session__new(&data, false, &report.tool); 1272 - if (session == NULL) 1273 - return -1; 1272 + if (IS_ERR(session)) 1273 + return PTR_ERR(session); 1274 1274 1275 1275 ret = evswitch__init(&report.evswitch, session->evlist, stderr); 1276 1276 if (ret)
+11 -6
tools/perf/builtin-sched.c
··· 3 3 #include "perf.h" 4 4 #include "perf-sys.h" 5 5 6 + #include "util/cpumap.h" 6 7 #include "util/evlist.h" 7 8 #include "util/evsel.h" 9 + #include "util/evsel_fprintf.h" 8 10 #include "util/symbol.h" 9 11 #include "util/thread.h" 10 12 #include "util/header.h" ··· 25 23 #include "util/trace-event.h" 26 24 27 25 #include "util/debug.h" 26 + #include "util/event.h" 28 27 29 28 #include <linux/kernel.h> 30 29 #include <linux/log2.h> ··· 39 36 #include <pthread.h> 40 37 #include <math.h> 41 38 #include <api/fs/fs.h> 39 + #include <perf/cpumap.h> 42 40 #include <linux/time64.h> 41 + #include <linux/err.h> 43 42 44 43 #include <linux/ctype.h> 45 44 ··· 1799 1794 int rc = -1; 1800 1795 1801 1796 session = perf_session__new(&data, false, &sched->tool); 1802 - if (session == NULL) { 1803 - pr_debug("No Memory for session\n"); 1804 - return -1; 1797 + if (IS_ERR(session)) { 1798 + pr_debug("Error creating perf session"); 1799 + return PTR_ERR(session); 1805 1800 } 1806 1801 1807 1802 symbol__init(&session->header.env); ··· 2056 2051 EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE | 2057 2052 EVSEL__PRINT_CALLCHAIN_ARROW | 2058 2053 EVSEL__PRINT_SKIP_IGNORED, 2059 - &callchain_cursor, stdout); 2054 + &callchain_cursor, symbol_conf.bt_stop_list, stdout); 2060 2055 2061 2056 out: 2062 2057 printf("\n"); ··· 2991 2986 symbol_conf.use_callchain = sched->show_callchain; 2992 2987 2993 2988 session = perf_session__new(&data, false, &sched->tool); 2994 - if (session == NULL) 2995 - return -ENOMEM; 2989 + if (IS_ERR(session)) 2990 + return PTR_ERR(session); 2996 2991 2997 2992 evlist = session->evlist; 2998 2993
+12 -8
tools/perf/builtin-script.c
··· 17 17 #include "util/trace-event.h" 18 18 #include "util/evlist.h" 19 19 #include "util/evsel.h" 20 + #include "util/evsel_fprintf.h" 20 21 #include "util/evswitch.h" 21 22 #include "util/sort.h" 22 23 #include "util/data.h" ··· 53 52 #include <unistd.h> 54 53 #include <subcmd/pager.h> 55 54 #include <perf/evlist.h> 55 + #include <linux/err.h> 56 56 #include "util/record.h" 57 57 #include "util/util.h" 58 58 #include "perf.h" ··· 1326 1324 } else 1327 1325 printed += fprintf(fp, "\n"); 1328 1326 1329 - printed += sample__fprintf_sym(sample, al, 0, print_opts, cursor, fp); 1327 + printed += sample__fprintf_sym(sample, al, 0, print_opts, cursor, 1328 + symbol_conf.bt_stop_list, fp); 1330 1329 } 1331 1330 1332 1331 /* print branch_to information */ ··· 1869 1866 cursor = &callchain_cursor; 1870 1867 1871 1868 fputc(cursor ? '\n' : ' ', fp); 1872 - sample__fprintf_sym(sample, al, 0, output[type].print_ip_opts, cursor, fp); 1869 + sample__fprintf_sym(sample, al, 0, output[type].print_ip_opts, cursor, 1870 + symbol_conf.bt_stop_list, fp); 1873 1871 } 1874 1872 1875 1873 if (PRINT_FIELD(IREGS)) ··· 1919 1915 int cpu, thread; 1920 1916 static int header_printed; 1921 1917 1922 - if (counter->system_wide) 1918 + if (counter->core.system_wide) 1923 1919 nthreads = 1; 1924 1920 1925 1921 if (!header_printed) { ··· 2046 2042 return err; 2047 2043 2048 2044 evlist = *pevlist; 2049 - evsel = perf_evlist__last(*pevlist); 2045 + evsel = evlist__last(*pevlist); 2050 2046 2051 2047 if (!evsel->priv) { 2052 2048 if (scr->per_event_dump) { ··· 3087 3083 int i = 0; 3088 3084 3089 3085 session = perf_session__new(&data, false, NULL); 3090 - if (!session) 3091 - return -1; 3086 + if (IS_ERR(session)) 3087 + return PTR_ERR(session); 3092 3088 3093 3089 snprintf(scripts_path, MAXPATHLEN, "%s/scripts", get_argv_exec_path()); 3094 3090 ··· 3758 3754 } 3759 3755 3760 3756 session = perf_session__new(&data, false, &script.tool); 3761 - if (session == NULL) 3762 - return -1; 3757 + if 
(IS_ERR(session)) 3758 + return PTR_ERR(session); 3763 3759 3764 3760 if (header || header_only) { 3765 3761 script.tool.show_feat_hdr = SHOW_FEAT_HEADER;
+17 -24
tools/perf/builtin-stat.c
··· 61 61 #include "util/tool.h" 62 62 #include "util/string2.h" 63 63 #include "util/metricgroup.h" 64 + #include "util/synthetic-events.h" 64 65 #include "util/target.h" 65 66 #include "util/time-utils.h" 66 67 #include "util/top.h" ··· 83 82 #include <unistd.h> 84 83 #include <sys/time.h> 85 84 #include <sys/resource.h> 85 + #include <linux/err.h> 86 86 87 87 #include <linux/ctype.h> 88 88 #include <perf/evlist.h> ··· 235 233 #define WRITE_STAT_ROUND_EVENT(time, interval) \ 236 234 write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval) 237 235 238 - #define SID(e, x, y) xyarray__entry(e->sample_id, x, y) 236 + #define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y) 239 237 240 238 static int 241 239 perf_evsel__write_stat_event(struct evsel *counter, u32 cpu, u32 thread, ··· 278 276 if (!counter->supported) 279 277 return -ENOENT; 280 278 281 - if (counter->system_wide) 279 + if (counter->core.system_wide) 282 280 nthreads = 1; 283 281 284 282 for (thread = 0; thread < nthreads; thread++) { ··· 542 540 if (err < 0) 543 541 return err; 544 542 545 - err = perf_stat_synthesize_config(&stat_config, NULL, evsel_list, 546 - process_synthesized_event, is_pipe); 543 + err = perf_event__synthesize_stat_events(&stat_config, NULL, evsel_list, 544 + process_synthesized_event, is_pipe); 547 545 if (err < 0) 548 546 return err; 549 547 } ··· 824 822 return cpu_map__get_core(map, cpu, NULL); 825 823 } 826 824 827 - static int cpu_map__get_max(struct perf_cpu_map *map) 828 - { 829 - int i, max = -1; 830 - 831 - for (i = 0; i < map->nr; i++) { 832 - if (map->map[i] > max) 833 - max = map->map[i]; 834 - } 835 - 836 - return max; 837 - } 838 - 839 825 static int perf_stat__get_aggr(struct perf_stat_config *config, 840 826 aggr_get_id_t get_id, struct perf_cpu_map *map, int idx) 841 827 { ··· 918 928 * taking the highest cpu number to be the size of 919 929 * the aggregation translate cpumap. 
920 930 */ 921 - nr = cpu_map__get_max(evsel_list->core.cpus); 931 + nr = perf_cpu_map__max(evsel_list->core.cpus); 922 932 stat_config.cpus_aggr_map = perf_cpu_map__empty_new(nr + 1); 923 933 return stat_config.cpus_aggr_map ? 0 : -ENOMEM; 924 934 } ··· 1437 1447 } 1438 1448 1439 1449 session = perf_session__new(data, false, NULL); 1440 - if (session == NULL) { 1441 - pr_err("Perf session creation failed.\n"); 1442 - return -1; 1450 + if (IS_ERR(session)) { 1451 + pr_err("Perf session creation failed\n"); 1452 + return PTR_ERR(session); 1443 1453 } 1444 1454 1445 1455 init_features(session); ··· 1636 1646 perf_stat.data.mode = PERF_DATA_MODE_READ; 1637 1647 1638 1648 session = perf_session__new(&perf_stat.data, false, &perf_stat.tool); 1639 - if (session == NULL) 1640 - return -1; 1649 + if (IS_ERR(session)) 1650 + return PTR_ERR(session); 1641 1651 1642 1652 perf_stat.session = session; 1643 1653 stat_config.output = stderr; ··· 1671 1681 struct evsel *counter; 1672 1682 1673 1683 evlist__for_each_entry(evsel_list, counter) { 1674 - if (!counter->system_wide) 1684 + if (!counter->core.system_wide) 1675 1685 return; 1676 1686 } 1677 1687 ··· 1953 1963 fprintf(output, "[ perf stat: executing run #%d ... ]\n", 1954 1964 run_idx + 1); 1955 1965 1966 + if (run_idx != 0) 1967 + perf_evlist__reset_prev_raw_counts(evsel_list); 1968 + 1956 1969 status = run_perf_stat(argc, argv, run_idx); 1957 - if (forever && status != -1) { 1970 + if (forever && status != -1 && !interval) { 1958 1971 print_counters(NULL, argc, argv); 1959 1972 perf_stat__reset_stats(); 1960 1973 }
+3 -2
tools/perf/builtin-timechart.c
··· 35 35 #include "util/tool.h" 36 36 #include "util/data.h" 37 37 #include "util/debug.h" 38 + #include <linux/err.h> 38 39 39 40 #ifdef LACKS_OPEN_MEMSTREAM_PROTOTYPE 40 41 FILE *open_memstream(char **ptr, size_t *sizeloc); ··· 1602 1601 &tchart->tool); 1603 1602 int ret = -EINVAL; 1604 1603 1605 - if (session == NULL) 1606 - return -1; 1604 + if (IS_ERR(session)) 1605 + return PTR_ERR(session); 1607 1606 1608 1607 symbol__init(&session->header.env); 1609 1608
+16 -12
tools/perf/builtin-top.c
··· 27 27 #include "util/dso.h" 28 28 #include "util/evlist.h" 29 29 #include "util/evsel.h" 30 + #include "util/evsel_config.h" 30 31 #include "util/event.h" 31 32 #include "util/machine.h" 32 33 #include "util/map.h" 34 + #include "util/mmap.h" 33 35 #include "util/session.h" 34 36 #include "util/symbol.h" 37 + #include "util/synthetic-events.h" 35 38 #include "util/top.h" 36 39 #include "util/util.h" 37 40 #include <linux/rbtree.h> ··· 79 76 #include <linux/stringify.h> 80 77 #include <linux/time64.h> 81 78 #include <linux/types.h> 79 + #include <linux/err.h> 82 80 83 81 #include <linux/ctype.h> 84 82 ··· 532 528 prompt_integer(&counter, "Enter details event counter"); 533 529 534 530 if (counter >= top->evlist->core.nr_entries) { 535 - top->sym_evsel = perf_evlist__first(top->evlist); 531 + top->sym_evsel = evlist__first(top->evlist); 536 532 fprintf(stderr, "Sorry, no such event, using %s.\n", perf_evsel__name(top->sym_evsel)); 537 533 sleep(1); 538 534 break; ··· 541 537 if (top->sym_evsel->idx == counter) 542 538 break; 543 539 } else 544 - top->sym_evsel = perf_evlist__first(top->evlist); 540 + top->sym_evsel = evlist__first(top->evlist); 545 541 break; 546 542 case 'f': 547 543 prompt_integer(&top->count_filter, "Enter display event count filter"); ··· 865 861 { 866 862 struct record_opts *opts = &top->record_opts; 867 863 struct evlist *evlist = top->evlist; 868 - struct perf_mmap *md; 864 + struct mmap *md; 869 865 union perf_event *event; 870 866 871 867 md = opts->overwrite ? 
&evlist->overwrite_mmap[idx] : &evlist->mmap[idx]; ··· 905 901 if (overwrite) 906 902 perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING); 907 903 908 - for (i = 0; i < top->evlist->nr_mmaps; i++) 904 + for (i = 0; i < top->evlist->core.nr_mmaps; i++) 909 905 perf_top__mmap_read_idx(top, i); 910 906 911 907 if (overwrite) { ··· 963 959 /* has term for current event */ 964 960 if ((overwrite < 0) && (set >= 0)) { 965 961 /* if it's first event, set overwrite */ 966 - if (evsel == perf_evlist__first(evlist)) 962 + if (evsel == evlist__first(evlist)) 967 963 overwrite = set; 968 964 else 969 965 return -1; ··· 987 983 return 0; 988 984 989 985 /* only fall back when first event fails */ 990 - if (evsel != perf_evlist__first(evlist)) 986 + if (evsel != evlist__first(evlist)) 991 987 return 0; 992 988 993 989 evlist__for_each_entry(evlist, counter) ··· 1044 1040 } 1045 1041 } 1046 1042 1047 - if (perf_evlist__mmap(evlist, opts->mmap_pages) < 0) { 1043 + if (evlist__mmap(evlist, opts->mmap_pages) < 0) { 1048 1044 ui__error("Failed to mmap with %d (%s)\n", 1049 1045 errno, str_error_r(errno, msg, sizeof(msg))); 1050 1046 goto out_err; ··· 1308 1304 } 1309 1305 1310 1306 /* Wait for a minimal set of events before starting the snapshot */ 1311 - perf_evlist__poll(top->evlist, 100); 1307 + evlist__poll(top->evlist, 100); 1312 1308 1313 1309 perf_top__mmap_read(top); 1314 1310 ··· 1318 1314 perf_top__mmap_read(top); 1319 1315 1320 1316 if (opts->overwrite || (hits == top->samples)) 1321 - ret = perf_evlist__poll(top->evlist, 100); 1317 + ret = evlist__poll(top->evlist, 100); 1322 1318 1323 1319 if (resize) { 1324 1320 perf_top__resize(top); ··· 1645 1641 goto out_delete_evlist; 1646 1642 } 1647 1643 1648 - top.sym_evsel = perf_evlist__first(top.evlist); 1644 + top.sym_evsel = evlist__first(top.evlist); 1649 1645 1650 1646 if (!callchain_param.enabled) { 1651 1647 symbol_conf.cumulate_callchain = false; ··· 1675 1671 } 1676 1672 1677 1673 top.session = 
perf_session__new(NULL, false, NULL); 1678 - if (top.session == NULL) { 1679 - status = -1; 1674 + if (IS_ERR(top.session)) { 1675 + status = PTR_ERR(top.session); 1680 1676 goto out_delete_evlist; 1681 1677 } 1682 1678
+13 -9
tools/perf/builtin-trace.c
··· 28 28 #include "util/dso.h" 29 29 #include "util/env.h" 30 30 #include "util/event.h" 31 + #include "util/evsel.h" 32 + #include "util/evsel_fprintf.h" 33 + #include "util/synthetic-events.h" 31 34 #include "util/evlist.h" 32 35 #include "util/evswitch.h" 36 + #include "util/mmap.h" 33 37 #include <subcmd/pager.h> 34 38 #include <subcmd/exec-cmd.h> 35 39 #include "util/machine.h" ··· 2078 2074 EVSEL__PRINT_DSO | 2079 2075 EVSEL__PRINT_UNKNOWN_AS_ADDR; 2080 2076 2081 - return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, trace->output); 2077 + return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, symbol_conf.bt_stop_list, trace->output); 2082 2078 } 2083 2079 2084 2080 static const char *errno_to_name(struct evsel *evsel, int err) ··· 3412 3408 if (trace->dump.map) 3413 3409 bpf_map__fprintf(trace->dump.map, trace->output); 3414 3410 3415 - err = perf_evlist__mmap(evlist, trace->opts.mmap_pages); 3411 + err = evlist__mmap(evlist, trace->opts.mmap_pages); 3416 3412 if (err < 0) 3417 3413 goto out_error_mmap; 3418 3414 ··· 3429 3425 3430 3426 trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 || 3431 3427 evlist->core.threads->nr > 1 || 3432 - perf_evlist__first(evlist)->core.attr.inherit; 3428 + evlist__first(evlist)->core.attr.inherit; 3433 3429 3434 3430 /* 3435 3431 * Now that we already used evsel->core.attr to ask the kernel to setup the ··· 3445 3441 again: 3446 3442 before = trace->nr_events; 3447 3443 3448 - for (i = 0; i < evlist->nr_mmaps; i++) { 3444 + for (i = 0; i < evlist->core.nr_mmaps; i++) { 3449 3445 union perf_event *event; 3450 - struct perf_mmap *md; 3446 + struct mmap *md; 3451 3447 3452 3448 md = &evlist->mmap[i]; 3453 3449 if (perf_mmap__read_init(md) < 0) ··· 3476 3472 if (trace->nr_events == before) { 3477 3473 int timeout = done ? 
100 : -1; 3478 3474 3479 - if (!draining && perf_evlist__poll(evlist, timeout) > 0) { 3480 - if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0) 3475 + if (!draining && evlist__poll(evlist, timeout) > 0) { 3476 + if (evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0) 3481 3477 draining = true; 3482 3478 3483 3479 goto again; ··· 3588 3584 trace->multiple_threads = true; 3589 3585 3590 3586 session = perf_session__new(&data, false, &trace->tool); 3591 - if (session == NULL) 3592 - return -1; 3587 + if (IS_ERR(session)) 3588 + return PTR_ERR(session); 3593 3589 3594 3590 if (trace->opts.target.pid) 3595 3591 symbol_conf.pid_list_str = strdup(trace->opts.target.pid);
+9
tools/perf/jvmti/Build
··· 1 1 jvmti-y += libjvmti.o 2 2 jvmti-y += jvmti_agent.o 3 3 4 + # For strlcpy 5 + jvmti-y += libstring.o 6 + 4 7 CFLAGS_jvmti = -fPIC -DPIC -I$(JDIR)/include -I$(JDIR)/include/linux 5 8 CFLAGS_REMOVE_jvmti = -Wmissing-declarations 6 9 CFLAGS_REMOVE_jvmti += -Wstrict-prototypes 7 10 CFLAGS_REMOVE_jvmti += -Wextra 8 11 CFLAGS_REMOVE_jvmti += -Wwrite-strings 12 + 13 + CFLAGS_libstring.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))" 14 + 15 + $(OUTPUT)jvmti/libstring.o: ../lib/string.c FORCE 16 + $(call rule_mkdir) 17 + $(call if_changed_dep,cc_o_c)
+32 -4
tools/perf/lib/Makefile
··· 59 59 CFLAGS := -g -Wall 60 60 endif 61 61 62 - INCLUDES = -I$(srctree)/tools/perf/lib/include -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(SRCARCH)/include/ -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi -I$(srctree)/tools/include/uapi 62 + INCLUDES = \ 63 + -I$(srctree)/tools/perf/lib/include \ 64 + -I$(srctree)/tools/lib/ \ 65 + -I$(srctree)/tools/include \ 66 + -I$(srctree)/tools/arch/$(SRCARCH)/include/ \ 67 + -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi \ 68 + -I$(srctree)/tools/include/uapi 63 69 64 70 # Append required CFLAGS 65 71 override CFLAGS += $(EXTRA_WARNINGS) ··· 94 88 95 89 LIBPERF_ALL := $(LIBPERF_A) $(OUTPUT)libperf.so* 96 90 91 + LIB_DIR := $(srctree)/tools/lib/api/ 92 + 93 + ifneq ($(OUTPUT),) 94 + ifneq ($(subdir),) 95 + API_PATH=$(OUTPUT)/../lib/api/ 96 + else 97 + API_PATH=$(OUTPUT) 98 + endif 99 + else 100 + API_PATH=$(LIB_DIR) 101 + endif 102 + 103 + LIBAPI = $(API_PATH)libapi.a 104 + 105 + $(LIBAPI): FORCE 106 + $(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) $(OUTPUT)libapi.a 107 + 108 + $(LIBAPI)-clean: 109 + $(call QUIET_CLEAN, libapi) 110 + $(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) clean >/dev/null 111 + 97 112 $(LIBPERF_IN): FORCE 98 113 $(Q)$(MAKE) $(build)=libperf 99 114 100 115 $(LIBPERF_A): $(LIBPERF_IN) 101 116 $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIBPERF_IN) 102 117 103 - $(LIBPERF_SO): $(LIBPERF_IN) 118 + $(LIBPERF_SO): $(LIBPERF_IN) $(LIBAPI) 104 119 $(QUIET_LINK)$(CC) --shared -Wl,-soname,libperf.so \ 105 120 -Wl,--version-script=$(VERSION_SCRIPT) $^ -o $@ 106 121 @ln -sf $(@F) $(OUTPUT)libperf.so ··· 133 106 all: fixdep 134 107 $(Q)$(MAKE) libs 135 108 136 - clean: 109 + clean: $(LIBAPI)-clean 137 110 $(call QUIET_CLEAN, libperf) $(RM) $(LIBPERF_A) \ 138 111 *.o *~ *.a *.so *.so.$(VERSION) *.so.$(LIBPERF_VERSION) .*.d .*.cmd LIBPERF-CFLAGS $(LIBPERF_PC) 139 112 $(Q)$(MAKE) -C tests clean 140 113 141 - tests: 114 + tests: libs 142 115 $(Q)$(MAKE) -C tests 143 116 $(Q)$(MAKE) -C tests run 144 117 ··· 173 146 $(call 
do_install,include/perf/threadmap.h,$(prefix)/include/perf,644); \ 174 147 $(call do_install,include/perf/evlist.h,$(prefix)/include/perf,644); \ 175 148 $(call do_install,include/perf/evsel.h,$(prefix)/include/perf,644); 149 + $(call do_install,include/perf/event.h,$(prefix)/include/perf,644); 176 150 177 151 install_pkgconfig: $(LIBPERF_PC) 178 152 $(call QUIET_INSTALL, $(LIBPERF_PC)) \
+8 -5
tools/perf/lib/core.c
··· 4 4 5 5 #include <stdio.h> 6 6 #include <stdarg.h> 7 + #include <unistd.h> 7 8 #include <perf/core.h> 9 + #include <internal/lib.h> 8 10 #include "internal.h" 9 11 10 12 static int __base_pr(enum libperf_print_level level, const char *format, ··· 16 14 } 17 15 18 16 static libperf_print_fn_t __libperf_pr = __base_pr; 19 - 20 - void libperf_set_print(libperf_print_fn_t fn) 21 - { 22 - __libperf_pr = fn; 23 - } 24 17 25 18 __printf(2, 3) 26 19 void libperf_print(enum libperf_print_level level, const char *format, ...) ··· 28 31 va_start(args, format); 29 32 __libperf_pr(level, format, args); 30 33 va_end(args); 34 + } 35 + 36 + void libperf_init(libperf_print_fn_t fn) 37 + { 38 + page_size = sysconf(_SC_PAGE_SIZE); 39 + __libperf_pr = fn; 31 40 }
+12
tools/perf/lib/cpumap.c
··· 260 260 261 261 return -1; 262 262 } 263 + 264 + int perf_cpu_map__max(struct perf_cpu_map *map) 265 + { 266 + int i, max = -1; 267 + 268 + for (i = 0; i < map->nr; i++) { 269 + if (map->map[i] > max) 270 + max = map->map[i]; 271 + } 272 + 273 + return max; 274 + }
+124
tools/perf/lib/evlist.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 #include <perf/evlist.h> 3 3 #include <perf/evsel.h> 4 + #include <linux/bitops.h> 4 5 #include <linux/list.h> 6 + #include <linux/hash.h> 7 + #include <sys/ioctl.h> 5 8 #include <internal/evlist.h> 6 9 #include <internal/evsel.h> 10 + #include <internal/xyarray.h> 7 11 #include <linux/zalloc.h> 8 12 #include <stdlib.h> 13 + #include <errno.h> 14 + #include <unistd.h> 15 + #include <fcntl.h> 16 + #include <signal.h> 17 + #include <poll.h> 9 18 #include <perf/cpumap.h> 10 19 #include <perf/threadmap.h> 20 + #include <api/fd/array.h> 11 21 12 22 void perf_evlist__init(struct perf_evlist *evlist) 13 23 { 24 + int i; 25 + 26 + for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i) 27 + INIT_HLIST_HEAD(&evlist->heads[i]); 14 28 INIT_LIST_HEAD(&evlist->entries); 15 29 evlist->nr_entries = 0; 16 30 } ··· 170 156 171 157 perf_evlist__for_each_entry(evlist, evsel) 172 158 perf_evsel__disable(evsel); 159 + } 160 + 161 + u64 perf_evlist__read_format(struct perf_evlist *evlist) 162 + { 163 + struct perf_evsel *first = perf_evlist__first(evlist); 164 + 165 + return first->attr.read_format; 166 + } 167 + 168 + #define SID(e, x, y) xyarray__entry(e->sample_id, x, y) 169 + 170 + static void perf_evlist__id_hash(struct perf_evlist *evlist, 171 + struct perf_evsel *evsel, 172 + int cpu, int thread, u64 id) 173 + { 174 + int hash; 175 + struct perf_sample_id *sid = SID(evsel, cpu, thread); 176 + 177 + sid->id = id; 178 + sid->evsel = evsel; 179 + hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS); 180 + hlist_add_head(&sid->node, &evlist->heads[hash]); 181 + } 182 + 183 + void perf_evlist__id_add(struct perf_evlist *evlist, 184 + struct perf_evsel *evsel, 185 + int cpu, int thread, u64 id) 186 + { 187 + perf_evlist__id_hash(evlist, evsel, cpu, thread, id); 188 + evsel->id[evsel->ids++] = id; 189 + } 190 + 191 + int perf_evlist__id_add_fd(struct perf_evlist *evlist, 192 + struct perf_evsel *evsel, 193 + int cpu, int thread, int fd) 194 + { 195 + 
u64 read_data[4] = { 0, }; 196 + int id_idx = 1; /* The first entry is the counter value */ 197 + u64 id; 198 + int ret; 199 + 200 + ret = ioctl(fd, PERF_EVENT_IOC_ID, &id); 201 + if (!ret) 202 + goto add; 203 + 204 + if (errno != ENOTTY) 205 + return -1; 206 + 207 + /* Legacy way to get event id.. All hail to old kernels! */ 208 + 209 + /* 210 + * This way does not work with group format read, so bail 211 + * out in that case. 212 + */ 213 + if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP) 214 + return -1; 215 + 216 + if (!(evsel->attr.read_format & PERF_FORMAT_ID) || 217 + read(fd, &read_data, sizeof(read_data)) == -1) 218 + return -1; 219 + 220 + if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 221 + ++id_idx; 222 + if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 223 + ++id_idx; 224 + 225 + id = read_data[id_idx]; 226 + 227 + add: 228 + perf_evlist__id_add(evlist, evsel, cpu, thread, id); 229 + return 0; 230 + } 231 + 232 + int perf_evlist__alloc_pollfd(struct perf_evlist *evlist) 233 + { 234 + int nr_cpus = perf_cpu_map__nr(evlist->cpus); 235 + int nr_threads = perf_thread_map__nr(evlist->threads); 236 + int nfds = 0; 237 + struct perf_evsel *evsel; 238 + 239 + perf_evlist__for_each_entry(evlist, evsel) { 240 + if (evsel->system_wide) 241 + nfds += nr_cpus; 242 + else 243 + nfds += nr_cpus * nr_threads; 244 + } 245 + 246 + if (fdarray__available_entries(&evlist->pollfd) < nfds && 247 + fdarray__grow(&evlist->pollfd, nfds) < 0) 248 + return -ENOMEM; 249 + 250 + return 0; 251 + } 252 + 253 + int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, 254 + void *ptr, short revent) 255 + { 256 + int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP); 257 + 258 + if (pos >= 0) { 259 + evlist->pollfd.priv[pos].ptr = ptr; 260 + fcntl(fd, F_SETFL, O_NONBLOCK); 261 + } 262 + 263 + return pos; 264 + } 265 + 266 + int perf_evlist__poll(struct perf_evlist *evlist, int timeout) 267 + { 268 + return 
fdarray__poll(&evlist->pollfd, timeout); 173 269 }
+30
tools/perf/lib/evsel.c
··· 230 230 { 231 231 return &evsel->attr; 232 232 } 233 + 234 + int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads) 235 + { 236 + if (ncpus == 0 || nthreads == 0) 237 + return 0; 238 + 239 + if (evsel->system_wide) 240 + nthreads = 1; 241 + 242 + evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id)); 243 + if (evsel->sample_id == NULL) 244 + return -ENOMEM; 245 + 246 + evsel->id = zalloc(ncpus * nthreads * sizeof(u64)); 247 + if (evsel->id == NULL) { 248 + xyarray__delete(evsel->sample_id); 249 + evsel->sample_id = NULL; 250 + return -ENOMEM; 251 + } 252 + 253 + return 0; 254 + } 255 + 256 + void perf_evsel__free_id(struct perf_evsel *evsel) 257 + { 258 + xyarray__delete(evsel->sample_id); 259 + evsel->sample_id = NULL; 260 + zfree(&evsel->id); 261 + evsel->ids = 0; 262 + }
+33
tools/perf/lib/include/internal/evlist.h
··· 3 3 #define __LIBPERF_INTERNAL_EVLIST_H 4 4 5 5 #include <linux/list.h> 6 + #include <api/fd/array.h> 7 + #include <internal/evsel.h> 8 + 9 + #define PERF_EVLIST__HLIST_BITS 8 10 + #define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS) 6 11 7 12 struct perf_cpu_map; 8 13 struct perf_thread_map; ··· 18 13 bool has_user_cpus; 19 14 struct perf_cpu_map *cpus; 20 15 struct perf_thread_map *threads; 16 + int nr_mmaps; 17 + size_t mmap_len; 18 + struct fdarray pollfd; 19 + struct hlist_head heads[PERF_EVLIST__HLIST_SIZE]; 21 20 }; 21 + 22 + int perf_evlist__alloc_pollfd(struct perf_evlist *evlist); 23 + int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, 24 + void *ptr, short revent); 22 25 23 26 /** 24 27 * __perf_evlist__for_each_entry - iterate thru all the evsels ··· 59 46 */ 60 47 #define perf_evlist__for_each_entry_reverse(evlist, evsel) \ 61 48 __perf_evlist__for_each_entry_reverse(&(evlist)->entries, evsel) 49 + 50 + static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist) 51 + { 52 + return list_entry(evlist->entries.next, struct perf_evsel, node); 53 + } 54 + 55 + static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist) 56 + { 57 + return list_entry(evlist->entries.prev, struct perf_evsel, node); 58 + } 59 + 60 + u64 perf_evlist__read_format(struct perf_evlist *evlist); 61 + 62 + void perf_evlist__id_add(struct perf_evlist *evlist, 63 + struct perf_evsel *evsel, 64 + int cpu, int thread, u64 id); 65 + 66 + int perf_evlist__id_add_fd(struct perf_evlist *evlist, 67 + struct perf_evsel *evsel, 68 + int cpu, int thread, int fd); 62 69 63 70 #endif /* __LIBPERF_INTERNAL_EVLIST_H */
+33
tools/perf/lib/include/internal/evsel.h
··· 4 4 5 5 #include <linux/types.h> 6 6 #include <linux/perf_event.h> 7 + #include <stdbool.h> 8 + #include <sys/types.h> 7 9 8 10 struct perf_cpu_map; 9 11 struct perf_thread_map; 12 + struct xyarray; 13 + 14 + /* 15 + * Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there are 16 + * more than one entry in the evlist. 17 + */ 18 + struct perf_sample_id { 19 + struct hlist_node node; 20 + u64 id; 21 + struct perf_evsel *evsel; 22 + /* 23 + * 'idx' will be used for AUX area sampling. A sample will have AUX area 24 + * data that will be queued for decoding, where there are separate 25 + * queues for each CPU (per-cpu tracing) or task (per-thread tracing). 26 + * The sample ID can be used to lookup 'idx' which is effectively the 27 + * queue number. 28 + */ 29 + int idx; 30 + int cpu; 31 + pid_t tid; 32 + 33 + /* Holds total ID period value for PERF_SAMPLE_READ processing. */ 34 + u64 period; 35 + }; 10 36 11 37 struct perf_evsel { 12 38 struct list_head node; ··· 41 15 struct perf_cpu_map *own_cpus; 42 16 struct perf_thread_map *threads; 43 17 struct xyarray *fd; 18 + struct xyarray *sample_id; 19 + u64 *id; 20 + u32 ids; 44 21 45 22 /* parse modifier helper */ 46 23 int nr_members; 24 + bool system_wide; 47 25 }; 48 26 49 27 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads); ··· 55 25 void perf_evsel__free_fd(struct perf_evsel *evsel); 56 26 int perf_evsel__read_size(struct perf_evsel *evsel); 57 27 int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter); 28 + 29 + int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads); 30 + void perf_evsel__free_id(struct perf_evsel *evsel); 58 31 59 32 #endif /* __LIBPERF_INTERNAL_EVSEL_H */
+3 -1
tools/perf/lib/include/internal/lib.h
··· 2 2 #ifndef __LIBPERF_INTERNAL_LIB_H 3 3 #define __LIBPERF_INTERNAL_LIB_H 4 4 5 - #include <unistd.h> 5 + #include <sys/types.h> 6 + 7 + extern unsigned int page_size; 6 8 7 9 ssize_t readn(int fd, void *buf, size_t n); 8 10 ssize_t writen(int fd, const void *buf, size_t n);
+32
tools/perf/lib/include/internal/mmap.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __LIBPERF_INTERNAL_MMAP_H 3 + #define __LIBPERF_INTERNAL_MMAP_H 4 + 5 + #include <linux/compiler.h> 6 + #include <linux/refcount.h> 7 + #include <linux/types.h> 8 + #include <stdbool.h> 9 + 10 + /* perf sample has 16 bits size limit */ 11 + #define PERF_SAMPLE_MAX_SIZE (1 << 16) 12 + 13 + /** 14 + * struct perf_mmap - perf's ring buffer mmap details 15 + * 16 + * @refcnt - e.g. code using PERF_EVENT_IOC_SET_OUTPUT to share this 17 + */ 18 + struct perf_mmap { 19 + void *base; 20 + int mask; 21 + int fd; 22 + int cpu; 23 + refcount_t refcnt; 24 + u64 prev; 25 + u64 start; 26 + u64 end; 27 + bool overwrite; 28 + u64 flush; 29 + char event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8); 30 + }; 31 + 32 + #endif /* __LIBPERF_INTERNAL_MMAP_H */
+1 -1
tools/perf/lib/include/perf/core.h
··· 17 17 typedef int (*libperf_print_fn_t)(enum libperf_print_level level, 18 18 const char *, va_list ap); 19 19 20 - LIBPERF_API void libperf_set_print(libperf_print_fn_t fn); 20 + LIBPERF_API void libperf_init(libperf_print_fn_t fn); 21 21 22 22 #endif /* __LIBPERF_CORE_H */
+1
tools/perf/lib/include/perf/cpumap.h
··· 16 16 LIBPERF_API int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx); 17 17 LIBPERF_API int perf_cpu_map__nr(const struct perf_cpu_map *cpus); 18 18 LIBPERF_API bool perf_cpu_map__empty(const struct perf_cpu_map *map); 19 + LIBPERF_API int perf_cpu_map__max(struct perf_cpu_map *map); 19 20 20 21 #define perf_cpu_map__for_each_cpu(cpu, idx, cpus) \ 21 22 for ((idx) = 0, (cpu) = perf_cpu_map__cpu(cpus, idx); \
+1
tools/perf/lib/include/perf/evlist.h
··· 31 31 LIBPERF_API void perf_evlist__set_maps(struct perf_evlist *evlist, 32 32 struct perf_cpu_map *cpus, 33 33 struct perf_thread_map *threads); 34 + LIBPERF_API int perf_evlist__poll(struct perf_evlist *evlist, int timeout); 34 35 35 36 #endif /* __LIBPERF_EVLIST_H */
+2
tools/perf/lib/lib.c
··· 5 5 #include <linux/kernel.h> 6 6 #include <internal/lib.h> 7 7 8 + unsigned int page_size; 9 + 8 10 static ssize_t ion(bool is_read, int fd, void *buf, size_t n) 9 11 { 10 12 void *buf_start = buf;
+3 -1
tools/perf/lib/libperf.map
··· 1 1 LIBPERF_0.0.1 { 2 2 global: 3 - libperf_set_print; 3 + libperf_init; 4 4 perf_cpu_map__dummy_new; 5 5 perf_cpu_map__get; 6 6 perf_cpu_map__put; ··· 9 9 perf_cpu_map__nr; 10 10 perf_cpu_map__cpu; 11 11 perf_cpu_map__empty; 12 + perf_cpu_map__max; 12 13 perf_thread_map__new_dummy; 13 14 perf_thread_map__set_pid; 14 15 perf_thread_map__comm; ··· 39 38 perf_evlist__remove; 40 39 perf_evlist__next; 41 40 perf_evlist__set_maps; 41 + perf_evlist__poll; 42 42 local: 43 43 *; 44 44 };
+10
tools/perf/lib/tests/test-cpumap.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 + #include <stdarg.h> 3 + #include <stdio.h> 2 4 #include <perf/cpumap.h> 3 5 #include <internal/tests.h> 6 + 7 + static int libperf_print(enum libperf_print_level level, 8 + const char *fmt, va_list ap) 9 + { 10 + return vfprintf(stderr, fmt, ap); 11 + } 4 12 5 13 int main(int argc, char **argv) 6 14 { 7 15 struct perf_cpu_map *cpus; 8 16 9 17 __T_START; 18 + 19 + libperf_init(libperf_print); 10 20 11 21 cpus = perf_cpu_map__dummy_new(); 12 22 if (!cpus)
+10
tools/perf/lib/tests/test-evlist.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 + #include <stdio.h> 3 + #include <stdarg.h> 2 4 #include <linux/perf_event.h> 3 5 #include <perf/cpumap.h> 4 6 #include <perf/threadmap.h> 5 7 #include <perf/evlist.h> 6 8 #include <perf/evsel.h> 7 9 #include <internal/tests.h> 10 + 11 + static int libperf_print(enum libperf_print_level level, 12 + const char *fmt, va_list ap) 13 + { 14 + return vfprintf(stderr, fmt, ap); 15 + } 8 16 9 17 static int test_stat_cpu(void) 10 18 { ··· 184 176 int main(int argc, char **argv) 185 177 { 186 178 __T_START; 179 + 180 + libperf_init(libperf_print); 187 181 188 182 test_stat_cpu(); 189 183 test_stat_thread();
+10
tools/perf/lib/tests/test-evsel.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 + #include <stdarg.h> 3 + #include <stdio.h> 2 4 #include <linux/perf_event.h> 3 5 #include <perf/cpumap.h> 4 6 #include <perf/threadmap.h> 5 7 #include <perf/evsel.h> 6 8 #include <internal/tests.h> 9 + 10 + static int libperf_print(enum libperf_print_level level, 11 + const char *fmt, va_list ap) 12 + { 13 + return vfprintf(stderr, fmt, ap); 14 + } 7 15 8 16 static int test_stat_cpu(void) 9 17 { ··· 123 115 int main(int argc, char **argv) 124 116 { 125 117 __T_START; 118 + 119 + libperf_init(libperf_print); 126 120 127 121 test_stat_cpu(); 128 122 test_stat_thread();
+10
tools/perf/lib/tests/test-threadmap.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 + #include <stdarg.h> 3 + #include <stdio.h> 2 4 #include <perf/threadmap.h> 3 5 #include <internal/tests.h> 6 + 7 + static int libperf_print(enum libperf_print_level level, 8 + const char *fmt, va_list ap) 9 + { 10 + return vfprintf(stderr, fmt, ap); 11 + } 4 12 5 13 int main(int argc, char **argv) 6 14 { 7 15 struct perf_thread_map *threads; 8 16 9 17 __T_START; 18 + 19 + libperf_init(libperf_print); 10 20 11 21 threads = perf_thread_map__new_dummy(); 12 22 if (!threads)
+10 -3
tools/perf/perf.c
··· 12 12 #include "util/build-id.h" 13 13 #include "util/cache.h" 14 14 #include "util/env.h" 15 + #include <internal/lib.h> // page_size 15 16 #include <subcmd/exec-cmd.h> 16 17 #include "util/config.h" 17 18 #include <subcmd/run-command.h> ··· 21 20 #include "util/bpf-loader.h" 22 21 #include "util/debug.h" 23 22 #include "util/event.h" 24 - #include "util/util.h" 23 + #include "util/util.h" // usage() 25 24 #include "ui/ui.h" 26 25 #include "perf-sys.h" 27 26 #include <api/fs/fs.h> 28 27 #include <api/fs/tracing_path.h> 28 + #include <perf/core.h> 29 29 #include <errno.h> 30 30 #include <pthread.h> 31 31 #include <signal.h> ··· 430 428 pthread_sigmask(SIG_UNBLOCK, &set, NULL); 431 429 } 432 430 431 + static int libperf_print(enum libperf_print_level level, 432 + const char *fmt, va_list ap) 433 + { 434 + return eprintf(level, verbose, fmt, ap); 435 + } 436 + 433 437 int main(int argc, const char **argv) 434 438 { 435 439 int err; ··· 446 438 exec_cmd_init("perf", PREFIX, PERF_EXEC_PATH, EXEC_PATH_ENVIRONMENT); 447 439 pager_init(PERF_PAGER_ENVIRONMENT); 448 440 449 - /* The page_size is placed in util object. */ 450 - page_size = sysconf(_SC_PAGE_SIZE); 441 + libperf_init(libperf_print); 451 442 452 443 cmd = extract_argv0_path(argv[0]); 453 444 if (!cmd)
+11 -11
tools/perf/pmu-events/README
··· 30 30 All the topic JSON files for a CPU model/family should be in a separate 31 31 sub directory. Thus for the Silvermont X86 CPU: 32 32 33 - $ ls tools/perf/pmu-events/arch/x86/Silvermont_core 34 - Cache.json Memory.json Virtual-Memory.json 35 - Frontend.json Pipeline.json 33 + $ ls tools/perf/pmu-events/arch/x86/silvermont 34 + cache.json memory.json virtual-memory.json 35 + frontend.json pipeline.json 36 36 37 37 The JSONs folder for a CPU model/family may be placed in the root arch 38 38 folder, or may be placed in a vendor sub-folder under the arch folder ··· 94 94 95 95 where 'pm_1plus_ppc_cmpl' is a Power8 PMU event. 96 96 97 - However some errors in processing may cause the perf build to fail. 97 + However some errors in processing may cause the alias build to fail. 98 98 99 99 Mapfile format 100 100 =============== ··· 119 119 120 120 Header line 121 121 The header line is the first line in the file, which is 122 - always _IGNORED_. It can empty. 122 + always _IGNORED_. It can be empty. 123 123 124 124 CPUID: 125 125 CPUID is an arch-specific char string, that can be used ··· 138 138 files, relative to the directory containing the mapfile.csv 139 139 140 140 Type: 141 - indicates whether the events or "core" or "uncore" events. 141 + indicates whether the events are "core" or "uncore" events. 142 142 143 143 144 144 Eg: 145 145 146 - $ grep Silvermont tools/perf/pmu-events/arch/x86/mapfile.csv 147 - GenuineIntel-6-37,V13,Silvermont_core,core 148 - GenuineIntel-6-4D,V13,Silvermont_core,core 149 - GenuineIntel-6-4C,V13,Silvermont_core,core 146 + $ grep silvermont tools/perf/pmu-events/arch/x86/mapfile.csv 147 + GenuineIntel-6-37,v13,silvermont,core 148 + GenuineIntel-6-4D,v13,silvermont,core 149 + GenuineIntel-6-4C,v13,silvermont,core 150 150 151 151 i.e the three CPU models use the JSON files (i.e PMU events) listed 152 - in the directory 'tools/perf/pmu-events/arch/x86/Silvermont_core'. 
152 + in the directory 'tools/perf/pmu-events/arch/x86/silvermont'.
+14
tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/branch.json
··· 1 + [ 2 + { 3 + "PublicDescription": "Mispredicted or not predicted branch speculatively executed. This event counts any predictable branch instruction which is mispredicted either due to dynamic misprediction or because the MMU is off and the branches are statically predicted not taken.", 4 + "EventCode": "0x10", 5 + "EventName": "BR_MIS_PRED", 6 + "BriefDescription": "Mispredicted or not predicted branch speculatively executed." 7 + }, 8 + { 9 + "PublicDescription": "Predictable branch speculatively executed. This event counts all predictable branches.", 10 + "EventCode": "0x12", 11 + "EventName": "BR_PRED", 12 + "BriefDescription": "Predictable branch speculatively executed." 13 + } 14 + ]
+24
tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/bus.json
··· 1 + [ 2 + { 3 + "EventCode": "0x11", 4 + "EventName": "CPU_CYCLES", 5 + "BriefDescription": "The number of core clock cycles." 6 + }, 7 + { 8 + "PublicDescription": "Bus access. This event counts for every beat of data transferred over the data channels between the core and the SCU. If both read and write data beats are transferred on a given cycle, this event is counted twice on that cycle. This event counts the sum of BUS_ACCESS_RD and BUS_ACCESS_WR.", 9 + "EventCode": "0x19", 10 + "EventName": "BUS_ACCESS", 11 + "BriefDescription": "Bus access." 12 + }, 13 + { 14 + "EventCode": "0x1D", 15 + "EventName": "BUS_CYCLES", 16 + "BriefDescription": "Bus cycles. This event duplicates CPU_CYCLES." 17 + }, 18 + { 19 + "ArchStdEvent": "BUS_ACCESS_RD" 20 + }, 21 + { 22 + "ArchStdEvent": "BUS_ACCESS_WR" 23 + } 24 + ]
+207
tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/cache.json
··· 1 + [ 2 + { 3 + "PublicDescription": "L1 instruction cache refill. This event counts any instruction fetch which misses in the cache.", 4 + "EventCode": "0x01", 5 + "EventName": "L1I_CACHE_REFILL", 6 + "BriefDescription": "L1 instruction cache refill" 7 + }, 8 + { 9 + "PublicDescription": "L1 instruction TLB refill. This event counts any refill of the instruction L1 TLB from the L2 TLB. This includes refills that result in a translation fault.", 10 + "EventCode": "0x02", 11 + "EventName": "L1I_TLB_REFILL", 12 + "BriefDescription": "L1 instruction TLB refill" 13 + }, 14 + { 15 + "PublicDescription": "L1 data cache refill. This event counts any load or store operation or page table walk access which causes data to be read from outside the L1, including accesses which do not allocate into L1.", 16 + "EventCode": "0x03", 17 + "EventName": "L1D_CACHE_REFILL", 18 + "BriefDescription": "L1 data cache refill" 19 + }, 20 + { 21 + "PublicDescription": "L1 data cache access. This event counts any load or store operation or page table walk access which looks up in the L1 data cache. In particular, any access which could count the L1D_CACHE_REFILL event causes this event to count.", 22 + "EventCode": "0x04", 23 + "EventName": "L1D_CACHE", 24 + "BriefDescription": "L1 data cache access" 25 + }, 26 + { 27 + "PublicDescription": "L1 data TLB refill. This event counts any refill of the data L1 TLB from the L2 TLB. This includes refills that result in a translation fault.", 28 + "EventCode": "0x05", 29 + "EventName": "L1D_TLB_REFILL", 30 + "BriefDescription": "L1 data TLB refill" 31 + }, 32 + { 33 + "PublicDescription": "Level 1 instruction cache access or Level 0 Macro-op cache access. This event counts any instruction fetch which accesses the L1 instruction cache or L0 Macro-op cache.", 34 + "EventCode": "0x14", 35 + "EventName": "L1I_CACHE", 36 + "BriefDescription": "L1 instruction cache access" 37 + }, 38 + { 39 + "PublicDescription": "L1 data cache Write-Back. 
This event counts any write-back of data from the L1 data cache to L2 or L3. This counts both victim line evictions and snoops, including cache maintenance operations.", 40 + "EventCode": "0x15", 41 + "EventName": "L1D_CACHE_WB", 42 + "BriefDescription": "L1 data cache Write-Back" 43 + }, 44 + { 45 + "PublicDescription": "L2 data cache access. This event counts any transaction from L1 which looks up in the L2 cache, and any write-back from the L1 to the L2. Snoops from outside the core and cache maintenance operations are not counted.", 46 + "EventCode": "0x16", 47 + "EventName": "L2D_CACHE", 48 + "BriefDescription": "L2 data cache access" 49 + }, 50 + { 51 + "PublicDescription": "L2 data cache refill. This event counts any cacheable transaction from L1 which causes data to be read from outside the core. L2 refills caused by stashes into L2 should not be counted", 52 + "EventCode": "0x17", 53 + "EventName": "L2D_CACHE_REFILL", 54 + "BriefDescription": "L2 data cache refill" 55 + }, 56 + { 57 + "PublicDescription": "L2 data cache write-back. This event counts any write-back of data from the L2 cache to outside the core. This includes snoops to the L2 which return data, regardless of whether they cause an invalidation. Invalidations from the L2 which do not write data outside of the core and snoops which return data from the L1 are not counted", 58 + "EventCode": "0x18", 59 + "EventName": "L2D_CACHE_WB", 60 + "BriefDescription": "L2 data cache write-back" 61 + }, 62 + { 63 + "PublicDescription": "L2 data cache allocation without refill. This event counts any full cache line write into the L2 cache which does not cause a linefill, including write-backs from L1 to L2 and full-line writes which do not allocate into L1.", 64 + "EventCode": "0x20", 65 + "EventName": "L2D_CACHE_ALLOCATE", 66 + "BriefDescription": "L2 data cache allocation without refill" 67 + }, 68 + { 69 + "PublicDescription": "Level 1 data TLB access. 
This event counts any load or store operation which accesses the data L1 TLB. If both a load and a store are executed on a cycle, this event counts twice. This event counts regardless of whether the MMU is enabled.", 70 + "EventCode": "0x25", 71 + "EventName": "L1D_TLB", 72 + "BriefDescription": "Level 1 data TLB access." 73 + }, 74 + { 75 + "PublicDescription": "Level 1 instruction TLB access. This event counts any instruction fetch which accesses the instruction L1 TLB.This event counts regardless of whether the MMU is enabled.", 76 + "EventCode": "0x26", 77 + "EventName": "L1I_TLB", 78 + "BriefDescription": "Level 1 instruction TLB access" 79 + }, 80 + { 81 + "PublicDescription": "This event counts any full cache line write into the L3 cache which does not cause a linefill, including write-backs from L2 to L3 and full-line writes which do not allocate into L2", 82 + "EventCode": "0x29", 83 + "EventName": "L3D_CACHE_ALLOCATE", 84 + "BriefDescription": "Allocation without refill" 85 + }, 86 + { 87 + "PublicDescription": "Attributable Level 3 unified cache refill. This event counts for any cacheable read transaction returning datafrom the SCU for which the data source was outside the cluster. Transactions such as ReadUnique are counted here as 'read' transactions, even though they can be generated by store instructions.", 88 + "EventCode": "0x2A", 89 + "EventName": "L3D_CACHE_REFILL", 90 + "BriefDescription": "Attributable Level 3 unified cache refill." 91 + }, 92 + { 93 + "PublicDescription": "Attributable Level 3 unified cache access. This event counts for any cacheable read transaction returning datafrom the SCU, or for any cacheable write to the SCU.", 94 + "EventCode": "0x2B", 95 + "EventName": "L3D_CACHE", 96 + "BriefDescription": "Attributable Level 3 unified cache access." 97 + }, 98 + { 99 + "PublicDescription": "Attributable L2 data or unified TLB refill. 
This event counts on anyrefill of the L2 TLB, caused by either an instruction or data access.This event does not count if the MMU is disabled.", 100 + "EventCode": "0x2D", 101 + "EventName": "L2D_TLB_REFILL", 102 + "BriefDescription": "Attributable L2 data or unified TLB refill" 103 + }, 104 + { 105 + "PublicDescription": "Attributable L2 data or unified TLB access. This event counts on any access to the L2 TLB (caused by a refill of any of the L1 TLBs). This event does not count if the MMU is disabled.", 106 + "EventCode": "0x2F", 107 + "EventName": "L2D_TLB", 108 + "BriefDescription": "Attributable L2 data or unified TLB access" 109 + }, 110 + { 111 + "PublicDescription": "Access to data TLB that caused a page table walk. This event counts on any data access which causes L2D_TLB_REFILL to count.", 112 + "EventCode": "0x34", 113 + "EventName": "DTLB_WALK", 114 + "BriefDescription": "Access to data TLB that caused a page table walk." 115 + }, 116 + { 117 + "PublicDescription": "Access to instruction TLB that caused a page table walk. This event counts on any instruction access which causes L2D_TLB_REFILL to count.", 118 + "EventCode": "0x35", 119 + "EventName": "ITLB_WALK", 120 + "BriefDescription": "Access to instruction TLB that caused a page table walk." 
121 + }, 122 + { 123 + "EventCode": "0x36", 124 + "EventName": "LL_CACHE_RD", 125 + "BriefDescription": "Last level cache access, read" 126 + }, 127 + { 128 + "EventCode": "0x37", 129 + "EventName": "LL_CACHE_MISS_RD", 130 + "BriefDescription": "Last level cache miss, read" 131 + }, 132 + { 133 + "ArchStdEvent": "L1D_CACHE_INVAL" 134 + }, 135 + { 136 + "ArchStdEvent": "L1D_CACHE_RD" 137 + }, 138 + { 139 + "ArchStdEvent": "L1D_CACHE_REFILL_INNER" 140 + }, 141 + { 142 + "ArchStdEvent": "L1D_CACHE_REFILL_OUTER" 143 + }, 144 + { 145 + "ArchStdEvent": "L1D_CACHE_REFILL_RD" 146 + }, 147 + { 148 + "ArchStdEvent": "L1D_CACHE_REFILL_WR" 149 + }, 150 + { 151 + "ArchStdEvent": "L1D_CACHE_WB_CLEAN" 152 + }, 153 + { 154 + "ArchStdEvent": "L1D_CACHE_WB_VICTIM" 155 + }, 156 + { 157 + "ArchStdEvent": "L1D_CACHE_WR" 158 + }, 159 + { 160 + "ArchStdEvent": "L1D_TLB_RD" 161 + }, 162 + { 163 + "ArchStdEvent": "L1D_TLB_REFILL_RD" 164 + }, 165 + { 166 + "ArchStdEvent": "L1D_TLB_REFILL_WR" 167 + }, 168 + { 169 + "ArchStdEvent": "L1D_TLB_WR" 170 + }, 171 + { 172 + "ArchStdEvent": "L2D_CACHE_INVAL" 173 + }, 174 + { 175 + "ArchStdEvent": "L2D_CACHE_RD" 176 + }, 177 + { 178 + "ArchStdEvent": "L2D_CACHE_REFILL_RD" 179 + }, 180 + { 181 + "ArchStdEvent": "L2D_CACHE_REFILL_WR" 182 + }, 183 + { 184 + "ArchStdEvent": "L2D_CACHE_WB_CLEAN" 185 + }, 186 + { 187 + "ArchStdEvent": "L2D_CACHE_WB_VICTIM" 188 + }, 189 + { 190 + "ArchStdEvent": "L2D_CACHE_WR" 191 + }, 192 + { 193 + "ArchStdEvent": "L2D_TLB_RD" 194 + }, 195 + { 196 + "ArchStdEvent": "L2D_TLB_REFILL_RD" 197 + }, 198 + { 199 + "ArchStdEvent": "L2D_TLB_REFILL_WR" 200 + }, 201 + { 202 + "ArchStdEvent": "L2D_TLB_WR" 203 + }, 204 + { 205 + "ArchStdEvent": "L3D_CACHE_RD" 206 + } 207 + ]
+52
tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/exception.json
··· 1 + [ 2 + { 3 + "EventCode": "0x09", 4 + "EventName": "EXC_TAKEN", 5 + "BriefDescription": "Exception taken." 6 + }, 7 + { 8 + "PublicDescription": "Local memory error. This event counts any correctable or uncorrectable memory error (ECC or parity) in the protected core RAMs", 9 + "EventCode": "0x1A", 10 + "EventName": "MEMORY_ERROR", 11 + "BriefDescription": "Local memory error." 12 + }, 13 + { 14 + "ArchStdEvent": "EXC_DABORT" 15 + }, 16 + { 17 + "ArchStdEvent": "EXC_FIQ" 18 + }, 19 + { 20 + "ArchStdEvent": "EXC_HVC" 21 + }, 22 + { 23 + "ArchStdEvent": "EXC_IRQ" 24 + }, 25 + { 26 + "ArchStdEvent": "EXC_PABORT" 27 + }, 28 + { 29 + "ArchStdEvent": "EXC_SMC" 30 + }, 31 + { 32 + "ArchStdEvent": "EXC_SVC" 33 + }, 34 + { 35 + "ArchStdEvent": "EXC_TRAP_DABORT" 36 + }, 37 + { 38 + "ArchStdEvent": "EXC_TRAP_FIQ" 39 + }, 40 + { 41 + "ArchStdEvent": "EXC_TRAP_IRQ" 42 + }, 43 + { 44 + "ArchStdEvent": "EXC_TRAP_OTHER" 45 + }, 46 + { 47 + "ArchStdEvent": "EXC_TRAP_PABORT" 48 + }, 49 + { 50 + "ArchStdEvent": "EXC_UNDEF" 51 + } 52 + ]
+108
tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/instruction.json
··· 1 + [ 2 + { 3 + "PublicDescription": "Software increment. Instruction architecturally executed (condition code check pass).", 4 + "EventCode": "0x00", 5 + "EventName": "SW_INCR", 6 + "BriefDescription": "Software increment." 7 + }, 8 + { 9 + "PublicDescription": "Instruction architecturally executed. This event counts all retired instructions, including those that fail their condition check.", 10 + "EventCode": "0x08", 11 + "EventName": "INST_RETIRED", 12 + "BriefDescription": "Instruction architecturally executed." 13 + }, 14 + { 15 + "EventCode": "0x0A", 16 + "EventName": "EXC_RETURN", 17 + "BriefDescription": "Instruction architecturally executed, condition code check pass, exception return." 18 + }, 19 + { 20 + "PublicDescription": "Instruction architecturally executed, condition code check pass, write to CONTEXTIDR. This event only counts writes to CONTEXTIDR in AArch32 state, and via the CONTEXTIDR_EL1 mnemonic in AArch64 state.", 21 + "EventCode": "0x0B", 22 + "EventName": "CID_WRITE_RETIRED", 23 + "BriefDescription": "Instruction architecturally executed, condition code check pass, write to CONTEXTIDR." 24 + }, 25 + { 26 + "EventCode": "0x1B", 27 + "EventName": "INST_SPEC", 28 + "BriefDescription": "Operation speculatively executed" 29 + }, 30 + { 31 + "PublicDescription": "Instruction architecturally executed, condition code check pass, write to TTBR. This event only counts writes to TTBR0/TTBR1 in AArch32 state and TTBR0_EL1/TTBR1_EL1 in AArch64 state.", 32 + "EventCode": "0x1C", 33 + "EventName": "TTBR_WRITE_RETIRED", 34 + "BriefDescription": "Instruction architecturally executed, condition code check pass, write to TTBR" 35 + }, 36 + { 37 + "PublicDescription": "Instruction architecturally executed, branch. This event counts all branches, taken or not. This excludes exception entries, debug entries and CCFAIL branches.", 38 + "EventCode": "0x21", 39 + "EventName": "BR_RETIRED", 40 + "BriefDescription": "Instruction architecturally executed, branch." 
41 + }, 42 + { 43 + "PublicDescription": "Instruction architecturally executed, mispredicted branch. This event counts any branch counted by BR_RETIRED which is not correctly predicted and causes a pipeline flush.", 44 + "EventCode": "0x22", 45 + "EventName": "BR_MIS_PRED_RETIRED", 46 + "BriefDescription": "Instruction architecturally executed, mispredicted branch." 47 + }, 48 + { 49 + "ArchStdEvent": "ASE_SPEC" 50 + }, 51 + { 52 + "ArchStdEvent": "BR_IMMED_SPEC" 53 + }, 54 + { 55 + "ArchStdEvent": "BR_INDIRECT_SPEC" 56 + }, 57 + { 58 + "ArchStdEvent": "BR_RETURN_SPEC" 59 + }, 60 + { 61 + "ArchStdEvent": "CRYPTO_SPEC" 62 + }, 63 + { 64 + "ArchStdEvent": "DMB_SPEC" 65 + }, 66 + { 67 + "ArchStdEvent": "DP_SPEC" 68 + }, 69 + { 70 + "ArchStdEvent": "DSB_SPEC" 71 + }, 72 + { 73 + "ArchStdEvent": "ISB_SPEC" 74 + }, 75 + { 76 + "ArchStdEvent": "LDREX_SPEC" 77 + }, 78 + { 79 + "ArchStdEvent": "LDST_SPEC" 80 + }, 81 + { 82 + "ArchStdEvent": "LD_SPEC" 83 + }, 84 + { 85 + "ArchStdEvent": "PC_WRITE_SPEC" 86 + }, 87 + { 88 + "ArchStdEvent": "RC_LD_SPEC" 89 + }, 90 + { 91 + "ArchStdEvent": "RC_ST_SPEC" 92 + }, 93 + { 94 + "ArchStdEvent": "STREX_FAIL_SPEC" 95 + }, 96 + { 97 + "ArchStdEvent": "STREX_PASS_SPEC" 98 + }, 99 + { 100 + "ArchStdEvent": "STREX_SPEC" 101 + }, 102 + { 103 + "ArchStdEvent": "ST_SPEC" 104 + }, 105 + { 106 + "ArchStdEvent": "VFP_SPEC" 107 + } 108 + ]
+23
tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/memory.json
··· 1 + [ 2 + { 3 + "PublicDescription": "Data memory access. This event counts memory accesses due to load or store instructions. This event counts the sum of MEM_ACCESS_RD and MEM_ACCESS_WR.", 4 + "EventCode": "0x13", 5 + "EventName": "MEM_ACCESS", 6 + "BriefDescription": "Data memory access" 7 + }, 8 + { 9 + "ArchStdEvent": "MEM_ACCESS_RD" 10 + }, 11 + { 12 + "ArchStdEvent": "MEM_ACCESS_WR" 13 + }, 14 + { 15 + "ArchStdEvent": "UNALIGNED_LD_SPEC" 16 + }, 17 + { 18 + "ArchStdEvent": "UNALIGNED_ST_SPEC" 19 + }, 20 + { 21 + "ArchStdEvent": "UNALIGNED_LDST_SPEC" 22 + } 23 + ]
+7
tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/other.json
··· 1 + [ 2 + { 3 + "EventCode": "0x31", 4 + "EventName": "REMOTE_ACCESS", 5 + "BriefDescription": "Access to another socket in a multi-socket system" 6 + } 7 + ]
+14
tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/pipeline.json
··· 1 + [ 2 + { 3 + "PublicDescription": "No operation issued because of the frontend. The counter counts on any cycle when there are no fetched instructions available to dispatch.", 4 + "EventCode": "0x23", 5 + "EventName": "STALL_FRONTEND", 6 + "BriefDescription": "No operation issued because of the frontend." 7 + }, 8 + { 9 + "PublicDescription": "No operation issued because of the backend. The counter counts on any cycle fetched instructions are not dispatched due to resource constraints.", 10 + "EventCode": "0x24", 11 + "EventName": "STALL_BACKEND", 12 + "BriefDescription": "No operation issued because of the backend." 13 + } 14 + ]
+2
tools/perf/pmu-events/arch/arm64/mapfile.csv
··· 16 16 0x00000000420f1000,v1,arm/cortex-a53,core 17 17 0x00000000410fd070,v1,arm/cortex-a57-a72,core 18 18 0x00000000410fd080,v1,arm/cortex-a57-a72,core 19 + 0x00000000410fd0b0,v1,arm/cortex-a76-n1,core 20 + 0x00000000410fd0c0,v1,arm/cortex-a76-n1,core 19 21 0x00000000420f5160,v1,cavium/thunderx2,core 20 22 0x00000000430f0af0,v1,cavium/thunderx2,core 21 23 0x00000000480fd010,v1,hisilicon/hip08,core
-24
tools/perf/pmu-events/arch/powerpc/power8/other.json
··· 1776 1776 "PublicDescription": "" 1777 1777 }, 1778 1778 {, 1779 - "EventCode": "0xa29084", 1780 - "EventName": "PM_L3_P0_GRP_PUMP", 1781 - "BriefDescription": "L3 pf sent with grp scope port 0", 1782 - "PublicDescription": "" 1783 - }, 1784 - {, 1785 - "EventCode": "0x528084", 1786 - "EventName": "PM_L3_P0_LCO_DATA", 1787 - "BriefDescription": "lco sent with data port 0", 1788 - "PublicDescription": "" 1789 - }, 1790 - {, 1791 - "EventCode": "0x518080", 1792 - "EventName": "PM_L3_P0_LCO_NO_DATA", 1793 - "BriefDescription": "dataless l3 lco sent port 0", 1794 - "PublicDescription": "" 1795 - }, 1796 - {, 1797 - "EventCode": "0xa4908c", 1798 - "EventName": "PM_L3_P0_LCO_RTY", 1799 - "BriefDescription": "L3 LCO received retry port 0", 1800 - "PublicDescription": "" 1801 - }, 1802 - {, 1803 1779 "EventCode": "0x84908d", 1804 1780 "EventName": "PM_L3_PF0_ALLOC", 1805 1781 "BriefDescription": "lifetime, sample of PF machine 0 valid",
+42
tools/perf/pmu-events/arch/x86/amdfam17h/cache.json
··· 283 283 "BriefDescription": "Total cycles spent with one or more fill requests in flight from L2.", 284 284 "PublicDescription": "Total cycles spent with one or more fill requests in flight from L2.", 285 285 "UMask": "0x1" 286 + }, 287 + { 288 + "EventName": "l3_request_g1.caching_l3_cache_accesses", 289 + "EventCode": "0x01", 290 + "BriefDescription": "Caching: L3 cache accesses", 291 + "UMask": "0x80", 292 + "Unit": "L3PMC" 293 + }, 294 + { 295 + "EventName": "l3_lookup_state.all_l3_req_typs", 296 + "EventCode": "0x04", 297 + "BriefDescription": "All L3 Request Types", 298 + "UMask": "0xff", 299 + "Unit": "L3PMC" 300 + }, 301 + { 302 + "EventName": "l3_comb_clstr_state.other_l3_miss_typs", 303 + "EventCode": "0x06", 304 + "BriefDescription": "Other L3 Miss Request Types", 305 + "UMask": "0xfe", 306 + "Unit": "L3PMC" 307 + }, 308 + { 309 + "EventName": "l3_comb_clstr_state.request_miss", 310 + "EventCode": "0x06", 311 + "BriefDescription": "L3 cache misses", 312 + "UMask": "0x01", 313 + "Unit": "L3PMC" 314 + }, 315 + { 316 + "EventName": "xi_sys_fill_latency", 317 + "EventCode": "0x90", 318 + "BriefDescription": "L3 Cache Miss Latency. Total cycles for all transactions divided by 16. Ignores SliceMask and ThreadMask.", 319 + "UMask": "0x00", 320 + "Unit": "L3PMC" 321 + }, 322 + { 323 + "EventName": "xi_ccx_sdp_req1.all_l3_miss_req_typs", 324 + "EventCode": "0x9a", 325 + "BriefDescription": "All L3 Miss Request Types. Ignores SliceMask and ThreadMask.", 326 + "UMask": "0x3f", 327 + "Unit": "L3PMC" 286 328 } 287 329 ]
+1 -1
tools/perf/pmu-events/arch/x86/amdfam17h/core.json
··· 13 13 { 14 14 "EventName": "ex_ret_brn", 15 15 "EventCode": "0xc2", 16 - "BriefDescription": "[Retired Branch Instructions.", 16 + "BriefDescription": "Retired Branch Instructions.", 17 17 "PublicDescription": "The number of branch instructions retired. This includes all types of architectural control flow changes, including exceptions and interrupts." 18 18 }, 19 19 {
+1
tools/perf/pmu-events/jevents.c
··· 239 239 { "hisi_sccl,ddrc", "hisi_sccl,ddrc" }, 240 240 { "hisi_sccl,hha", "hisi_sccl,hha" }, 241 241 { "hisi_sccl,l3c", "hisi_sccl,l3c" }, 242 + { "L3PMC", "amd_l3" }, 242 243 {} 243 244 }; 244 245
+6 -5
tools/perf/tests/backward-ring-buffer.c
··· 10 10 #include "tests.h" 11 11 #include "debug.h" 12 12 #include "parse-events.h" 13 + #include "util/mmap.h" 13 14 #include <errno.h> 14 15 #include <linux/string.h> 15 16 ··· 33 32 { 34 33 int i; 35 34 36 - for (i = 0; i < evlist->nr_mmaps; i++) { 37 - struct perf_mmap *map = &evlist->overwrite_mmap[i]; 35 + for (i = 0; i < evlist->core.nr_mmaps; i++) { 36 + struct mmap *map = &evlist->overwrite_mmap[i]; 38 37 union perf_event *event; 39 38 40 39 perf_mmap__read_init(map); ··· 64 63 int err; 65 64 char sbuf[STRERR_BUFSIZE]; 66 65 67 - err = perf_evlist__mmap(evlist, mmap_pages); 66 + err = evlist__mmap(evlist, mmap_pages); 68 67 if (err < 0) { 69 - pr_debug("perf_evlist__mmap: %s\n", 68 + pr_debug("evlist__mmap: %s\n", 70 69 str_error_r(errno, sbuf, sizeof(sbuf))); 71 70 return TEST_FAIL; 72 71 } ··· 76 75 evlist__disable(evlist); 77 76 78 77 err = count_samples(evlist, sample_count, comm_count); 79 - perf_evlist__munmap(evlist); 78 + evlist__munmap(evlist); 80 79 return err; 81 80 } 82 81
+1 -1
tools/perf/tests/bitmap.c
··· 2 2 #include <linux/compiler.h> 3 3 #include <linux/bitmap.h> 4 4 #include <perf/cpumap.h> 5 + #include <internal/cpumap.h> 5 6 #include "tests.h" 6 - #include "cpumap.h" 7 7 #include "debug.h" 8 8 9 9 #define NBITS 100
+5 -4
tools/perf/tests/bpf.c
··· 19 19 #include "llvm.h" 20 20 #include "debug.h" 21 21 #include "parse-events.h" 22 + #include "util/mmap.h" 22 23 #define NR_ITERS 111 23 24 #define PERF_TEST_BPF_PATH "/sys/fs/bpf/perf_test" 24 25 ··· 168 167 goto out_delete_evlist; 169 168 } 170 169 171 - err = perf_evlist__mmap(evlist, opts.mmap_pages); 170 + err = evlist__mmap(evlist, opts.mmap_pages); 172 171 if (err < 0) { 173 - pr_debug("perf_evlist__mmap: %s\n", 172 + pr_debug("evlist__mmap: %s\n", 174 173 str_error_r(errno, sbuf, sizeof(sbuf))); 175 174 goto out_delete_evlist; 176 175 } ··· 179 178 (*func)(); 180 179 evlist__disable(evlist); 181 180 182 - for (i = 0; i < evlist->nr_mmaps; i++) { 181 + for (i = 0; i < evlist->core.nr_mmaps; i++) { 183 182 union perf_event *event; 184 - struct perf_mmap *md; 183 + struct mmap *md; 185 184 186 185 md = &evlist->mmap[i]; 187 186 if (perf_mmap__read_init(md) < 0)
-2
tools/perf/tests/clang.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 #include "tests.h" 3 - #include "debug.h" 4 - #include "util.h" 5 3 #include "c++/clang-c.h" 6 4 #include <linux/kernel.h> 7 5
+7 -6
tools/perf/tests/code-reading.c
··· 19 19 #include "evlist.h" 20 20 #include "evsel.h" 21 21 #include "thread_map.h" 22 - #include "cpumap.h" 23 22 #include "machine.h" 24 23 #include "map.h" 25 24 #include "symbol.h" 26 25 #include "event.h" 27 26 #include "record.h" 27 + #include "util/mmap.h" 28 + #include "util/synthetic-events.h" 28 29 #include "thread.h" 29 30 30 31 #include "tests.h" ··· 420 419 struct state *state) 421 420 { 422 421 union perf_event *event; 423 - struct perf_mmap *md; 422 + struct mmap *md; 424 423 int i, ret; 425 424 426 - for (i = 0; i < evlist->nr_mmaps; i++) { 425 + for (i = 0; i < evlist->core.nr_mmaps; i++) { 427 426 md = &evlist->mmap[i]; 428 427 if (perf_mmap__read_init(md) < 0) 429 428 continue; ··· 652 651 653 652 perf_evlist__config(evlist, &opts, NULL); 654 653 655 - evsel = perf_evlist__first(evlist); 654 + evsel = evlist__first(evlist); 656 655 657 656 evsel->core.attr.comm = 1; 658 657 evsel->core.attr.disabled = 1; ··· 686 685 break; 687 686 } 688 687 689 - ret = perf_evlist__mmap(evlist, UINT_MAX); 688 + ret = evlist__mmap(evlist, UINT_MAX); 690 689 if (ret < 0) { 691 - pr_debug("perf_evlist__mmap failed\n"); 690 + pr_debug("evlist__mmap failed\n"); 692 691 goto out_put; 693 692 } 694 693
+1
tools/perf/tests/cpumap.c
··· 3 3 #include <stdio.h> 4 4 #include "cpumap.h" 5 5 #include "event.h" 6 + #include "util/synthetic-events.h" 6 7 #include <string.h> 7 8 #include <linux/bitops.h> 8 9 #include <perf/cpumap.h>
-1
tools/perf/tests/dso-data.c
··· 10 10 #include <sys/resource.h> 11 11 #include <api/fs/fs.h> 12 12 #include "dso.h" 13 - #include "util.h" 14 13 #include "machine.h" 15 14 #include "symbol.h" 16 15 #include "tests.h"
+1
tools/perf/tests/dwarf-unwind.c
··· 15 15 #include "symbol.h" 16 16 #include "thread.h" 17 17 #include "callchain.h" 18 + #include "util/synthetic-events.h" 18 19 19 20 #if defined (__x86_64__) || defined (__i386__) || defined (__powerpc__) 20 21 #include "arch-tests.h"
+7 -8
tools/perf/tests/event-times.c
··· 9 9 #include "tests.h" 10 10 #include "evlist.h" 11 11 #include "evsel.h" 12 - #include "util.h" 13 12 #include "debug.h" 14 13 #include "parse-events.h" 15 14 #include "thread_map.h" ··· 16 17 17 18 static int attach__enable_on_exec(struct evlist *evlist) 18 19 { 19 - struct evsel *evsel = perf_evlist__last(evlist); 20 + struct evsel *evsel = evlist__last(evlist); 20 21 struct target target = { 21 22 .uid = UINT_MAX, 22 23 }; ··· 58 59 59 60 static int attach__current_disabled(struct evlist *evlist) 60 61 { 61 - struct evsel *evsel = perf_evlist__last(evlist); 62 + struct evsel *evsel = evlist__last(evlist); 62 63 struct perf_thread_map *threads; 63 64 int err; 64 65 ··· 84 85 85 86 static int attach__current_enabled(struct evlist *evlist) 86 87 { 87 - struct evsel *evsel = perf_evlist__last(evlist); 88 + struct evsel *evsel = evlist__last(evlist); 88 89 struct perf_thread_map *threads; 89 90 int err; 90 91 ··· 104 105 105 106 static int detach__disable(struct evlist *evlist) 106 107 { 107 - struct evsel *evsel = perf_evlist__last(evlist); 108 + struct evsel *evsel = evlist__last(evlist); 108 109 109 110 return evsel__enable(evsel); 110 111 } 111 112 112 113 static int attach__cpu_disabled(struct evlist *evlist) 113 114 { 114 - struct evsel *evsel = perf_evlist__last(evlist); 115 + struct evsel *evsel = evlist__last(evlist); 115 116 struct perf_cpu_map *cpus; 116 117 int err; 117 118 ··· 140 141 141 142 static int attach__cpu_enabled(struct evlist *evlist) 142 143 { 143 - struct evsel *evsel = perf_evlist__last(evlist); 144 + struct evsel *evsel = evlist__last(evlist); 144 145 struct perf_cpu_map *cpus; 145 146 int err; 146 147 ··· 180 181 goto out_err; 181 182 } 182 183 183 - evsel = perf_evlist__last(evlist); 184 + evsel = evlist__last(evlist); 184 185 evsel->core.attr.read_format |= 185 186 PERF_FORMAT_TOTAL_TIME_ENABLED | 186 187 PERF_FORMAT_TOTAL_TIME_RUNNING;
+6 -4
tools/perf/tests/event_update.c
··· 2 2 #include <linux/compiler.h> 3 3 #include <perf/cpumap.h> 4 4 #include <string.h> 5 + #include "cpumap.h" 5 6 #include "evlist.h" 6 7 #include "evsel.h" 7 8 #include "header.h" 8 9 #include "machine.h" 10 + #include "util/synthetic-events.h" 9 11 #include "tool.h" 10 12 #include "tests.h" 11 13 #include "debug.h" ··· 92 90 evlist = perf_evlist__new_default(); 93 91 TEST_ASSERT_VAL("failed to get evlist", evlist); 94 92 95 - evsel = perf_evlist__first(evlist); 93 + evsel = evlist__first(evlist); 96 94 97 - TEST_ASSERT_VAL("failed to allos ids", 98 - !perf_evsel__alloc_id(evsel, 1, 1)); 95 + TEST_ASSERT_VAL("failed to allocate ids", 96 + !perf_evsel__alloc_id(&evsel->core, 1, 1)); 99 97 100 - perf_evlist__id_add(evlist, evsel, 0, 0, 123); 98 + perf_evlist__id_add(&evlist->core, &evsel->core, 0, 0, 123); 101 99 102 100 evsel->unit = strdup("KRAVA"); 103 101
+1 -1
tools/perf/tests/evsel-roundtrip-name.c
··· 34 34 } 35 35 36 36 idx = 0; 37 - evsel = perf_evlist__first(evlist); 37 + evsel = evlist__first(evlist); 38 38 39 39 for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { 40 40 for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
+2
tools/perf/tests/hists_common.c
··· 2 2 #include <inttypes.h> 3 3 #include "util/debug.h" 4 4 #include "util/dso.h" 5 + #include "util/event.h" // struct perf_sample 5 6 #include "util/map.h" 6 7 #include "util/symbol.h" 7 8 #include "util/sort.h" ··· 11 10 #include "util/thread.h" 12 11 #include "tests/hists_common.h" 13 12 #include <linux/kernel.h> 13 + #include <linux/perf_event.h> 14 14 15 15 static struct { 16 16 u32 pid;
+1 -1
tools/perf/tests/hists_cumulate.c
··· 721 721 if (verbose > 1) 722 722 machine__fprintf(machine, stderr); 723 723 724 - evsel = perf_evlist__first(evlist); 724 + evsel = evlist__first(evlist); 725 725 726 726 for (i = 0; i < ARRAY_SIZE(testcases); i++) { 727 727 err = testcases[i](evsel, machine);
+3 -2
tools/perf/tests/hists_link.c
··· 8 8 #include "machine.h" 9 9 #include "parse-events.h" 10 10 #include "hists_common.h" 11 + #include "util/mmap.h" 11 12 #include <errno.h> 12 13 #include <linux/kernel.h> 13 14 ··· 311 310 print_hists_in(hists); 312 311 } 313 312 314 - first = perf_evlist__first(evlist); 315 - evsel = perf_evlist__last(evlist); 313 + first = evlist__first(evlist); 314 + evsel = evlist__last(evlist); 316 315 317 316 first_hists = evsel__hists(first); 318 317 hists = evsel__hists(evsel);
+1 -1
tools/perf/tests/hists_output.c
··· 608 608 if (verbose > 1) 609 609 machine__fprintf(machine, stderr); 610 610 611 - evsel = perf_evlist__first(evlist); 611 + evsel = evlist__first(evlist); 612 612 613 613 for (i = 0; i < ARRAY_SIZE(testcases); i++) { 614 614 err = testcases[i](evsel, machine);
+7 -7
tools/perf/tests/keep-tracking.c
··· 12 12 #include "evsel.h" 13 13 #include "record.h" 14 14 #include "thread_map.h" 15 - #include "cpumap.h" 16 15 #include "tests.h" 16 + #include "util/mmap.h" 17 17 18 18 #define CHECK__(x) { \ 19 19 while ((x) < 0) { \ ··· 32 32 static int find_comm(struct evlist *evlist, const char *comm) 33 33 { 34 34 union perf_event *event; 35 - struct perf_mmap *md; 35 + struct mmap *md; 36 36 int i, found; 37 37 38 38 found = 0; 39 - for (i = 0; i < evlist->nr_mmaps; i++) { 39 + for (i = 0; i < evlist->core.nr_mmaps; i++) { 40 40 md = &evlist->mmap[i]; 41 41 if (perf_mmap__read_init(md) < 0) 42 42 continue; ··· 93 93 94 94 perf_evlist__config(evlist, &opts, NULL); 95 95 96 - evsel = perf_evlist__first(evlist); 96 + evsel = evlist__first(evlist); 97 97 98 98 evsel->core.attr.comm = 1; 99 99 evsel->core.attr.disabled = 1; ··· 105 105 goto out_err; 106 106 } 107 107 108 - CHECK__(perf_evlist__mmap(evlist, UINT_MAX)); 108 + CHECK__(evlist__mmap(evlist, UINT_MAX)); 109 109 110 110 /* 111 111 * First, test that a 'comm' event can be found when the event is ··· 132 132 133 133 evlist__enable(evlist); 134 134 135 - evsel = perf_evlist__last(evlist); 135 + evsel = evlist__last(evlist); 136 136 137 137 CHECK__(evsel__disable(evsel)); 138 138 ··· 143 143 144 144 found = find_comm(evlist, comm); 145 145 if (found != 1) { 146 - pr_debug("Seconf time, failed to find tracking event.\n"); 146 + pr_debug("Second time, failed to find tracking event.\n"); 147 147 goto out_err; 148 148 } 149 149
-1
tools/perf/tests/llvm.c
··· 7 7 #include "llvm.h" 8 8 #include "tests.h" 9 9 #include "debug.h" 10 - #include "util.h" 11 10 12 11 #ifdef HAVE_LIBBPF_SUPPORT 13 12 static int test__bpf_parsing(void *obj_buf, size_t obj_buf_sz)
+6 -2
tools/perf/tests/make
··· 100 100 make_install_pdf := install-pdf 101 101 make_install_prefix := install prefix=/tmp/krava 102 102 make_install_prefix_slash := install prefix=/tmp/krava/ 103 - make_static := LDFLAGS=-static 103 + make_static := LDFLAGS=-static NO_PERF_READ_VDSO32=1 NO_PERF_READ_VDSOX32=1 NO_JVMTI=1 104 104 105 105 # all the NO_* variable combined 106 106 make_minimal := NO_LIBPERL=1 NO_LIBPYTHON=1 NO_NEWT=1 NO_GTK2=1 ··· 327 327 (make -C ../../tools $(PARALLEL_OPT) $(K_O_OPT) perf) > $@ 2>&1 && \ 328 328 test -x $(KERNEL_O)/tools/perf/perf && rm -f $@ || (cat $@ ; false) 329 329 330 + make_libperf: 331 + @echo "- make -C lib"; 332 + make -C lib clean >$@ 2>&1; make -C lib >>$@ 2>&1 && rm $@ 333 + 330 334 FEATURES_DUMP_FILE := $(FULL_O)/BUILD_TEST_FEATURE_DUMP 331 335 FEATURES_DUMP_FILE_STATIC := $(FULL_O)/BUILD_TEST_FEATURE_DUMP_STATIC 332 336 ··· 369 365 $(eval $(t) := $($(t)) FEATURES_DUMP=$(FEATURES_DUMP_FILE)))) 370 366 endif 371 367 372 - .PHONY: all $(run) $(run_O) tarpkg clean make_kernelsrc make_kernelsrc_tools 368 + .PHONY: all $(run) $(run_O) tarpkg clean make_kernelsrc make_kernelsrc_tools make_libperf 373 369 endif # ifndef MK
+1 -1
tools/perf/tests/mem2node.c
··· 4 4 #include <linux/kernel.h> 5 5 #include <linux/zalloc.h> 6 6 #include <perf/cpumap.h> 7 - #include "cpumap.h" 7 + #include <internal/cpumap.h> 8 8 #include "debug.h" 9 9 #include "env.h" 10 10 #include "mem2node.h"
+4 -4
tools/perf/tests/mmap-basic.c
··· 10 10 #include "evlist.h" 11 11 #include "evsel.h" 12 12 #include "thread_map.h" 13 - #include "cpumap.h" 14 13 #include "tests.h" 14 + #include "util/mmap.h" 15 15 #include <linux/err.h> 16 16 #include <linux/kernel.h> 17 17 #include <linux/string.h> ··· 43 43 expected_nr_events[nsyscalls], i, j; 44 44 struct evsel *evsels[nsyscalls], *evsel; 45 45 char sbuf[STRERR_BUFSIZE]; 46 - struct perf_mmap *md; 46 + struct mmap *md; 47 47 48 48 threads = thread_map__new(-1, getpid(), UINT_MAX); 49 49 if (threads == NULL) { ··· 53 53 54 54 cpus = perf_cpu_map__new(NULL); 55 55 if (cpus == NULL) { 56 - pr_debug("cpu_map__new\n"); 56 + pr_debug("perf_cpu_map__new\n"); 57 57 goto out_free_threads; 58 58 } 59 59 ··· 100 100 expected_nr_events[i] = 1 + rand() % 127; 101 101 } 102 102 103 - if (perf_evlist__mmap(evlist, 128) < 0) { 103 + if (evlist__mmap(evlist, 128) < 0) { 104 104 pr_debug("failed to mmap events: %d (%s)\n", errno, 105 105 str_error_r(errno, sbuf, sizeof(sbuf))); 106 106 goto out_delete_evlist;
+3 -1
tools/perf/tests/mmap-thread-lookup.c
··· 8 8 #include <stdlib.h> 9 9 #include <stdio.h> 10 10 #include "debug.h" 11 + #include "event.h" 11 12 #include "tests.h" 12 13 #include "machine.h" 13 14 #include "thread_map.h" 14 15 #include "map.h" 15 16 #include "symbol.h" 17 + #include "util/synthetic-events.h" 16 18 #include "thread.h" 17 - #include "util.h" 19 + #include <internal/lib.h> // page_size 18 20 19 21 #define THREADS 4 20 22
+3 -2
tools/perf/tests/openat-syscall-all-cpus.c
··· 14 14 #include "evsel.h" 15 15 #include "tests.h" 16 16 #include "thread_map.h" 17 - #include "cpumap.h" 17 + #include <perf/cpumap.h> 18 + #include <internal/cpumap.h> 18 19 #include "debug.h" 19 20 #include "stat.h" 20 21 #include "util/counts.h" ··· 38 37 39 38 cpus = perf_cpu_map__new(NULL); 40 39 if (cpus == NULL) { 41 - pr_debug("cpu_map__new\n"); 40 + pr_debug("perf_cpu_map__new\n"); 42 41 goto out_thread_map_delete; 43 42 } 44 43
+6 -5
tools/perf/tests/openat-syscall-tp-fields.c
··· 11 11 #include "record.h" 12 12 #include "tests.h" 13 13 #include "debug.h" 14 + #include "util/mmap.h" 14 15 #include <errno.h> 15 16 16 17 #ifndef O_DIRECTORY ··· 70 69 goto out_delete_evlist; 71 70 } 72 71 73 - err = perf_evlist__mmap(evlist, UINT_MAX); 72 + err = evlist__mmap(evlist, UINT_MAX); 74 73 if (err < 0) { 75 - pr_debug("perf_evlist__mmap: %s\n", 74 + pr_debug("evlist__mmap: %s\n", 76 75 str_error_r(errno, sbuf, sizeof(sbuf))); 77 76 goto out_delete_evlist; 78 77 } ··· 87 86 while (1) { 88 87 int before = nr_events; 89 88 90 - for (i = 0; i < evlist->nr_mmaps; i++) { 89 + for (i = 0; i < evlist->core.nr_mmaps; i++) { 91 90 union perf_event *event; 92 - struct perf_mmap *md; 91 + struct mmap *md; 93 92 94 93 md = &evlist->mmap[i]; 95 94 if (perf_mmap__read_init(md) < 0) ··· 127 126 } 128 127 129 128 if (nr_events == before) 130 - perf_evlist__poll(evlist, 10); 129 + evlist__poll(evlist, 10); 131 130 132 131 if (++nr_polls > 5) { 133 132 pr_debug("%s: no events!\n", __func__);
+58 -59
tools/perf/tests/parse-events.c
··· 6 6 #include "tests.h" 7 7 #include "debug.h" 8 8 #include "pmu.h" 9 - #include "util.h" 10 9 #include <dirent.h> 11 10 #include <errno.h> 12 11 #include <sys/types.h> ··· 46 47 47 48 static int test__checkevent_tracepoint(struct evlist *evlist) 48 49 { 49 - struct evsel *evsel = perf_evlist__first(evlist); 50 + struct evsel *evsel = evlist__first(evlist); 50 51 51 52 TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries); 52 53 TEST_ASSERT_VAL("wrong number of groups", 0 == evlist->nr_groups); ··· 77 78 78 79 static int test__checkevent_raw(struct evlist *evlist) 79 80 { 80 - struct evsel *evsel = perf_evlist__first(evlist); 81 + struct evsel *evsel = evlist__first(evlist); 81 82 82 83 TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries); 83 84 TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); ··· 87 88 88 89 static int test__checkevent_numeric(struct evlist *evlist) 89 90 { 90 - struct evsel *evsel = perf_evlist__first(evlist); 91 + struct evsel *evsel = evlist__first(evlist); 91 92 92 93 TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries); 93 94 TEST_ASSERT_VAL("wrong type", 1 == evsel->core.attr.type); ··· 97 98 98 99 static int test__checkevent_symbolic_name(struct evlist *evlist) 99 100 { 100 - struct evsel *evsel = perf_evlist__first(evlist); 101 + struct evsel *evsel = evlist__first(evlist); 101 102 102 103 TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries); 103 104 TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); ··· 108 109 109 110 static int test__checkevent_symbolic_name_config(struct evlist *evlist) 110 111 { 111 - struct evsel *evsel = perf_evlist__first(evlist); 112 + struct evsel *evsel = evlist__first(evlist); 112 113 113 114 TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries); 114 115 TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); ··· 129 130 130 131 static int 
test__checkevent_symbolic_alias(struct evlist *evlist) 131 132 { 132 - struct evsel *evsel = perf_evlist__first(evlist); 133 + struct evsel *evsel = evlist__first(evlist); 133 134 134 135 TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries); 135 136 TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type); ··· 140 141 141 142 static int test__checkevent_genhw(struct evlist *evlist) 142 143 { 143 - struct evsel *evsel = perf_evlist__first(evlist); 144 + struct evsel *evsel = evlist__first(evlist); 144 145 145 146 TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries); 146 147 TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->core.attr.type); ··· 150 151 151 152 static int test__checkevent_breakpoint(struct evlist *evlist) 152 153 { 153 - struct evsel *evsel = perf_evlist__first(evlist); 154 + struct evsel *evsel = evlist__first(evlist); 154 155 155 156 TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries); 156 157 TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type); ··· 164 165 165 166 static int test__checkevent_breakpoint_x(struct evlist *evlist) 166 167 { 167 - struct evsel *evsel = perf_evlist__first(evlist); 168 + struct evsel *evsel = evlist__first(evlist); 168 169 169 170 TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries); 170 171 TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type); ··· 177 178 178 179 static int test__checkevent_breakpoint_r(struct evlist *evlist) 179 180 { 180 - struct evsel *evsel = perf_evlist__first(evlist); 181 + struct evsel *evsel = evlist__first(evlist); 181 182 182 183 TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries); 183 184 TEST_ASSERT_VAL("wrong type", ··· 192 193 193 194 static int test__checkevent_breakpoint_w(struct evlist *evlist) 194 195 { 195 - struct evsel *evsel = perf_evlist__first(evlist); 196 + struct evsel *evsel = evlist__first(evlist); 196 
197 197 198 TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries); 198 199 TEST_ASSERT_VAL("wrong type", ··· 207 208 208 209 static int test__checkevent_breakpoint_rw(struct evlist *evlist) 209 210 { 210 - struct evsel *evsel = perf_evlist__first(evlist); 211 + struct evsel *evsel = evlist__first(evlist); 211 212 212 213 TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries); 213 214 TEST_ASSERT_VAL("wrong type", ··· 222 223 223 224 static int test__checkevent_tracepoint_modifier(struct evlist *evlist) 224 225 { 225 - struct evsel *evsel = perf_evlist__first(evlist); 226 + struct evsel *evsel = evlist__first(evlist); 226 227 227 228 TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user); 228 229 TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); ··· 253 254 254 255 static int test__checkevent_raw_modifier(struct evlist *evlist) 255 256 { 256 - struct evsel *evsel = perf_evlist__first(evlist); 257 + struct evsel *evsel = evlist__first(evlist); 257 258 258 259 TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user); 259 260 TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); ··· 265 266 266 267 static int test__checkevent_numeric_modifier(struct evlist *evlist) 267 268 { 268 - struct evsel *evsel = perf_evlist__first(evlist); 269 + struct evsel *evsel = evlist__first(evlist); 269 270 270 271 TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user); 271 272 TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); ··· 277 278 278 279 static int test__checkevent_symbolic_name_modifier(struct evlist *evlist) 279 280 { 280 - struct evsel *evsel = perf_evlist__first(evlist); 281 + struct evsel *evsel = evlist__first(evlist); 281 282 282 283 TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user); 283 284 TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); ··· 289 290 290 291 static int 
test__checkevent_exclude_host_modifier(struct evlist *evlist) 291 292 { 292 - struct evsel *evsel = perf_evlist__first(evlist); 293 + struct evsel *evsel = evlist__first(evlist); 293 294 294 295 TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); 295 296 TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host); ··· 299 300 300 301 static int test__checkevent_exclude_guest_modifier(struct evlist *evlist) 301 302 { 302 - struct evsel *evsel = perf_evlist__first(evlist); 303 + struct evsel *evsel = evlist__first(evlist); 303 304 304 305 TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest); 305 306 TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host); ··· 309 310 310 311 static int test__checkevent_symbolic_alias_modifier(struct evlist *evlist) 311 312 { 312 - struct evsel *evsel = perf_evlist__first(evlist); 313 + struct evsel *evsel = evlist__first(evlist); 313 314 314 315 TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); 315 316 TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); ··· 321 322 322 323 static int test__checkevent_genhw_modifier(struct evlist *evlist) 323 324 { 324 - struct evsel *evsel = perf_evlist__first(evlist); 325 + struct evsel *evsel = evlist__first(evlist); 325 326 326 327 TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user); 327 328 TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); ··· 333 334 334 335 static int test__checkevent_exclude_idle_modifier(struct evlist *evlist) 335 336 { 336 - struct evsel *evsel = perf_evlist__first(evlist); 337 + struct evsel *evsel = evlist__first(evlist); 337 338 338 339 TEST_ASSERT_VAL("wrong exclude idle", evsel->core.attr.exclude_idle); 339 340 TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); ··· 348 349 349 350 static int test__checkevent_exclude_idle_modifier_1(struct evlist *evlist) 350 351 { 351 - struct evsel *evsel = 
perf_evlist__first(evlist); 352 + struct evsel *evsel = evlist__first(evlist); 352 353 353 354 TEST_ASSERT_VAL("wrong exclude idle", evsel->core.attr.exclude_idle); 354 355 TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest); ··· 363 364 364 365 static int test__checkevent_breakpoint_modifier(struct evlist *evlist) 365 366 { 366 - struct evsel *evsel = perf_evlist__first(evlist); 367 + struct evsel *evsel = evlist__first(evlist); 367 368 368 369 369 370 TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); ··· 378 379 379 380 static int test__checkevent_breakpoint_x_modifier(struct evlist *evlist) 380 381 { 381 - struct evsel *evsel = perf_evlist__first(evlist); 382 + struct evsel *evsel = evlist__first(evlist); 382 383 383 384 TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user); 384 385 TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); ··· 392 393 393 394 static int test__checkevent_breakpoint_r_modifier(struct evlist *evlist) 394 395 { 395 - struct evsel *evsel = perf_evlist__first(evlist); 396 + struct evsel *evsel = evlist__first(evlist); 396 397 397 398 TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user); 398 399 TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); ··· 406 407 407 408 static int test__checkevent_breakpoint_w_modifier(struct evlist *evlist) 408 409 { 409 - struct evsel *evsel = perf_evlist__first(evlist); 410 + struct evsel *evsel = evlist__first(evlist); 410 411 411 412 TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); 412 413 TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); ··· 420 421 421 422 static int test__checkevent_breakpoint_rw_modifier(struct evlist *evlist) 422 423 { 423 - struct evsel *evsel = perf_evlist__first(evlist); 424 + struct evsel *evsel = evlist__first(evlist); 424 425 425 426 TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user); 426 427 
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); ··· 435 436 static int test__checkevent_pmu(struct evlist *evlist) 436 437 { 437 438 438 - struct evsel *evsel = perf_evlist__first(evlist); 439 + struct evsel *evsel = evlist__first(evlist); 439 440 440 441 TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries); 441 442 TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); ··· 453 454 454 455 static int test__checkevent_list(struct evlist *evlist) 455 456 { 456 - struct evsel *evsel = perf_evlist__first(evlist); 457 + struct evsel *evsel = evlist__first(evlist); 457 458 458 459 TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->core.nr_entries); 459 460 ··· 492 493 493 494 static int test__checkevent_pmu_name(struct evlist *evlist) 494 495 { 495 - struct evsel *evsel = perf_evlist__first(evlist); 496 + struct evsel *evsel = evlist__first(evlist); 496 497 497 498 /* cpu/config=1,name=krava/u */ 498 499 TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); ··· 513 514 514 515 static int test__checkevent_pmu_partial_time_callgraph(struct evlist *evlist) 515 516 { 516 - struct evsel *evsel = perf_evlist__first(evlist); 517 + struct evsel *evsel = evlist__first(evlist); 517 518 518 519 /* cpu/config=1,call-graph=fp,time,period=100000/ */ 519 520 TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); ··· 546 547 547 548 static int test__checkevent_pmu_events(struct evlist *evlist) 548 549 { 549 - struct evsel *evsel = perf_evlist__first(evlist); 550 + struct evsel *evsel = evlist__first(evlist); 550 551 551 552 TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries); 552 553 TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); ··· 564 565 565 566 static int test__checkevent_pmu_events_mix(struct evlist *evlist) 566 567 { 567 - struct evsel *evsel = perf_evlist__first(evlist); 568 + struct evsel *evsel = evlist__first(evlist); 568 569 
569 570 /* pmu-event:u */ 570 571 TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); ··· 642 643 TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups); 643 644 644 645 /* instructions:k */ 645 - evsel = leader = perf_evlist__first(evlist); 646 + evsel = leader = evlist__first(evlist); 646 647 TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); 647 648 TEST_ASSERT_VAL("wrong config", 648 649 PERF_COUNT_HW_INSTRUCTIONS == evsel->core.attr.config); ··· 684 685 TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups); 685 686 686 687 /* faults + :ku modifier */ 687 - evsel = leader = perf_evlist__first(evlist); 688 + evsel = leader = evlist__first(evlist); 688 689 TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type); 689 690 TEST_ASSERT_VAL("wrong config", 690 691 PERF_COUNT_SW_PAGE_FAULTS == evsel->core.attr.config); ··· 739 740 TEST_ASSERT_VAL("wrong number of groups", 2 == evlist->nr_groups); 740 741 741 742 /* group1 syscalls:sys_enter_openat:H */ 742 - evsel = leader = perf_evlist__first(evlist); 743 + evsel = leader = evlist__first(evlist); 743 744 TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->core.attr.type); 744 745 TEST_ASSERT_VAL("wrong sample_type", 745 746 PERF_TP_SAMPLE_TYPE == evsel->core.attr.sample_type); ··· 831 832 TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups); 832 833 833 834 /* cycles:u + p */ 834 - evsel = leader = perf_evlist__first(evlist); 835 + evsel = leader = evlist__first(evlist); 835 836 TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); 836 837 TEST_ASSERT_VAL("wrong config", 837 838 PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config); ··· 875 876 TEST_ASSERT_VAL("wrong number of groups", 2 == evlist->nr_groups); 876 877 877 878 /* cycles + G */ 878 - evsel = leader = perf_evlist__first(evlist); 879 + evsel = leader = evlist__first(evlist); 879 880 TEST_ASSERT_VAL("wrong type", 
PERF_TYPE_HARDWARE == evsel->core.attr.type); 880 881 TEST_ASSERT_VAL("wrong config", 881 882 PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config); ··· 961 962 TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups); 962 963 963 964 /* cycles + :H group modifier */ 964 - evsel = leader = perf_evlist__first(evlist); 965 + evsel = leader = evlist__first(evlist); 965 966 TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); 966 967 TEST_ASSERT_VAL("wrong config", 967 968 PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config); ··· 1001 1002 TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups); 1002 1003 1003 1004 /* cycles + :G group modifier */ 1004 - evsel = leader = perf_evlist__first(evlist); 1005 + evsel = leader = evlist__first(evlist); 1005 1006 TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); 1006 1007 TEST_ASSERT_VAL("wrong config", 1007 1008 PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config); ··· 1041 1042 TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups); 1042 1043 1043 1044 /* cycles:G + :u group modifier */ 1044 - evsel = leader = perf_evlist__first(evlist); 1045 + evsel = leader = evlist__first(evlist); 1045 1046 TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); 1046 1047 TEST_ASSERT_VAL("wrong config", 1047 1048 PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config); ··· 1081 1082 TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->nr_groups); 1082 1083 1083 1084 /* cycles:G + :uG group modifier */ 1084 - evsel = leader = perf_evlist__first(evlist); 1085 + evsel = leader = evlist__first(evlist); 1085 1086 TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); 1086 1087 TEST_ASSERT_VAL("wrong config", 1087 1088 PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config); ··· 1120 1121 TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->core.nr_entries); 1121 1122 1122 1123 /* cycles - sampling group leader */ 1123 - evsel = 
leader = perf_evlist__first(evlist); 1124 + evsel = leader = evlist__first(evlist); 1124 1125 TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); 1125 1126 TEST_ASSERT_VAL("wrong config", 1126 1127 PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config); ··· 1173 1174 TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); 1174 1175 1175 1176 /* instructions - sampling group leader */ 1176 - evsel = leader = perf_evlist__first(evlist); 1177 + evsel = leader = evlist__first(evlist); 1177 1178 TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); 1178 1179 TEST_ASSERT_VAL("wrong config", 1179 1180 PERF_COUNT_HW_INSTRUCTIONS == evsel->core.attr.config); ··· 1207 1208 1208 1209 static int test__checkevent_pinned_modifier(struct evlist *evlist) 1209 1210 { 1210 - struct evsel *evsel = perf_evlist__first(evlist); 1211 + struct evsel *evsel = evlist__first(evlist); 1211 1212 1212 1213 TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); 1213 1214 TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); ··· 1225 1226 TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->core.nr_entries); 1226 1227 1227 1228 /* cycles - group leader */ 1228 - evsel = leader = perf_evlist__first(evlist); 1229 + evsel = leader = evlist__first(evlist); 1229 1230 TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); 1230 1231 TEST_ASSERT_VAL("wrong config", 1231 1232 PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config); ··· 1251 1252 1252 1253 static int test__checkevent_breakpoint_len(struct evlist *evlist) 1253 1254 { 1254 - struct evsel *evsel = perf_evlist__first(evlist); 1255 + struct evsel *evsel = evlist__first(evlist); 1255 1256 1256 1257 TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries); 1257 1258 TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type); ··· 1266 1267 1267 1268 static int 
test__checkevent_breakpoint_len_w(struct evlist *evlist) 1268 1269 { 1269 - struct evsel *evsel = perf_evlist__first(evlist); 1270 + struct evsel *evsel = evlist__first(evlist); 1270 1271 1271 1272 TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries); 1272 1273 TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type); ··· 1282 1283 static int 1283 1284 test__checkevent_breakpoint_len_rw_modifier(struct evlist *evlist) 1284 1285 { 1285 - struct evsel *evsel = perf_evlist__first(evlist); 1286 + struct evsel *evsel = evlist__first(evlist); 1286 1287 1287 1288 TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); 1288 1289 TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); ··· 1294 1295 1295 1296 static int test__checkevent_precise_max_modifier(struct evlist *evlist) 1296 1297 { 1297 - struct evsel *evsel = perf_evlist__first(evlist); 1298 + struct evsel *evsel = evlist__first(evlist); 1298 1299 1299 1300 TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); 1300 1301 TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type); ··· 1305 1306 1306 1307 static int test__checkevent_config_symbol(struct evlist *evlist) 1307 1308 { 1308 - struct evsel *evsel = perf_evlist__first(evlist); 1309 + struct evsel *evsel = evlist__first(evlist); 1309 1310 1310 1311 TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "insn") == 0); 1311 1312 return 0; ··· 1313 1314 1314 1315 static int test__checkevent_config_raw(struct evlist *evlist) 1315 1316 { 1316 - struct evsel *evsel = perf_evlist__first(evlist); 1317 + struct evsel *evsel = evlist__first(evlist); 1317 1318 1318 1319 TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "rawpmu") == 0); 1319 1320 return 0; ··· 1321 1322 1322 1323 static int test__checkevent_config_num(struct evlist *evlist) 1323 1324 { 1324 - struct evsel *evsel = perf_evlist__first(evlist); 1325 + struct evsel *evsel = 
evlist__first(evlist); 1325 1326 1326 1327 TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "numpmu") == 0); 1327 1328 return 0; ··· 1329 1330 1330 1331 static int test__checkevent_config_cache(struct evlist *evlist) 1331 1332 { 1332 - struct evsel *evsel = perf_evlist__first(evlist); 1333 + struct evsel *evsel = evlist__first(evlist); 1333 1334 1334 1335 TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "cachepmu") == 0); 1335 1336 return 0; ··· 1342 1343 1343 1344 static int test__intel_pt(struct evlist *evlist) 1344 1345 { 1345 - struct evsel *evsel = perf_evlist__first(evlist); 1346 + struct evsel *evsel = evlist__first(evlist); 1346 1347 1347 1348 TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "intel_pt//u") == 0); 1348 1349 return 0; ··· 1350 1351 1351 1352 static int test__checkevent_complex_name(struct evlist *evlist) 1352 1353 { 1353 - struct evsel *evsel = perf_evlist__first(evlist); 1354 + struct evsel *evsel = evlist__first(evlist); 1354 1355 1355 1356 TEST_ASSERT_VAL("wrong complex name parsing", strcmp(evsel->name, "COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks") == 0); 1356 1357 return 0; ··· 1358 1359 1359 1360 static int test__sym_event_slash(struct evlist *evlist) 1360 1361 { 1361 - struct evsel *evsel = perf_evlist__first(evlist); 1362 + struct evsel *evsel = evlist__first(evlist); 1362 1363 1363 1364 TEST_ASSERT_VAL("wrong type", evsel->core.attr.type == PERF_TYPE_HARDWARE); 1364 1365 TEST_ASSERT_VAL("wrong config", evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES); ··· 1368 1369 1369 1370 static int test__sym_event_dc(struct evlist *evlist) 1370 1371 { 1371 - struct evsel *evsel = perf_evlist__first(evlist); 1372 + struct evsel *evsel = evlist__first(evlist); 1372 1373 1373 1374 TEST_ASSERT_VAL("wrong type", evsel->core.attr.type == PERF_TYPE_HARDWARE); 1374 1375 TEST_ASSERT_VAL("wrong config", evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES);
-2
tools/perf/tests/parse-no-sample-id-all.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 1 #include <linux/kernel.h> 3 2 #include <linux/types.h> 4 3 #include <stddef.h> ··· 7 8 #include "event.h" 8 9 #include "evlist.h" 9 10 #include "header.h" 10 - #include "util.h" 11 11 #include "debug.h" 12 12 13 13 static int process_event(struct evlist **pevlist, union perf_event *event)
-1
tools/perf/tests/perf-hooks.c
··· 4 4 5 5 #include "tests.h" 6 6 #include "debug.h" 7 - #include "util.h" 8 7 #include "perf-hooks.h" 9 8 10 9 static void sigsegv_handler(int sig __maybe_unused)
+7 -6
tools/perf/tests/perf-record.c
··· 11 11 #include "debug.h" 12 12 #include "record.h" 13 13 #include "tests.h" 14 + #include "util/mmap.h" 14 15 15 16 static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp) 16 17 { ··· 104 103 /* 105 104 * Config the evsels, setting attr->comm on the first one, etc. 106 105 */ 107 - evsel = perf_evlist__first(evlist); 106 + evsel = evlist__first(evlist); 108 107 perf_evsel__set_sample_bit(evsel, CPU); 109 108 perf_evsel__set_sample_bit(evsel, TID); 110 109 perf_evsel__set_sample_bit(evsel, TIME); ··· 144 143 * fds in the same CPU to be injected in the same mmap ring buffer 145 144 * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)). 146 145 */ 147 - err = perf_evlist__mmap(evlist, opts.mmap_pages); 146 + err = evlist__mmap(evlist, opts.mmap_pages); 148 147 if (err < 0) { 149 - pr_debug("perf_evlist__mmap: %s\n", 148 + pr_debug("evlist__mmap: %s\n", 150 149 str_error_r(errno, sbuf, sizeof(sbuf))); 151 150 goto out_delete_evlist; 152 151 } ··· 165 164 while (1) { 166 165 int before = total_events; 167 166 168 - for (i = 0; i < evlist->nr_mmaps; i++) { 167 + for (i = 0; i < evlist->core.nr_mmaps; i++) { 169 168 union perf_event *event; 170 - struct perf_mmap *md; 169 + struct mmap *md; 171 170 172 171 md = &evlist->mmap[i]; 173 172 if (perf_mmap__read_init(md) < 0) ··· 287 286 * perf_event_attr.wakeup_events, just PERF_EVENT_SAMPLE does. 288 287 */ 289 288 if (total_events == before && false) 290 - perf_evlist__poll(evlist, -1); 289 + evlist__poll(evlist, -1); 291 290 292 291 sleep(1); 293 292 if (++wakeups > 5) {
-1
tools/perf/tests/pmu.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 #include "parse-events.h" 3 3 #include "pmu.h" 4 - #include "util.h" 5 4 #include "tests.h" 6 5 #include <errno.h> 7 6 #include <stdio.h>
+1 -1
tools/perf/tests/sample-parsing.c
··· 9 9 10 10 #include "map_symbol.h" 11 11 #include "branch.h" 12 - #include "util.h" 13 12 #include "event.h" 14 13 #include "evsel.h" 15 14 #include "debug.h" 15 + #include "util/synthetic-events.h" 16 16 17 17 #include "tests.h" 18 18
+1
tools/perf/tests/sdt.c
··· 3 3 #include <limits.h> 4 4 #include <stdio.h> 5 5 #include <stdlib.h> 6 + #include <unistd.h> 6 7 #include <sys/epoll.h> 7 8 #include <util/symbol.h> 8 9 #include <linux/filter.h>
+1
tools/perf/tests/stat.c
··· 5 5 #include "stat.h" 6 6 #include "counts.h" 7 7 #include "debug.h" 8 + #include "util/synthetic-events.h" 8 9 9 10 static bool has_term(struct perf_record_stat_config *config, 10 11 u64 tag, u64 val)
+3 -2
tools/perf/tests/sw-clock.c
··· 12 12 #include "util/evsel.h" 13 13 #include "util/evlist.h" 14 14 #include "util/cpumap.h" 15 + #include "util/mmap.h" 15 16 #include "util/thread_map.h" 16 17 #include <perf/evlist.h> 17 18 ··· 43 42 }; 44 43 struct perf_cpu_map *cpus; 45 44 struct perf_thread_map *threads; 46 - struct perf_mmap *md; 45 + struct mmap *md; 47 46 48 47 attr.sample_freq = 500; 49 48 ··· 83 82 goto out_delete_evlist; 84 83 } 85 84 86 - err = perf_evlist__mmap(evlist, 128); 85 + err = evlist__mmap(evlist, 128); 87 86 if (err < 0) { 88 87 pr_debug("failed to mmap event: %d (%s)\n", errno, 89 88 str_error_r(errno, sbuf, sizeof(sbuf)));
+15 -15
tools/perf/tests/switch-tracking.c
··· 14 14 #include "evlist.h" 15 15 #include "evsel.h" 16 16 #include "thread_map.h" 17 - #include "cpumap.h" 18 17 #include "record.h" 19 18 #include "tests.h" 19 + #include "util/mmap.h" 20 20 21 21 static int spin_sleep(void) 22 22 { ··· 144 144 return err; 145 145 /* 146 146 * Check for no missing sched_switch events i.e. that the 147 - * evsel->system_wide flag has worked. 147 + * evsel->core.system_wide flag has worked. 148 148 */ 149 149 if (switch_tracking->tids[cpu] != -1 && 150 150 switch_tracking->tids[cpu] != prev_tid) { ··· 264 264 unsigned pos, cnt = 0; 265 265 LIST_HEAD(events); 266 266 struct event_node *events_array, *node; 267 - struct perf_mmap *md; 267 + struct mmap *md; 268 268 int i, ret; 269 269 270 - for (i = 0; i < evlist->nr_mmaps; i++) { 270 + for (i = 0; i < evlist->core.nr_mmaps; i++) { 271 271 md = &evlist->mmap[i]; 272 272 if (perf_mmap__read_init(md) < 0) 273 273 continue; ··· 316 316 * 317 317 * This function implements a test that checks that sched_switch events and 318 318 * tracking events can be recorded for a workload (current process) using the 319 - * evsel->system_wide and evsel->tracking flags (respectively) with other events 319 + * evsel->core.system_wide and evsel->tracking flags (respectively) with other events 320 320 * sometimes enabled or disabled. 
321 321 */ 322 322 int test__switch_tracking(struct test *test __maybe_unused, int subtest __maybe_unused) ··· 367 367 goto out_err; 368 368 } 369 369 370 - cpu_clocks_evsel = perf_evlist__last(evlist); 370 + cpu_clocks_evsel = evlist__last(evlist); 371 371 372 372 /* Second event */ 373 373 err = parse_events(evlist, "cycles:u", NULL); ··· 376 376 goto out_err; 377 377 } 378 378 379 - cycles_evsel = perf_evlist__last(evlist); 379 + cycles_evsel = evlist__last(evlist); 380 380 381 381 /* Third event */ 382 382 if (!perf_evlist__can_select_event(evlist, sched_switch)) { ··· 391 391 goto out_err; 392 392 } 393 393 394 - switch_evsel = perf_evlist__last(evlist); 394 + switch_evsel = evlist__last(evlist); 395 395 396 396 perf_evsel__set_sample_bit(switch_evsel, CPU); 397 397 perf_evsel__set_sample_bit(switch_evsel, TIME); 398 398 399 - switch_evsel->system_wide = true; 399 + switch_evsel->core.system_wide = true; 400 400 switch_evsel->no_aux_samples = true; 401 401 switch_evsel->immediate = true; 402 402 403 403 /* Test moving an event to the front */ 404 - if (cycles_evsel == perf_evlist__first(evlist)) { 404 + if (cycles_evsel == evlist__first(evlist)) { 405 405 pr_debug("cycles event already at front"); 406 406 goto out_err; 407 407 } 408 408 perf_evlist__to_front(evlist, cycles_evsel); 409 - if (cycles_evsel != perf_evlist__first(evlist)) { 409 + if (cycles_evsel != evlist__first(evlist)) { 410 410 pr_debug("Failed to move cycles event to front"); 411 411 goto out_err; 412 412 } ··· 421 421 goto out_err; 422 422 } 423 423 424 - tracking_evsel = perf_evlist__last(evlist); 424 + tracking_evsel = evlist__last(evlist); 425 425 426 426 perf_evlist__set_tracking_event(evlist, tracking_evsel); 427 427 ··· 434 434 perf_evlist__config(evlist, &opts, NULL); 435 435 436 436 /* Check moved event is still at the front */ 437 - if (cycles_evsel != perf_evlist__first(evlist)) { 437 + if (cycles_evsel != evlist__first(evlist)) { 438 438 pr_debug("Front event no longer at front"); 
439 439 goto out_err; 440 440 } ··· 461 461 goto out; 462 462 } 463 463 464 - err = perf_evlist__mmap(evlist, UINT_MAX); 464 + err = evlist__mmap(evlist, UINT_MAX); 465 465 if (err) { 466 - pr_debug("perf_evlist__mmap failed!\n"); 466 + pr_debug("evlist__mmap failed!\n"); 467 467 goto out_err; 468 468 } 469 469
+6 -5
tools/perf/tests/task-exit.c
··· 4 4 #include "evsel.h" 5 5 #include "target.h" 6 6 #include "thread_map.h" 7 - #include "cpumap.h" 8 7 #include "tests.h" 8 + #include "util/mmap.h" 9 9 10 10 #include <errno.h> 11 11 #include <signal.h> 12 12 #include <linux/string.h> 13 + #include <perf/cpumap.h> 13 14 #include <perf/evlist.h> 14 15 15 16 static int exited; ··· 52 51 char sbuf[STRERR_BUFSIZE]; 53 52 struct perf_cpu_map *cpus; 54 53 struct perf_thread_map *threads; 55 - struct perf_mmap *md; 54 + struct mmap *md; 56 55 57 56 signal(SIGCHLD, sig_handler); 58 57 ··· 88 87 goto out_delete_evlist; 89 88 } 90 89 91 - evsel = perf_evlist__first(evlist); 90 + evsel = evlist__first(evlist); 92 91 evsel->core.attr.task = 1; 93 92 #ifdef __s390x__ 94 93 evsel->core.attr.sample_freq = 1000000; ··· 107 106 goto out_delete_evlist; 108 107 } 109 108 110 - if (perf_evlist__mmap(evlist, 128) < 0) { 109 + if (evlist__mmap(evlist, 128) < 0) { 111 110 pr_debug("failed to mmap events: %d (%s)\n", errno, 112 111 str_error_r(errno, sbuf, sizeof(sbuf))); 113 112 goto out_delete_evlist; ··· 130 129 131 130 out_init: 132 131 if (!exited || !nr_exit) { 133 - perf_evlist__poll(evlist, -1); 132 + evlist__poll(evlist, -1); 134 133 goto retry; 135 134 } 136 135
+1
tools/perf/tests/thread-map.c
··· 8 8 #include "thread_map.h" 9 9 #include "debug.h" 10 10 #include "event.h" 11 + #include "util/synthetic-events.h" 11 12 #include <linux/zalloc.h> 12 13 #include <perf/event.h> 13 14
+4 -3
tools/perf/tests/topology.c
··· 3 3 #include <stdlib.h> 4 4 #include <stdio.h> 5 5 #include <perf/cpumap.h> 6 + #include "cpumap.h" 6 7 #include "tests.h" 7 - #include "util.h" 8 8 #include "session.h" 9 9 #include "evlist.h" 10 10 #include "debug.h" 11 + #include <linux/err.h> 11 12 12 13 #define TEMPL "/tmp/perf-test-XXXXXX" 13 14 #define DATA_SIZE 10 ··· 40 39 }; 41 40 42 41 session = perf_session__new(&data, false, NULL); 43 - TEST_ASSERT_VAL("can't get session", session); 42 + TEST_ASSERT_VAL("can't get session", !IS_ERR(session)); 44 43 45 44 session->evlist = perf_evlist__new_default(); 46 45 TEST_ASSERT_VAL("can't get evlist", session->evlist); ··· 71 70 int i; 72 71 73 72 session = perf_session__new(&data, false, NULL); 74 - TEST_ASSERT_VAL("can't get session", session); 73 + TEST_ASSERT_VAL("can't get session", !IS_ERR(session)); 75 74 76 75 /* On platforms with large numbers of CPUs process_cpu_topology() 77 76 * might issue an error while reading the perf.data file section
+1 -1
tools/perf/tests/vmlinux-kallsyms.c
··· 7 7 #include "dso.h" 8 8 #include "map.h" 9 9 #include "symbol.h" 10 - #include "util.h" 10 + #include <internal/lib.h> // page_size 11 11 #include "tests.h" 12 12 #include "debug.h" 13 13 #include "machine.h"
-1
tools/perf/ui/browser.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 - #include "../util/util.h" 3 2 #include "../util/string2.h" 4 3 #include "../util/config.h" 5 4 #include "libslang.h"
-1
tools/perf/ui/browsers/annotate.c
··· 2 2 #include "../browser.h" 3 3 #include "../helpline.h" 4 4 #include "../ui.h" 5 - #include "../util.h" 6 5 #include "../../util/annotate.h" 7 6 #include "../../util/debug.h" 8 7 #include "../../util/dso.h"
-1
tools/perf/ui/browsers/header.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 - #include "util/debug.h" 3 2 #include "ui/browser.h" 4 3 #include "ui/keysyms.h" 5 4 #include "ui/ui.h"
+3 -3
tools/perf/ui/browsers/hists.c
··· 3319 3319 switch (key) { 3320 3320 case K_TAB: 3321 3321 if (pos->core.node.next == &evlist->core.entries) 3322 - pos = perf_evlist__first(evlist); 3322 + pos = evlist__first(evlist); 3323 3323 else 3324 3324 pos = perf_evsel__next(pos); 3325 3325 goto browse_hists; 3326 3326 case K_UNTAB: 3327 3327 if (pos->core.node.prev == &evlist->core.entries) 3328 - pos = perf_evlist__last(evlist); 3328 + pos = evlist__last(evlist); 3329 3329 else 3330 3330 pos = perf_evsel__prev(pos); 3331 3331 goto browse_hists; ··· 3417 3417 3418 3418 single_entry: 3419 3419 if (nr_entries == 1) { 3420 - struct evsel *first = perf_evlist__first(evlist); 3420 + struct evsel *first = evlist__first(evlist); 3421 3421 3422 3422 return perf_evsel__hists_browse(first, nr_entries, help, 3423 3423 false, hbt, min_pcnt,
-1
tools/perf/ui/browsers/map.c
··· 5 5 #include <stdlib.h> 6 6 #include <string.h> 7 7 #include <linux/bitops.h> 8 - #include "../../util/util.h" 9 8 #include "../../util/debug.h" 10 9 #include "../../util/map.h" 11 10 #include "../../util/dso.h"
+1 -1
tools/perf/ui/browsers/res_sample.c
··· 7 7 #include "config.h" 8 8 #include "time-utils.h" 9 9 #include "../util.h" 10 - #include "../../util/util.h" 10 + #include "../../util/util.h" // perf_exe() 11 11 #include "../../perf.h" 12 12 #include <stdlib.h> 13 13 #include <string.h>
+2 -1
tools/perf/ui/browsers/scripts.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 #include "../../builtin.h" 3 3 #include "../../perf.h" 4 - #include "../../util/util.h" 4 + #include "../../util/util.h" // perf_exe() 5 + #include "../util.h" 5 6 #include "../../util/hist.h" 6 7 #include "../../util/debug.h" 7 8 #include "../../util/symbol.h"
-1
tools/perf/ui/gtk/helpline.c
··· 6 6 #include "gtk.h" 7 7 #include "../ui.h" 8 8 #include "../helpline.h" 9 - #include "../../util/debug.h" 10 9 11 10 static void gtk_helpline_pop(void) 12 11 {
+1
tools/perf/ui/gtk/hists.c
··· 8 8 #include "../string2.h" 9 9 #include "gtk.h" 10 10 #include <signal.h> 11 + #include <stdlib.h> 11 12 #include <linux/string.h> 12 13 13 14 #define MAX_COLUMNS 32
-1
tools/perf/ui/gtk/progress.c
··· 3 3 4 4 #include "gtk.h" 5 5 #include "../progress.h" 6 - #include "util.h" 7 6 8 7 static GtkWidget *dialog; 9 8 static GtkWidget *progress;
+2 -1
tools/perf/ui/gtk/setup.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 #include "gtk.h" 3 - #include "../../util/debug.h" 3 + #include <linux/compiler.h> 4 + #include "../util.h" 4 5 5 6 extern struct perf_error_ops perf_gtk_eops; 6 7
-1
tools/perf/ui/gtk/util.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 #include "../util.h" 3 - #include "../../util/debug.h" 4 3 #include "gtk.h" 5 4 6 5 #include <stdlib.h>
-2
tools/perf/ui/helpline.c
··· 3 3 #include <stdlib.h> 4 4 #include <string.h> 5 5 6 - #include "../util/debug.h" 7 6 #include "helpline.h" 8 7 #include "ui.h" 9 - #include "../util/util.h" 10 8 11 9 char ui_helpline__current[512]; 12 10
-1
tools/perf/ui/hist.c
··· 8 8 #include "../util/callchain.h" 9 9 #include "../util/debug.h" 10 10 #include "../util/hist.h" 11 - #include "../util/util.h" 12 11 #include "../util/sort.h" 13 12 #include "../util/evsel.h" 14 13 #include "../util/evlist.h"
+1 -1
tools/perf/ui/setup.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 #include <pthread.h> 3 3 #include <dlfcn.h> 4 + #include <unistd.h> 4 5 5 6 #include <subcmd/pager.h> 6 7 #include "../util/debug.h" 7 8 #include "../util/hist.h" 8 - #include "../util/util.h" 9 9 #include "ui.h" 10 10 11 11 pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER;
+1
tools/perf/ui/stdio/hist.c
··· 5 5 6 6 #include "../../util/callchain.h" 7 7 #include "../../util/debug.h" 8 + #include "../../util/event.h" 8 9 #include "../../util/hist.h" 9 10 #include "../../util/map.h" 10 11 #include "../../util/map_groups.h"
-1
tools/perf/ui/tui/helpline.c
··· 6 6 #include <linux/kernel.h> 7 7 #include <linux/string.h> 8 8 9 - #include "../../util/debug.h" 10 9 #include "../helpline.h" 11 10 #include "../ui.h" 12 11 #include "../libslang.h"
+1 -1
tools/perf/ui/tui/setup.c
··· 2 2 #include <signal.h> 3 3 #include <stdbool.h> 4 4 #include <stdlib.h> 5 + #include <unistd.h> 5 6 #include <linux/kernel.h> 6 7 #ifdef HAVE_BACKTRACE_SUPPORT 7 8 #include <execinfo.h> 8 9 #endif 9 10 10 11 #include "../../util/debug.h" 11 - #include "../../util/util.h" 12 12 #include "../../perf.h" 13 13 #include "../browser.h" 14 14 #include "../helpline.h"
-1
tools/perf/ui/tui/util.c
··· 5 5 #include <stdlib.h> 6 6 #include <sys/ttydefaults.h> 7 7 8 - #include "../../util/debug.h" 9 8 #include "../browser.h" 10 9 #include "../keysyms.h" 11 10 #include "../helpline.h"
+3
tools/perf/util/Build
··· 3 3 perf-y += build-id.o 4 4 perf-y += cacheline.o 5 5 perf-y += config.o 6 + perf-y += copyfile.o 6 7 perf-y += ctype.o 7 8 perf-y += db-export.o 8 9 perf-y += env.o ··· 11 10 perf-y += evlist.o 12 11 perf-y += evsel.o 13 12 perf-y += evsel_fprintf.o 13 + perf-y += perf_event_attr_fprintf.o 14 14 perf-y += evswitch.o 15 15 perf-y += find_bit.o 16 16 perf-y += get_current_dir_name.o ··· 88 86 perf-y += record.o 89 87 perf-y += srcline.o 90 88 perf-y += srccode.o 89 + perf-y += synthetic-events.o 91 90 perf-y += data.o 92 91 perf-y += tsc.o 93 92 perf-y += cloexec.o
+2 -1
tools/perf/util/annotate.c
··· 14 14 #include <bpf/btf.h> 15 15 #include <bpf/libbpf.h> 16 16 #include <linux/btf.h> 17 - #include "util.h" 17 + #include "util.h" // hex_width() 18 18 #include "ui/ui.h" 19 19 #include "sort.h" 20 20 #include "build-id.h" ··· 34 34 #include "bpf-event.h" 35 35 #include "block-range.h" 36 36 #include "string2.h" 37 + #include "util/event.h" 37 38 #include "arch/common.h" 38 39 #include <regex.h> 39 40 #include <pthread.h>
-1
tools/perf/util/arm-spe.c
··· 16 16 #include <linux/log2.h> 17 17 #include <linux/zalloc.h> 18 18 19 - #include "cpumap.h" 20 19 #include "color.h" 21 20 #include "evsel.h" 22 21 #include "machine.h"
+7 -5
tools/perf/util/auxtrace.c
··· 31 31 #include "map.h" 32 32 #include "pmu.h" 33 33 #include "evsel.h" 34 - #include "cpumap.h" 35 34 #include "symbol.h" 35 + #include "util/synthetic-events.h" 36 36 #include "thread_map.h" 37 37 #include "asm/bug.h" 38 38 #include "auxtrace.h" ··· 50 50 #include "intel-bts.h" 51 51 #include "arm-spe.h" 52 52 #include "s390-cpumsf.h" 53 - #include "util.h" 53 + #include "util/mmap.h" 54 54 55 55 #include <linux/ctype.h> 56 + #include <linux/kernel.h> 56 57 #include "symbol/kallsyms.h" 58 + #include <internal/lib.h> 57 59 58 60 static bool auxtrace__dont_decode(struct perf_session *session) 59 61 { ··· 1228 1226 return 0; 1229 1227 } 1230 1228 1231 - static int __auxtrace_mmap__read(struct perf_mmap *map, 1229 + static int __auxtrace_mmap__read(struct mmap *map, 1232 1230 struct auxtrace_record *itr, 1233 1231 struct perf_tool *tool, process_auxtrace_t fn, 1234 1232 bool snapshot, size_t snapshot_size) ··· 1339 1337 return 1; 1340 1338 } 1341 1339 1342 - int auxtrace_mmap__read(struct perf_mmap *map, struct auxtrace_record *itr, 1340 + int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr, 1343 1341 struct perf_tool *tool, process_auxtrace_t fn) 1344 1342 { 1345 1343 return __auxtrace_mmap__read(map, itr, tool, fn, false, 0); 1346 1344 } 1347 1345 1348 - int auxtrace_mmap__read_snapshot(struct perf_mmap *map, 1346 + int auxtrace_mmap__read_snapshot(struct mmap *map, 1349 1347 struct auxtrace_record *itr, 1350 1348 struct perf_tool *tool, process_auxtrace_t fn, 1351 1349 size_t snapshot_size)
+7 -19
tools/perf/util/auxtrace.h
··· 11 11 #include <errno.h> 12 12 #include <stdbool.h> 13 13 #include <stddef.h> 14 + #include <stdio.h> // FILE 14 15 #include <linux/list.h> 15 16 #include <linux/perf_event.h> 16 17 #include <linux/types.h> 17 18 #include <asm/bitsperlong.h> 18 19 #include <asm/barrier.h> 19 20 20 - #include "event.h" 21 - 22 21 union perf_event; 23 22 struct perf_session; 24 23 struct evlist; 25 24 struct perf_tool; 26 - struct perf_mmap; 25 + struct mmap; 26 + struct perf_sample; 27 27 struct option; 28 28 struct record_opts; 29 + struct perf_record_auxtrace_error; 29 30 struct perf_record_auxtrace_info; 30 31 struct events_stats; 31 32 ··· 445 444 bool per_cpu); 446 445 447 446 typedef int (*process_auxtrace_t)(struct perf_tool *tool, 448 - struct perf_mmap *map, 447 + struct mmap *map, 449 448 union perf_event *event, void *data1, 450 449 size_t len1, void *data2, size_t len2); 451 450 452 - int auxtrace_mmap__read(struct perf_mmap *map, struct auxtrace_record *itr, 451 + int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr, 453 452 struct perf_tool *tool, process_auxtrace_t fn); 454 453 455 - int auxtrace_mmap__read_snapshot(struct perf_mmap *map, 454 + int auxtrace_mmap__read_snapshot(struct mmap *map, 456 455 struct auxtrace_record *itr, 457 456 struct perf_tool *tool, process_auxtrace_t fn, 458 457 size_t snapshot_size); ··· 525 524 int code, int cpu, pid_t pid, pid_t tid, u64 ip, 526 525 const char *msg, u64 timestamp); 527 526 528 - int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr, 529 - struct perf_tool *tool, 530 - struct perf_session *session, 531 - perf_event__handler_t process); 532 527 int perf_event__process_auxtrace_info(struct perf_session *session, 533 528 union perf_event *event); 534 529 s64 perf_event__process_auxtrace(struct perf_session *session, ··· 599 602 static inline 600 603 void auxtrace_record__free(struct auxtrace_record *itr __maybe_unused) 601 604 { 602 - } 603 - 604 - static inline int 605 - 
perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr __maybe_unused, 606 - struct perf_tool *tool __maybe_unused, 607 - struct perf_session *session __maybe_unused, 608 - perf_event__handler_t process __maybe_unused) 609 - { 610 - return -EINVAL; 611 605 } 612 606 613 607 static inline
+1
tools/perf/util/bpf-event.c
··· 16 16 #include "map.h" 17 17 #include "evlist.h" 18 18 #include "record.h" 19 + #include "util/synthetic-events.h" 19 20 20 21 #define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr)) 21 22
+1 -14
tools/perf/util/bpf-event.h
··· 6 6 #include <linux/rbtree.h> 7 7 #include <pthread.h> 8 8 #include <api/fd/array.h> 9 - #include "event.h" 10 9 #include <stdio.h> 11 10 11 + struct bpf_prog_info; 12 12 struct machine; 13 13 union perf_event; 14 14 struct perf_env; ··· 33 33 #ifdef HAVE_LIBBPF_SUPPORT 34 34 int machine__process_bpf(struct machine *machine, union perf_event *event, 35 35 struct perf_sample *sample); 36 - 37 - int perf_event__synthesize_bpf_events(struct perf_session *session, 38 - perf_event__handler_t process, 39 - struct machine *machine, 40 - struct record_opts *opts); 41 36 int bpf_event__add_sb_event(struct evlist **evlist, 42 37 struct perf_env *env); 43 38 void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info, ··· 42 47 static inline int machine__process_bpf(struct machine *machine __maybe_unused, 43 48 union perf_event *event __maybe_unused, 44 49 struct perf_sample *sample __maybe_unused) 45 - { 46 - return 0; 47 - } 48 - 49 - static inline int perf_event__synthesize_bpf_events(struct perf_session *session __maybe_unused, 50 - perf_event__handler_t process __maybe_unused, 51 - struct machine *machine __maybe_unused, 52 - struct record_opts *opts __maybe_unused) 53 50 { 54 51 return 0; 55 52 }
+1 -1
tools/perf/util/bpf-loader.c
··· 1568 1568 return ERR_PTR(-err); 1569 1569 } 1570 1570 1571 - evsel = perf_evlist__last(evlist); 1571 + evsel = evlist__last(evlist); 1572 1572 } 1573 1573 1574 1574 bpf__for_each_map_named(map, obj, tmp, name) {
-2
tools/perf/util/branch.c
··· 1 - #include "util/util.h" 2 - #include "util/debug.h" 3 1 #include "util/map_symbol.h" 4 2 #include "util/branch.h" 5 3 #include <linux/kernel.h>
+8 -1
tools/perf/util/branch.h
··· 1 1 #ifndef _PERF_BRANCH_H 2 2 #define _PERF_BRANCH_H 1 3 - 3 + /* 4 + * The linux/stddef.h isn't need here, but is needed for __always_inline used 5 + * in files included from uapi/linux/perf_event.h such as 6 + * /usr/include/linux/swab.h and /usr/include/linux/byteorder/little_endian.h, 7 + * detected in at least musl libc, used in Alpine Linux. -acme 8 + */ 4 9 #include <stdio.h> 5 10 #include <stdint.h> 11 + #include <linux/compiler.h> 12 + #include <linux/stddef.h> 6 13 #include <linux/perf_event.h> 7 14 #include <linux/types.h> 8 15
+2 -1
tools/perf/util/build-id.c
··· 7 7 * Copyright (C) 2009, 2010 Red Hat Inc. 8 8 * Copyright (C) 2009, 2010 Arnaldo Carvalho de Melo <acme@redhat.com> 9 9 */ 10 - #include "util.h" 10 + #include "util.h" // lsdir(), mkdir_p(), rm_rf() 11 11 #include <dirent.h> 12 12 #include <errno.h> 13 13 #include <stdio.h> 14 14 #include <sys/stat.h> 15 15 #include <sys/types.h> 16 + #include "util/copyfile.h" 16 17 #include "dso.h" 17 18 #include "build-id.h" 18 19 #include "event.h"
+1
tools/perf/util/callchain.c
··· 23 23 24 24 #include "debug.h" 25 25 #include "dso.h" 26 + #include "event.h" 26 27 #include "hist.h" 27 28 #include "sort.h" 28 29 #include "machine.h"
+4 -1
tools/perf/util/callchain.h
··· 4 4 5 5 #include <linux/list.h> 6 6 #include <linux/rbtree.h> 7 - #include "event.h" 8 7 #include "map_symbol.h" 9 8 #include "branch.h" 10 9 10 + struct addr_location; 11 11 struct evsel; 12 + struct ip_callchain; 12 13 struct map; 14 + struct perf_sample; 15 + struct thread; 13 16 14 17 #define HELP_PAD "\t\t\t\t" 15 18
+1 -1
tools/perf/util/cloexec.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 #include <errno.h> 3 3 #include <sched.h> 4 - #include "util.h" 4 + #include "util.h" // for sched_getcpu() 5 5 #include "../perf-sys.h" 6 6 #include "cloexec.h" 7 7 #include "event.h"
+144
tools/perf/util/copyfile.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include "util/copyfile.h" 3 + #include "util/namespaces.h" 4 + #include <internal/lib.h> 5 + #include <sys/mman.h> 6 + #include <sys/stat.h> 7 + #include <errno.h> 8 + #include <fcntl.h> 9 + #include <stdio.h> 10 + #include <stdlib.h> 11 + #include <string.h> 12 + #include <unistd.h> 13 + 14 + static int slow_copyfile(const char *from, const char *to, struct nsinfo *nsi) 15 + { 16 + int err = -1; 17 + char *line = NULL; 18 + size_t n; 19 + FILE *from_fp, *to_fp; 20 + struct nscookie nsc; 21 + 22 + nsinfo__mountns_enter(nsi, &nsc); 23 + from_fp = fopen(from, "r"); 24 + nsinfo__mountns_exit(&nsc); 25 + if (from_fp == NULL) 26 + goto out; 27 + 28 + to_fp = fopen(to, "w"); 29 + if (to_fp == NULL) 30 + goto out_fclose_from; 31 + 32 + while (getline(&line, &n, from_fp) > 0) 33 + if (fputs(line, to_fp) == EOF) 34 + goto out_fclose_to; 35 + err = 0; 36 + out_fclose_to: 37 + fclose(to_fp); 38 + free(line); 39 + out_fclose_from: 40 + fclose(from_fp); 41 + out: 42 + return err; 43 + } 44 + 45 + int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64 size) 46 + { 47 + void *ptr; 48 + loff_t pgoff; 49 + 50 + pgoff = off_in & ~(page_size - 1); 51 + off_in -= pgoff; 52 + 53 + ptr = mmap(NULL, off_in + size, PROT_READ, MAP_PRIVATE, ifd, pgoff); 54 + if (ptr == MAP_FAILED) 55 + return -1; 56 + 57 + while (size) { 58 + ssize_t ret = pwrite(ofd, ptr + off_in, size, off_out); 59 + if (ret < 0 && errno == EINTR) 60 + continue; 61 + if (ret <= 0) 62 + break; 63 + 64 + size -= ret; 65 + off_in += ret; 66 + off_out += ret; 67 + } 68 + munmap(ptr, off_in + size); 69 + 70 + return size ? 
-1 : 0; 71 + } 72 + 73 + static int copyfile_mode_ns(const char *from, const char *to, mode_t mode, 74 + struct nsinfo *nsi) 75 + { 76 + int fromfd, tofd; 77 + struct stat st; 78 + int err; 79 + char *tmp = NULL, *ptr = NULL; 80 + struct nscookie nsc; 81 + 82 + nsinfo__mountns_enter(nsi, &nsc); 83 + err = stat(from, &st); 84 + nsinfo__mountns_exit(&nsc); 85 + if (err) 86 + goto out; 87 + err = -1; 88 + 89 + /* extra 'x' at the end is to reserve space for '.' */ 90 + if (asprintf(&tmp, "%s.XXXXXXx", to) < 0) { 91 + tmp = NULL; 92 + goto out; 93 + } 94 + ptr = strrchr(tmp, '/'); 95 + if (!ptr) 96 + goto out; 97 + ptr = memmove(ptr + 1, ptr, strlen(ptr) - 1); 98 + *ptr = '.'; 99 + 100 + tofd = mkstemp(tmp); 101 + if (tofd < 0) 102 + goto out; 103 + 104 + if (fchmod(tofd, mode)) 105 + goto out_close_to; 106 + 107 + if (st.st_size == 0) { /* /proc? do it slowly... */ 108 + err = slow_copyfile(from, tmp, nsi); 109 + goto out_close_to; 110 + } 111 + 112 + nsinfo__mountns_enter(nsi, &nsc); 113 + fromfd = open(from, O_RDONLY); 114 + nsinfo__mountns_exit(&nsc); 115 + if (fromfd < 0) 116 + goto out_close_to; 117 + 118 + err = copyfile_offset(fromfd, 0, tofd, 0, st.st_size); 119 + 120 + close(fromfd); 121 + out_close_to: 122 + close(tofd); 123 + if (!err) 124 + err = link(tmp, to); 125 + unlink(tmp); 126 + out: 127 + free(tmp); 128 + return err; 129 + } 130 + 131 + int copyfile_ns(const char *from, const char *to, struct nsinfo *nsi) 132 + { 133 + return copyfile_mode_ns(from, to, 0755, nsi); 134 + } 135 + 136 + int copyfile_mode(const char *from, const char *to, mode_t mode) 137 + { 138 + return copyfile_mode_ns(from, to, mode, NULL); 139 + } 140 + 141 + int copyfile(const char *from, const char *to) 142 + { 143 + return copyfile_mode(from, to, 0755); 144 + }
+16
tools/perf/util/copyfile.h
// SPDX-License-Identifier: GPL-2.0
#ifndef PERF_COPYFILE_H_
#define PERF_COPYFILE_H_

#include <linux/types.h>
#include <sys/types.h>
#include <fcntl.h>

struct nsinfo;

/*
 * File-copy helpers.  All path-based variants write to a hidden
 * temporary file next to the destination and link() it into place on
 * success, so a partial copy never appears under the final name.
 * Each returns 0 on success and a non-zero/negative value on error.
 */

/* Copy 'from' to 'to'; the destination is created with mode 0755. */
int copyfile(const char *from, const char *to);
/* Copy 'from' to 'to'; the destination is created with @mode. */
int copyfile_mode(const char *from, const char *to, mode_t mode);
/*
 * Like copyfile(), but open the source inside the mount namespace
 * described by @nsi (NULL means the current/host namespace).
 */
int copyfile_ns(const char *from, const char *to, struct nsinfo *nsi);
/*
 * Copy @size bytes between already-open descriptors: from @off_in in
 * @ifd to @off_out in @ofd.  Returns 0 on success, -1 on error.
 */
int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64 size);

#endif // PERF_COPYFILE_H_
-1
tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
··· 17 17 #include "cs-etm.h" 18 18 #include "cs-etm-decoder.h" 19 19 #include "intlist.h" 20 - #include "util.h" 21 20 22 21 /* use raw logging */ 23 22 #ifdef CS_DEBUG_RAW
+2 -2
tools/perf/util/cs-etm.c
··· 35 35 #include "thread.h" 36 36 #include "thread-stack.h" 37 37 #include <tools/libc_compat.h> 38 - #include "util.h" 38 + #include "util/synthetic-events.h" 39 39 40 40 #define MAX_TIMESTAMP (~0ULL) 41 41 ··· 1298 1298 attr.read_format = evsel->core.attr.read_format; 1299 1299 1300 1300 /* create new id val to be a fixed offset from evsel id */ 1301 - id = evsel->id[0] + 1000000000; 1301 + id = evsel->core.id[0] + 1000000000; 1302 1302 1303 1303 if (!id) 1304 1304 id = 1;
+4 -1
tools/perf/util/data-convert-bt.c
··· 30 30 #include "machine.h" 31 31 #include "config.h" 32 32 #include <linux/ctype.h> 33 + #include <linux/err.h> 33 34 34 35 #define pr_N(n, fmt, ...) \ 35 36 eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__) ··· 1620 1619 err = -1; 1621 1620 /* perf.data session */ 1622 1621 session = perf_session__new(&data, 0, &c.tool); 1623 - if (!session) 1622 + if (IS_ERR(session)) { 1623 + err = PTR_ERR(session); 1624 1624 goto free_writer; 1625 + } 1625 1626 1626 1627 if (c.queue_size) { 1627 1628 ordered_events__set_alloc_size(&session->ordered_events,
+2 -1
tools/perf/util/data.c
··· 13 13 #include <dirent.h> 14 14 15 15 #include "data.h" 16 - #include "util.h" 16 + #include "util.h" // rm_rf_perf_data() 17 17 #include "debug.h" 18 18 #include "header.h" 19 + #include <internal/lib.h> 19 20 20 21 static void close_dir(struct perf_data_file *files, int nr) 21 22 {
-1
tools/perf/util/debug.c
··· 17 17 #include "event.h" 18 18 #include "debug.h" 19 19 #include "print_binary.h" 20 - #include "util.h" 21 20 #include "target.h" 22 21 #include "ui/helpline.h" 23 22 #include "ui/ui.h"
+1 -1
tools/perf/util/debug.h
··· 3 3 #ifndef __PERF_DEBUG_H 4 4 #define __PERF_DEBUG_H 5 5 6 + #include <stdarg.h> 6 7 #include <stdbool.h> 7 8 #include <linux/compiler.h> 8 - #include "../ui/util.h" 9 9 10 10 extern int verbose; 11 11 extern bool quiet, dump_trace;
-1
tools/perf/util/demangle-java.c
··· 3 3 #include <stdio.h> 4 4 #include <stdlib.h> 5 5 #include <string.h> 6 - #include "debug.h" 7 6 #include "symbol.h" 8 7 9 8 #include "demangle-java.h"
-1
tools/perf/util/demangle-rust.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 #include <string.h> 3 - #include "util.h" 4 3 #include "debug.h" 5 4 6 5 #include "demangle-rust.h"
-1
tools/perf/util/dwarf-regs.c
··· 5 5 * Written by: Masami Hiramatsu <mhiramat@kernel.org> 6 6 */ 7 7 8 - #include <util.h> 9 8 #include <debug.h> 10 9 #include <dwarf-regs.h> 11 10 #include <elf.h>
+2 -1
tools/perf/util/env.h
··· 4 4 5 5 #include <linux/types.h> 6 6 #include <linux/rbtree.h> 7 - #include "cpumap.h" 8 7 #include "rwsem.h" 8 + 9 + struct perf_cpu_map; 9 10 10 11 struct cpu_topology_map { 11 12 int socket_id;
+3 -1106
tools/perf/util/event.c
··· 1 - #include <dirent.h> 2 1 #include <errno.h> 3 2 #include <fcntl.h> 4 3 #include <inttypes.h> 5 4 #include <linux/kernel.h> 6 5 #include <linux/types.h> 6 + #include <perf/cpumap.h> 7 7 #include <sys/types.h> 8 8 #include <sys/stat.h> 9 9 #include <unistd.h> 10 10 #include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */ 11 - #include <api/fs/fs.h> 12 11 #include <linux/perf_event.h> 13 12 #include <linux/zalloc.h> 13 + #include "cpumap.h" 14 14 #include "dso.h" 15 15 #include "event.h" 16 16 #include "debug.h" ··· 24 24 #include "time-utils.h" 25 25 #include <linux/ctype.h> 26 26 #include "map.h" 27 + #include "util/namespaces.h" 27 28 #include "symbol.h" 28 29 #include "symbol/kallsyms.h" 29 30 #include "asm/bug.h" ··· 33 32 #include "bpf-event.h" 34 33 #include "tool.h" 35 34 #include "../perf.h" 36 - 37 - #define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500 38 35 39 36 static const char *perf_event__names[] = { 40 37 [0] = "TOTAL", ··· 74 75 [PERF_RECORD_COMPRESSED] = "COMPRESSED", 75 76 }; 76 77 77 - static const char *perf_ns__names[] = { 78 - [NET_NS_INDEX] = "net", 79 - [UTS_NS_INDEX] = "uts", 80 - [IPC_NS_INDEX] = "ipc", 81 - [PID_NS_INDEX] = "pid", 82 - [USER_NS_INDEX] = "user", 83 - [MNT_NS_INDEX] = "mnt", 84 - [CGROUP_NS_INDEX] = "cgroup", 85 - }; 86 - 87 - unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT; 88 - 89 78 const char *perf_event__name(unsigned int id) 90 79 { 91 80 if (id >= ARRAY_SIZE(perf_event__names)) ··· 81 94 if (!perf_event__names[id]) 82 95 return "UNKNOWN"; 83 96 return perf_event__names[id]; 84 - } 85 - 86 - static const char *perf_ns__name(unsigned int id) 87 - { 88 - if (id >= ARRAY_SIZE(perf_ns__names)) 89 - return "UNKNOWN"; 90 - return perf_ns__names[id]; 91 - } 92 - 93 - int perf_tool__process_synth_event(struct perf_tool *tool, 94 - union perf_event *event, 95 - struct machine *machine, 96 - perf_event__handler_t process) 97 - { 98 - struct perf_sample synth_sample = { 99 - .pid = 
-1, 100 - .tid = -1, 101 - .time = -1, 102 - .stream_id = -1, 103 - .cpu = -1, 104 - .period = 1, 105 - .cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK, 106 - }; 107 - 108 - return process(tool, event, &synth_sample, machine); 109 - }; 110 - 111 - /* 112 - * Assumes that the first 4095 bytes of /proc/pid/stat contains 113 - * the comm, tgid and ppid. 114 - */ 115 - static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len, 116 - pid_t *tgid, pid_t *ppid) 117 - { 118 - char filename[PATH_MAX]; 119 - char bf[4096]; 120 - int fd; 121 - size_t size = 0; 122 - ssize_t n; 123 - char *name, *tgids, *ppids; 124 - 125 - *tgid = -1; 126 - *ppid = -1; 127 - 128 - snprintf(filename, sizeof(filename), "/proc/%d/status", pid); 129 - 130 - fd = open(filename, O_RDONLY); 131 - if (fd < 0) { 132 - pr_debug("couldn't open %s\n", filename); 133 - return -1; 134 - } 135 - 136 - n = read(fd, bf, sizeof(bf) - 1); 137 - close(fd); 138 - if (n <= 0) { 139 - pr_warning("Couldn't get COMM, tigd and ppid for pid %d\n", 140 - pid); 141 - return -1; 142 - } 143 - bf[n] = '\0'; 144 - 145 - name = strstr(bf, "Name:"); 146 - tgids = strstr(bf, "Tgid:"); 147 - ppids = strstr(bf, "PPid:"); 148 - 149 - if (name) { 150 - char *nl; 151 - 152 - name = skip_spaces(name + 5); /* strlen("Name:") */ 153 - nl = strchr(name, '\n'); 154 - if (nl) 155 - *nl = '\0'; 156 - 157 - size = strlen(name); 158 - if (size >= len) 159 - size = len - 1; 160 - memcpy(comm, name, size); 161 - comm[size] = '\0'; 162 - } else { 163 - pr_debug("Name: string not found for pid %d\n", pid); 164 - } 165 - 166 - if (tgids) { 167 - tgids += 5; /* strlen("Tgid:") */ 168 - *tgid = atoi(tgids); 169 - } else { 170 - pr_debug("Tgid: string not found for pid %d\n", pid); 171 - } 172 - 173 - if (ppids) { 174 - ppids += 5; /* strlen("PPid:") */ 175 - *ppid = atoi(ppids); 176 - } else { 177 - pr_debug("PPid: string not found for pid %d\n", pid); 178 - } 179 - 180 - return 0; 181 - } 182 - 183 - static int 
perf_event__prepare_comm(union perf_event *event, pid_t pid, 184 - struct machine *machine, 185 - pid_t *tgid, pid_t *ppid) 186 - { 187 - size_t size; 188 - 189 - *ppid = -1; 190 - 191 - memset(&event->comm, 0, sizeof(event->comm)); 192 - 193 - if (machine__is_host(machine)) { 194 - if (perf_event__get_comm_ids(pid, event->comm.comm, 195 - sizeof(event->comm.comm), 196 - tgid, ppid) != 0) { 197 - return -1; 198 - } 199 - } else { 200 - *tgid = machine->pid; 201 - } 202 - 203 - if (*tgid < 0) 204 - return -1; 205 - 206 - event->comm.pid = *tgid; 207 - event->comm.header.type = PERF_RECORD_COMM; 208 - 209 - size = strlen(event->comm.comm) + 1; 210 - size = PERF_ALIGN(size, sizeof(u64)); 211 - memset(event->comm.comm + size, 0, machine->id_hdr_size); 212 - event->comm.header.size = (sizeof(event->comm) - 213 - (sizeof(event->comm.comm) - size) + 214 - machine->id_hdr_size); 215 - event->comm.tid = pid; 216 - 217 - return 0; 218 - } 219 - 220 - pid_t perf_event__synthesize_comm(struct perf_tool *tool, 221 - union perf_event *event, pid_t pid, 222 - perf_event__handler_t process, 223 - struct machine *machine) 224 - { 225 - pid_t tgid, ppid; 226 - 227 - if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0) 228 - return -1; 229 - 230 - if (perf_tool__process_synth_event(tool, event, machine, process) != 0) 231 - return -1; 232 - 233 - return tgid; 234 - } 235 - 236 - static void perf_event__get_ns_link_info(pid_t pid, const char *ns, 237 - struct perf_ns_link_info *ns_link_info) 238 - { 239 - struct stat64 st; 240 - char proc_ns[128]; 241 - 242 - sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns); 243 - if (stat64(proc_ns, &st) == 0) { 244 - ns_link_info->dev = st.st_dev; 245 - ns_link_info->ino = st.st_ino; 246 - } 247 - } 248 - 249 - int perf_event__synthesize_namespaces(struct perf_tool *tool, 250 - union perf_event *event, 251 - pid_t pid, pid_t tgid, 252 - perf_event__handler_t process, 253 - struct machine *machine) 254 - { 255 - u32 idx; 256 - struct 
perf_ns_link_info *ns_link_info; 257 - 258 - if (!tool || !tool->namespace_events) 259 - return 0; 260 - 261 - memset(&event->namespaces, 0, (sizeof(event->namespaces) + 262 - (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) + 263 - machine->id_hdr_size)); 264 - 265 - event->namespaces.pid = tgid; 266 - event->namespaces.tid = pid; 267 - 268 - event->namespaces.nr_namespaces = NR_NAMESPACES; 269 - 270 - ns_link_info = event->namespaces.link_info; 271 - 272 - for (idx = 0; idx < event->namespaces.nr_namespaces; idx++) 273 - perf_event__get_ns_link_info(pid, perf_ns__name(idx), 274 - &ns_link_info[idx]); 275 - 276 - event->namespaces.header.type = PERF_RECORD_NAMESPACES; 277 - 278 - event->namespaces.header.size = (sizeof(event->namespaces) + 279 - (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) + 280 - machine->id_hdr_size); 281 - 282 - if (perf_tool__process_synth_event(tool, event, machine, process) != 0) 283 - return -1; 284 - 285 - return 0; 286 - } 287 - 288 - static int perf_event__synthesize_fork(struct perf_tool *tool, 289 - union perf_event *event, 290 - pid_t pid, pid_t tgid, pid_t ppid, 291 - perf_event__handler_t process, 292 - struct machine *machine) 293 - { 294 - memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size); 295 - 296 - /* 297 - * for main thread set parent to ppid from status file. For other 298 - * threads set parent pid to main thread. 
ie., assume main thread 299 - * spawns all threads in a process 300 - */ 301 - if (tgid == pid) { 302 - event->fork.ppid = ppid; 303 - event->fork.ptid = ppid; 304 - } else { 305 - event->fork.ppid = tgid; 306 - event->fork.ptid = tgid; 307 - } 308 - event->fork.pid = tgid; 309 - event->fork.tid = pid; 310 - event->fork.header.type = PERF_RECORD_FORK; 311 - event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC; 312 - 313 - event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size); 314 - 315 - if (perf_tool__process_synth_event(tool, event, machine, process) != 0) 316 - return -1; 317 - 318 - return 0; 319 - } 320 - 321 - int perf_event__synthesize_mmap_events(struct perf_tool *tool, 322 - union perf_event *event, 323 - pid_t pid, pid_t tgid, 324 - perf_event__handler_t process, 325 - struct machine *machine, 326 - bool mmap_data) 327 - { 328 - char filename[PATH_MAX]; 329 - FILE *fp; 330 - unsigned long long t; 331 - bool truncation = false; 332 - unsigned long long timeout = proc_map_timeout * 1000000ULL; 333 - int rc = 0; 334 - const char *hugetlbfs_mnt = hugetlbfs__mountpoint(); 335 - int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0; 336 - 337 - if (machine__is_default_guest(machine)) 338 - return 0; 339 - 340 - snprintf(filename, sizeof(filename), "%s/proc/%d/task/%d/maps", 341 - machine->root_dir, pid, pid); 342 - 343 - fp = fopen(filename, "r"); 344 - if (fp == NULL) { 345 - /* 346 - * We raced with a task exiting - just return: 347 - */ 348 - pr_debug("couldn't open %s\n", filename); 349 - return -1; 350 - } 351 - 352 - event->header.type = PERF_RECORD_MMAP2; 353 - t = rdclock(); 354 - 355 - while (1) { 356 - char bf[BUFSIZ]; 357 - char prot[5]; 358 - char execname[PATH_MAX]; 359 - char anonstr[] = "//anon"; 360 - unsigned int ino; 361 - size_t size; 362 - ssize_t n; 363 - 364 - if (fgets(bf, sizeof(bf), fp) == NULL) 365 - break; 366 - 367 - if ((rdclock() - t) > timeout) { 368 - pr_warning("Reading %s time out. 
" 369 - "You may want to increase " 370 - "the time limit by --proc-map-timeout\n", 371 - filename); 372 - truncation = true; 373 - goto out; 374 - } 375 - 376 - /* ensure null termination since stack will be reused. */ 377 - strcpy(execname, ""); 378 - 379 - /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */ 380 - n = sscanf(bf, "%"PRI_lx64"-%"PRI_lx64" %s %"PRI_lx64" %x:%x %u %[^\n]\n", 381 - &event->mmap2.start, &event->mmap2.len, prot, 382 - &event->mmap2.pgoff, &event->mmap2.maj, 383 - &event->mmap2.min, 384 - &ino, execname); 385 - 386 - /* 387 - * Anon maps don't have the execname. 388 - */ 389 - if (n < 7) 390 - continue; 391 - 392 - event->mmap2.ino = (u64)ino; 393 - 394 - /* 395 - * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c 396 - */ 397 - if (machine__is_host(machine)) 398 - event->header.misc = PERF_RECORD_MISC_USER; 399 - else 400 - event->header.misc = PERF_RECORD_MISC_GUEST_USER; 401 - 402 - /* map protection and flags bits */ 403 - event->mmap2.prot = 0; 404 - event->mmap2.flags = 0; 405 - if (prot[0] == 'r') 406 - event->mmap2.prot |= PROT_READ; 407 - if (prot[1] == 'w') 408 - event->mmap2.prot |= PROT_WRITE; 409 - if (prot[2] == 'x') 410 - event->mmap2.prot |= PROT_EXEC; 411 - 412 - if (prot[3] == 's') 413 - event->mmap2.flags |= MAP_SHARED; 414 - else 415 - event->mmap2.flags |= MAP_PRIVATE; 416 - 417 - if (prot[2] != 'x') { 418 - if (!mmap_data || prot[0] != 'r') 419 - continue; 420 - 421 - event->header.misc |= PERF_RECORD_MISC_MMAP_DATA; 422 - } 423 - 424 - out: 425 - if (truncation) 426 - event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT; 427 - 428 - if (!strcmp(execname, "")) 429 - strcpy(execname, anonstr); 430 - 431 - if (hugetlbfs_mnt_len && 432 - !strncmp(execname, hugetlbfs_mnt, hugetlbfs_mnt_len)) { 433 - strcpy(execname, anonstr); 434 - event->mmap2.flags |= MAP_HUGETLB; 435 - } 436 - 437 - size = strlen(execname) + 1; 438 - memcpy(event->mmap2.filename, execname, size); 439 - size = 
PERF_ALIGN(size, sizeof(u64)); 440 - event->mmap2.len -= event->mmap.start; 441 - event->mmap2.header.size = (sizeof(event->mmap2) - 442 - (sizeof(event->mmap2.filename) - size)); 443 - memset(event->mmap2.filename + size, 0, machine->id_hdr_size); 444 - event->mmap2.header.size += machine->id_hdr_size; 445 - event->mmap2.pid = tgid; 446 - event->mmap2.tid = pid; 447 - 448 - if (perf_tool__process_synth_event(tool, event, machine, process) != 0) { 449 - rc = -1; 450 - break; 451 - } 452 - 453 - if (truncation) 454 - break; 455 - } 456 - 457 - fclose(fp); 458 - return rc; 459 - } 460 - 461 - int perf_event__synthesize_modules(struct perf_tool *tool, 462 - perf_event__handler_t process, 463 - struct machine *machine) 464 - { 465 - int rc = 0; 466 - struct map *pos; 467 - struct maps *maps = machine__kernel_maps(machine); 468 - union perf_event *event = zalloc((sizeof(event->mmap) + 469 - machine->id_hdr_size)); 470 - if (event == NULL) { 471 - pr_debug("Not enough memory synthesizing mmap event " 472 - "for kernel modules\n"); 473 - return -1; 474 - } 475 - 476 - event->header.type = PERF_RECORD_MMAP; 477 - 478 - /* 479 - * kernel uses 0 for user space maps, see kernel/perf_event.c 480 - * __perf_event_mmap 481 - */ 482 - if (machine__is_host(machine)) 483 - event->header.misc = PERF_RECORD_MISC_KERNEL; 484 - else 485 - event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; 486 - 487 - for (pos = maps__first(maps); pos; pos = map__next(pos)) { 488 - size_t size; 489 - 490 - if (!__map__is_kmodule(pos)) 491 - continue; 492 - 493 - size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64)); 494 - event->mmap.header.type = PERF_RECORD_MMAP; 495 - event->mmap.header.size = (sizeof(event->mmap) - 496 - (sizeof(event->mmap.filename) - size)); 497 - memset(event->mmap.filename + size, 0, machine->id_hdr_size); 498 - event->mmap.header.size += machine->id_hdr_size; 499 - event->mmap.start = pos->start; 500 - event->mmap.len = pos->end - pos->start; 501 - event->mmap.pid = 
machine->pid; 502 - 503 - memcpy(event->mmap.filename, pos->dso->long_name, 504 - pos->dso->long_name_len + 1); 505 - if (perf_tool__process_synth_event(tool, event, machine, process) != 0) { 506 - rc = -1; 507 - break; 508 - } 509 - } 510 - 511 - free(event); 512 - return rc; 513 - } 514 - 515 - static int __event__synthesize_thread(union perf_event *comm_event, 516 - union perf_event *mmap_event, 517 - union perf_event *fork_event, 518 - union perf_event *namespaces_event, 519 - pid_t pid, int full, 520 - perf_event__handler_t process, 521 - struct perf_tool *tool, 522 - struct machine *machine, 523 - bool mmap_data) 524 - { 525 - char filename[PATH_MAX]; 526 - DIR *tasks; 527 - struct dirent *dirent; 528 - pid_t tgid, ppid; 529 - int rc = 0; 530 - 531 - /* special case: only send one comm event using passed in pid */ 532 - if (!full) { 533 - tgid = perf_event__synthesize_comm(tool, comm_event, pid, 534 - process, machine); 535 - 536 - if (tgid == -1) 537 - return -1; 538 - 539 - if (perf_event__synthesize_namespaces(tool, namespaces_event, pid, 540 - tgid, process, machine) < 0) 541 - return -1; 542 - 543 - /* 544 - * send mmap only for thread group leader 545 - * see thread__init_map_groups 546 - */ 547 - if (pid == tgid && 548 - perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, 549 - process, machine, mmap_data)) 550 - return -1; 551 - 552 - return 0; 553 - } 554 - 555 - if (machine__is_default_guest(machine)) 556 - return 0; 557 - 558 - snprintf(filename, sizeof(filename), "%s/proc/%d/task", 559 - machine->root_dir, pid); 560 - 561 - tasks = opendir(filename); 562 - if (tasks == NULL) { 563 - pr_debug("couldn't open %s\n", filename); 564 - return 0; 565 - } 566 - 567 - while ((dirent = readdir(tasks)) != NULL) { 568 - char *end; 569 - pid_t _pid; 570 - 571 - _pid = strtol(dirent->d_name, &end, 10); 572 - if (*end) 573 - continue; 574 - 575 - rc = -1; 576 - if (perf_event__prepare_comm(comm_event, _pid, machine, 577 - &tgid, &ppid) != 0) 578 - 
break; 579 - 580 - if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid, 581 - ppid, process, machine) < 0) 582 - break; 583 - 584 - if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid, 585 - tgid, process, machine) < 0) 586 - break; 587 - 588 - /* 589 - * Send the prepared comm event 590 - */ 591 - if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0) 592 - break; 593 - 594 - rc = 0; 595 - if (_pid == pid) { 596 - /* process the parent's maps too */ 597 - rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, 598 - process, machine, mmap_data); 599 - if (rc) 600 - break; 601 - } 602 - } 603 - 604 - closedir(tasks); 605 - return rc; 606 - } 607 - 608 - int perf_event__synthesize_thread_map(struct perf_tool *tool, 609 - struct perf_thread_map *threads, 610 - perf_event__handler_t process, 611 - struct machine *machine, 612 - bool mmap_data) 613 - { 614 - union perf_event *comm_event, *mmap_event, *fork_event; 615 - union perf_event *namespaces_event; 616 - int err = -1, thread, j; 617 - 618 - comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size); 619 - if (comm_event == NULL) 620 - goto out; 621 - 622 - mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size); 623 - if (mmap_event == NULL) 624 - goto out_free_comm; 625 - 626 - fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size); 627 - if (fork_event == NULL) 628 - goto out_free_mmap; 629 - 630 - namespaces_event = malloc(sizeof(namespaces_event->namespaces) + 631 - (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) + 632 - machine->id_hdr_size); 633 - if (namespaces_event == NULL) 634 - goto out_free_fork; 635 - 636 - err = 0; 637 - for (thread = 0; thread < threads->nr; ++thread) { 638 - if (__event__synthesize_thread(comm_event, mmap_event, 639 - fork_event, namespaces_event, 640 - perf_thread_map__pid(threads, thread), 0, 641 - process, tool, machine, 642 - mmap_data)) { 643 - err = -1; 644 - break; 645 - } 
646 - 647 - /* 648 - * comm.pid is set to thread group id by 649 - * perf_event__synthesize_comm 650 - */ 651 - if ((int) comm_event->comm.pid != perf_thread_map__pid(threads, thread)) { 652 - bool need_leader = true; 653 - 654 - /* is thread group leader in thread_map? */ 655 - for (j = 0; j < threads->nr; ++j) { 656 - if ((int) comm_event->comm.pid == perf_thread_map__pid(threads, j)) { 657 - need_leader = false; 658 - break; 659 - } 660 - } 661 - 662 - /* if not, generate events for it */ 663 - if (need_leader && 664 - __event__synthesize_thread(comm_event, mmap_event, 665 - fork_event, namespaces_event, 666 - comm_event->comm.pid, 0, 667 - process, tool, machine, 668 - mmap_data)) { 669 - err = -1; 670 - break; 671 - } 672 - } 673 - } 674 - free(namespaces_event); 675 - out_free_fork: 676 - free(fork_event); 677 - out_free_mmap: 678 - free(mmap_event); 679 - out_free_comm: 680 - free(comm_event); 681 - out: 682 - return err; 683 - } 684 - 685 - static int __perf_event__synthesize_threads(struct perf_tool *tool, 686 - perf_event__handler_t process, 687 - struct machine *machine, 688 - bool mmap_data, 689 - struct dirent **dirent, 690 - int start, 691 - int num) 692 - { 693 - union perf_event *comm_event, *mmap_event, *fork_event; 694 - union perf_event *namespaces_event; 695 - int err = -1; 696 - char *end; 697 - pid_t pid; 698 - int i; 699 - 700 - comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size); 701 - if (comm_event == NULL) 702 - goto out; 703 - 704 - mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size); 705 - if (mmap_event == NULL) 706 - goto out_free_comm; 707 - 708 - fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size); 709 - if (fork_event == NULL) 710 - goto out_free_mmap; 711 - 712 - namespaces_event = malloc(sizeof(namespaces_event->namespaces) + 713 - (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) + 714 - machine->id_hdr_size); 715 - if (namespaces_event == NULL) 716 - goto out_free_fork; 717 - 
718 - for (i = start; i < start + num; i++) { 719 - if (!isdigit(dirent[i]->d_name[0])) 720 - continue; 721 - 722 - pid = (pid_t)strtol(dirent[i]->d_name, &end, 10); 723 - /* only interested in proper numerical dirents */ 724 - if (*end) 725 - continue; 726 - /* 727 - * We may race with exiting thread, so don't stop just because 728 - * one thread couldn't be synthesized. 729 - */ 730 - __event__synthesize_thread(comm_event, mmap_event, fork_event, 731 - namespaces_event, pid, 1, process, 732 - tool, machine, mmap_data); 733 - } 734 - err = 0; 735 - 736 - free(namespaces_event); 737 - out_free_fork: 738 - free(fork_event); 739 - out_free_mmap: 740 - free(mmap_event); 741 - out_free_comm: 742 - free(comm_event); 743 - out: 744 - return err; 745 - } 746 - 747 - struct synthesize_threads_arg { 748 - struct perf_tool *tool; 749 - perf_event__handler_t process; 750 - struct machine *machine; 751 - bool mmap_data; 752 - struct dirent **dirent; 753 - int num; 754 - int start; 755 - }; 756 - 757 - static void *synthesize_threads_worker(void *arg) 758 - { 759 - struct synthesize_threads_arg *args = arg; 760 - 761 - __perf_event__synthesize_threads(args->tool, args->process, 762 - args->machine, args->mmap_data, 763 - args->dirent, 764 - args->start, args->num); 765 - return NULL; 766 - } 767 - 768 - int perf_event__synthesize_threads(struct perf_tool *tool, 769 - perf_event__handler_t process, 770 - struct machine *machine, 771 - bool mmap_data, 772 - unsigned int nr_threads_synthesize) 773 - { 774 - struct synthesize_threads_arg *args = NULL; 775 - pthread_t *synthesize_threads = NULL; 776 - char proc_path[PATH_MAX]; 777 - struct dirent **dirent; 778 - int num_per_thread; 779 - int m, n, i, j; 780 - int thread_nr; 781 - int base = 0; 782 - int err = -1; 783 - 784 - 785 - if (machine__is_default_guest(machine)) 786 - return 0; 787 - 788 - snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir); 789 - n = scandir(proc_path, &dirent, 0, alphasort); 790 - if (n < 
0) 791 - return err; 792 - 793 - if (nr_threads_synthesize == UINT_MAX) 794 - thread_nr = sysconf(_SC_NPROCESSORS_ONLN); 795 - else 796 - thread_nr = nr_threads_synthesize; 797 - 798 - if (thread_nr <= 1) { 799 - err = __perf_event__synthesize_threads(tool, process, 800 - machine, mmap_data, 801 - dirent, base, n); 802 - goto free_dirent; 803 - } 804 - if (thread_nr > n) 805 - thread_nr = n; 806 - 807 - synthesize_threads = calloc(sizeof(pthread_t), thread_nr); 808 - if (synthesize_threads == NULL) 809 - goto free_dirent; 810 - 811 - args = calloc(sizeof(*args), thread_nr); 812 - if (args == NULL) 813 - goto free_threads; 814 - 815 - num_per_thread = n / thread_nr; 816 - m = n % thread_nr; 817 - for (i = 0; i < thread_nr; i++) { 818 - args[i].tool = tool; 819 - args[i].process = process; 820 - args[i].machine = machine; 821 - args[i].mmap_data = mmap_data; 822 - args[i].dirent = dirent; 823 - } 824 - for (i = 0; i < m; i++) { 825 - args[i].num = num_per_thread + 1; 826 - args[i].start = i * args[i].num; 827 - } 828 - if (i != 0) 829 - base = args[i-1].start + args[i-1].num; 830 - for (j = i; j < thread_nr; j++) { 831 - args[j].num = num_per_thread; 832 - args[j].start = base + (j - i) * args[i].num; 833 - } 834 - 835 - for (i = 0; i < thread_nr; i++) { 836 - if (pthread_create(&synthesize_threads[i], NULL, 837 - synthesize_threads_worker, &args[i])) 838 - goto out_join; 839 - } 840 - err = 0; 841 - out_join: 842 - for (i = 0; i < thread_nr; i++) 843 - pthread_join(synthesize_threads[i], NULL); 844 - free(args); 845 - free_threads: 846 - free(synthesize_threads); 847 - free_dirent: 848 - for (i = 0; i < n; i++) 849 - zfree(&dirent[i]); 850 - free(dirent); 851 - 852 - return err; 853 97 } 854 98 855 99 struct process_symbol_args { ··· 115 897 116 898 *addr = args.start; 117 899 return 0; 118 - } 119 - 120 - int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused, 121 - perf_event__handler_t process __maybe_unused, 122 - struct machine 
*machine __maybe_unused) 123 - { 124 - return 0; 125 - } 126 - 127 - static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool, 128 - perf_event__handler_t process, 129 - struct machine *machine) 130 - { 131 - size_t size; 132 - struct map *map = machine__kernel_map(machine); 133 - struct kmap *kmap; 134 - int err; 135 - union perf_event *event; 136 - 137 - if (map == NULL) 138 - return -1; 139 - 140 - kmap = map__kmap(map); 141 - if (!kmap->ref_reloc_sym) 142 - return -1; 143 - 144 - /* 145 - * We should get this from /sys/kernel/sections/.text, but till that is 146 - * available use this, and after it is use this as a fallback for older 147 - * kernels. 148 - */ 149 - event = zalloc((sizeof(event->mmap) + machine->id_hdr_size)); 150 - if (event == NULL) { 151 - pr_debug("Not enough memory synthesizing mmap event " 152 - "for kernel modules\n"); 153 - return -1; 154 - } 155 - 156 - if (machine__is_host(machine)) { 157 - /* 158 - * kernel uses PERF_RECORD_MISC_USER for user space maps, 159 - * see kernel/perf_event.c __perf_event_mmap 160 - */ 161 - event->header.misc = PERF_RECORD_MISC_KERNEL; 162 - } else { 163 - event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; 164 - } 165 - 166 - size = snprintf(event->mmap.filename, sizeof(event->mmap.filename), 167 - "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1; 168 - size = PERF_ALIGN(size, sizeof(u64)); 169 - event->mmap.header.type = PERF_RECORD_MMAP; 170 - event->mmap.header.size = (sizeof(event->mmap) - 171 - (sizeof(event->mmap.filename) - size) + machine->id_hdr_size); 172 - event->mmap.pgoff = kmap->ref_reloc_sym->addr; 173 - event->mmap.start = map->start; 174 - event->mmap.len = map->end - event->mmap.start; 175 - event->mmap.pid = machine->pid; 176 - 177 - err = perf_tool__process_synth_event(tool, event, machine, process); 178 - free(event); 179 - 180 - return err; 181 - } 182 - 183 - int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, 184 - perf_event__handler_t process, 
185 - struct machine *machine) 186 - { 187 - int err; 188 - 189 - err = __perf_event__synthesize_kernel_mmap(tool, process, machine); 190 - if (err < 0) 191 - return err; 192 - 193 - return perf_event__synthesize_extra_kmaps(tool, process, machine); 194 - } 195 - 196 - int perf_event__synthesize_thread_map2(struct perf_tool *tool, 197 - struct perf_thread_map *threads, 198 - perf_event__handler_t process, 199 - struct machine *machine) 200 - { 201 - union perf_event *event; 202 - int i, err, size; 203 - 204 - size = sizeof(event->thread_map); 205 - size += threads->nr * sizeof(event->thread_map.entries[0]); 206 - 207 - event = zalloc(size); 208 - if (!event) 209 - return -ENOMEM; 210 - 211 - event->header.type = PERF_RECORD_THREAD_MAP; 212 - event->header.size = size; 213 - event->thread_map.nr = threads->nr; 214 - 215 - for (i = 0; i < threads->nr; i++) { 216 - struct perf_record_thread_map_entry *entry = &event->thread_map.entries[i]; 217 - char *comm = perf_thread_map__comm(threads, i); 218 - 219 - if (!comm) 220 - comm = (char *) ""; 221 - 222 - entry->pid = perf_thread_map__pid(threads, i); 223 - strncpy((char *) &entry->comm, comm, sizeof(entry->comm)); 224 - } 225 - 226 - err = process(tool, event, NULL, machine); 227 - 228 - free(event); 229 - return err; 230 - } 231 - 232 - static void synthesize_cpus(struct cpu_map_entries *cpus, 233 - struct perf_cpu_map *map) 234 - { 235 - int i; 236 - 237 - cpus->nr = map->nr; 238 - 239 - for (i = 0; i < map->nr; i++) 240 - cpus->cpu[i] = map->map[i]; 241 - } 242 - 243 - static void synthesize_mask(struct perf_record_record_cpu_map *mask, 244 - struct perf_cpu_map *map, int max) 245 - { 246 - int i; 247 - 248 - mask->nr = BITS_TO_LONGS(max); 249 - mask->long_size = sizeof(long); 250 - 251 - for (i = 0; i < map->nr; i++) 252 - set_bit(map->map[i], mask->mask); 253 - } 254 - 255 - static size_t cpus_size(struct perf_cpu_map *map) 256 - { 257 - return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16); 258 - } 259 - 
260 - static size_t mask_size(struct perf_cpu_map *map, int *max) 261 - { 262 - int i; 263 - 264 - *max = 0; 265 - 266 - for (i = 0; i < map->nr; i++) { 267 - /* bit possition of the cpu is + 1 */ 268 - int bit = map->map[i] + 1; 269 - 270 - if (bit > *max) 271 - *max = bit; 272 - } 273 - 274 - return sizeof(struct perf_record_record_cpu_map) + BITS_TO_LONGS(*max) * sizeof(long); 275 - } 276 - 277 - void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max) 278 - { 279 - size_t size_cpus, size_mask; 280 - bool is_dummy = perf_cpu_map__empty(map); 281 - 282 - /* 283 - * Both array and mask data have variable size based 284 - * on the number of cpus and their actual values. 285 - * The size of the 'struct perf_record_cpu_map_data' is: 286 - * 287 - * array = size of 'struct cpu_map_entries' + 288 - * number of cpus * sizeof(u64) 289 - * 290 - * mask = size of 'struct perf_record_record_cpu_map' + 291 - * maximum cpu bit converted to size of longs 292 - * 293 - * and finaly + the size of 'struct perf_record_cpu_map_data'. 
294 - */ 295 - size_cpus = cpus_size(map); 296 - size_mask = mask_size(map, max); 297 - 298 - if (is_dummy || (size_cpus < size_mask)) { 299 - *size += size_cpus; 300 - *type = PERF_CPU_MAP__CPUS; 301 - } else { 302 - *size += size_mask; 303 - *type = PERF_CPU_MAP__MASK; 304 - } 305 - 306 - *size += sizeof(struct perf_record_cpu_map_data); 307 - *size = PERF_ALIGN(*size, sizeof(u64)); 308 - return zalloc(*size); 309 - } 310 - 311 - void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data, struct perf_cpu_map *map, 312 - u16 type, int max) 313 - { 314 - data->type = type; 315 - 316 - switch (type) { 317 - case PERF_CPU_MAP__CPUS: 318 - synthesize_cpus((struct cpu_map_entries *) data->data, map); 319 - break; 320 - case PERF_CPU_MAP__MASK: 321 - synthesize_mask((struct perf_record_record_cpu_map *)data->data, map, max); 322 - default: 323 - break; 324 - }; 325 - } 326 - 327 - static struct perf_record_cpu_map *cpu_map_event__new(struct perf_cpu_map *map) 328 - { 329 - size_t size = sizeof(struct perf_record_cpu_map); 330 - struct perf_record_cpu_map *event; 331 - int max; 332 - u16 type; 333 - 334 - event = cpu_map_data__alloc(map, &size, &type, &max); 335 - if (!event) 336 - return NULL; 337 - 338 - event->header.type = PERF_RECORD_CPU_MAP; 339 - event->header.size = size; 340 - event->data.type = type; 341 - 342 - cpu_map_data__synthesize(&event->data, map, type, max); 343 - return event; 344 - } 345 - 346 - int perf_event__synthesize_cpu_map(struct perf_tool *tool, 347 - struct perf_cpu_map *map, 348 - perf_event__handler_t process, 349 - struct machine *machine) 350 - { 351 - struct perf_record_cpu_map *event; 352 - int err; 353 - 354 - event = cpu_map_event__new(map); 355 - if (!event) 356 - return -ENOMEM; 357 - 358 - err = process(tool, (union perf_event *) event, NULL, machine); 359 - 360 - free(event); 361 - return err; 362 - } 363 - 364 - int perf_event__synthesize_stat_config(struct perf_tool *tool, 365 - struct perf_stat_config *config, 366 - 
perf_event__handler_t process, 367 - struct machine *machine) 368 - { 369 - struct perf_record_stat_config *event; 370 - int size, i = 0, err; 371 - 372 - size = sizeof(*event); 373 - size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0])); 374 - 375 - event = zalloc(size); 376 - if (!event) 377 - return -ENOMEM; 378 - 379 - event->header.type = PERF_RECORD_STAT_CONFIG; 380 - event->header.size = size; 381 - event->nr = PERF_STAT_CONFIG_TERM__MAX; 382 - 383 - #define ADD(__term, __val) \ 384 - event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term; \ 385 - event->data[i].val = __val; \ 386 - i++; 387 - 388 - ADD(AGGR_MODE, config->aggr_mode) 389 - ADD(INTERVAL, config->interval) 390 - ADD(SCALE, config->scale) 391 - 392 - WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX, 393 - "stat config terms unbalanced\n"); 394 - #undef ADD 395 - 396 - err = process(tool, (union perf_event *) event, NULL, machine); 397 - 398 - free(event); 399 - return err; 400 - } 401 - 402 - int perf_event__synthesize_stat(struct perf_tool *tool, 403 - u32 cpu, u32 thread, u64 id, 404 - struct perf_counts_values *count, 405 - perf_event__handler_t process, 406 - struct machine *machine) 407 - { 408 - struct perf_record_stat event; 409 - 410 - event.header.type = PERF_RECORD_STAT; 411 - event.header.size = sizeof(event); 412 - event.header.misc = 0; 413 - 414 - event.id = id; 415 - event.cpu = cpu; 416 - event.thread = thread; 417 - event.val = count->val; 418 - event.ena = count->ena; 419 - event.run = count->run; 420 - 421 - return process(tool, (union perf_event *) &event, NULL, machine); 422 - } 423 - 424 - int perf_event__synthesize_stat_round(struct perf_tool *tool, 425 - u64 evtime, u64 type, 426 - perf_event__handler_t process, 427 - struct machine *machine) 428 - { 429 - struct perf_record_stat_round event; 430 - 431 - event.header.type = PERF_RECORD_STAT_ROUND; 432 - event.header.size = sizeof(event); 433 - event.header.misc = 0; 434 - 435 - event.time = evtime; 436 - event.type = type; 
437 - 438 - return process(tool, (union perf_event *) &event, NULL, machine); 439 900 } 440 901 441 902 void perf_event__read_stat_config(struct perf_stat_config *config,
+2 -75
tools/perf/util/event.h
··· 279 279 280 280 void perf_event__print_totals(void); 281 281 282 - struct perf_tool; 283 - struct perf_thread_map; 284 282 struct perf_cpu_map; 283 + struct perf_record_stat_config; 285 284 struct perf_stat_config; 286 - struct perf_counts_values; 285 + struct perf_tool; 287 286 288 - typedef int (*perf_event__handler_t)(struct perf_tool *tool, 289 - union perf_event *event, 290 - struct perf_sample *sample, 291 - struct machine *machine); 292 - 293 - int perf_event__synthesize_thread_map(struct perf_tool *tool, 294 - struct perf_thread_map *threads, 295 - perf_event__handler_t process, 296 - struct machine *machine, bool mmap_data); 297 - int perf_event__synthesize_thread_map2(struct perf_tool *tool, 298 - struct perf_thread_map *threads, 299 - perf_event__handler_t process, 300 - struct machine *machine); 301 - int perf_event__synthesize_cpu_map(struct perf_tool *tool, 302 - struct perf_cpu_map *cpus, 303 - perf_event__handler_t process, 304 - struct machine *machine); 305 - int perf_event__synthesize_threads(struct perf_tool *tool, 306 - perf_event__handler_t process, 307 - struct machine *machine, bool mmap_data, 308 - unsigned int nr_threads_synthesize); 309 - int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, 310 - perf_event__handler_t process, 311 - struct machine *machine); 312 - int perf_event__synthesize_stat_config(struct perf_tool *tool, 313 - struct perf_stat_config *config, 314 - perf_event__handler_t process, 315 - struct machine *machine); 316 287 void perf_event__read_stat_config(struct perf_stat_config *config, 317 288 struct perf_record_stat_config *event); 318 - int perf_event__synthesize_stat(struct perf_tool *tool, 319 - u32 cpu, u32 thread, u64 id, 320 - struct perf_counts_values *count, 321 - perf_event__handler_t process, 322 - struct machine *machine); 323 - int perf_event__synthesize_stat_round(struct perf_tool *tool, 324 - u64 time, u64 type, 325 - perf_event__handler_t process, 326 - struct machine *machine); 327 - int 
perf_event__synthesize_modules(struct perf_tool *tool, 328 - perf_event__handler_t process, 329 - struct machine *machine); 330 289 331 290 int perf_event__process_comm(struct perf_tool *tool, 332 291 union perf_event *event, ··· 339 380 union perf_event *event, 340 381 struct perf_sample *sample, 341 382 struct machine *machine); 342 - int perf_tool__process_synth_event(struct perf_tool *tool, 343 - union perf_event *event, 344 - struct machine *machine, 345 - perf_event__handler_t process); 346 383 int perf_event__process(struct perf_tool *tool, 347 384 union perf_event *event, 348 385 struct perf_sample *sample, ··· 359 404 struct perf_sample *sample); 360 405 361 406 const char *perf_event__name(unsigned int id); 362 - 363 - size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, 364 - u64 read_format); 365 - int perf_event__synthesize_sample(union perf_event *event, u64 type, 366 - u64 read_format, 367 - const struct perf_sample *sample); 368 - 369 - pid_t perf_event__synthesize_comm(struct perf_tool *tool, 370 - union perf_event *event, pid_t pid, 371 - perf_event__handler_t process, 372 - struct machine *machine); 373 - 374 - int perf_event__synthesize_namespaces(struct perf_tool *tool, 375 - union perf_event *event, 376 - pid_t pid, pid_t tgid, 377 - perf_event__handler_t process, 378 - struct machine *machine); 379 - 380 - int perf_event__synthesize_mmap_events(struct perf_tool *tool, 381 - union perf_event *event, 382 - pid_t pid, pid_t tgid, 383 - perf_event__handler_t process, 384 - struct machine *machine, 385 - bool mmap_data); 386 - 387 - int perf_event__synthesize_extra_kmaps(struct perf_tool *tool, 388 - perf_event__handler_t process, 389 - struct machine *machine); 390 407 391 408 size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp); 392 409 size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp);
+87 -208
tools/perf/util/evlist.c
··· 10 10 #include <inttypes.h> 11 11 #include <poll.h> 12 12 #include "cpumap.h" 13 + #include "util/mmap.h" 13 14 #include "thread_map.h" 14 15 #include "target.h" 15 16 #include "evlist.h" 16 17 #include "evsel.h" 17 18 #include "debug.h" 18 19 #include "units.h" 19 - #include "util.h" 20 + #include <internal/lib.h> // page_size 20 21 #include "../perf.h" 21 22 #include "asm/bug.h" 22 23 #include "bpf-event.h" ··· 50 49 #endif 51 50 52 51 #define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y)) 53 - #define SID(e, x, y) xyarray__entry(e->sample_id, x, y) 52 + #define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y) 54 53 55 54 void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus, 56 55 struct perf_thread_map *threads) 57 56 { 58 - int i; 59 - 60 - for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i) 61 - INIT_HLIST_HEAD(&evlist->heads[i]); 62 57 perf_evlist__init(&evlist->core); 63 58 perf_evlist__set_maps(&evlist->core, cpus, threads); 64 - fdarray__init(&evlist->pollfd, 64); 59 + fdarray__init(&evlist->core.pollfd, 64); 65 60 evlist->workload.pid = -1; 66 61 evlist->bkw_mmap_state = BKW_MMAP_NOTREADY; 67 62 } ··· 105 108 */ 106 109 void perf_evlist__set_id_pos(struct evlist *evlist) 107 110 { 108 - struct evsel *first = perf_evlist__first(evlist); 111 + struct evsel *first = evlist__first(evlist); 109 112 110 113 evlist->id_pos = first->id_pos; 111 114 evlist->is_pos = first->is_pos; ··· 121 124 perf_evlist__set_id_pos(evlist); 122 125 } 123 126 124 - static void perf_evlist__purge(struct evlist *evlist) 127 + static void evlist__purge(struct evlist *evlist) 125 128 { 126 129 struct evsel *pos, *n; 127 130 ··· 134 137 evlist->core.nr_entries = 0; 135 138 } 136 139 137 - void perf_evlist__exit(struct evlist *evlist) 140 + void evlist__exit(struct evlist *evlist) 138 141 { 139 142 zfree(&evlist->mmap); 140 143 zfree(&evlist->overwrite_mmap); 141 - fdarray__exit(&evlist->pollfd); 144 + fdarray__exit(&evlist->core.pollfd); 142 145 } 143 146 144 
147 void evlist__delete(struct evlist *evlist) ··· 146 149 if (evlist == NULL) 147 150 return; 148 151 149 - perf_evlist__munmap(evlist); 152 + evlist__munmap(evlist); 150 153 evlist__close(evlist); 151 154 perf_cpu_map__put(evlist->core.cpus); 152 155 perf_thread_map__put(evlist->core.threads); 153 156 evlist->core.cpus = NULL; 154 157 evlist->core.threads = NULL; 155 - perf_evlist__purge(evlist); 156 - perf_evlist__exit(evlist); 158 + evlist__purge(evlist); 159 + evlist__exit(evlist); 157 160 free(evlist); 158 161 } 159 162 ··· 315 318 static int perf_evlist__nr_threads(struct evlist *evlist, 316 319 struct evsel *evsel) 317 320 { 318 - if (evsel->system_wide) 321 + if (evsel->core.system_wide) 319 322 return 1; 320 323 else 321 324 return perf_thread_map__nr(evlist->core.threads); ··· 398 401 return perf_evlist__enable_event_thread(evlist, evsel, idx); 399 402 } 400 403 401 - int perf_evlist__alloc_pollfd(struct evlist *evlist) 404 + int evlist__add_pollfd(struct evlist *evlist, int fd) 402 405 { 403 - int nr_cpus = perf_cpu_map__nr(evlist->core.cpus); 404 - int nr_threads = perf_thread_map__nr(evlist->core.threads); 405 - int nfds = 0; 406 - struct evsel *evsel; 407 - 408 - evlist__for_each_entry(evlist, evsel) { 409 - if (evsel->system_wide) 410 - nfds += nr_cpus; 411 - else 412 - nfds += nr_cpus * nr_threads; 413 - } 414 - 415 - if (fdarray__available_entries(&evlist->pollfd) < nfds && 416 - fdarray__grow(&evlist->pollfd, nfds) < 0) 417 - return -ENOMEM; 418 - 419 - return 0; 420 - } 421 - 422 - static int __perf_evlist__add_pollfd(struct evlist *evlist, int fd, 423 - struct perf_mmap *map, short revent) 424 - { 425 - int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP); 426 - /* 427 - * Save the idx so that when we filter out fds POLLHUP'ed we can 428 - * close the associated evlist->mmap[] entry. 
429 - */ 430 - if (pos >= 0) { 431 - evlist->pollfd.priv[pos].ptr = map; 432 - 433 - fcntl(fd, F_SETFL, O_NONBLOCK); 434 - } 435 - 436 - return pos; 437 - } 438 - 439 - int perf_evlist__add_pollfd(struct evlist *evlist, int fd) 440 - { 441 - return __perf_evlist__add_pollfd(evlist, fd, NULL, POLLIN); 406 + return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN); 442 407 } 443 408 444 409 static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd, 445 410 void *arg __maybe_unused) 446 411 { 447 - struct perf_mmap *map = fda->priv[fd].ptr; 412 + struct mmap *map = fda->priv[fd].ptr; 448 413 449 414 if (map) 450 415 perf_mmap__put(map); 451 416 } 452 417 453 - int perf_evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask) 418 + int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask) 454 419 { 455 - return fdarray__filter(&evlist->pollfd, revents_and_mask, 420 + return fdarray__filter(&evlist->core.pollfd, revents_and_mask, 456 421 perf_evlist__munmap_filtered, NULL); 457 422 } 458 423 459 - int perf_evlist__poll(struct evlist *evlist, int timeout) 424 + int evlist__poll(struct evlist *evlist, int timeout) 460 425 { 461 - return fdarray__poll(&evlist->pollfd, timeout); 462 - } 463 - 464 - static void perf_evlist__id_hash(struct evlist *evlist, 465 - struct evsel *evsel, 466 - int cpu, int thread, u64 id) 467 - { 468 - int hash; 469 - struct perf_sample_id *sid = SID(evsel, cpu, thread); 470 - 471 - sid->id = id; 472 - sid->evsel = evsel; 473 - hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS); 474 - hlist_add_head(&sid->node, &evlist->heads[hash]); 475 - } 476 - 477 - void perf_evlist__id_add(struct evlist *evlist, struct evsel *evsel, 478 - int cpu, int thread, u64 id) 479 - { 480 - perf_evlist__id_hash(evlist, evsel, cpu, thread, id); 481 - evsel->id[evsel->ids++] = id; 482 - } 483 - 484 - int perf_evlist__id_add_fd(struct evlist *evlist, 485 - struct evsel *evsel, 486 - int cpu, int thread, int fd) 487 - { 488 - u64 
read_data[4] = { 0, }; 489 - int id_idx = 1; /* The first entry is the counter value */ 490 - u64 id; 491 - int ret; 492 - 493 - ret = ioctl(fd, PERF_EVENT_IOC_ID, &id); 494 - if (!ret) 495 - goto add; 496 - 497 - if (errno != ENOTTY) 498 - return -1; 499 - 500 - /* Legacy way to get event id.. All hail to old kernels! */ 501 - 502 - /* 503 - * This way does not work with group format read, so bail 504 - * out in that case. 505 - */ 506 - if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP) 507 - return -1; 508 - 509 - if (!(evsel->core.attr.read_format & PERF_FORMAT_ID) || 510 - read(fd, &read_data, sizeof(read_data)) == -1) 511 - return -1; 512 - 513 - if (evsel->core.attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 514 - ++id_idx; 515 - if (evsel->core.attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 516 - ++id_idx; 517 - 518 - id = read_data[id_idx]; 519 - 520 - add: 521 - perf_evlist__id_add(evlist, evsel, cpu, thread, id); 522 - return 0; 426 + return perf_evlist__poll(&evlist->core, timeout); 523 427 } 524 428 525 429 static void perf_evlist__set_sid_idx(struct evlist *evlist, ··· 433 535 sid->cpu = evlist->core.cpus->map[cpu]; 434 536 else 435 537 sid->cpu = -1; 436 - if (!evsel->system_wide && evlist->core.threads && thread >= 0) 538 + if (!evsel->core.system_wide && evlist->core.threads && thread >= 0) 437 539 sid->tid = perf_thread_map__pid(evlist->core.threads, thread); 438 540 else 439 541 sid->tid = -1; ··· 446 548 int hash; 447 549 448 550 hash = hash_64(id, PERF_EVLIST__HLIST_BITS); 449 - head = &evlist->heads[hash]; 551 + head = &evlist->core.heads[hash]; 450 552 451 553 hlist_for_each_entry(sid, head, node) 452 554 if (sid->id == id) ··· 460 562 struct perf_sample_id *sid; 461 563 462 564 if (evlist->core.nr_entries == 1 || !id) 463 - return perf_evlist__first(evlist); 565 + return evlist__first(evlist); 464 566 465 567 sid = perf_evlist__id2sid(evlist, id); 466 568 if (sid) 467 - return sid->evsel; 569 + return container_of(sid->evsel, 
struct evsel, core); 468 570 469 571 if (!perf_evlist__sample_id_all(evlist)) 470 - return perf_evlist__first(evlist); 572 + return evlist__first(evlist); 471 573 472 574 return NULL; 473 575 } ··· 482 584 483 585 sid = perf_evlist__id2sid(evlist, id); 484 586 if (sid) 485 - return sid->evsel; 587 + return container_of(sid->evsel, struct evsel, core); 486 588 487 589 return NULL; 488 590 } ··· 511 613 struct evsel *perf_evlist__event2evsel(struct evlist *evlist, 512 614 union perf_event *event) 513 615 { 514 - struct evsel *first = perf_evlist__first(evlist); 616 + struct evsel *first = evlist__first(evlist); 515 617 struct hlist_head *head; 516 618 struct perf_sample_id *sid; 517 619 int hash; ··· 532 634 return first; 533 635 534 636 hash = hash_64(id, PERF_EVLIST__HLIST_BITS); 535 - head = &evlist->heads[hash]; 637 + head = &evlist->core.heads[hash]; 536 638 537 639 hlist_for_each_entry(sid, head, node) { 538 640 if (sid->id == id) 539 - return sid->evsel; 641 + return container_of(sid->evsel, struct evsel, core); 540 642 } 541 643 return NULL; 542 644 } ··· 548 650 if (!evlist->overwrite_mmap) 549 651 return 0; 550 652 551 - for (i = 0; i < evlist->nr_mmaps; i++) { 552 - int fd = evlist->overwrite_mmap[i].fd; 653 + for (i = 0; i < evlist->core.nr_mmaps; i++) { 654 + int fd = evlist->overwrite_mmap[i].core.fd; 553 655 int err; 554 656 555 657 if (fd < 0) ··· 571 673 return perf_evlist__set_paused(evlist, false); 572 674 } 573 675 574 - static void perf_evlist__munmap_nofree(struct evlist *evlist) 676 + static void evlist__munmap_nofree(struct evlist *evlist) 575 677 { 576 678 int i; 577 679 578 680 if (evlist->mmap) 579 - for (i = 0; i < evlist->nr_mmaps; i++) 681 + for (i = 0; i < evlist->core.nr_mmaps; i++) 580 682 perf_mmap__munmap(&evlist->mmap[i]); 581 683 582 684 if (evlist->overwrite_mmap) 583 - for (i = 0; i < evlist->nr_mmaps; i++) 685 + for (i = 0; i < evlist->core.nr_mmaps; i++) 584 686 perf_mmap__munmap(&evlist->overwrite_mmap[i]); 585 687 } 586 688 
587 - void perf_evlist__munmap(struct evlist *evlist) 689 + void evlist__munmap(struct evlist *evlist) 588 690 { 589 - perf_evlist__munmap_nofree(evlist); 691 + evlist__munmap_nofree(evlist); 590 692 zfree(&evlist->mmap); 591 693 zfree(&evlist->overwrite_mmap); 592 694 } 593 695 594 - static struct perf_mmap *perf_evlist__alloc_mmap(struct evlist *evlist, 595 - bool overwrite) 696 + static struct mmap *evlist__alloc_mmap(struct evlist *evlist, 697 + bool overwrite) 596 698 { 597 699 int i; 598 - struct perf_mmap *map; 700 + struct mmap *map; 599 701 600 - evlist->nr_mmaps = perf_cpu_map__nr(evlist->core.cpus); 702 + evlist->core.nr_mmaps = perf_cpu_map__nr(evlist->core.cpus); 601 703 if (perf_cpu_map__empty(evlist->core.cpus)) 602 - evlist->nr_mmaps = perf_thread_map__nr(evlist->core.threads); 603 - map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap)); 704 + evlist->core.nr_mmaps = perf_thread_map__nr(evlist->core.threads); 705 + map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap)); 604 706 if (!map) 605 707 return NULL; 606 708 607 - for (i = 0; i < evlist->nr_mmaps; i++) { 608 - map[i].fd = -1; 609 - map[i].overwrite = overwrite; 709 + for (i = 0; i < evlist->core.nr_mmaps; i++) { 710 + map[i].core.fd = -1; 711 + map[i].core.overwrite = overwrite; 610 712 /* 611 713 * When the perf_mmap() call is made we grab one refcount, plus 612 714 * one extra to let perf_mmap__consume() get the last ··· 616 718 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and 617 719 * thus does perf_mmap__get() on it. 
618 720 */ 619 - refcount_set(&map[i].refcnt, 0); 721 + refcount_set(&map[i].core.refcnt, 0); 620 722 } 621 723 return map; 622 724 } ··· 630 732 return true; 631 733 } 632 734 633 - static int perf_evlist__mmap_per_evsel(struct evlist *evlist, int idx, 735 + static int evlist__mmap_per_evsel(struct evlist *evlist, int idx, 634 736 struct mmap_params *mp, int cpu_idx, 635 737 int thread, int *_output, int *_output_overwrite) 636 738 { ··· 639 741 int evlist_cpu = cpu_map__cpu(evlist->core.cpus, cpu_idx); 640 742 641 743 evlist__for_each_entry(evlist, evsel) { 642 - struct perf_mmap *maps = evlist->mmap; 744 + struct mmap *maps = evlist->mmap; 643 745 int *output = _output; 644 746 int fd; 645 747 int cpu; ··· 650 752 maps = evlist->overwrite_mmap; 651 753 652 754 if (!maps) { 653 - maps = perf_evlist__alloc_mmap(evlist, true); 755 + maps = evlist__alloc_mmap(evlist, true); 654 756 if (!maps) 655 757 return -1; 656 758 evlist->overwrite_mmap = maps; ··· 660 762 mp->prot &= ~PROT_WRITE; 661 763 } 662 764 663 - if (evsel->system_wide && thread) 765 + if (evsel->core.system_wide && thread) 664 766 continue; 665 767 666 768 cpu = perf_cpu_map__idx(evsel->core.cpus, evlist_cpu); ··· 690 792 * other events, so it should not need to be polled anyway. 691 793 * Therefore don't add it for polling. 
692 794 */ 693 - if (!evsel->system_wide && 694 - __perf_evlist__add_pollfd(evlist, fd, &maps[idx], revent) < 0) { 795 + if (!evsel->core.system_wide && 796 + perf_evlist__add_pollfd(&evlist->core, fd, &maps[idx], revent) < 0) { 695 797 perf_mmap__put(&maps[idx]); 696 798 return -1; 697 799 } 698 800 699 801 if (evsel->core.attr.read_format & PERF_FORMAT_ID) { 700 - if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread, 802 + if (perf_evlist__id_add_fd(&evlist->core, &evsel->core, cpu, thread, 701 803 fd) < 0) 702 804 return -1; 703 805 perf_evlist__set_sid_idx(evlist, evsel, idx, cpu, ··· 708 810 return 0; 709 811 } 710 812 711 - static int perf_evlist__mmap_per_cpu(struct evlist *evlist, 813 + static int evlist__mmap_per_cpu(struct evlist *evlist, 712 814 struct mmap_params *mp) 713 815 { 714 816 int cpu, thread; ··· 724 826 true); 725 827 726 828 for (thread = 0; thread < nr_threads; thread++) { 727 - if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu, 829 + if (evlist__mmap_per_evsel(evlist, cpu, mp, cpu, 728 830 thread, &output, &output_overwrite)) 729 831 goto out_unmap; 730 832 } ··· 733 835 return 0; 734 836 735 837 out_unmap: 736 - perf_evlist__munmap_nofree(evlist); 838 + evlist__munmap_nofree(evlist); 737 839 return -1; 738 840 } 739 841 740 - static int perf_evlist__mmap_per_thread(struct evlist *evlist, 842 + static int evlist__mmap_per_thread(struct evlist *evlist, 741 843 struct mmap_params *mp) 742 844 { 743 845 int thread; ··· 751 853 auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread, 752 854 false); 753 855 754 - if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread, 856 + if (evlist__mmap_per_evsel(evlist, thread, mp, 0, thread, 755 857 &output, &output_overwrite)) 756 858 goto out_unmap; 757 859 } ··· 759 861 return 0; 760 862 761 863 out_unmap: 762 - perf_evlist__munmap_nofree(evlist); 864 + evlist__munmap_nofree(evlist); 763 865 return -1; 764 866 } 765 867 ··· 786 888 return pages; 787 889 } 788 890 789 - size_t 
perf_evlist__mmap_size(unsigned long pages) 891 + size_t evlist__mmap_size(unsigned long pages) 790 892 { 791 893 if (pages == UINT_MAX) 792 894 pages = perf_event_mlock_kb_in_pages(); ··· 869 971 } 870 972 871 973 /** 872 - * perf_evlist__mmap_ex - Create mmaps to receive events. 974 + * evlist__mmap_ex - Create mmaps to receive events. 873 975 * @evlist: list of events 874 976 * @pages: map length in pages 875 977 * @overwrite: overwrite older events? ··· 877 979 * @auxtrace_overwrite - overwrite older auxtrace data? 878 980 * 879 981 * If @overwrite is %false the user needs to signal event consumption using 880 - * perf_mmap__write_tail(). Using perf_evlist__mmap_read() does this 982 + * perf_mmap__write_tail(). Using evlist__mmap_read() does this 881 983 * automatically. 882 984 * 883 985 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data ··· 885 987 * 886 988 * Return: %0 on success, negative error code otherwise. 887 989 */ 888 - int perf_evlist__mmap_ex(struct evlist *evlist, unsigned int pages, 990 + int evlist__mmap_ex(struct evlist *evlist, unsigned int pages, 889 991 unsigned int auxtrace_pages, 890 992 bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush, 891 993 int comp_level) ··· 902 1004 .comp_level = comp_level }; 903 1005 904 1006 if (!evlist->mmap) 905 - evlist->mmap = perf_evlist__alloc_mmap(evlist, false); 1007 + evlist->mmap = evlist__alloc_mmap(evlist, false); 906 1008 if (!evlist->mmap) 907 1009 return -ENOMEM; 908 1010 909 - if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0) 1011 + if (evlist->core.pollfd.entries == NULL && perf_evlist__alloc_pollfd(&evlist->core) < 0) 910 1012 return -ENOMEM; 911 1013 912 - evlist->mmap_len = perf_evlist__mmap_size(pages); 913 - pr_debug("mmap size %zuB\n", evlist->mmap_len); 914 - mp.mask = evlist->mmap_len - page_size - 1; 1014 + evlist->core.mmap_len = evlist__mmap_size(pages); 1015 + pr_debug("mmap size %zuB\n", evlist->core.mmap_len); 1016 
+ mp.mask = evlist->core.mmap_len - page_size - 1; 915 1017 916 - auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len, 1018 + auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len, 917 1019 auxtrace_pages, auxtrace_overwrite); 918 1020 919 1021 evlist__for_each_entry(evlist, evsel) { 920 1022 if ((evsel->core.attr.read_format & PERF_FORMAT_ID) && 921 - evsel->sample_id == NULL && 922 - perf_evsel__alloc_id(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0) 1023 + evsel->core.sample_id == NULL && 1024 + perf_evsel__alloc_id(&evsel->core, perf_cpu_map__nr(cpus), threads->nr) < 0) 923 1025 return -ENOMEM; 924 1026 } 925 1027 926 1028 if (perf_cpu_map__empty(cpus)) 927 - return perf_evlist__mmap_per_thread(evlist, &mp); 1029 + return evlist__mmap_per_thread(evlist, &mp); 928 1030 929 - return perf_evlist__mmap_per_cpu(evlist, &mp); 1031 + return evlist__mmap_per_cpu(evlist, &mp); 930 1032 } 931 1033 932 - int perf_evlist__mmap(struct evlist *evlist, unsigned int pages) 1034 + int evlist__mmap(struct evlist *evlist, unsigned int pages) 933 1035 { 934 - return perf_evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0); 1036 + return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0); 935 1037 } 936 1038 937 1039 int perf_evlist__create_maps(struct evlist *evlist, struct target *target) ··· 1123 1225 1124 1226 bool perf_evlist__valid_read_format(struct evlist *evlist) 1125 1227 { 1126 - struct evsel *first = perf_evlist__first(evlist), *pos = first; 1228 + struct evsel *first = evlist__first(evlist), *pos = first; 1127 1229 u64 read_format = first->core.attr.read_format; 1128 1230 u64 sample_type = first->core.attr.sample_type; 1129 1231 ··· 1141 1243 return true; 1142 1244 } 1143 1245 1144 - u64 perf_evlist__read_format(struct evlist *evlist) 1145 - { 1146 - struct evsel *first = perf_evlist__first(evlist); 1147 - return first->core.attr.read_format; 1148 - } 1149 - 1150 1246 u16 perf_evlist__id_hdr_size(struct evlist 
*evlist) 1151 1247 { 1152 - struct evsel *first = perf_evlist__first(evlist); 1248 + struct evsel *first = evlist__first(evlist); 1153 1249 struct perf_sample *data; 1154 1250 u64 sample_type; 1155 1251 u16 size = 0; ··· 1176 1284 1177 1285 bool perf_evlist__valid_sample_id_all(struct evlist *evlist) 1178 1286 { 1179 - struct evsel *first = perf_evlist__first(evlist), *pos = first; 1287 + struct evsel *first = evlist__first(evlist), *pos = first; 1180 1288 1181 1289 evlist__for_each_entry_continue(evlist, pos) { 1182 1290 if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all) ··· 1188 1296 1189 1297 bool perf_evlist__sample_id_all(struct evlist *evlist) 1190 1298 { 1191 - struct evsel *first = perf_evlist__first(evlist); 1299 + struct evsel *first = evlist__first(evlist); 1192 1300 return first->core.attr.sample_id_all; 1193 1301 } 1194 1302 ··· 1421 1529 return perf_evsel__parse_sample_timestamp(evsel, event, timestamp); 1422 1530 } 1423 1531 1424 - size_t perf_evlist__fprintf(struct evlist *evlist, FILE *fp) 1425 - { 1426 - struct evsel *evsel; 1427 - size_t printed = 0; 1428 - 1429 - evlist__for_each_entry(evlist, evsel) { 1430 - printed += fprintf(fp, "%s%s", evsel->idx ? 
", " : "", 1431 - perf_evsel__name(evsel)); 1432 - } 1433 - 1434 - return printed + fprintf(fp, "\n"); 1435 - } 1436 - 1437 1532 int perf_evlist__strerror_open(struct evlist *evlist, 1438 1533 int err, char *buf, size_t size) 1439 1534 { ··· 1450 1571 "Hint:\tThe current value is %d.", value); 1451 1572 break; 1452 1573 case EINVAL: { 1453 - struct evsel *first = perf_evlist__first(evlist); 1574 + struct evsel *first = evlist__first(evlist); 1454 1575 int max_freq; 1455 1576 1456 1577 if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0) ··· 1478 1599 int perf_evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size) 1479 1600 { 1480 1601 char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf)); 1481 - int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0; 1602 + int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0; 1482 1603 1483 1604 switch (err) { 1484 1605 case EPERM: ··· 1512 1633 struct evsel *evsel, *n; 1513 1634 LIST_HEAD(move); 1514 1635 1515 - if (move_evsel == perf_evlist__first(evlist)) 1636 + if (move_evsel == evlist__first(evlist)) 1516 1637 return; 1517 1638 1518 1639 evlist__for_each_entry_safe(evlist, n, evsel) { ··· 1633 1754 void perf_evlist__force_leader(struct evlist *evlist) 1634 1755 { 1635 1756 if (!evlist->nr_groups) { 1636 - struct evsel *leader = perf_evlist__first(evlist); 1757 + struct evsel *leader = evlist__first(evlist); 1637 1758 1638 1759 perf_evlist__set_leader(evlist); 1639 1760 leader->forced_leader = true; ··· 1659 1780 is_open = false; 1660 1781 if (c2->leader == leader) { 1661 1782 if (is_open) 1662 - evsel__close(c2); 1783 + perf_evsel__close(&evsel->core); 1663 1784 c2->leader = c2; 1664 1785 c2->core.nr_members = 0; 1665 1786 } ··· 1723 1844 draining = true; 1724 1845 1725 1846 if (!draining) 1726 - perf_evlist__poll(evlist, 1000); 1847 + evlist__poll(evlist, 1000); 1727 1848 1728 - for (i = 0; i < 
evlist->nr_mmaps; i++) { 1729 - struct perf_mmap *map = &evlist->mmap[i]; 1849 + for (i = 0; i < evlist->core.nr_mmaps; i++) { 1850 + struct mmap *map = &evlist->mmap[i]; 1730 1851 union perf_event *event; 1731 1852 1732 1853 if (perf_mmap__read_init(map)) ··· 1768 1889 goto out_delete_evlist; 1769 1890 } 1770 1891 1771 - if (perf_evlist__mmap(evlist, UINT_MAX)) 1892 + if (evlist__mmap(evlist, UINT_MAX)) 1772 1893 goto out_delete_evlist; 1773 1894 1774 1895 evlist__for_each_entry(evlist, counter) {
+48 -33
tools/perf/util/evlist.h
··· 7 7 #include <linux/refcount.h> 8 8 #include <linux/list.h> 9 9 #include <api/fd/array.h> 10 - #include <stdio.h> 11 10 #include <internal/evlist.h> 11 + #include <internal/evsel.h> 12 12 #include "events_stats.h" 13 13 #include "evsel.h" 14 - #include "mmap.h" 14 + #include <pthread.h> 15 15 #include <signal.h> 16 16 #include <unistd.h> 17 17 ··· 20 20 struct perf_cpu_map; 21 21 struct record_opts; 22 22 23 - #define PERF_EVLIST__HLIST_BITS 8 24 - #define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS) 23 + /* 24 + * State machine of bkw_mmap_state: 25 + * 26 + * .________________(forbid)_____________. 27 + * | V 28 + * NOTREADY --(0)--> RUNNING --(1)--> DATA_PENDING --(2)--> EMPTY 29 + * ^ ^ | ^ | 30 + * | |__(forbid)____/ |___(forbid)___/| 31 + * | | 32 + * \_________________(3)_______________/ 33 + * 34 + * NOTREADY : Backward ring buffers are not ready 35 + * RUNNING : Backward ring buffers are recording 36 + * DATA_PENDING : We are required to collect data from backward ring buffers 37 + * EMPTY : We have collected data from backward ring buffers. 
38 + * 39 + * (0): Setup backward ring buffer 40 + * (1): Pause ring buffers for reading 41 + * (2): Read from ring buffers 42 + * (3): Resume ring buffers for recording 43 + */ 44 + enum bkw_mmap_state { 45 + BKW_MMAP_NOTREADY, 46 + BKW_MMAP_RUNNING, 47 + BKW_MMAP_DATA_PENDING, 48 + BKW_MMAP_EMPTY, 49 + }; 25 50 26 51 struct evlist { 27 52 struct perf_evlist core; 28 - struct hlist_head heads[PERF_EVLIST__HLIST_SIZE]; 29 53 int nr_groups; 30 - int nr_mmaps; 31 54 bool enabled; 32 - size_t mmap_len; 33 55 int id_pos; 34 56 int is_pos; 35 57 u64 combined_sample_type; ··· 60 38 int cork_fd; 61 39 pid_t pid; 62 40 } workload; 63 - struct fdarray pollfd; 64 - struct perf_mmap *mmap; 65 - struct perf_mmap *overwrite_mmap; 41 + struct mmap *mmap; 42 + struct mmap *overwrite_mmap; 66 43 struct evsel *selected; 67 44 struct events_stats stats; 68 45 struct perf_env *env; ··· 86 65 struct evlist *perf_evlist__new_dummy(void); 87 66 void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus, 88 67 struct perf_thread_map *threads); 89 - void perf_evlist__exit(struct evlist *evlist); 68 + void evlist__exit(struct evlist *evlist); 90 69 void evlist__delete(struct evlist *evlist); 91 70 92 71 void evlist__add(struct evlist *evlist, struct evsel *entry); ··· 140 119 perf_evlist__find_tracepoint_by_name(struct evlist *evlist, 141 120 const char *name); 142 121 143 - void perf_evlist__id_add(struct evlist *evlist, struct evsel *evsel, 144 - int cpu, int thread, u64 id); 145 - int perf_evlist__id_add_fd(struct evlist *evlist, 146 - struct evsel *evsel, 147 - int cpu, int thread, int fd); 122 + int evlist__add_pollfd(struct evlist *evlist, int fd); 123 + int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask); 148 124 149 - int perf_evlist__add_pollfd(struct evlist *evlist, int fd); 150 - int perf_evlist__alloc_pollfd(struct evlist *evlist); 151 - int perf_evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask); 152 - 153 - int 
perf_evlist__poll(struct evlist *evlist, int timeout); 125 + int evlist__poll(struct evlist *evlist, int timeout); 154 126 155 127 struct evsel *perf_evlist__id2evsel(struct evlist *evlist, u64 id); 156 128 struct evsel *perf_evlist__id2evsel_strict(struct evlist *evlist, ··· 153 139 154 140 void perf_evlist__toggle_bkw_mmap(struct evlist *evlist, enum bkw_mmap_state state); 155 141 156 - void perf_evlist__mmap_consume(struct evlist *evlist, int idx); 142 + void evlist__mmap_consume(struct evlist *evlist, int idx); 157 143 158 144 int evlist__open(struct evlist *evlist); 159 145 void evlist__close(struct evlist *evlist); ··· 184 170 185 171 unsigned long perf_event_mlock_kb_in_pages(void); 186 172 187 - int perf_evlist__mmap_ex(struct evlist *evlist, unsigned int pages, 173 + int evlist__mmap_ex(struct evlist *evlist, unsigned int pages, 188 174 unsigned int auxtrace_pages, 189 175 bool auxtrace_overwrite, int nr_cblocks, 190 176 int affinity, int flush, int comp_level); 191 - int perf_evlist__mmap(struct evlist *evlist, unsigned int pages); 192 - void perf_evlist__munmap(struct evlist *evlist); 177 + int evlist__mmap(struct evlist *evlist, unsigned int pages); 178 + void evlist__munmap(struct evlist *evlist); 193 179 194 - size_t perf_evlist__mmap_size(unsigned long pages); 180 + size_t evlist__mmap_size(unsigned long pages); 195 181 196 182 void evlist__disable(struct evlist *evlist); 197 183 void evlist__enable(struct evlist *evlist); ··· 209 195 void __perf_evlist__set_leader(struct list_head *list); 210 196 void perf_evlist__set_leader(struct evlist *evlist); 211 197 212 - u64 perf_evlist__read_format(struct evlist *evlist); 213 198 u64 __perf_evlist__combined_sample_type(struct evlist *evlist); 214 199 u64 perf_evlist__combined_sample_type(struct evlist *evlist); 215 200 u64 perf_evlist__combined_branch_type(struct evlist *evlist); ··· 234 221 return list_empty(&evlist->core.entries); 235 222 } 236 223 237 - static inline struct evsel 
*perf_evlist__first(struct evlist *evlist) 224 + static inline struct evsel *evlist__first(struct evlist *evlist) 238 225 { 239 - return list_entry(evlist->core.entries.next, struct evsel, core.node); 226 + struct perf_evsel *evsel = perf_evlist__first(&evlist->core); 227 + 228 + return container_of(evsel, struct evsel, core); 240 229 } 241 230 242 - static inline struct evsel *perf_evlist__last(struct evlist *evlist) 231 + static inline struct evsel *evlist__last(struct evlist *evlist) 243 232 { 244 - return list_entry(evlist->core.entries.prev, struct evsel, core.node); 245 - } 233 + struct perf_evsel *evsel = perf_evlist__last(&evlist->core); 246 234 247 - size_t perf_evlist__fprintf(struct evlist *evlist, FILE *fp); 235 + return container_of(evsel, struct evsel, core); 236 + } 248 237 249 238 int perf_evlist__strerror_open(struct evlist *evlist, int err, char *buf, size_t size); 250 239 int perf_evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size);
+21 -463
tools/perf/util/evsel.c
··· 30 30 #include "counts.h" 31 31 #include "event.h" 32 32 #include "evsel.h" 33 + #include "util/evsel_config.h" 34 + #include "util/evsel_fprintf.h" 33 35 #include "evlist.h" 34 - #include "cpumap.h" 36 + #include <perf/cpumap.h> 35 37 #include "thread_map.h" 36 38 #include "target.h" 37 39 #include "perf_regs.h" ··· 47 45 #include "../perf-sys.h" 48 46 #include "util/parse-branch-options.h" 49 47 #include <internal/xyarray.h> 48 + #include <internal/lib.h> 50 49 51 50 #include <linux/ctype.h> 52 51 ··· 1229 1226 return err; 1230 1227 } 1231 1228 1232 - int perf_evsel__alloc_id(struct evsel *evsel, int ncpus, int nthreads) 1233 - { 1234 - if (ncpus == 0 || nthreads == 0) 1235 - return 0; 1236 - 1237 - if (evsel->system_wide) 1238 - nthreads = 1; 1239 - 1240 - evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id)); 1241 - if (evsel->sample_id == NULL) 1242 - return -ENOMEM; 1243 - 1244 - evsel->id = zalloc(ncpus * nthreads * sizeof(u64)); 1245 - if (evsel->id == NULL) { 1246 - xyarray__delete(evsel->sample_id); 1247 - evsel->sample_id = NULL; 1248 - return -ENOMEM; 1249 - } 1250 - 1251 - return 0; 1252 - } 1253 - 1254 - static void perf_evsel__free_id(struct evsel *evsel) 1255 - { 1256 - xyarray__delete(evsel->sample_id); 1257 - evsel->sample_id = NULL; 1258 - zfree(&evsel->id); 1259 - evsel->ids = 0; 1260 - } 1261 - 1262 1229 static void perf_evsel__free_config_terms(struct evsel *evsel) 1263 1230 { 1264 1231 struct perf_evsel_config_term *term, *h; ··· 1245 1272 assert(evsel->evlist == NULL); 1246 1273 perf_evsel__free_counts(evsel); 1247 1274 perf_evsel__free_fd(&evsel->core); 1248 - perf_evsel__free_id(evsel); 1275 + perf_evsel__free_id(&evsel->core); 1249 1276 perf_evsel__free_config_terms(evsel); 1250 1277 cgroup__put(evsel->cgrp); 1251 1278 perf_cpu_map__put(evsel->core.cpus); ··· 1445 1472 return fd; 1446 1473 } 1447 1474 1448 - struct bit_names { 1449 - int bit; 1450 - const char *name; 1451 - }; 1452 - 1453 - static void 
__p_bits(char *buf, size_t size, u64 value, struct bit_names *bits) 1454 - { 1455 - bool first_bit = true; 1456 - int i = 0; 1457 - 1458 - do { 1459 - if (value & bits[i].bit) { 1460 - buf += scnprintf(buf, size, "%s%s", first_bit ? "" : "|", bits[i].name); 1461 - first_bit = false; 1462 - } 1463 - } while (bits[++i].name != NULL); 1464 - } 1465 - 1466 - static void __p_sample_type(char *buf, size_t size, u64 value) 1467 - { 1468 - #define bit_name(n) { PERF_SAMPLE_##n, #n } 1469 - struct bit_names bits[] = { 1470 - bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR), 1471 - bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU), 1472 - bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW), 1473 - bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER), 1474 - bit_name(IDENTIFIER), bit_name(REGS_INTR), bit_name(DATA_SRC), 1475 - bit_name(WEIGHT), bit_name(PHYS_ADDR), 1476 - { .name = NULL, } 1477 - }; 1478 - #undef bit_name 1479 - __p_bits(buf, size, value, bits); 1480 - } 1481 - 1482 - static void __p_branch_sample_type(char *buf, size_t size, u64 value) 1483 - { 1484 - #define bit_name(n) { PERF_SAMPLE_BRANCH_##n, #n } 1485 - struct bit_names bits[] = { 1486 - bit_name(USER), bit_name(KERNEL), bit_name(HV), bit_name(ANY), 1487 - bit_name(ANY_CALL), bit_name(ANY_RETURN), bit_name(IND_CALL), 1488 - bit_name(ABORT_TX), bit_name(IN_TX), bit_name(NO_TX), 1489 - bit_name(COND), bit_name(CALL_STACK), bit_name(IND_JUMP), 1490 - bit_name(CALL), bit_name(NO_FLAGS), bit_name(NO_CYCLES), 1491 - { .name = NULL, } 1492 - }; 1493 - #undef bit_name 1494 - __p_bits(buf, size, value, bits); 1495 - } 1496 - 1497 - static void __p_read_format(char *buf, size_t size, u64 value) 1498 - { 1499 - #define bit_name(n) { PERF_FORMAT_##n, #n } 1500 - struct bit_names bits[] = { 1501 - bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING), 1502 - bit_name(ID), bit_name(GROUP), 1503 - { .name = NULL, } 1504 - }; 1505 - #undef bit_name 1506 - __p_bits(buf, size, 
value, bits); 1507 - } 1508 - 1509 - #define BUF_SIZE 1024 1510 - 1511 - #define p_hex(val) snprintf(buf, BUF_SIZE, "%#"PRIx64, (uint64_t)(val)) 1512 - #define p_unsigned(val) snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val)) 1513 - #define p_signed(val) snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val)) 1514 - #define p_sample_type(val) __p_sample_type(buf, BUF_SIZE, val) 1515 - #define p_branch_sample_type(val) __p_branch_sample_type(buf, BUF_SIZE, val) 1516 - #define p_read_format(val) __p_read_format(buf, BUF_SIZE, val) 1517 - 1518 - #define PRINT_ATTRn(_n, _f, _p) \ 1519 - do { \ 1520 - if (attr->_f) { \ 1521 - _p(attr->_f); \ 1522 - ret += attr__fprintf(fp, _n, buf, priv);\ 1523 - } \ 1524 - } while (0) 1525 - 1526 - #define PRINT_ATTRf(_f, _p) PRINT_ATTRn(#_f, _f, _p) 1527 - 1528 - int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr, 1529 - attr__fprintf_f attr__fprintf, void *priv) 1530 - { 1531 - char buf[BUF_SIZE]; 1532 - int ret = 0; 1533 - 1534 - PRINT_ATTRf(type, p_unsigned); 1535 - PRINT_ATTRf(size, p_unsigned); 1536 - PRINT_ATTRf(config, p_hex); 1537 - PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned); 1538 - PRINT_ATTRf(sample_type, p_sample_type); 1539 - PRINT_ATTRf(read_format, p_read_format); 1540 - 1541 - PRINT_ATTRf(disabled, p_unsigned); 1542 - PRINT_ATTRf(inherit, p_unsigned); 1543 - PRINT_ATTRf(pinned, p_unsigned); 1544 - PRINT_ATTRf(exclusive, p_unsigned); 1545 - PRINT_ATTRf(exclude_user, p_unsigned); 1546 - PRINT_ATTRf(exclude_kernel, p_unsigned); 1547 - PRINT_ATTRf(exclude_hv, p_unsigned); 1548 - PRINT_ATTRf(exclude_idle, p_unsigned); 1549 - PRINT_ATTRf(mmap, p_unsigned); 1550 - PRINT_ATTRf(comm, p_unsigned); 1551 - PRINT_ATTRf(freq, p_unsigned); 1552 - PRINT_ATTRf(inherit_stat, p_unsigned); 1553 - PRINT_ATTRf(enable_on_exec, p_unsigned); 1554 - PRINT_ATTRf(task, p_unsigned); 1555 - PRINT_ATTRf(watermark, p_unsigned); 1556 - PRINT_ATTRf(precise_ip, p_unsigned); 1557 - PRINT_ATTRf(mmap_data, 
p_unsigned); 1558 - PRINT_ATTRf(sample_id_all, p_unsigned); 1559 - PRINT_ATTRf(exclude_host, p_unsigned); 1560 - PRINT_ATTRf(exclude_guest, p_unsigned); 1561 - PRINT_ATTRf(exclude_callchain_kernel, p_unsigned); 1562 - PRINT_ATTRf(exclude_callchain_user, p_unsigned); 1563 - PRINT_ATTRf(mmap2, p_unsigned); 1564 - PRINT_ATTRf(comm_exec, p_unsigned); 1565 - PRINT_ATTRf(use_clockid, p_unsigned); 1566 - PRINT_ATTRf(context_switch, p_unsigned); 1567 - PRINT_ATTRf(write_backward, p_unsigned); 1568 - PRINT_ATTRf(namespaces, p_unsigned); 1569 - PRINT_ATTRf(ksymbol, p_unsigned); 1570 - PRINT_ATTRf(bpf_event, p_unsigned); 1571 - PRINT_ATTRf(aux_output, p_unsigned); 1572 - 1573 - PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned); 1574 - PRINT_ATTRf(bp_type, p_unsigned); 1575 - PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex); 1576 - PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex); 1577 - PRINT_ATTRf(branch_sample_type, p_branch_sample_type); 1578 - PRINT_ATTRf(sample_regs_user, p_hex); 1579 - PRINT_ATTRf(sample_stack_user, p_unsigned); 1580 - PRINT_ATTRf(clockid, p_signed); 1581 - PRINT_ATTRf(sample_regs_intr, p_hex); 1582 - PRINT_ATTRf(aux_watermark, p_unsigned); 1583 - PRINT_ATTRf(sample_max_stack, p_unsigned); 1584 - 1585 - return ret; 1586 - } 1587 - 1588 - static int __open_attr__fprintf(FILE *fp, const char *name, const char *val, 1589 - void *priv __maybe_unused) 1590 - { 1591 - return fprintf(fp, " %-32s %s\n", name, val); 1592 - } 1593 - 1594 1475 static void perf_evsel__remove_fd(struct evsel *pos, 1595 1476 int nr_cpus, int nr_threads, 1596 1477 int thread_idx) ··· 1489 1662 return false; 1490 1663 1491 1664 /* The system wide setup does not work with threads. */ 1492 - if (evsel->system_wide) 1665 + if (evsel->core.system_wide) 1493 1666 return false; 1494 1667 1495 1668 /* The -ESRCH is perf event syscall errno for pid's not found. 
*/ ··· 1513 1686 pr_warning("WARNING: Ignored open failure for pid %d\n", 1514 1687 ignore_pid); 1515 1688 return true; 1689 + } 1690 + 1691 + static int __open_attr__fprintf(FILE *fp, const char *name, const char *val, 1692 + void *priv __maybe_unused) 1693 + { 1694 + return fprintf(fp, " %-32s %s\n", name, val); 1516 1695 } 1517 1696 1518 1697 static void display_attr(struct perf_event_attr *attr) ··· 1604 1771 threads = empty_thread_map; 1605 1772 } 1606 1773 1607 - if (evsel->system_wide) 1774 + if (evsel->core.system_wide) 1608 1775 nthreads = 1; 1609 1776 else 1610 1777 nthreads = threads->nr; ··· 1651 1818 for (thread = 0; thread < nthreads; thread++) { 1652 1819 int fd, group_fd; 1653 1820 1654 - if (!evsel->cgrp && !evsel->system_wide) 1821 + if (!evsel->cgrp && !evsel->core.system_wide) 1655 1822 pid = perf_thread_map__pid(threads, thread); 1656 1823 1657 1824 group_fd = get_group_fd(evsel, cpu, thread); ··· 1824 1991 void evsel__close(struct evsel *evsel) 1825 1992 { 1826 1993 perf_evsel__close(&evsel->core); 1827 - perf_evsel__free_id(evsel); 1994 + perf_evsel__free_id(&evsel->core); 1828 1995 } 1829 1996 1830 1997 int perf_evsel__open_per_cpu(struct evsel *evsel, ··· 2252 2419 return 0; 2253 2420 } 2254 2421 2255 - size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, 2256 - u64 read_format) 2257 - { 2258 - size_t sz, result = sizeof(struct perf_record_sample); 2259 - 2260 - if (type & PERF_SAMPLE_IDENTIFIER) 2261 - result += sizeof(u64); 2262 - 2263 - if (type & PERF_SAMPLE_IP) 2264 - result += sizeof(u64); 2265 - 2266 - if (type & PERF_SAMPLE_TID) 2267 - result += sizeof(u64); 2268 - 2269 - if (type & PERF_SAMPLE_TIME) 2270 - result += sizeof(u64); 2271 - 2272 - if (type & PERF_SAMPLE_ADDR) 2273 - result += sizeof(u64); 2274 - 2275 - if (type & PERF_SAMPLE_ID) 2276 - result += sizeof(u64); 2277 - 2278 - if (type & PERF_SAMPLE_STREAM_ID) 2279 - result += sizeof(u64); 2280 - 2281 - if (type & PERF_SAMPLE_CPU) 2282 - result += 
sizeof(u64); 2283 - 2284 - if (type & PERF_SAMPLE_PERIOD) 2285 - result += sizeof(u64); 2286 - 2287 - if (type & PERF_SAMPLE_READ) { 2288 - result += sizeof(u64); 2289 - if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 2290 - result += sizeof(u64); 2291 - if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 2292 - result += sizeof(u64); 2293 - /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ 2294 - if (read_format & PERF_FORMAT_GROUP) { 2295 - sz = sample->read.group.nr * 2296 - sizeof(struct sample_read_value); 2297 - result += sz; 2298 - } else { 2299 - result += sizeof(u64); 2300 - } 2301 - } 2302 - 2303 - if (type & PERF_SAMPLE_CALLCHAIN) { 2304 - sz = (sample->callchain->nr + 1) * sizeof(u64); 2305 - result += sz; 2306 - } 2307 - 2308 - if (type & PERF_SAMPLE_RAW) { 2309 - result += sizeof(u32); 2310 - result += sample->raw_size; 2311 - } 2312 - 2313 - if (type & PERF_SAMPLE_BRANCH_STACK) { 2314 - sz = sample->branch_stack->nr * sizeof(struct branch_entry); 2315 - sz += sizeof(u64); 2316 - result += sz; 2317 - } 2318 - 2319 - if (type & PERF_SAMPLE_REGS_USER) { 2320 - if (sample->user_regs.abi) { 2321 - result += sizeof(u64); 2322 - sz = hweight64(sample->user_regs.mask) * sizeof(u64); 2323 - result += sz; 2324 - } else { 2325 - result += sizeof(u64); 2326 - } 2327 - } 2328 - 2329 - if (type & PERF_SAMPLE_STACK_USER) { 2330 - sz = sample->user_stack.size; 2331 - result += sizeof(u64); 2332 - if (sz) { 2333 - result += sz; 2334 - result += sizeof(u64); 2335 - } 2336 - } 2337 - 2338 - if (type & PERF_SAMPLE_WEIGHT) 2339 - result += sizeof(u64); 2340 - 2341 - if (type & PERF_SAMPLE_DATA_SRC) 2342 - result += sizeof(u64); 2343 - 2344 - if (type & PERF_SAMPLE_TRANSACTION) 2345 - result += sizeof(u64); 2346 - 2347 - if (type & PERF_SAMPLE_REGS_INTR) { 2348 - if (sample->intr_regs.abi) { 2349 - result += sizeof(u64); 2350 - sz = hweight64(sample->intr_regs.mask) * sizeof(u64); 2351 - result += sz; 2352 - } else { 2353 - result += sizeof(u64); 2354 - } 2355 - } 2356 
- 2357 - if (type & PERF_SAMPLE_PHYS_ADDR) 2358 - result += sizeof(u64); 2359 - 2360 - return result; 2361 - } 2362 - 2363 - int perf_event__synthesize_sample(union perf_event *event, u64 type, 2364 - u64 read_format, 2365 - const struct perf_sample *sample) 2366 - { 2367 - __u64 *array; 2368 - size_t sz; 2369 - /* 2370 - * used for cross-endian analysis. See git commit 65014ab3 2371 - * for why this goofiness is needed. 2372 - */ 2373 - union u64_swap u; 2374 - 2375 - array = event->sample.array; 2376 - 2377 - if (type & PERF_SAMPLE_IDENTIFIER) { 2378 - *array = sample->id; 2379 - array++; 2380 - } 2381 - 2382 - if (type & PERF_SAMPLE_IP) { 2383 - *array = sample->ip; 2384 - array++; 2385 - } 2386 - 2387 - if (type & PERF_SAMPLE_TID) { 2388 - u.val32[0] = sample->pid; 2389 - u.val32[1] = sample->tid; 2390 - *array = u.val64; 2391 - array++; 2392 - } 2393 - 2394 - if (type & PERF_SAMPLE_TIME) { 2395 - *array = sample->time; 2396 - array++; 2397 - } 2398 - 2399 - if (type & PERF_SAMPLE_ADDR) { 2400 - *array = sample->addr; 2401 - array++; 2402 - } 2403 - 2404 - if (type & PERF_SAMPLE_ID) { 2405 - *array = sample->id; 2406 - array++; 2407 - } 2408 - 2409 - if (type & PERF_SAMPLE_STREAM_ID) { 2410 - *array = sample->stream_id; 2411 - array++; 2412 - } 2413 - 2414 - if (type & PERF_SAMPLE_CPU) { 2415 - u.val32[0] = sample->cpu; 2416 - u.val32[1] = 0; 2417 - *array = u.val64; 2418 - array++; 2419 - } 2420 - 2421 - if (type & PERF_SAMPLE_PERIOD) { 2422 - *array = sample->period; 2423 - array++; 2424 - } 2425 - 2426 - if (type & PERF_SAMPLE_READ) { 2427 - if (read_format & PERF_FORMAT_GROUP) 2428 - *array = sample->read.group.nr; 2429 - else 2430 - *array = sample->read.one.value; 2431 - array++; 2432 - 2433 - if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 2434 - *array = sample->read.time_enabled; 2435 - array++; 2436 - } 2437 - 2438 - if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 2439 - *array = sample->read.time_running; 2440 - array++; 2441 - } 2442 - 2443 
- /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ 2444 - if (read_format & PERF_FORMAT_GROUP) { 2445 - sz = sample->read.group.nr * 2446 - sizeof(struct sample_read_value); 2447 - memcpy(array, sample->read.group.values, sz); 2448 - array = (void *)array + sz; 2449 - } else { 2450 - *array = sample->read.one.id; 2451 - array++; 2452 - } 2453 - } 2454 - 2455 - if (type & PERF_SAMPLE_CALLCHAIN) { 2456 - sz = (sample->callchain->nr + 1) * sizeof(u64); 2457 - memcpy(array, sample->callchain, sz); 2458 - array = (void *)array + sz; 2459 - } 2460 - 2461 - if (type & PERF_SAMPLE_RAW) { 2462 - u.val32[0] = sample->raw_size; 2463 - *array = u.val64; 2464 - array = (void *)array + sizeof(u32); 2465 - 2466 - memcpy(array, sample->raw_data, sample->raw_size); 2467 - array = (void *)array + sample->raw_size; 2468 - } 2469 - 2470 - if (type & PERF_SAMPLE_BRANCH_STACK) { 2471 - sz = sample->branch_stack->nr * sizeof(struct branch_entry); 2472 - sz += sizeof(u64); 2473 - memcpy(array, sample->branch_stack, sz); 2474 - array = (void *)array + sz; 2475 - } 2476 - 2477 - if (type & PERF_SAMPLE_REGS_USER) { 2478 - if (sample->user_regs.abi) { 2479 - *array++ = sample->user_regs.abi; 2480 - sz = hweight64(sample->user_regs.mask) * sizeof(u64); 2481 - memcpy(array, sample->user_regs.regs, sz); 2482 - array = (void *)array + sz; 2483 - } else { 2484 - *array++ = 0; 2485 - } 2486 - } 2487 - 2488 - if (type & PERF_SAMPLE_STACK_USER) { 2489 - sz = sample->user_stack.size; 2490 - *array++ = sz; 2491 - if (sz) { 2492 - memcpy(array, sample->user_stack.data, sz); 2493 - array = (void *)array + sz; 2494 - *array++ = sz; 2495 - } 2496 - } 2497 - 2498 - if (type & PERF_SAMPLE_WEIGHT) { 2499 - *array = sample->weight; 2500 - array++; 2501 - } 2502 - 2503 - if (type & PERF_SAMPLE_DATA_SRC) { 2504 - *array = sample->data_src; 2505 - array++; 2506 - } 2507 - 2508 - if (type & PERF_SAMPLE_TRANSACTION) { 2509 - *array = sample->transaction; 2510 - array++; 2511 - } 2512 - 2513 - if (type & 
PERF_SAMPLE_REGS_INTR) { 2514 - if (sample->intr_regs.abi) { 2515 - *array++ = sample->intr_regs.abi; 2516 - sz = hweight64(sample->intr_regs.mask) * sizeof(u64); 2517 - memcpy(array, sample->intr_regs.regs, sz); 2518 - array = (void *)array + sz; 2519 - } else { 2520 - *array++ = 0; 2521 - } 2522 - } 2523 - 2524 - if (type & PERF_SAMPLE_PHYS_ADDR) { 2525 - *array = sample->phys_addr; 2526 - array++; 2527 - } 2528 - 2529 - return 0; 2530 - } 2531 - 2532 2422 struct tep_format_field *perf_evsel__field(struct evsel *evsel, const char *name) 2533 2423 { 2534 2424 return tep_find_field(evsel->tp_format, name); ··· 2367 2811 if (evsel->name) 2368 2812 free(evsel->name); 2369 2813 evsel->name = new_name; 2370 - scnprintf(msg, msgsize, 2371 - "kernel.perf_event_paranoid=%d, trying to fall back to excluding kernel samples", paranoid); 2814 + scnprintf(msg, msgsize, "kernel.perf_event_paranoid=%d, trying " 2815 + "to fall back to excluding kernel and hypervisor " 2816 + " samples", paranoid); 2372 2817 evsel->core.attr.exclude_kernel = 1; 2818 + evsel->core.attr.exclude_hv = 1; 2373 2819 2374 2820 return true; 2375 2821 } ··· 2524 2966 thread++) { 2525 2967 int fd = FD(evsel, cpu, thread); 2526 2968 2527 - if (perf_evlist__id_add_fd(evlist, evsel, 2969 + if (perf_evlist__id_add_fd(&evlist->core, &evsel->core, 2528 2970 cpu, thread, fd) < 0) 2529 2971 return -1; 2530 2972 } ··· 2538 2980 struct perf_cpu_map *cpus = evsel->core.cpus; 2539 2981 struct perf_thread_map *threads = evsel->core.threads; 2540 2982 2541 - if (perf_evsel__alloc_id(evsel, cpus->nr, threads->nr)) 2983 + if (perf_evsel__alloc_id(&evsel->core, cpus->nr, threads->nr)) 2542 2984 return -ENOMEM; 2543 2985 2544 2986 return store_evsel_ids(evsel, evlist);
+3 -123
tools/perf/util/evsel.h
··· 4 4 5 5 #include <linux/list.h> 6 6 #include <stdbool.h> 7 - #include <stdio.h> 8 7 #include <sys/types.h> 9 8 #include <linux/perf_event.h> 10 9 #include <linux/types.h> ··· 12 13 #include "symbol_conf.h" 13 14 #include <internal/cpumap.h> 14 15 15 - struct addr_location; 16 - struct evsel; 17 - union perf_event; 18 - 19 - /* 20 - * Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there are 21 - * more than one entry in the evlist. 22 - */ 23 - struct perf_sample_id { 24 - struct hlist_node node; 25 - u64 id; 26 - struct evsel *evsel; 27 - /* 28 - * 'idx' will be used for AUX area sampling. A sample will have AUX area 29 - * data that will be queued for decoding, where there are separate 30 - * queues for each CPU (per-cpu tracing) or task (per-thread tracing). 31 - * The sample ID can be used to lookup 'idx' which is effectively the 32 - * queue number. 33 - */ 34 - int idx; 35 - int cpu; 36 - pid_t tid; 37 - 38 - /* Holds total ID period value for PERF_SAMPLE_READ processing. */ 39 - u64 period; 40 - }; 41 - 16 + struct bpf_object; 42 17 struct cgroup; 43 - 44 - /* 45 - * The 'struct perf_evsel_config_term' is used to pass event 46 - * specific configuration data to perf_evsel__config routine. 47 - * It is allocated within event parsing and attached to 48 - * perf_evsel::config_terms list head. 
49 - */ 50 - enum term_type { 51 - PERF_EVSEL__CONFIG_TERM_PERIOD, 52 - PERF_EVSEL__CONFIG_TERM_FREQ, 53 - PERF_EVSEL__CONFIG_TERM_TIME, 54 - PERF_EVSEL__CONFIG_TERM_CALLGRAPH, 55 - PERF_EVSEL__CONFIG_TERM_STACK_USER, 56 - PERF_EVSEL__CONFIG_TERM_INHERIT, 57 - PERF_EVSEL__CONFIG_TERM_MAX_STACK, 58 - PERF_EVSEL__CONFIG_TERM_MAX_EVENTS, 59 - PERF_EVSEL__CONFIG_TERM_OVERWRITE, 60 - PERF_EVSEL__CONFIG_TERM_DRV_CFG, 61 - PERF_EVSEL__CONFIG_TERM_BRANCH, 62 - PERF_EVSEL__CONFIG_TERM_PERCORE, 63 - PERF_EVSEL__CONFIG_TERM_AUX_OUTPUT, 64 - }; 65 - 66 - struct perf_evsel_config_term { 67 - struct list_head list; 68 - enum term_type type; 69 - union { 70 - u64 period; 71 - u64 freq; 72 - bool time; 73 - char *callgraph; 74 - char *drv_cfg; 75 - u64 stack_user; 76 - int max_stack; 77 - bool inherit; 78 - bool overwrite; 79 - char *branch; 80 - unsigned long max_events; 81 - bool percore; 82 - bool aux_output; 83 - } val; 84 - bool weak; 85 - }; 86 - 18 + struct perf_counts; 87 19 struct perf_stat_evsel; 20 + union perf_event; 88 21 89 22 typedef int (perf_evsel__sb_cb_t)(union perf_event *event, void *data); 90 23 ··· 24 93 PERF_TOOL_NONE = 0, 25 94 PERF_TOOL_DURATION_TIME = 1, 26 95 }; 27 - 28 - struct bpf_object; 29 - struct perf_counts; 30 - struct xyarray; 31 96 32 97 /** struct evsel - event selector 33 98 * ··· 44 117 struct perf_evsel core; 45 118 struct evlist *evlist; 46 119 char *filter; 47 - struct xyarray *sample_id; 48 - u64 *id; 49 120 struct perf_counts *counts; 50 121 struct perf_counts *prev_raw_counts; 51 122 int idx; 52 - u32 ids; 53 123 unsigned long max_events; 54 124 unsigned long nr_events_printed; 55 125 char *name; ··· 70 146 bool disabled; 71 147 bool no_aux_samples; 72 148 bool immediate; 73 - bool system_wide; 74 149 bool tracking; 75 150 bool per_pkg; 76 151 bool precise_max; ··· 100 177 perf_evsel__sb_cb_t *cb; 101 178 void *data; 102 179 } side_band; 103 - }; 104 - 105 - union u64_swap { 106 - u64 val64; 107 - u32 val32[2]; 108 180 }; 109 181 110 
182 struct perf_missing_features { ··· 199 281 200 282 const char *perf_evsel__group_name(struct evsel *evsel); 201 283 int perf_evsel__group_desc(struct evsel *evsel, char *buf, size_t size); 202 - 203 - int perf_evsel__alloc_id(struct evsel *evsel, int ncpus, int nthreads); 204 284 205 285 void __perf_evsel__set_sample_bit(struct evsel *evsel, 206 286 enum perf_event_sample_format bit); ··· 355 439 perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK); 356 440 } 357 441 358 - struct perf_attr_details { 359 - bool freq; 360 - bool verbose; 361 - bool event_group; 362 - bool force; 363 - bool trace_fields; 364 - }; 365 - 366 - int perf_evsel__fprintf(struct evsel *evsel, 367 - struct perf_attr_details *details, FILE *fp); 368 - 369 - #define EVSEL__PRINT_IP (1<<0) 370 - #define EVSEL__PRINT_SYM (1<<1) 371 - #define EVSEL__PRINT_DSO (1<<2) 372 - #define EVSEL__PRINT_SYMOFFSET (1<<3) 373 - #define EVSEL__PRINT_ONELINE (1<<4) 374 - #define EVSEL__PRINT_SRCLINE (1<<5) 375 - #define EVSEL__PRINT_UNKNOWN_AS_ADDR (1<<6) 376 - #define EVSEL__PRINT_CALLCHAIN_ARROW (1<<7) 377 - #define EVSEL__PRINT_SKIP_IGNORED (1<<8) 378 - 379 - struct callchain_cursor; 380 - 381 - int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment, 382 - unsigned int print_opts, 383 - struct callchain_cursor *cursor, FILE *fp); 384 - 385 - int sample__fprintf_sym(struct perf_sample *sample, struct addr_location *al, 386 - int left_alignment, unsigned int print_opts, 387 - struct callchain_cursor *cursor, FILE *fp); 388 - 389 442 bool perf_evsel__fallback(struct evsel *evsel, int err, 390 443 char *msg, size_t msgsize); 391 444 int perf_evsel__open_strerror(struct evsel *evsel, struct target *target, ··· 386 501 { 387 502 return (evsel->core.attr.sample_type & PERF_SAMPLE_CALLCHAIN) != 0; 388 503 } 389 - 390 - typedef int (*attr__fprintf_f)(FILE *, const char *, const char *, void *); 391 - 392 - int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr, 393 - attr__fprintf_f 
attr__fprintf, void *priv); 394 504 395 505 struct perf_env *perf_evsel__env(struct evsel *evsel); 396 506
+50
tools/perf/util/evsel_config.h
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #ifndef __PERF_EVSEL_CONFIG_H 3 + #define __PERF_EVSEL_CONFIG_H 1 4 + 5 + #include <linux/types.h> 6 + #include <stdbool.h> 7 + 8 + /* 9 + * The 'struct perf_evsel_config_term' is used to pass event 10 + * specific configuration data to perf_evsel__config routine. 11 + * It is allocated within event parsing and attached to 12 + * perf_evsel::config_terms list head. 13 + */ 14 + enum evsel_term_type { 15 + PERF_EVSEL__CONFIG_TERM_PERIOD, 16 + PERF_EVSEL__CONFIG_TERM_FREQ, 17 + PERF_EVSEL__CONFIG_TERM_TIME, 18 + PERF_EVSEL__CONFIG_TERM_CALLGRAPH, 19 + PERF_EVSEL__CONFIG_TERM_STACK_USER, 20 + PERF_EVSEL__CONFIG_TERM_INHERIT, 21 + PERF_EVSEL__CONFIG_TERM_MAX_STACK, 22 + PERF_EVSEL__CONFIG_TERM_MAX_EVENTS, 23 + PERF_EVSEL__CONFIG_TERM_OVERWRITE, 24 + PERF_EVSEL__CONFIG_TERM_DRV_CFG, 25 + PERF_EVSEL__CONFIG_TERM_BRANCH, 26 + PERF_EVSEL__CONFIG_TERM_PERCORE, 27 + PERF_EVSEL__CONFIG_TERM_AUX_OUTPUT, 28 + }; 29 + 30 + struct perf_evsel_config_term { 31 + struct list_head list; 32 + enum evsel_term_type type; 33 + union { 34 + u64 period; 35 + u64 freq; 36 + bool time; 37 + char *callgraph; 38 + char *drv_cfg; 39 + u64 stack_user; 40 + int max_stack; 41 + bool inherit; 42 + bool overwrite; 43 + char *branch; 44 + unsigned long max_events; 45 + bool percore; 46 + bool aux_output; 47 + } val; 48 + bool weak; 49 + }; 50 + #endif // __PERF_EVSEL_CONFIG_H
+8 -8
tools/perf/util/evsel_fprintf.c
··· 4 4 #include <stdbool.h> 5 5 #include <traceevent/event-parse.h> 6 6 #include "evsel.h" 7 + #include "util/evsel_fprintf.h" 8 + #include "util/event.h" 7 9 #include "callchain.h" 8 10 #include "map.h" 9 11 #include "strlist.h" ··· 103 101 104 102 int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment, 105 103 unsigned int print_opts, struct callchain_cursor *cursor, 106 - FILE *fp) 104 + struct strlist *bt_stop_list, FILE *fp) 107 105 { 108 106 int printed = 0; 109 107 struct callchain_cursor_node *node; ··· 176 174 printed += fprintf(fp, "\n"); 177 175 178 176 /* Add srccode here too? */ 179 - if (symbol_conf.bt_stop_list && 180 - node->sym && 181 - strlist__has_entry(symbol_conf.bt_stop_list, 182 - node->sym->name)) { 177 + if (bt_stop_list && node->sym && 178 + strlist__has_entry(bt_stop_list, node->sym->name)) { 183 179 break; 184 180 } 185 181 ··· 192 192 193 193 int sample__fprintf_sym(struct perf_sample *sample, struct addr_location *al, 194 194 int left_alignment, unsigned int print_opts, 195 - struct callchain_cursor *cursor, FILE *fp) 195 + struct callchain_cursor *cursor, struct strlist *bt_stop_list, FILE *fp) 196 196 { 197 197 int printed = 0; 198 198 int print_ip = print_opts & EVSEL__PRINT_IP; ··· 203 203 int print_unknown_as_addr = print_opts & EVSEL__PRINT_UNKNOWN_AS_ADDR; 204 204 205 205 if (cursor != NULL) { 206 - printed += sample__fprintf_callchain(sample, left_alignment, 207 - print_opts, cursor, fp); 206 + printed += sample__fprintf_callchain(sample, left_alignment, print_opts, 207 + cursor, bt_stop_list, fp); 208 208 } else { 209 209 printed += fprintf(fp, "%-*.*s", left_alignment, left_alignment, " "); 210 210
+50
tools/perf/util/evsel_fprintf.h
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #ifndef __PERF_EVSEL_FPRINTF_H 3 + #define __PERF_EVSEL_FPRINTF_H 1 4 + 5 + #include <stdio.h> 6 + #include <stdbool.h> 7 + 8 + struct evsel; 9 + 10 + struct perf_attr_details { 11 + bool freq; 12 + bool verbose; 13 + bool event_group; 14 + bool force; 15 + bool trace_fields; 16 + }; 17 + 18 + int perf_evsel__fprintf(struct evsel *evsel, 19 + struct perf_attr_details *details, FILE *fp); 20 + 21 + #define EVSEL__PRINT_IP (1<<0) 22 + #define EVSEL__PRINT_SYM (1<<1) 23 + #define EVSEL__PRINT_DSO (1<<2) 24 + #define EVSEL__PRINT_SYMOFFSET (1<<3) 25 + #define EVSEL__PRINT_ONELINE (1<<4) 26 + #define EVSEL__PRINT_SRCLINE (1<<5) 27 + #define EVSEL__PRINT_UNKNOWN_AS_ADDR (1<<6) 28 + #define EVSEL__PRINT_CALLCHAIN_ARROW (1<<7) 29 + #define EVSEL__PRINT_SKIP_IGNORED (1<<8) 30 + 31 + struct addr_location; 32 + struct perf_event_attr; 33 + struct perf_sample; 34 + struct callchain_cursor; 35 + struct strlist; 36 + 37 + int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment, 38 + unsigned int print_opts, struct callchain_cursor *cursor, 39 + struct strlist *bt_stop_list, FILE *fp); 40 + 41 + int sample__fprintf_sym(struct perf_sample *sample, struct addr_location *al, 42 + int left_alignment, unsigned int print_opts, 43 + struct callchain_cursor *cursor, 44 + struct strlist *bt_stop_list, FILE *fp); 45 + 46 + typedef int (*attr__fprintf_f)(FILE *, const char *, const char *, void *); 47 + 48 + int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr, 49 + attr__fprintf_f attr__fprintf, void *priv); 50 + #endif // __PERF_EVSEL_H
+3
tools/perf/util/genelf.h
··· 35 35 #elif defined(__sparc__) 36 36 #define GEN_ELF_ARCH EM_SPARC 37 37 #define GEN_ELF_CLASS ELFCLASS32 38 + #elif defined(__s390x__) 39 + #define GEN_ELF_ARCH EM_S390 40 + #define GEN_ELF_CLASS ELFCLASS64 38 41 #else 39 42 #error "unsupported architecture" 40 43 #endif
+20 -404
tools/perf/util/header.c
··· 25 25 #include "dso.h" 26 26 #include "evlist.h" 27 27 #include "evsel.h" 28 + #include "util/evsel_fprintf.h" 28 29 #include "header.h" 29 30 #include "memswap.h" 30 31 #include "trace-event.h" ··· 43 42 #include "tool.h" 44 43 #include "time-utils.h" 45 44 #include "units.h" 46 - #include "util.h" 45 + #include "util/util.h" // perf_exe() 47 46 #include "cputopo.h" 48 47 #include "bpf-event.h" 49 48 50 49 #include <linux/ctype.h> 50 + #include <internal/lib.h> 51 51 52 52 /* 53 53 * magic2 = "PERFILE2" ··· 70 68 struct perf_file_attr { 71 69 struct perf_event_attr attr; 72 70 struct perf_file_section ids; 73 - }; 74 - 75 - struct feat_fd { 76 - struct perf_header *ph; 77 - int fd; 78 - void *buf; /* Either buf != NULL or fd >= 0 */ 79 - ssize_t offset; 80 - size_t size; 81 - struct evsel *events; 82 71 }; 83 72 84 73 void perf_header__set_feat(struct perf_header *header, int feat) ··· 517 524 * copy into an nri to be independent of the 518 525 * type of ids, 519 526 */ 520 - nri = evsel->ids; 527 + nri = evsel->core.ids; 521 528 ret = do_write(ff, &nri, sizeof(nri)); 522 529 if (ret < 0) 523 530 return ret; ··· 531 538 /* 532 539 * write unique ids for this event 533 540 */ 534 - ret = do_write(ff, evsel->id, evsel->ids * sizeof(u64)); 541 + ret = do_write(ff, evsel->core.id, evsel->core.ids * sizeof(u64)); 535 542 if (ret < 0) 536 543 return ret; 537 544 } ··· 1074 1081 1075 1082 scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path); 1076 1083 if (sysfs__read_str(file, &cache->map, &len)) { 1077 - zfree(&cache->map); 1084 + zfree(&cache->size); 1078 1085 zfree(&cache->type); 1079 1086 return -1; 1080 1087 } ··· 1591 1598 1592 1599 for (evsel = events; evsel->core.attr.size; evsel++) { 1593 1600 zfree(&evsel->name); 1594 - zfree(&evsel->id); 1601 + zfree(&evsel->core.id); 1595 1602 } 1596 1603 1597 1604 free(events); ··· 1657 1664 id = calloc(nr, sizeof(*id)); 1658 1665 if (!id) 1659 1666 goto error; 1660 - evsel->ids = nr; 1661 - evsel->id = id; 1667 + 
evsel->core.ids = nr; 1668 + evsel->core.id = id; 1662 1669 1663 1670 for (j = 0 ; j < nr; j++) { 1664 1671 if (do_read_u64(ff, id)) ··· 1700 1707 for (evsel = events; evsel->core.attr.size; evsel++) { 1701 1708 fprintf(fp, "# event : name = %s, ", evsel->name); 1702 1709 1703 - if (evsel->ids) { 1710 + if (evsel->core.ids) { 1704 1711 fprintf(fp, ", id = {"); 1705 - for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) { 1712 + for (j = 0, id = evsel->core.id; j < evsel->core.ids; j++, id++) { 1706 1713 if (j) 1707 1714 fputc(',', fp); 1708 1715 fprintf(fp, " %"PRIu64, *id); ··· 2816 2823 return 0; 2817 2824 } 2818 2825 2819 - struct feature_ops { 2820 - int (*write)(struct feat_fd *ff, struct evlist *evlist); 2821 - void (*print)(struct feat_fd *ff, FILE *fp); 2822 - int (*process)(struct feat_fd *ff, void *data); 2823 - const char *name; 2824 - bool full_only; 2825 - bool synthesize; 2826 - }; 2827 - 2828 2826 #define FEAT_OPR(n, func, __full_only) \ 2829 2827 [HEADER_##n] = { \ 2830 2828 .name = __stringify(n), \ ··· 2842 2858 #define process_branch_stack NULL 2843 2859 #define process_stat NULL 2844 2860 2861 + // Only used in util/synthetic-events.c 2862 + const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE]; 2845 2863 2846 - static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = { 2864 + const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE] = { 2847 2865 FEAT_OPN(TRACING_DATA, tracing_data, false), 2848 2866 FEAT_OPN(BUILD_ID, build_id, false), 2849 2867 FEAT_OPR(HOSTNAME, hostname, false), ··· 3069 3083 3070 3084 evlist__for_each_entry(session->evlist, evsel) { 3071 3085 evsel->id_offset = lseek(fd, 0, SEEK_CUR); 3072 - err = do_write(&ff, evsel->id, evsel->ids * sizeof(u64)); 3086 + err = do_write(&ff, evsel->core.id, evsel->core.ids * sizeof(u64)); 3073 3087 if (err < 0) { 3074 3088 pr_debug("failed to write perf header\n"); 3075 3089 return err; ··· 3083 3097 .attr = evsel->core.attr, 3084 3098 .ids = { 3085 3099 
.offset = evsel->id_offset, 3086 - .size = evsel->ids * sizeof(u64), 3100 + .size = evsel->core.ids * sizeof(u64), 3087 3101 } 3088 3102 }; 3089 3103 err = do_write(&ff, &f_attr, sizeof(f_attr)); ··· 3610 3624 * for allocating the perf_sample_id table we fake 1 cpu and 3611 3625 * hattr->ids threads. 3612 3626 */ 3613 - if (perf_evsel__alloc_id(evsel, 1, nr_ids)) 3627 + if (perf_evsel__alloc_id(&evsel->core, 1, nr_ids)) 3614 3628 goto out_delete_evlist; 3615 3629 3616 3630 lseek(fd, f_attr.ids.offset, SEEK_SET); ··· 3619 3633 if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id))) 3620 3634 goto out_errno; 3621 3635 3622 - perf_evlist__id_add(session->evlist, evsel, 0, j, f_id); 3636 + perf_evlist__id_add(&session->evlist->core, &evsel->core, 0, j, f_id); 3623 3637 } 3624 3638 3625 3639 lseek(fd, tmp, SEEK_SET); ··· 3640 3654 evlist__delete(session->evlist); 3641 3655 session->evlist = NULL; 3642 3656 return -ENOMEM; 3643 - } 3644 - 3645 - int perf_event__synthesize_attr(struct perf_tool *tool, 3646 - struct perf_event_attr *attr, u32 ids, u64 *id, 3647 - perf_event__handler_t process) 3648 - { 3649 - union perf_event *ev; 3650 - size_t size; 3651 - int err; 3652 - 3653 - size = sizeof(struct perf_event_attr); 3654 - size = PERF_ALIGN(size, sizeof(u64)); 3655 - size += sizeof(struct perf_event_header); 3656 - size += ids * sizeof(u64); 3657 - 3658 - ev = zalloc(size); 3659 - 3660 - if (ev == NULL) 3661 - return -ENOMEM; 3662 - 3663 - ev->attr.attr = *attr; 3664 - memcpy(ev->attr.id, id, ids * sizeof(u64)); 3665 - 3666 - ev->attr.header.type = PERF_RECORD_HEADER_ATTR; 3667 - ev->attr.header.size = (u16)size; 3668 - 3669 - if (ev->attr.header.size == size) 3670 - err = process(tool, ev, NULL, NULL); 3671 - else 3672 - err = -E2BIG; 3673 - 3674 - free(ev); 3675 - 3676 - return err; 3677 - } 3678 - 3679 - int perf_event__synthesize_features(struct perf_tool *tool, 3680 - struct perf_session *session, 3681 - struct evlist *evlist, 3682 - perf_event__handler_t 
process) 3683 - { 3684 - struct perf_header *header = &session->header; 3685 - struct feat_fd ff; 3686 - struct perf_record_header_feature *fe; 3687 - size_t sz, sz_hdr; 3688 - int feat, ret; 3689 - 3690 - sz_hdr = sizeof(fe->header); 3691 - sz = sizeof(union perf_event); 3692 - /* get a nice alignment */ 3693 - sz = PERF_ALIGN(sz, page_size); 3694 - 3695 - memset(&ff, 0, sizeof(ff)); 3696 - 3697 - ff.buf = malloc(sz); 3698 - if (!ff.buf) 3699 - return -ENOMEM; 3700 - 3701 - ff.size = sz - sz_hdr; 3702 - ff.ph = &session->header; 3703 - 3704 - for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) { 3705 - if (!feat_ops[feat].synthesize) { 3706 - pr_debug("No record header feature for header :%d\n", feat); 3707 - continue; 3708 - } 3709 - 3710 - ff.offset = sizeof(*fe); 3711 - 3712 - ret = feat_ops[feat].write(&ff, evlist); 3713 - if (ret || ff.offset <= (ssize_t)sizeof(*fe)) { 3714 - pr_debug("Error writing feature\n"); 3715 - continue; 3716 - } 3717 - /* ff.buf may have changed due to realloc in do_write() */ 3718 - fe = ff.buf; 3719 - memset(fe, 0, sizeof(*fe)); 3720 - 3721 - fe->feat_id = feat; 3722 - fe->header.type = PERF_RECORD_HEADER_FEATURE; 3723 - fe->header.size = ff.offset; 3724 - 3725 - ret = process(tool, ff.buf, NULL, NULL); 3726 - if (ret) { 3727 - free(ff.buf); 3728 - return ret; 3729 - } 3730 - } 3731 - 3732 - /* Send HEADER_LAST_FEATURE mark. 
*/ 3733 - fe = ff.buf; 3734 - fe->feat_id = HEADER_LAST_FEATURE; 3735 - fe->header.type = PERF_RECORD_HEADER_FEATURE; 3736 - fe->header.size = sizeof(*fe); 3737 - 3738 - ret = process(tool, ff.buf, NULL, NULL); 3739 - 3740 - free(ff.buf); 3741 - return ret; 3742 3657 } 3743 3658 3744 3659 int perf_event__process_feature(struct perf_session *session, ··· 3684 3797 return 0; 3685 3798 } 3686 3799 3687 - static struct perf_record_event_update * 3688 - event_update_event__new(size_t size, u64 type, u64 id) 3689 - { 3690 - struct perf_record_event_update *ev; 3691 - 3692 - size += sizeof(*ev); 3693 - size = PERF_ALIGN(size, sizeof(u64)); 3694 - 3695 - ev = zalloc(size); 3696 - if (ev) { 3697 - ev->header.type = PERF_RECORD_EVENT_UPDATE; 3698 - ev->header.size = (u16)size; 3699 - ev->type = type; 3700 - ev->id = id; 3701 - } 3702 - return ev; 3703 - } 3704 - 3705 - int 3706 - perf_event__synthesize_event_update_unit(struct perf_tool *tool, 3707 - struct evsel *evsel, 3708 - perf_event__handler_t process) 3709 - { 3710 - struct perf_record_event_update *ev; 3711 - size_t size = strlen(evsel->unit); 3712 - int err; 3713 - 3714 - ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]); 3715 - if (ev == NULL) 3716 - return -ENOMEM; 3717 - 3718 - strlcpy(ev->data, evsel->unit, size + 1); 3719 - err = process(tool, (union perf_event *)ev, NULL, NULL); 3720 - free(ev); 3721 - return err; 3722 - } 3723 - 3724 - int 3725 - perf_event__synthesize_event_update_scale(struct perf_tool *tool, 3726 - struct evsel *evsel, 3727 - perf_event__handler_t process) 3728 - { 3729 - struct perf_record_event_update *ev; 3730 - struct perf_record_event_update_scale *ev_data; 3731 - int err; 3732 - 3733 - ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]); 3734 - if (ev == NULL) 3735 - return -ENOMEM; 3736 - 3737 - ev_data = (struct perf_record_event_update_scale *)ev->data; 3738 - ev_data->scale = evsel->scale; 3739 - err = process(tool, 
(union perf_event*) ev, NULL, NULL); 3740 - free(ev); 3741 - return err; 3742 - } 3743 - 3744 - int 3745 - perf_event__synthesize_event_update_name(struct perf_tool *tool, 3746 - struct evsel *evsel, 3747 - perf_event__handler_t process) 3748 - { 3749 - struct perf_record_event_update *ev; 3750 - size_t len = strlen(evsel->name); 3751 - int err; 3752 - 3753 - ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]); 3754 - if (ev == NULL) 3755 - return -ENOMEM; 3756 - 3757 - strlcpy(ev->data, evsel->name, len + 1); 3758 - err = process(tool, (union perf_event*) ev, NULL, NULL); 3759 - free(ev); 3760 - return err; 3761 - } 3762 - 3763 - int 3764 - perf_event__synthesize_event_update_cpus(struct perf_tool *tool, 3765 - struct evsel *evsel, 3766 - perf_event__handler_t process) 3767 - { 3768 - size_t size = sizeof(struct perf_record_event_update); 3769 - struct perf_record_event_update *ev; 3770 - int max, err; 3771 - u16 type; 3772 - 3773 - if (!evsel->core.own_cpus) 3774 - return 0; 3775 - 3776 - ev = cpu_map_data__alloc(evsel->core.own_cpus, &size, &type, &max); 3777 - if (!ev) 3778 - return -ENOMEM; 3779 - 3780 - ev->header.type = PERF_RECORD_EVENT_UPDATE; 3781 - ev->header.size = (u16)size; 3782 - ev->type = PERF_EVENT_UPDATE__CPUS; 3783 - ev->id = evsel->id[0]; 3784 - 3785 - cpu_map_data__synthesize((struct perf_record_cpu_map_data *)ev->data, 3786 - evsel->core.own_cpus, 3787 - type, max); 3788 - 3789 - err = process(tool, (union perf_event*) ev, NULL, NULL); 3790 - free(ev); 3791 - return err; 3792 - } 3793 - 3794 3800 size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp) 3795 3801 { 3796 3802 struct perf_record_event_update *ev = &event->event_update; ··· 3723 3943 return ret; 3724 3944 } 3725 3945 3726 - int perf_event__synthesize_attrs(struct perf_tool *tool, 3727 - struct evlist *evlist, 3728 - perf_event__handler_t process) 3729 - { 3730 - struct evsel *evsel; 3731 - int err = 0; 3732 - 3733 - 
evlist__for_each_entry(evlist, evsel) { 3734 - err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->ids, 3735 - evsel->id, process); 3736 - if (err) { 3737 - pr_debug("failed to create perf header attribute\n"); 3738 - return err; 3739 - } 3740 - } 3741 - 3742 - return err; 3743 - } 3744 - 3745 - static bool has_unit(struct evsel *counter) 3746 - { 3747 - return counter->unit && *counter->unit; 3748 - } 3749 - 3750 - static bool has_scale(struct evsel *counter) 3751 - { 3752 - return counter->scale != 1; 3753 - } 3754 - 3755 - int perf_event__synthesize_extra_attr(struct perf_tool *tool, 3756 - struct evlist *evsel_list, 3757 - perf_event__handler_t process, 3758 - bool is_pipe) 3759 - { 3760 - struct evsel *counter; 3761 - int err; 3762 - 3763 - /* 3764 - * Synthesize other events stuff not carried within 3765 - * attr event - unit, scale, name 3766 - */ 3767 - evlist__for_each_entry(evsel_list, counter) { 3768 - if (!counter->supported) 3769 - continue; 3770 - 3771 - /* 3772 - * Synthesize unit and scale only if it's defined. 3773 - */ 3774 - if (has_unit(counter)) { 3775 - err = perf_event__synthesize_event_update_unit(tool, counter, process); 3776 - if (err < 0) { 3777 - pr_err("Couldn't synthesize evsel unit.\n"); 3778 - return err; 3779 - } 3780 - } 3781 - 3782 - if (has_scale(counter)) { 3783 - err = perf_event__synthesize_event_update_scale(tool, counter, process); 3784 - if (err < 0) { 3785 - pr_err("Couldn't synthesize evsel counter.\n"); 3786 - return err; 3787 - } 3788 - } 3789 - 3790 - if (counter->core.own_cpus) { 3791 - err = perf_event__synthesize_event_update_cpus(tool, counter, process); 3792 - if (err < 0) { 3793 - pr_err("Couldn't synthesize evsel cpus.\n"); 3794 - return err; 3795 - } 3796 - } 3797 - 3798 - /* 3799 - * Name is needed only for pipe output, 3800 - * perf.data carries event names. 
3801 - */ 3802 - if (is_pipe) { 3803 - err = perf_event__synthesize_event_update_name(tool, counter, process); 3804 - if (err < 0) { 3805 - pr_err("Couldn't synthesize evsel name.\n"); 3806 - return err; 3807 - } 3808 - } 3809 - } 3810 - return 0; 3811 - } 3812 - 3813 3946 int perf_event__process_attr(struct perf_tool *tool __maybe_unused, 3814 3947 union perf_event *event, 3815 3948 struct evlist **pevlist) ··· 3751 4058 * for allocating the perf_sample_id table we fake 1 cpu and 3752 4059 * hattr->ids threads. 3753 4060 */ 3754 - if (perf_evsel__alloc_id(evsel, 1, n_ids)) 4061 + if (perf_evsel__alloc_id(&evsel->core, 1, n_ids)) 3755 4062 return -ENOMEM; 3756 4063 3757 4064 for (i = 0; i < n_ids; i++) { 3758 - perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]); 4065 + perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, event->attr.id[i]); 3759 4066 } 3760 4067 3761 4068 return 0; ··· 3807 4114 return 0; 3808 4115 } 3809 4116 3810 - int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, 3811 - struct evlist *evlist, 3812 - perf_event__handler_t process) 3813 - { 3814 - union perf_event ev; 3815 - struct tracing_data *tdata; 3816 - ssize_t size = 0, aligned_size = 0, padding; 3817 - struct feat_fd ff; 3818 - int err __maybe_unused = 0; 3819 - 3820 - /* 3821 - * We are going to store the size of the data followed 3822 - * by the data contents. Since the fd descriptor is a pipe, 3823 - * we cannot seek back to store the size of the data once 3824 - * we know it. 
Instead we: 3825 - * 3826 - * - write the tracing data to the temp file 3827 - * - get/write the data size to pipe 3828 - * - write the tracing data from the temp file 3829 - * to the pipe 3830 - */ 3831 - tdata = tracing_data_get(&evlist->core.entries, fd, true); 3832 - if (!tdata) 3833 - return -1; 3834 - 3835 - memset(&ev, 0, sizeof(ev)); 3836 - 3837 - ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA; 3838 - size = tdata->size; 3839 - aligned_size = PERF_ALIGN(size, sizeof(u64)); 3840 - padding = aligned_size - size; 3841 - ev.tracing_data.header.size = sizeof(ev.tracing_data); 3842 - ev.tracing_data.size = aligned_size; 3843 - 3844 - process(tool, &ev, NULL, NULL); 3845 - 3846 - /* 3847 - * The put function will copy all the tracing data 3848 - * stored in temp file to the pipe. 3849 - */ 3850 - tracing_data_put(tdata); 3851 - 3852 - ff = (struct feat_fd){ .fd = fd }; 3853 - if (write_padded(&ff, NULL, 0, padding)) 3854 - return -1; 3855 - 3856 - return aligned_size; 3857 - } 3858 - 3859 4117 int perf_event__process_tracing_data(struct perf_session *session, 3860 4118 union perf_event *event) 3861 4119 { ··· 3844 4200 session->tevent.pevent); 3845 4201 3846 4202 return size_read + padding; 3847 - } 3848 - 3849 - int perf_event__synthesize_build_id(struct perf_tool *tool, 3850 - struct dso *pos, u16 misc, 3851 - perf_event__handler_t process, 3852 - struct machine *machine) 3853 - { 3854 - union perf_event ev; 3855 - size_t len; 3856 - int err = 0; 3857 - 3858 - if (!pos->hit) 3859 - return err; 3860 - 3861 - memset(&ev, 0, sizeof(ev)); 3862 - 3863 - len = pos->long_name_len + 1; 3864 - len = PERF_ALIGN(len, NAME_ALIGN); 3865 - memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id)); 3866 - ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID; 3867 - ev.build_id.header.misc = misc; 3868 - ev.build_id.pid = machine->pid; 3869 - ev.build_id.header.size = sizeof(ev.build_id) + len; 3870 - memcpy(&ev.build_id.filename, pos->long_name, 
pos->long_name_len); 3871 - 3872 - err = process(tool, &ev, NULL, machine); 3873 - 3874 - return err; 3875 4203 } 3876 4204 3877 4205 int perf_event__process_build_id(struct perf_session *session,
+21 -39
tools/perf/util/header.h
··· 5 5 #include <linux/stddef.h> 6 6 #include <linux/perf_event.h> 7 7 #include <sys/types.h> 8 + #include <stdio.h> // FILE 8 9 #include <stdbool.h> 9 10 #include <linux/bitmap.h> 10 11 #include <linux/types.h> 11 - #include "event.h" 12 12 #include "env.h" 13 13 #include "pmu.h" 14 14 ··· 92 92 struct perf_env env; 93 93 }; 94 94 95 + struct feat_fd { 96 + struct perf_header *ph; 97 + int fd; 98 + void *buf; /* Either buf != NULL or fd >= 0 */ 99 + ssize_t offset; 100 + size_t size; 101 + struct evsel *events; 102 + }; 103 + 104 + struct perf_header_feature_ops { 105 + int (*write)(struct feat_fd *ff, struct evlist *evlist); 106 + void (*print)(struct feat_fd *ff, FILE *fp); 107 + int (*process)(struct feat_fd *ff, void *data); 108 + const char *name; 109 + bool full_only; 110 + bool synthesize; 111 + }; 112 + 95 113 struct evlist; 96 114 struct perf_session; 115 + struct perf_tool; 116 + union perf_event; 97 117 98 118 int perf_session__read_header(struct perf_session *session); 99 119 int perf_session__write_header(struct perf_session *session, ··· 135 115 136 116 int perf_header__fprintf_info(struct perf_session *s, FILE *fp, bool full); 137 117 138 - int perf_event__synthesize_features(struct perf_tool *tool, 139 - struct perf_session *session, 140 - struct evlist *evlist, 141 - perf_event__handler_t process); 142 - 143 - int perf_event__synthesize_extra_attr(struct perf_tool *tool, 144 - struct evlist *evsel_list, 145 - perf_event__handler_t process, 146 - bool is_pipe); 147 - 148 118 int perf_event__process_feature(struct perf_session *session, 149 119 union perf_event *event); 150 - 151 - int perf_event__synthesize_attr(struct perf_tool *tool, 152 - struct perf_event_attr *attr, u32 ids, u64 *id, 153 - perf_event__handler_t process); 154 - int perf_event__synthesize_attrs(struct perf_tool *tool, 155 - struct evlist *evlist, 156 - perf_event__handler_t process); 157 - int perf_event__synthesize_event_update_unit(struct perf_tool *tool, 158 - struct evsel 
*evsel, 159 - perf_event__handler_t process); 160 - int perf_event__synthesize_event_update_scale(struct perf_tool *tool, 161 - struct evsel *evsel, 162 - perf_event__handler_t process); 163 - int perf_event__synthesize_event_update_name(struct perf_tool *tool, 164 - struct evsel *evsel, 165 - perf_event__handler_t process); 166 - int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, 167 - struct evsel *evsel, 168 - perf_event__handler_t process); 169 120 int perf_event__process_attr(struct perf_tool *tool, union perf_event *event, 170 121 struct evlist **pevlist); 171 122 int perf_event__process_event_update(struct perf_tool *tool, 172 123 union perf_event *event, 173 124 struct evlist **pevlist); 174 125 size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp); 175 - 176 - int perf_event__synthesize_tracing_data(struct perf_tool *tool, 177 - int fd, struct evlist *evlist, 178 - perf_event__handler_t process); 179 126 int perf_event__process_tracing_data(struct perf_session *session, 180 127 union perf_event *event); 181 - 182 - int perf_event__synthesize_build_id(struct perf_tool *tool, 183 - struct dso *pos, u16 misc, 184 - perf_event__handler_t process, 185 - struct machine *machine); 186 128 int perf_event__process_build_id(struct perf_session *session, 187 129 union perf_event *event); 188 130 bool is_perf_magic(u64 magic);
+1
tools/perf/util/hist.h
··· 15 15 struct map_symbol; 16 16 struct mem_info; 17 17 struct branch_info; 18 + struct branch_stack; 18 19 struct block_info; 19 20 struct symbol; 20 21 struct ui_progress;
+3 -3
tools/perf/util/intel-bts.c
··· 14 14 #include <linux/log2.h> 15 15 #include <linux/zalloc.h> 16 16 17 - #include "cpumap.h" 18 17 #include "color.h" 19 18 #include "evsel.h" 20 19 #include "evlist.h" ··· 28 29 #include "auxtrace.h" 29 30 #include "intel-pt-decoder/intel-pt-insn-decoder.h" 30 31 #include "intel-bts.h" 32 + #include "util/synthetic-events.h" 31 33 32 34 #define MAX_TIMESTAMP (~0ULL) 33 35 ··· 768 768 int err; 769 769 770 770 evlist__for_each_entry(evlist, evsel) { 771 - if (evsel->core.attr.type == bts->pmu_type && evsel->ids) { 771 + if (evsel->core.attr.type == bts->pmu_type && evsel->core.ids) { 772 772 found = true; 773 773 break; 774 774 } ··· 795 795 attr.sample_id_all = evsel->core.attr.sample_id_all; 796 796 attr.read_format = evsel->core.attr.read_format; 797 797 798 - id = evsel->id[0] + 1000000000; 798 + id = evsel->core.id[0] + 1000000000; 799 799 if (!id) 800 800 id = 1; 801 801
+6 -5
tools/perf/util/intel-pt.c
··· 33 33 #include "tsc.h" 34 34 #include "intel-pt.h" 35 35 #include "config.h" 36 + #include "util/synthetic-events.h" 36 37 #include "time-utils.h" 37 38 38 39 #include "../arch/x86/include/uapi/asm/perf_regs.h" ··· 1705 1704 struct intel_pt *pt = ptq->pt; 1706 1705 struct evsel *evsel = pt->pebs_evsel; 1707 1706 u64 sample_type = evsel->core.attr.sample_type; 1708 - u64 id = evsel->id[0]; 1707 + u64 id = evsel->core.id[0]; 1709 1708 u8 cpumode; 1710 1709 1711 1710 if (intel_pt_skip_event(pt)) ··· 2720 2719 struct evsel *evsel; 2721 2720 2722 2721 evlist__for_each_entry(evlist, evsel) { 2723 - if (evsel->id && evsel->id[0] == id) { 2722 + if (evsel->core.id && evsel->core.id[0] == id) { 2724 2723 if (evsel->name) 2725 2724 zfree(&evsel->name); 2726 2725 evsel->name = strdup(name); ··· 2735 2734 struct evsel *evsel; 2736 2735 2737 2736 evlist__for_each_entry(evlist, evsel) { 2738 - if (evsel->core.attr.type == pt->pmu_type && evsel->ids) 2737 + if (evsel->core.attr.type == pt->pmu_type && evsel->core.ids) 2739 2738 return evsel; 2740 2739 } 2741 2740 ··· 2776 2775 attr.sample_id_all = evsel->core.attr.sample_id_all; 2777 2776 attr.read_format = evsel->core.attr.read_format; 2778 2777 2779 - id = evsel->id[0] + 1000000000; 2778 + id = evsel->core.id[0] + 1000000000; 2780 2779 if (!id) 2781 2780 id = 1; 2782 2781 ··· 2903 2902 return; 2904 2903 2905 2904 evlist__for_each_entry(pt->session->evlist, evsel) { 2906 - if (evsel->core.attr.aux_output && evsel->id) { 2905 + if (evsel->core.attr.aux_output && evsel->core.id) { 2907 2906 pt->sample_pebs = true; 2908 2907 pt->pebs_evsel = evsel; 2909 2908 return;
+1 -3
tools/perf/util/jitdump.c
··· 15 15 #include <linux/stringify.h> 16 16 17 17 #include "build-id.h" 18 - #include "util.h" 19 18 #include "event.h" 20 19 #include "debug.h" 21 20 #include "evlist.h" ··· 26 27 #include "jit.h" 27 28 #include "jitdump.h" 28 29 #include "genelf.h" 29 - #include "../builtin.h" 30 30 31 31 #include <linux/ctype.h> 32 32 #include <linux/zalloc.h> ··· 777 779 * track sample_type to compute id_all layout 778 780 * perf sets the same sample type to all events as of now 779 781 */ 780 - first = perf_evlist__first(session->evlist); 782 + first = evlist__first(session->evlist); 781 783 jd.sample_type = first->core.attr.sample_type; 782 784 783 785 *nbytes = 0;
+4
tools/perf/util/kvm-stat.h
··· 2 2 #ifndef __PERF_KVM_STAT_H 3 3 #define __PERF_KVM_STAT_H 4 4 5 + #ifdef HAVE_KVM_STAT_SUPPORT 6 + 5 7 #include "tool.h" 6 8 #include "stat.h" 7 9 #include "record.h" ··· 146 144 extern const char *kvm_exit_reason; 147 145 extern const char *kvm_entry_trace; 148 146 extern const char *kvm_exit_trace; 147 + #endif /* HAVE_KVM_STAT_SUPPORT */ 149 148 149 + extern int kvm_add_default_arch_event(int *argc, const char **argv); 150 150 #endif /* __PERF_KVM_STAT_H */
-1
tools/perf/util/libunwind/arm64.c
··· 22 22 #define LIBUNWIND__ARCH_REG_SP PERF_REG_ARM64_SP 23 23 24 24 #include "unwind.h" 25 - #include "debug.h" 26 25 #include "libunwind-aarch64.h" 27 26 #include <../../../../arch/arm64/include/uapi/asm/perf_regs.h> 28 27 #include "../../arch/arm64/util/unwind-libunwind.c"
-1
tools/perf/util/libunwind/x86_32.c
··· 22 22 #define LIBUNWIND__ARCH_REG_SP PERF_REG_X86_SP 23 23 24 24 #include "unwind.h" 25 - #include "debug.h" 26 25 #include "libunwind-x86.h" 27 26 #include <../../../../arch/x86/include/uapi/asm/perf_regs.h> 28 27
+1
tools/perf/util/llvm-utils.c
··· 8 8 #include <limits.h> 9 9 #include <stdio.h> 10 10 #include <stdlib.h> 11 + #include <unistd.h> 11 12 #include <linux/err.h> 12 13 #include <linux/string.h> 13 14 #include <linux/zalloc.h>
+1 -1
tools/perf/util/lzma.c
··· 7 7 #include <sys/stat.h> 8 8 #include <fcntl.h> 9 9 #include "compress.h" 10 - #include "util.h" 11 10 #include "debug.h" 12 11 #include <string.h> 13 12 #include <unistd.h> 13 + #include <internal/lib.h> 14 14 15 15 #define BUFSIZE 8192 16 16
+1 -15
tools/perf/util/machine.c
··· 32 32 #include "linux/hash.h" 33 33 #include "asm/bug.h" 34 34 #include "bpf-event.h" 35 + #include <internal/lib.h> // page_size 35 36 36 37 #include <linux/ctype.h> 37 38 #include <symbol/kallsyms.h> ··· 2608 2607 return rc; 2609 2608 } 2610 2609 return rc; 2611 - } 2612 - 2613 - int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool, 2614 - struct target *target, struct perf_thread_map *threads, 2615 - perf_event__handler_t process, bool data_mmap, 2616 - unsigned int nr_threads_synthesize) 2617 - { 2618 - if (target__has_task(target)) 2619 - return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap); 2620 - else if (target__has_cpu(target)) 2621 - return perf_event__synthesize_threads(tool, process, 2622 - machine, data_mmap, 2623 - nr_threads_synthesize); 2624 - /* command specified */ 2625 - return 0; 2626 2610 } 2627 2611 2628 2612 pid_t machine__get_current_tid(struct machine *machine, int cpu)
-15
tools/perf/util/machine.h
··· 6 6 #include <linux/rbtree.h> 7 7 #include "map_groups.h" 8 8 #include "dsos.h" 9 - #include "event.h" 10 9 #include "rwsem.h" 11 10 12 11 struct addr_location; ··· 250 251 int machines__for_each_thread(struct machines *machines, 251 252 int (*fn)(struct thread *thread, void *p), 252 253 void *priv); 253 - 254 - int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool, 255 - struct target *target, struct perf_thread_map *threads, 256 - perf_event__handler_t process, bool data_mmap, 257 - unsigned int nr_threads_synthesize); 258 - static inline 259 - int machine__synthesize_threads(struct machine *machine, struct target *target, 260 - struct perf_thread_map *threads, bool data_mmap, 261 - unsigned int nr_threads_synthesize) 262 - { 263 - return __machine__synthesize_threads(machine, NULL, target, threads, 264 - perf_event__process, data_mmap, 265 - nr_threads_synthesize); 266 - } 267 254 268 255 pid_t machine__get_current_tid(struct machine *machine, int cpu); 269 256 int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
+7
tools/perf/util/memswap.h
··· 2 2 #ifndef PERF_MEMSWAP_H_ 3 3 #define PERF_MEMSWAP_H_ 4 4 5 + #include <linux/types.h> 6 + 7 + union u64_swap { 8 + u64 val64; 9 + u32 val32[2]; 10 + }; 11 + 5 12 void mem_bswap_64(void *src, int byte_size); 6 13 void mem_bswap_32(void *src, int byte_size); 7 14
+93 -92
tools/perf/util/mmap.c
··· 12 12 #include <linux/zalloc.h> 13 13 #include <stdlib.h> 14 14 #include <string.h> 15 + #include <unistd.h> // sysconf() 15 16 #ifdef HAVE_LIBNUMA_SUPPORT 16 17 #include <numaif.h> 17 18 #endif ··· 21 20 #include "event.h" 22 21 #include "mmap.h" 23 22 #include "../perf.h" 24 - #include "util.h" /* page_size */ 23 + #include <internal/lib.h> /* page_size */ 25 24 26 - size_t perf_mmap__mmap_len(struct perf_mmap *map) 25 + size_t perf_mmap__mmap_len(struct mmap *map) 27 26 { 28 - return map->mask + 1 + page_size; 27 + return map->core.mask + 1 + page_size; 29 28 } 30 29 31 30 /* When check_messup is true, 'end' must points to a good entry */ 32 - static union perf_event *perf_mmap__read(struct perf_mmap *map, 31 + static union perf_event *perf_mmap__read(struct mmap *map, 33 32 u64 *startp, u64 end) 34 33 { 35 - unsigned char *data = map->base + page_size; 34 + unsigned char *data = map->core.base + page_size; 36 35 union perf_event *event = NULL; 37 36 int diff = end - *startp; 38 37 39 38 if (diff >= (int)sizeof(event->header)) { 40 39 size_t size; 41 40 42 - event = (union perf_event *)&data[*startp & map->mask]; 41 + event = (union perf_event *)&data[*startp & map->core.mask]; 43 42 size = event->header.size; 44 43 45 44 if (size < sizeof(event->header) || diff < (int)size) ··· 49 48 * Event straddles the mmap boundary -- header should always 50 49 * be inside due to u64 alignment of output. 
51 50 */ 52 - if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) { 51 + if ((*startp & map->core.mask) + size != ((*startp + size) & map->core.mask)) { 53 52 unsigned int offset = *startp; 54 53 unsigned int len = min(sizeof(*event), size), cpy; 55 - void *dst = map->event_copy; 54 + void *dst = map->core.event_copy; 56 55 57 56 do { 58 - cpy = min(map->mask + 1 - (offset & map->mask), len); 59 - memcpy(dst, &data[offset & map->mask], cpy); 57 + cpy = min(map->core.mask + 1 - (offset & map->core.mask), len); 58 + memcpy(dst, &data[offset & map->core.mask], cpy); 60 59 offset += cpy; 61 60 dst += cpy; 62 61 len -= cpy; 63 62 } while (len); 64 63 65 - event = (union perf_event *)map->event_copy; 64 + event = (union perf_event *)map->core.event_copy; 66 65 } 67 66 68 67 *startp += size; ··· 83 82 * } 84 83 * perf_mmap__read_done() 85 84 */ 86 - union perf_event *perf_mmap__read_event(struct perf_mmap *map) 85 + union perf_event *perf_mmap__read_event(struct mmap *map) 87 86 { 88 87 union perf_event *event; 89 88 90 89 /* 91 90 * Check if event was unmapped due to a POLLHUP/POLLERR. 
92 91 */ 93 - if (!refcount_read(&map->refcnt)) 92 + if (!refcount_read(&map->core.refcnt)) 94 93 return NULL; 95 94 96 95 /* non-overwirte doesn't pause the ringbuffer */ 97 - if (!map->overwrite) 98 - map->end = perf_mmap__read_head(map); 96 + if (!map->core.overwrite) 97 + map->core.end = perf_mmap__read_head(map); 99 98 100 - event = perf_mmap__read(map, &map->start, map->end); 99 + event = perf_mmap__read(map, &map->core.start, map->core.end); 101 100 102 - if (!map->overwrite) 103 - map->prev = map->start; 101 + if (!map->core.overwrite) 102 + map->core.prev = map->core.start; 104 103 105 104 return event; 106 105 } 107 106 108 - static bool perf_mmap__empty(struct perf_mmap *map) 107 + static bool perf_mmap__empty(struct mmap *map) 109 108 { 110 - return perf_mmap__read_head(map) == map->prev && !map->auxtrace_mmap.base; 109 + return perf_mmap__read_head(map) == map->core.prev && !map->auxtrace_mmap.base; 111 110 } 112 111 113 - void perf_mmap__get(struct perf_mmap *map) 112 + void perf_mmap__get(struct mmap *map) 114 113 { 115 - refcount_inc(&map->refcnt); 114 + refcount_inc(&map->core.refcnt); 116 115 } 117 116 118 - void perf_mmap__put(struct perf_mmap *map) 117 + void perf_mmap__put(struct mmap *map) 119 118 { 120 - BUG_ON(map->base && refcount_read(&map->refcnt) == 0); 119 + BUG_ON(map->core.base && refcount_read(&map->core.refcnt) == 0); 121 120 122 - if (refcount_dec_and_test(&map->refcnt)) 121 + if (refcount_dec_and_test(&map->core.refcnt)) 123 122 perf_mmap__munmap(map); 124 123 } 125 124 126 - void perf_mmap__consume(struct perf_mmap *map) 125 + void perf_mmap__consume(struct mmap *map) 127 126 { 128 - if (!map->overwrite) { 129 - u64 old = map->prev; 127 + if (!map->core.overwrite) { 128 + u64 old = map->core.prev; 130 129 131 130 perf_mmap__write_tail(map, old); 132 131 } 133 132 134 - if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map)) 133 + if (refcount_read(&map->core.refcnt) == 1 && perf_mmap__empty(map)) 135 134 
perf_mmap__put(map); 136 135 } 137 136 ··· 162 161 } 163 162 164 163 #ifdef HAVE_AIO_SUPPORT 165 - static int perf_mmap__aio_enabled(struct perf_mmap *map) 164 + static int perf_mmap__aio_enabled(struct mmap *map) 166 165 { 167 166 return map->aio.nr_cblocks > 0; 168 167 } 169 168 170 169 #ifdef HAVE_LIBNUMA_SUPPORT 171 - static int perf_mmap__aio_alloc(struct perf_mmap *map, int idx) 170 + static int perf_mmap__aio_alloc(struct mmap *map, int idx) 172 171 { 173 172 map->aio.data[idx] = mmap(NULL, perf_mmap__mmap_len(map), PROT_READ|PROT_WRITE, 174 173 MAP_PRIVATE|MAP_ANONYMOUS, 0, 0); ··· 180 179 return 0; 181 180 } 182 181 183 - static void perf_mmap__aio_free(struct perf_mmap *map, int idx) 182 + static void perf_mmap__aio_free(struct mmap *map, int idx) 184 183 { 185 184 if (map->aio.data[idx]) { 186 185 munmap(map->aio.data[idx], perf_mmap__mmap_len(map)); ··· 188 187 } 189 188 } 190 189 191 - static int perf_mmap__aio_bind(struct perf_mmap *map, int idx, int cpu, int affinity) 190 + static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity) 192 191 { 193 192 void *data; 194 193 size_t mmap_len; ··· 208 207 return 0; 209 208 } 210 209 #else /* !HAVE_LIBNUMA_SUPPORT */ 211 - static int perf_mmap__aio_alloc(struct perf_mmap *map, int idx) 210 + static int perf_mmap__aio_alloc(struct mmap *map, int idx) 212 211 { 213 212 map->aio.data[idx] = malloc(perf_mmap__mmap_len(map)); 214 213 if (map->aio.data[idx] == NULL) ··· 217 216 return 0; 218 217 } 219 218 220 - static void perf_mmap__aio_free(struct perf_mmap *map, int idx) 219 + static void perf_mmap__aio_free(struct mmap *map, int idx) 221 220 { 222 221 zfree(&(map->aio.data[idx])); 223 222 } 224 223 225 - static int perf_mmap__aio_bind(struct perf_mmap *map __maybe_unused, int idx __maybe_unused, 224 + static int perf_mmap__aio_bind(struct mmap *map __maybe_unused, int idx __maybe_unused, 226 225 int cpu __maybe_unused, int affinity __maybe_unused) 227 226 { 228 227 return 0; 229 228 } 230 
229 #endif 231 230 232 - static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp) 231 + static int perf_mmap__aio_mmap(struct mmap *map, struct mmap_params *mp) 233 232 { 234 233 int delta_max, i, prio, ret; 235 234 ··· 257 256 pr_debug2("failed to allocate data buffer area, error %m"); 258 257 return -1; 259 258 } 260 - ret = perf_mmap__aio_bind(map, i, map->cpu, mp->affinity); 259 + ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity); 261 260 if (ret == -1) 262 261 return -1; 263 262 /* ··· 283 282 return 0; 284 283 } 285 284 286 - static void perf_mmap__aio_munmap(struct perf_mmap *map) 285 + static void perf_mmap__aio_munmap(struct mmap *map) 287 286 { 288 287 int i; 289 288 ··· 295 294 zfree(&map->aio.aiocb); 296 295 } 297 296 #else /* !HAVE_AIO_SUPPORT */ 298 - static int perf_mmap__aio_enabled(struct perf_mmap *map __maybe_unused) 297 + static int perf_mmap__aio_enabled(struct mmap *map __maybe_unused) 299 298 { 300 299 return 0; 301 300 } 302 301 303 - static int perf_mmap__aio_mmap(struct perf_mmap *map __maybe_unused, 302 + static int perf_mmap__aio_mmap(struct mmap *map __maybe_unused, 304 303 struct mmap_params *mp __maybe_unused) 305 304 { 306 305 return 0; 307 306 } 308 307 309 - static void perf_mmap__aio_munmap(struct perf_mmap *map __maybe_unused) 308 + static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused) 310 309 { 311 310 } 312 311 #endif 313 312 314 - void perf_mmap__munmap(struct perf_mmap *map) 313 + void perf_mmap__munmap(struct mmap *map) 315 314 { 316 315 perf_mmap__aio_munmap(map); 317 316 if (map->data != NULL) { 318 317 munmap(map->data, perf_mmap__mmap_len(map)); 319 318 map->data = NULL; 320 319 } 321 - if (map->base != NULL) { 322 - munmap(map->base, perf_mmap__mmap_len(map)); 323 - map->base = NULL; 324 - map->fd = -1; 325 - refcount_set(&map->refcnt, 0); 320 + if (map->core.base != NULL) { 321 + munmap(map->core.base, perf_mmap__mmap_len(map)); 322 + map->core.base = NULL; 323 + 
map->core.fd = -1; 324 + refcount_set(&map->core.refcnt, 0); 326 325 } 327 326 auxtrace_mmap__munmap(&map->auxtrace_mmap); 328 327 } ··· 344 343 } 345 344 } 346 345 347 - static void perf_mmap__setup_affinity_mask(struct perf_mmap *map, struct mmap_params *mp) 346 + static void perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp) 348 347 { 349 348 CPU_ZERO(&map->affinity_mask); 350 349 if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1) 351 - build_node_mask(cpu__get_node(map->cpu), &map->affinity_mask); 350 + build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask); 352 351 else if (mp->affinity == PERF_AFFINITY_CPU) 353 - CPU_SET(map->cpu, &map->affinity_mask); 352 + CPU_SET(map->core.cpu, &map->affinity_mask); 354 353 } 355 354 356 - int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu) 355 + int perf_mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu) 357 356 { 358 357 /* 359 358 * The last one will be done at perf_mmap__consume(), so that we ··· 368 367 * evlist layer can't just drop it when filtering events in 369 368 * perf_evlist__filter_pollfd(). 
370 369 */ 371 - refcount_set(&map->refcnt, 2); 372 - map->prev = 0; 373 - map->mask = mp->mask; 374 - map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot, 370 + refcount_set(&map->core.refcnt, 2); 371 + map->core.prev = 0; 372 + map->core.mask = mp->mask; 373 + map->core.base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot, 375 374 MAP_SHARED, fd, 0); 376 - if (map->base == MAP_FAILED) { 375 + if (map->core.base == MAP_FAILED) { 377 376 pr_debug2("failed to mmap perf event ring buffer, error %d\n", 378 377 errno); 379 - map->base = NULL; 378 + map->core.base = NULL; 380 379 return -1; 381 380 } 382 - map->fd = fd; 383 - map->cpu = cpu; 381 + map->core.fd = fd; 382 + map->core.cpu = cpu; 384 383 385 384 perf_mmap__setup_affinity_mask(map, mp); 386 385 387 - map->flush = mp->flush; 386 + map->core.flush = mp->flush; 388 387 389 388 map->comp_level = mp->comp_level; 390 389 ··· 400 399 } 401 400 402 401 if (auxtrace_mmap__mmap(&map->auxtrace_mmap, 403 - &mp->auxtrace_mp, map->base, fd)) 402 + &mp->auxtrace_mp, map->core.base, fd)) 404 403 return -1; 405 404 406 405 return perf_mmap__aio_mmap(map, mp); ··· 441 440 /* 442 441 * Report the start and end of the available data in ringbuffer 443 442 */ 444 - static int __perf_mmap__read_init(struct perf_mmap *md) 443 + static int __perf_mmap__read_init(struct mmap *md) 445 444 { 446 445 u64 head = perf_mmap__read_head(md); 447 - u64 old = md->prev; 448 - unsigned char *data = md->base + page_size; 446 + u64 old = md->core.prev; 447 + unsigned char *data = md->core.base + page_size; 449 448 unsigned long size; 450 449 451 - md->start = md->overwrite ? head : old; 452 - md->end = md->overwrite ? old : head; 450 + md->core.start = md->core.overwrite ? head : old; 451 + md->core.end = md->core.overwrite ? 
old : head; 453 452 454 - if ((md->end - md->start) < md->flush) 453 + if ((md->core.end - md->core.start) < md->core.flush) 455 454 return -EAGAIN; 456 455 457 - size = md->end - md->start; 458 - if (size > (unsigned long)(md->mask) + 1) { 459 - if (!md->overwrite) { 456 + size = md->core.end - md->core.start; 457 + if (size > (unsigned long)(md->core.mask) + 1) { 458 + if (!md->core.overwrite) { 460 459 WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n"); 461 460 462 - md->prev = head; 461 + md->core.prev = head; 463 462 perf_mmap__consume(md); 464 463 return -EAGAIN; 465 464 } ··· 468 467 * Backward ring buffer is full. We still have a chance to read 469 468 * most of data from it. 470 469 */ 471 - if (overwrite_rb_find_range(data, md->mask, &md->start, &md->end)) 470 + if (overwrite_rb_find_range(data, md->core.mask, &md->core.start, &md->core.end)) 472 471 return -EINVAL; 473 472 } 474 473 475 474 return 0; 476 475 } 477 476 478 - int perf_mmap__read_init(struct perf_mmap *map) 477 + int perf_mmap__read_init(struct mmap *map) 479 478 { 480 479 /* 481 480 * Check if event was unmapped due to a POLLHUP/POLLERR. 482 481 */ 483 - if (!refcount_read(&map->refcnt)) 482 + if (!refcount_read(&map->core.refcnt)) 484 483 return -ENOENT; 485 484 486 485 return __perf_mmap__read_init(map); 487 486 } 488 487 489 - int perf_mmap__push(struct perf_mmap *md, void *to, 490 - int push(struct perf_mmap *map, void *to, void *buf, size_t size)) 488 + int perf_mmap__push(struct mmap *md, void *to, 489 + int push(struct mmap *map, void *to, void *buf, size_t size)) 491 490 { 492 491 u64 head = perf_mmap__read_head(md); 493 - unsigned char *data = md->base + page_size; 492 + unsigned char *data = md->core.base + page_size; 494 493 unsigned long size; 495 494 void *buf; 496 495 int rc = 0; ··· 499 498 if (rc < 0) 500 499 return (rc == -EAGAIN) ? 
1 : -1; 501 500 502 - size = md->end - md->start; 501 + size = md->core.end - md->core.start; 503 502 504 - if ((md->start & md->mask) + size != (md->end & md->mask)) { 505 - buf = &data[md->start & md->mask]; 506 - size = md->mask + 1 - (md->start & md->mask); 507 - md->start += size; 503 + if ((md->core.start & md->core.mask) + size != (md->core.end & md->core.mask)) { 504 + buf = &data[md->core.start & md->core.mask]; 505 + size = md->core.mask + 1 - (md->core.start & md->core.mask); 506 + md->core.start += size; 508 507 509 508 if (push(md, to, buf, size) < 0) { 510 509 rc = -1; ··· 512 511 } 513 512 } 514 513 515 - buf = &data[md->start & md->mask]; 516 - size = md->end - md->start; 517 - md->start += size; 514 + buf = &data[md->core.start & md->core.mask]; 515 + size = md->core.end - md->core.start; 516 + md->core.start += size; 518 517 519 518 if (push(md, to, buf, size) < 0) { 520 519 rc = -1; 521 520 goto out; 522 521 } 523 522 524 - md->prev = head; 523 + md->core.prev = head; 525 524 perf_mmap__consume(md); 526 525 out: 527 526 return rc; ··· 530 529 /* 531 530 * Mandatory for overwrite mode 532 531 * The direction of overwrite mode is backward. 533 - * The last perf_mmap__read() will set tail to map->prev. 534 - * Need to correct the map->prev to head which is the end of next read. 532 + * The last perf_mmap__read() will set tail to map->core.prev. 533 + * Need to correct the map->core.prev to head which is the end of next read. 535 534 */ 536 - void perf_mmap__read_done(struct perf_mmap *map) 535 + void perf_mmap__read_done(struct mmap *map) 537 536 { 538 537 /* 539 538 * Check if event was unmapped due to a POLLHUP/POLLERR. 540 539 */ 541 - if (!refcount_read(&map->refcnt)) 540 + if (!refcount_read(&map->core.refcnt)) 542 541 return; 543 542 544 - map->prev = perf_mmap__read_head(map); 543 + map->core.prev = perf_mmap__read_head(map); 545 544 }
+20 -57
tools/perf/util/mmap.h
··· 1 1 #ifndef __PERF_MMAP_H 2 2 #define __PERF_MMAP_H 1 3 3 4 + #include <internal/mmap.h> 4 5 #include <linux/compiler.h> 5 6 #include <linux/refcount.h> 6 7 #include <linux/types.h> ··· 16 15 17 16 struct aiocb; 18 17 /** 19 - * struct perf_mmap - perf's ring buffer mmap details 18 + * struct mmap - perf's ring buffer mmap details 20 19 * 21 20 * @refcnt - e.g. code using PERF_EVENT_IOC_SET_OUTPUT to share this 22 21 */ 23 - struct perf_mmap { 24 - void *base; 25 - int mask; 26 - int fd; 27 - int cpu; 28 - refcount_t refcnt; 29 - u64 prev; 30 - u64 start; 31 - u64 end; 32 - bool overwrite; 22 + struct mmap { 23 + struct perf_mmap core; 33 24 struct auxtrace_mmap auxtrace_mmap; 34 - char event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8); 35 25 #ifdef HAVE_AIO_SUPPORT 36 26 struct { 37 27 void **data; ··· 32 40 } aio; 33 41 #endif 34 42 cpu_set_t affinity_mask; 35 - u64 flush; 36 43 void *data; 37 44 int comp_level; 38 - }; 39 - 40 - /* 41 - * State machine of bkw_mmap_state: 42 - * 43 - * .________________(forbid)_____________. 44 - * | V 45 - * NOTREADY --(0)--> RUNNING --(1)--> DATA_PENDING --(2)--> EMPTY 46 - * ^ ^ | ^ | 47 - * | |__(forbid)____/ |___(forbid)___/| 48 - * | | 49 - * \_________________(3)_______________/ 50 - * 51 - * NOTREADY : Backward ring buffers are not ready 52 - * RUNNING : Backward ring buffers are recording 53 - * DATA_PENDING : We are required to collect data from backward ring buffers 54 - * EMPTY : We have collected data from backward ring buffers. 
55 - * 56 - * (0): Setup backward ring buffer 57 - * (1): Pause ring buffers for reading 58 - * (2): Read from ring buffers 59 - * (3): Resume ring buffers for recording 60 - */ 61 - enum bkw_mmap_state { 62 - BKW_MMAP_NOTREADY, 63 - BKW_MMAP_RUNNING, 64 - BKW_MMAP_DATA_PENDING, 65 - BKW_MMAP_EMPTY, 66 45 }; 67 46 68 47 struct mmap_params { ··· 41 78 struct auxtrace_mmap_params auxtrace_mp; 42 79 }; 43 80 44 - int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu); 45 - void perf_mmap__munmap(struct perf_mmap *map); 81 + int perf_mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu); 82 + void perf_mmap__munmap(struct mmap *map); 46 83 47 - void perf_mmap__get(struct perf_mmap *map); 48 - void perf_mmap__put(struct perf_mmap *map); 84 + void perf_mmap__get(struct mmap *map); 85 + void perf_mmap__put(struct mmap *map); 49 86 50 - void perf_mmap__consume(struct perf_mmap *map); 87 + void perf_mmap__consume(struct mmap *map); 51 88 52 - static inline u64 perf_mmap__read_head(struct perf_mmap *mm) 89 + static inline u64 perf_mmap__read_head(struct mmap *mm) 53 90 { 54 - return ring_buffer_read_head(mm->base); 91 + return ring_buffer_read_head(mm->core.base); 55 92 } 56 93 57 - static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail) 94 + static inline void perf_mmap__write_tail(struct mmap *md, u64 tail) 58 95 { 59 - ring_buffer_write_tail(md->base, tail); 96 + ring_buffer_write_tail(md->core.base, tail); 60 97 } 61 98 62 - union perf_event *perf_mmap__read_forward(struct perf_mmap *map); 99 + union perf_event *perf_mmap__read_forward(struct mmap *map); 63 100 64 - union perf_event *perf_mmap__read_event(struct perf_mmap *map); 101 + union perf_event *perf_mmap__read_event(struct mmap *map); 65 102 66 - int perf_mmap__push(struct perf_mmap *md, void *to, 67 - int push(struct perf_mmap *map, void *to, void *buf, size_t size)); 103 + int perf_mmap__push(struct mmap *md, void *to, 104 + int push(struct mmap 
*map, void *to, void *buf, size_t size)); 68 105 69 - size_t perf_mmap__mmap_len(struct perf_mmap *map); 106 + size_t perf_mmap__mmap_len(struct mmap *map); 70 107 71 - int perf_mmap__read_init(struct perf_mmap *md); 72 - void perf_mmap__read_done(struct perf_mmap *map); 108 + int perf_mmap__read_init(struct mmap *md); 109 + void perf_mmap__read_done(struct mmap *map); 73 110 #endif /*__PERF_MMAP_H */
+18
tools/perf/util/namespaces.c
··· 17 17 #include <string.h> 18 18 #include <unistd.h> 19 19 #include <asm/bug.h> 20 + #include <linux/kernel.h> 20 21 #include <linux/zalloc.h> 22 + 23 + static const char *perf_ns__names[] = { 24 + [NET_NS_INDEX] = "net", 25 + [UTS_NS_INDEX] = "uts", 26 + [IPC_NS_INDEX] = "ipc", 27 + [PID_NS_INDEX] = "pid", 28 + [USER_NS_INDEX] = "user", 29 + [MNT_NS_INDEX] = "mnt", 30 + [CGROUP_NS_INDEX] = "cgroup", 31 + }; 32 + 33 + const char *perf_ns__name(unsigned int id) 34 + { 35 + if (id >= ARRAY_SIZE(perf_ns__names)) 36 + return "UNKNOWN"; 37 + return perf_ns__names[id]; 38 + } 21 39 22 40 struct namespaces *namespaces__new(struct perf_record_namespaces *event) 23 41 {
+2
tools/perf/util/namespaces.h
··· 66 66 67 67 #define nsinfo__zput(nsi) __nsinfo__zput(&nsi) 68 68 69 + const char *perf_ns__name(unsigned int id); 70 + 69 71 #endif /* __PERF_NAMESPACES_H */
+5 -4
tools/perf/util/parse-events.c
··· 30 30 #include "parse-events-flex.h" 31 31 #include "pmu.h" 32 32 #include "thread_map.h" 33 - #include "cpumap.h" 34 33 #include "probe-file.h" 35 34 #include "asm/bug.h" 36 35 #include "util/parse-branch-options.h" 37 36 #include "metricgroup.h" 37 + #include "util/evsel_config.h" 38 + #include "util/event.h" 38 39 39 40 #define MAX_NAME_LEN 100 40 41 ··· 336 335 (*idx)++; 337 336 evsel->core.cpus = perf_cpu_map__get(cpus); 338 337 evsel->core.own_cpus = perf_cpu_map__get(cpus); 339 - evsel->system_wide = pmu ? pmu->is_uncore : false; 338 + evsel->core.system_wide = pmu ? pmu->is_uncore : false; 340 339 evsel->auto_merge_stats = auto_merge_stats; 341 340 342 341 if (name) ··· 1937 1936 1938 1937 perf_evlist__splice_list_tail(evlist, &parse_state.list); 1939 1938 evlist->nr_groups += parse_state.nr_groups; 1940 - last = perf_evlist__last(evlist); 1939 + last = evlist__last(evlist); 1941 1940 last->cmdline_group_boundary = true; 1942 1941 1943 1942 return 0; ··· 2051 2050 * So no need to WARN here, let *func do this. 2052 2051 */ 2053 2052 if (evlist->core.nr_entries > 0) 2054 - last = perf_evlist__last(evlist); 2053 + last = evlist__last(evlist); 2055 2054 2056 2055 do { 2057 2056 err = (*func)(last, arg);
+1 -3
tools/perf/util/parse-events.y
··· 9 9 #define YYDEBUG 1 10 10 11 11 #include <fnmatch.h> 12 + #include <stdio.h> 12 13 #include <linux/compiler.h> 13 - #include <linux/list.h> 14 14 #include <linux/types.h> 15 - #include "util.h" 16 15 #include "pmu.h" 17 16 #include "evsel.h" 18 - #include "debug.h" 19 17 #include "parse-events.h" 20 18 #include "parse-events-bison.h" 21 19
-1
tools/perf/util/perf-hooks.c
··· 12 12 #include <setjmp.h> 13 13 #include <linux/err.h> 14 14 #include <linux/kernel.h> 15 - #include "util/util.h" 16 15 #include "util/debug.h" 17 16 #include "util/perf-hooks.h" 18 17
+148
tools/perf/util/perf_event_attr_fprintf.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <inttypes.h> 3 + #include <stdio.h> 4 + #include <stdbool.h> 5 + #include <linux/kernel.h> 6 + #include <linux/types.h> 7 + #include <linux/perf_event.h> 8 + #include "util/evsel_fprintf.h" 9 + 10 + struct bit_names { 11 + int bit; 12 + const char *name; 13 + }; 14 + 15 + static void __p_bits(char *buf, size_t size, u64 value, struct bit_names *bits) 16 + { 17 + bool first_bit = true; 18 + int i = 0; 19 + 20 + do { 21 + if (value & bits[i].bit) { 22 + buf += scnprintf(buf, size, "%s%s", first_bit ? "" : "|", bits[i].name); 23 + first_bit = false; 24 + } 25 + } while (bits[++i].name != NULL); 26 + } 27 + 28 + static void __p_sample_type(char *buf, size_t size, u64 value) 29 + { 30 + #define bit_name(n) { PERF_SAMPLE_##n, #n } 31 + struct bit_names bits[] = { 32 + bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR), 33 + bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU), 34 + bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW), 35 + bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER), 36 + bit_name(IDENTIFIER), bit_name(REGS_INTR), bit_name(DATA_SRC), 37 + bit_name(WEIGHT), bit_name(PHYS_ADDR), 38 + { .name = NULL, } 39 + }; 40 + #undef bit_name 41 + __p_bits(buf, size, value, bits); 42 + } 43 + 44 + static void __p_branch_sample_type(char *buf, size_t size, u64 value) 45 + { 46 + #define bit_name(n) { PERF_SAMPLE_BRANCH_##n, #n } 47 + struct bit_names bits[] = { 48 + bit_name(USER), bit_name(KERNEL), bit_name(HV), bit_name(ANY), 49 + bit_name(ANY_CALL), bit_name(ANY_RETURN), bit_name(IND_CALL), 50 + bit_name(ABORT_TX), bit_name(IN_TX), bit_name(NO_TX), 51 + bit_name(COND), bit_name(CALL_STACK), bit_name(IND_JUMP), 52 + bit_name(CALL), bit_name(NO_FLAGS), bit_name(NO_CYCLES), 53 + { .name = NULL, } 54 + }; 55 + #undef bit_name 56 + __p_bits(buf, size, value, bits); 57 + } 58 + 59 + static void __p_read_format(char *buf, size_t size, u64 value) 60 + { 61 + #define 
bit_name(n) { PERF_FORMAT_##n, #n } 62 + struct bit_names bits[] = { 63 + bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING), 64 + bit_name(ID), bit_name(GROUP), 65 + { .name = NULL, } 66 + }; 67 + #undef bit_name 68 + __p_bits(buf, size, value, bits); 69 + } 70 + 71 + #define BUF_SIZE 1024 72 + 73 + #define p_hex(val) snprintf(buf, BUF_SIZE, "%#"PRIx64, (uint64_t)(val)) 74 + #define p_unsigned(val) snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val)) 75 + #define p_signed(val) snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val)) 76 + #define p_sample_type(val) __p_sample_type(buf, BUF_SIZE, val) 77 + #define p_branch_sample_type(val) __p_branch_sample_type(buf, BUF_SIZE, val) 78 + #define p_read_format(val) __p_read_format(buf, BUF_SIZE, val) 79 + 80 + #define PRINT_ATTRn(_n, _f, _p) \ 81 + do { \ 82 + if (attr->_f) { \ 83 + _p(attr->_f); \ 84 + ret += attr__fprintf(fp, _n, buf, priv);\ 85 + } \ 86 + } while (0) 87 + 88 + #define PRINT_ATTRf(_f, _p) PRINT_ATTRn(#_f, _f, _p) 89 + 90 + int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr, 91 + attr__fprintf_f attr__fprintf, void *priv) 92 + { 93 + char buf[BUF_SIZE]; 94 + int ret = 0; 95 + 96 + PRINT_ATTRf(type, p_unsigned); 97 + PRINT_ATTRf(size, p_unsigned); 98 + PRINT_ATTRf(config, p_hex); 99 + PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned); 100 + PRINT_ATTRf(sample_type, p_sample_type); 101 + PRINT_ATTRf(read_format, p_read_format); 102 + 103 + PRINT_ATTRf(disabled, p_unsigned); 104 + PRINT_ATTRf(inherit, p_unsigned); 105 + PRINT_ATTRf(pinned, p_unsigned); 106 + PRINT_ATTRf(exclusive, p_unsigned); 107 + PRINT_ATTRf(exclude_user, p_unsigned); 108 + PRINT_ATTRf(exclude_kernel, p_unsigned); 109 + PRINT_ATTRf(exclude_hv, p_unsigned); 110 + PRINT_ATTRf(exclude_idle, p_unsigned); 111 + PRINT_ATTRf(mmap, p_unsigned); 112 + PRINT_ATTRf(comm, p_unsigned); 113 + PRINT_ATTRf(freq, p_unsigned); 114 + PRINT_ATTRf(inherit_stat, p_unsigned); 115 + PRINT_ATTRf(enable_on_exec, 
p_unsigned); 116 + PRINT_ATTRf(task, p_unsigned); 117 + PRINT_ATTRf(watermark, p_unsigned); 118 + PRINT_ATTRf(precise_ip, p_unsigned); 119 + PRINT_ATTRf(mmap_data, p_unsigned); 120 + PRINT_ATTRf(sample_id_all, p_unsigned); 121 + PRINT_ATTRf(exclude_host, p_unsigned); 122 + PRINT_ATTRf(exclude_guest, p_unsigned); 123 + PRINT_ATTRf(exclude_callchain_kernel, p_unsigned); 124 + PRINT_ATTRf(exclude_callchain_user, p_unsigned); 125 + PRINT_ATTRf(mmap2, p_unsigned); 126 + PRINT_ATTRf(comm_exec, p_unsigned); 127 + PRINT_ATTRf(use_clockid, p_unsigned); 128 + PRINT_ATTRf(context_switch, p_unsigned); 129 + PRINT_ATTRf(write_backward, p_unsigned); 130 + PRINT_ATTRf(namespaces, p_unsigned); 131 + PRINT_ATTRf(ksymbol, p_unsigned); 132 + PRINT_ATTRf(bpf_event, p_unsigned); 133 + PRINT_ATTRf(aux_output, p_unsigned); 134 + 135 + PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned); 136 + PRINT_ATTRf(bp_type, p_unsigned); 137 + PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex); 138 + PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex); 139 + PRINT_ATTRf(branch_sample_type, p_branch_sample_type); 140 + PRINT_ATTRf(sample_regs_user, p_hex); 141 + PRINT_ATTRf(sample_stack_user, p_unsigned); 142 + PRINT_ATTRf(clockid, p_signed); 143 + PRINT_ATTRf(sample_regs_intr, p_hex); 144 + PRINT_ATTRf(aux_watermark, p_unsigned); 145 + PRINT_ATTRf(sample_max_stack, p_unsigned); 146 + 147 + return ret; 148 + }
-1
tools/perf/util/pmu.c
··· 20 20 #include "debug.h" 21 21 #include "pmu.h" 22 22 #include "parse-events.h" 23 - #include "cpumap.h" 24 23 #include "header.h" 25 24 #include "pmu-events/pmu-events.h" 26 25 #include "string2.h"
+1
tools/perf/util/probe-event.c
··· 2331 2331 } 2332 2332 } 2333 2333 zfree(&tev->args); 2334 + tev->nargs = 0; 2334 2335 } 2335 2336 2336 2337 struct kprobe_blacklist_node {
+1
tools/perf/util/probe-file.c
··· 16 16 #include "strlist.h" 17 17 #include "strfilter.h" 18 18 #include "debug.h" 19 + #include "build-id.h" 19 20 #include "dso.h" 20 21 #include "color.h" 21 22 #include "symbol.h"
+19
tools/perf/util/probe-finder.c
··· 1245 1245 return n; 1246 1246 } 1247 1247 1248 + static bool trace_event_finder_overlap(struct trace_event_finder *tf) 1249 + { 1250 + int i; 1251 + 1252 + for (i = 0; i < tf->ntevs; i++) { 1253 + if (tf->pf.addr == tf->tevs[i].point.address) 1254 + return true; 1255 + } 1256 + return false; 1257 + } 1258 + 1248 1259 /* Add a found probe point into trace event list */ 1249 1260 static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf) 1250 1261 { ··· 1265 1254 struct probe_trace_event *tev; 1266 1255 struct perf_probe_arg *args = NULL; 1267 1256 int ret, i; 1257 + 1258 + /* 1259 + * For some reason (e.g. different column assigned to same address) 1260 + * This callback can be called with the address which already passed. 1261 + * Ignore it first. 1262 + */ 1263 + if (trace_event_finder_overlap(tf)) 1264 + return 0; 1268 1265 1269 1266 /* Check number of tevs */ 1270 1267 if (tf->ntevs == tf->max_tevs) {
+1
tools/perf/util/python-ext-sources
··· 10 10 util/cap.c 11 11 util/evlist.c 12 12 util/evsel.c 13 + util/perf_event_attr_fprintf.c 13 14 util/cpumap.c 14 15 util/memswap.c 15 16 util/mmap.c
+14 -14
tools/perf/util/python.c
··· 6 6 #include <linux/err.h> 7 7 #include <perf/cpumap.h> 8 8 #include <traceevent/event-parse.h> 9 - #include "debug.h" 10 9 #include "evlist.h" 11 10 #include "callchain.h" 12 11 #include "evsel.h" 13 12 #include "event.h" 14 - #include "cpumap.h" 15 13 #include "print_binary.h" 16 14 #include "thread_map.h" 17 15 #include "trace-event.h" 18 16 #include "mmap.h" 19 - #include "util.h" 17 + #include <internal/lib.h> 20 18 #include "../perf-sys.h" 21 19 22 20 #if PY_MAJOR_VERSION < 3 ··· 58 60 * implementing 'verbose' and 'eprintf'. 59 61 */ 60 62 int verbose; 63 + 64 + int eprintf(int level, int var, const char *fmt, ...); 61 65 62 66 int eprintf(int level, int var, const char *fmt, ...) 63 67 { ··· 884 884 885 885 static void pyrf_evlist__delete(struct pyrf_evlist *pevlist) 886 886 { 887 - perf_evlist__exit(&pevlist->evlist); 887 + evlist__exit(&pevlist->evlist); 888 888 Py_TYPE(pevlist)->tp_free((PyObject*)pevlist); 889 889 } 890 890 ··· 899 899 &pages, &overwrite)) 900 900 return NULL; 901 901 902 - if (perf_evlist__mmap(evlist, pages) < 0) { 902 + if (evlist__mmap(evlist, pages) < 0) { 903 903 PyErr_SetFromErrno(PyExc_OSError); 904 904 return NULL; 905 905 } ··· 918 918 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout)) 919 919 return NULL; 920 920 921 - n = perf_evlist__poll(evlist, timeout); 921 + n = evlist__poll(evlist, timeout); 922 922 if (n < 0) { 923 923 PyErr_SetFromErrno(PyExc_OSError); 924 924 return NULL; ··· 935 935 PyObject *list = PyList_New(0); 936 936 int i; 937 937 938 - for (i = 0; i < evlist->pollfd.nr; ++i) { 938 + for (i = 0; i < evlist->core.pollfd.nr; ++i) { 939 939 PyObject *file; 940 940 #if PY_MAJOR_VERSION < 3 941 - FILE *fp = fdopen(evlist->pollfd.entries[i].fd, "r"); 941 + FILE *fp = fdopen(evlist->core.pollfd.entries[i].fd, "r"); 942 942 943 943 if (fp == NULL) 944 944 goto free_list; 945 945 946 946 file = PyFile_FromFile(fp, "perf", "r", NULL); 947 947 #else 948 - file = 
PyFile_FromFd(evlist->pollfd.entries[i].fd, "perf", "r", -1, 948 + file = PyFile_FromFd(evlist->core.pollfd.entries[i].fd, "perf", "r", -1, 949 949 NULL, NULL, NULL, 0); 950 950 #endif 951 951 if (file == NULL) ··· 984 984 return Py_BuildValue("i", evlist->core.nr_entries); 985 985 } 986 986 987 - static struct perf_mmap *get_md(struct evlist *evlist, int cpu) 987 + static struct mmap *get_md(struct evlist *evlist, int cpu) 988 988 { 989 989 int i; 990 990 991 - for (i = 0; i < evlist->nr_mmaps; i++) { 992 - struct perf_mmap *md = &evlist->mmap[i]; 991 + for (i = 0; i < evlist->core.nr_mmaps; i++) { 992 + struct mmap *md = &evlist->mmap[i]; 993 993 994 - if (md->cpu == cpu) 994 + if (md->core.cpu == cpu) 995 995 return md; 996 996 } 997 997 ··· 1005 1005 union perf_event *event; 1006 1006 int sample_id_all = 1, cpu; 1007 1007 static char *kwlist[] = { "cpu", "sample_id_all", NULL }; 1008 - struct perf_mmap *md; 1008 + struct mmap *md; 1009 1009 int err; 1010 1010 1011 1011 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
+3 -5
tools/perf/util/record.c
··· 2 2 #include "debug.h" 3 3 #include "evlist.h" 4 4 #include "evsel.h" 5 - #include "cpumap.h" 6 5 #include "parse-events.h" 7 6 #include <errno.h> 8 7 #include <limits.h> ··· 9 10 #include <api/fs/fs.h> 10 11 #include <subcmd/parse-options.h> 11 12 #include <perf/cpumap.h> 12 - #include "util.h" 13 13 #include "cloexec.h" 14 14 #include "record.h" 15 15 #include "../perf-sys.h" ··· 30 32 if (parse_events(evlist, str, NULL)) 31 33 goto out_delete; 32 34 33 - evsel = perf_evlist__first(evlist); 35 + evsel = evlist__first(evlist); 34 36 35 37 while (1) { 36 38 fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, -1, flags); ··· 171 173 use_sample_identifier = perf_can_sample_identifier(); 172 174 sample_id = true; 173 175 } else if (evlist->core.nr_entries > 1) { 174 - struct evsel *first = perf_evlist__first(evlist); 176 + struct evsel *first = evlist__first(evlist); 175 177 176 178 evlist__for_each_entry(evlist, evsel) { 177 179 if (evsel->core.attr.sample_type == first->core.attr.sample_type) ··· 276 278 if (err) 277 279 goto out_delete; 278 280 279 - evsel = perf_evlist__last(temp_evlist); 281 + evsel = evlist__last(temp_evlist); 280 282 281 283 if (!evlist || perf_cpu_map__empty(evlist->core.cpus)) { 282 284 struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
+1
tools/perf/util/rwsem.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 1 2 #include "util.h" 2 3 #include "rwsem.h" 3 4
-1
tools/perf/util/s390-cpumsf.c
··· 151 151 #include <sys/stat.h> 152 152 #include <sys/types.h> 153 153 154 - #include "cpumap.h" 155 154 #include "color.h" 156 155 #include "evsel.h" 157 156 #include "evlist.h"
-1
tools/perf/util/s390-sample-raw.c
··· 22 22 #include <asm/byteorder.h> 23 23 24 24 #include "debug.h" 25 - #include "util.h" 26 25 #include "session.h" 27 26 #include "evlist.h" 28 27 #include "color.h"
-2
tools/perf/util/scripting-engines/trace-event-python.c
··· 37 37 #include "../dso.h" 38 38 #include "../callchain.h" 39 39 #include "../evsel.h" 40 - #include "../util.h" 41 40 #include "../event.h" 42 41 #include "../thread.h" 43 42 #include "../comm.h" ··· 48 49 #include "map.h" 49 50 #include "symbol.h" 50 51 #include "thread_map.h" 51 - #include "cpumap.h" 52 52 #include "print_binary.h" 53 53 #include "stat.h" 54 54 #include "mem-events.h"
+15 -77
tools/perf/util/session.c
··· 22 22 #include "symbol.h" 23 23 #include "session.h" 24 24 #include "tool.h" 25 - #include "cpumap.h" 26 25 #include "perf_regs.h" 27 26 #include "asm/bug.h" 28 27 #include "auxtrace.h" ··· 29 30 #include "thread-stack.h" 30 31 #include "sample-raw.h" 31 32 #include "stat.h" 32 - #include "util.h" 33 33 #include "ui/progress.h" 34 34 #include "../perf.h" 35 35 #include "arch/common.h" 36 + #include <internal/lib.h> 37 + #include <linux/err.h> 36 38 37 39 #ifdef HAVE_ZSTD_SUPPORT 38 40 static int perf_session__process_compressed_event(struct perf_session *session, ··· 187 187 struct perf_session *perf_session__new(struct perf_data *data, 188 188 bool repipe, struct perf_tool *tool) 189 189 { 190 + int ret = -ENOMEM; 190 191 struct perf_session *session = zalloc(sizeof(*session)); 191 192 192 193 if (!session) ··· 202 201 203 202 perf_env__init(&session->header.env); 204 203 if (data) { 205 - if (perf_data__open(data)) 204 + ret = perf_data__open(data); 205 + if (ret < 0) 206 206 goto out_delete; 207 207 208 208 session->data = data; 209 209 210 210 if (perf_data__is_read(data)) { 211 - if (perf_session__open(session) < 0) 211 + ret = perf_session__open(session); 212 + if (ret < 0) 212 213 goto out_delete; 213 214 214 215 /* ··· 225 222 perf_evlist__init_trace_event_sample_raw(session->evlist); 226 223 227 224 /* Open the directory data. 
*/ 228 - if (data->is_dir && perf_data__open_dir(data)) 225 + if (data->is_dir) { 226 + ret = perf_data__open_dir(data); 227 + if (ret) 229 228 goto out_delete; 229 + } 230 230 } 231 231 } else { 232 232 session->machines.host.env = &perf_env; ··· 262 256 out_delete: 263 257 perf_session__delete(session); 264 258 out: 265 - return NULL; 259 + return ERR_PTR(ret); 266 260 } 267 261 268 262 static void perf_session__delete_threads(struct perf_session *session) ··· 1323 1317 struct machine *machine) 1324 1318 { 1325 1319 struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id); 1320 + struct evsel *evsel; 1326 1321 1327 1322 if (sid) { 1328 1323 sample->id = v->id; ··· 1343 1336 if (!sample->period) 1344 1337 return 0; 1345 1338 1346 - return tool->sample(tool, event, sample, sid->evsel, machine); 1339 + evsel = container_of(sid->evsel, struct evsel, core); 1340 + return tool->sample(tool, event, sample, evsel, machine); 1347 1341 } 1348 1342 1349 1343 static int deliver_sample_group(struct evlist *evlist, ··· 2419 2411 sid->tid = e->tid; 2420 2412 } 2421 2413 return 0; 2422 - } 2423 - 2424 - int perf_event__synthesize_id_index(struct perf_tool *tool, 2425 - perf_event__handler_t process, 2426 - struct evlist *evlist, 2427 - struct machine *machine) 2428 - { 2429 - union perf_event *ev; 2430 - struct evsel *evsel; 2431 - size_t nr = 0, i = 0, sz, max_nr, n; 2432 - int err; 2433 - 2434 - pr_debug2("Synthesizing id index\n"); 2435 - 2436 - max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) / 2437 - sizeof(struct id_index_entry); 2438 - 2439 - evlist__for_each_entry(evlist, evsel) 2440 - nr += evsel->ids; 2441 - 2442 - n = nr > max_nr ? 
max_nr : nr; 2443 - sz = sizeof(struct perf_record_id_index) + n * sizeof(struct id_index_entry); 2444 - ev = zalloc(sz); 2445 - if (!ev) 2446 - return -ENOMEM; 2447 - 2448 - ev->id_index.header.type = PERF_RECORD_ID_INDEX; 2449 - ev->id_index.header.size = sz; 2450 - ev->id_index.nr = n; 2451 - 2452 - evlist__for_each_entry(evlist, evsel) { 2453 - u32 j; 2454 - 2455 - for (j = 0; j < evsel->ids; j++) { 2456 - struct id_index_entry *e; 2457 - struct perf_sample_id *sid; 2458 - 2459 - if (i >= n) { 2460 - err = process(tool, ev, NULL, machine); 2461 - if (err) 2462 - goto out_err; 2463 - nr -= n; 2464 - i = 0; 2465 - } 2466 - 2467 - e = &ev->id_index.entries[i++]; 2468 - 2469 - e->id = evsel->id[j]; 2470 - 2471 - sid = perf_evlist__id2sid(evlist, e->id); 2472 - if (!sid) { 2473 - free(ev); 2474 - return -ENOENT; 2475 - } 2476 - 2477 - e->idx = sid->idx; 2478 - e->cpu = sid->cpu; 2479 - e->tid = sid->tid; 2480 - } 2481 - } 2482 - 2483 - sz = sizeof(struct perf_record_id_index) + nr * sizeof(struct id_index_entry); 2484 - ev->id_index.header.size = sz; 2485 - ev->id_index.nr = nr; 2486 - 2487 - err = process(tool, ev, NULL, machine); 2488 - out_err: 2489 - free(ev); 2490 - 2491 - return err; 2492 2414 }
-5
tools/perf/util/session.h
··· 138 138 int perf_event__process_id_index(struct perf_session *session, 139 139 union perf_event *event); 140 140 141 - int perf_event__synthesize_id_index(struct perf_tool *tool, 142 - perf_event__handler_t process, 143 - struct evlist *evlist, 144 - struct machine *machine); 145 - 146 141 #endif /* __PERF_SESSION_H */
+1 -1
tools/perf/util/sort.c
··· 2329 2329 if (nr > evlist->core.nr_entries) 2330 2330 return NULL; 2331 2331 2332 - evsel = perf_evlist__first(evlist); 2332 + evsel = evlist__first(evlist); 2333 2333 while (--nr > 0) 2334 2334 evsel = perf_evsel__next(evsel); 2335 2335
+1 -1
tools/perf/util/srccode.c
··· 15 15 #include <string.h> 16 16 #include "srccode.h" 17 17 #include "debug.h" 18 - #include "util.h" 18 + #include <internal/lib.h> // page_size 19 19 20 20 #define MAXSRCCACHE (32*1024*1024) 21 21 #define MAXSRCFILES 64
+2 -2
tools/perf/util/stat-shadow.c
··· 738 738 char *n, *pn; 739 739 740 740 expr__ctx_init(&pctx); 741 + /* Must be first id entry */ 742 + expr__add_id(&pctx, name, avg); 741 743 for (i = 0; metric_events[i]; i++) { 742 744 struct saved_value *v; 743 745 struct stats *stats; ··· 777 775 else 778 776 expr__add_id(&pctx, n, avg_stats(stats)*scale); 779 777 } 780 - 781 - expr__add_id(&pctx, name, avg); 782 778 783 779 if (!metric_events[i]) { 784 780 const char *p = metric_expr;
+19 -43
tools/perf/util/stat.c
··· 4 4 #include <math.h> 5 5 #include <string.h> 6 6 #include "counts.h" 7 + #include "cpumap.h" 7 8 #include "debug.h" 8 9 #include "header.h" 9 10 #include "stat.h" ··· 162 161 evsel->prev_raw_counts = NULL; 163 162 } 164 163 164 + static void perf_evsel__reset_prev_raw_counts(struct evsel *evsel) 165 + { 166 + if (evsel->prev_raw_counts) { 167 + evsel->prev_raw_counts->aggr.val = 0; 168 + evsel->prev_raw_counts->aggr.ena = 0; 169 + evsel->prev_raw_counts->aggr.run = 0; 170 + } 171 + } 172 + 165 173 static int perf_evsel__alloc_stats(struct evsel *evsel, bool alloc_raw) 166 174 { 167 175 int ncpus = perf_evsel__nr_cpus(evsel); ··· 219 209 perf_evsel__reset_stat_priv(evsel); 220 210 perf_evsel__reset_counts(evsel); 221 211 } 212 + } 213 + 214 + void perf_evlist__reset_prev_raw_counts(struct evlist *evlist) 215 + { 216 + struct evsel *evsel; 217 + 218 + evlist__for_each_entry(evlist, evsel) 219 + perf_evsel__reset_prev_raw_counts(evsel); 222 220 } 223 221 224 222 static void zero_per_pkg(struct evsel *counter) ··· 336 318 int ncpus = perf_evsel__nr_cpus(counter); 337 319 int cpu, thread; 338 320 339 - if (counter->system_wide) 321 + if (counter->core.system_wide) 340 322 nthreads = 1; 341 323 342 324 for (thread = 0; thread < nthreads; thread++) { ··· 510 492 return perf_evsel__open_per_cpu(evsel, evsel__cpus(evsel)); 511 493 512 494 return perf_evsel__open_per_thread(evsel, evsel->core.threads); 513 - } 514 - 515 - int perf_stat_synthesize_config(struct perf_stat_config *config, 516 - struct perf_tool *tool, 517 - struct evlist *evlist, 518 - perf_event__handler_t process, 519 - bool attrs) 520 - { 521 - int err; 522 - 523 - if (attrs) { 524 - err = perf_event__synthesize_attrs(tool, evlist, process); 525 - if (err < 0) { 526 - pr_err("Couldn't synthesize attrs.\n"); 527 - return err; 528 - } 529 - } 530 - 531 - err = perf_event__synthesize_extra_attr(tool, evlist, process, 532 - attrs); 533 - 534 - err = perf_event__synthesize_thread_map2(tool, 
evlist->core.threads, 535 - process, NULL); 536 - if (err < 0) { 537 - pr_err("Couldn't synthesize thread map.\n"); 538 - return err; 539 - } 540 - 541 - err = perf_event__synthesize_cpu_map(tool, evlist->core.cpus, 542 - process, NULL); 543 - if (err < 0) { 544 - pr_err("Couldn't synthesize thread map.\n"); 545 - return err; 546 - } 547 - 548 - err = perf_event__synthesize_stat_config(tool, config, process, NULL); 549 - if (err < 0) { 550 - pr_err("Couldn't synthesize config.\n"); 551 - return err; 552 - } 553 - 554 - return 0; 555 495 }
+3 -6
tools/perf/util/stat.h
··· 7 7 #include <sys/types.h> 8 8 #include <sys/resource.h> 9 9 #include "rblist.h" 10 - #include "event.h" 11 10 11 + struct perf_cpu_map; 12 + struct perf_stat_config; 12 13 struct timespec; 13 14 14 15 struct stats { ··· 193 192 int perf_evlist__alloc_stats(struct evlist *evlist, bool alloc_raw); 194 193 void perf_evlist__free_stats(struct evlist *evlist); 195 194 void perf_evlist__reset_stats(struct evlist *evlist); 195 + void perf_evlist__reset_prev_raw_counts(struct evlist *evlist); 196 196 197 197 int perf_stat_process_counter(struct perf_stat_config *config, 198 198 struct evsel *counter); ··· 212 210 int create_perf_stat_counter(struct evsel *evsel, 213 211 struct perf_stat_config *config, 214 212 struct target *target); 215 - int perf_stat_synthesize_config(struct perf_stat_config *config, 216 - struct perf_tool *tool, 217 - struct evlist *evlist, 218 - perf_event__handler_t process, 219 - bool attrs); 220 213 void 221 214 perf_evlist__print_counters(struct evlist *evlist, 222 215 struct perf_stat_config *config,
+1 -1
tools/perf/util/svghelper.c
··· 17 17 #include <linux/string.h> 18 18 #include <linux/time64.h> 19 19 #include <linux/zalloc.h> 20 + #include <internal/cpumap.h> 20 21 #include <perf/cpumap.h> 21 22 22 23 #include "env.h" 23 24 #include "svghelper.h" 24 - #include "cpumap.h" 25 25 26 26 static u64 first_time, last_time; 27 27 static u64 turbo_frequency, max_freq;
+4 -1
tools/perf/util/symbol-elf.c
··· 7 7 #include <unistd.h> 8 8 #include <inttypes.h> 9 9 10 + #include "dso.h" 10 11 #include "map.h" 11 12 #include "map_groups.h" 12 13 #include "symbol.h" ··· 17 16 #include "machine.h" 18 17 #include "vdso.h" 19 18 #include "debug.h" 20 - #include "util.h" 19 + #include "util/copyfile.h" 21 20 #include <linux/ctype.h> 21 + #include <linux/kernel.h> 22 22 #include <linux/zalloc.h> 23 23 #include <symbol/kallsyms.h> 24 + #include <internal/lib.h> 24 25 25 26 #ifndef EM_AARCH64 26 27 #define EM_AARCH64 183 /* ARM 64 bit */
+1 -2
tools/perf/util/symbol-minimal.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 1 #include "dso.h" 3 2 #include "symbol.h" 4 3 #include "symsrc.h" 5 - #include "util.h" 6 4 7 5 #include <errno.h> 8 6 #include <unistd.h> ··· 11 13 #include <byteswap.h> 12 14 #include <sys/stat.h> 13 15 #include <linux/zalloc.h> 16 + #include <internal/lib.h> 14 17 15 18 static bool check_need_swap(int file_endian) 16 19 {
+1 -1
tools/perf/util/symbol.c
··· 19 19 #include "build-id.h" 20 20 #include "cap.h" 21 21 #include "dso.h" 22 - #include "util.h" 22 + #include "util.h" // lsdir() 23 23 #include "debug.h" 24 24 #include "event.h" 25 25 #include "machine.h"
+1884
tools/perf/util/synthetic-events.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + 3 + #include "util/debug.h" 4 + #include "util/dso.h" 5 + #include "util/event.h" 6 + #include "util/evlist.h" 7 + #include "util/machine.h" 8 + #include "util/map.h" 9 + #include "util/map_symbol.h" 10 + #include "util/branch.h" 11 + #include "util/memswap.h" 12 + #include "util/namespaces.h" 13 + #include "util/session.h" 14 + #include "util/stat.h" 15 + #include "util/symbol.h" 16 + #include "util/synthetic-events.h" 17 + #include "util/target.h" 18 + #include "util/time-utils.h" 19 + #include <linux/bitops.h> 20 + #include <linux/kernel.h> 21 + #include <linux/string.h> 22 + #include <linux/zalloc.h> 23 + #include <linux/perf_event.h> 24 + #include <asm/bug.h> 25 + #include <perf/evsel.h> 26 + #include <internal/cpumap.h> 27 + #include <perf/cpumap.h> 28 + #include <internal/lib.h> // page_size 29 + #include <internal/threadmap.h> 30 + #include <perf/threadmap.h> 31 + #include <symbol/kallsyms.h> 32 + #include <dirent.h> 33 + #include <errno.h> 34 + #include <inttypes.h> 35 + #include <stdio.h> 36 + #include <string.h> 37 + #include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */ 38 + #include <api/fs/fs.h> 39 + #include <sys/types.h> 40 + #include <sys/stat.h> 41 + #include <fcntl.h> 42 + #include <unistd.h> 43 + 44 + #define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500 45 + 46 + unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT; 47 + 48 + int perf_tool__process_synth_event(struct perf_tool *tool, 49 + union perf_event *event, 50 + struct machine *machine, 51 + perf_event__handler_t process) 52 + { 53 + struct perf_sample synth_sample = { 54 + .pid = -1, 55 + .tid = -1, 56 + .time = -1, 57 + .stream_id = -1, 58 + .cpu = -1, 59 + .period = 1, 60 + .cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK, 61 + }; 62 + 63 + return process(tool, event, &synth_sample, machine); 64 + }; 65 + 66 + /* 67 + * Assumes that the first 4095 bytes of /proc/pid/stat contains 68 + 
* the comm, tgid and ppid. 69 + */ 70 + static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len, 71 + pid_t *tgid, pid_t *ppid) 72 + { 73 + char filename[PATH_MAX]; 74 + char bf[4096]; 75 + int fd; 76 + size_t size = 0; 77 + ssize_t n; 78 + char *name, *tgids, *ppids; 79 + 80 + *tgid = -1; 81 + *ppid = -1; 82 + 83 + snprintf(filename, sizeof(filename), "/proc/%d/status", pid); 84 + 85 + fd = open(filename, O_RDONLY); 86 + if (fd < 0) { 87 + pr_debug("couldn't open %s\n", filename); 88 + return -1; 89 + } 90 + 91 + n = read(fd, bf, sizeof(bf) - 1); 92 + close(fd); 93 + if (n <= 0) { 94 + pr_warning("Couldn't get COMM, tigd and ppid for pid %d\n", 95 + pid); 96 + return -1; 97 + } 98 + bf[n] = '\0'; 99 + 100 + name = strstr(bf, "Name:"); 101 + tgids = strstr(bf, "Tgid:"); 102 + ppids = strstr(bf, "PPid:"); 103 + 104 + if (name) { 105 + char *nl; 106 + 107 + name = skip_spaces(name + 5); /* strlen("Name:") */ 108 + nl = strchr(name, '\n'); 109 + if (nl) 110 + *nl = '\0'; 111 + 112 + size = strlen(name); 113 + if (size >= len) 114 + size = len - 1; 115 + memcpy(comm, name, size); 116 + comm[size] = '\0'; 117 + } else { 118 + pr_debug("Name: string not found for pid %d\n", pid); 119 + } 120 + 121 + if (tgids) { 122 + tgids += 5; /* strlen("Tgid:") */ 123 + *tgid = atoi(tgids); 124 + } else { 125 + pr_debug("Tgid: string not found for pid %d\n", pid); 126 + } 127 + 128 + if (ppids) { 129 + ppids += 5; /* strlen("PPid:") */ 130 + *ppid = atoi(ppids); 131 + } else { 132 + pr_debug("PPid: string not found for pid %d\n", pid); 133 + } 134 + 135 + return 0; 136 + } 137 + 138 + static int perf_event__prepare_comm(union perf_event *event, pid_t pid, 139 + struct machine *machine, 140 + pid_t *tgid, pid_t *ppid) 141 + { 142 + size_t size; 143 + 144 + *ppid = -1; 145 + 146 + memset(&event->comm, 0, sizeof(event->comm)); 147 + 148 + if (machine__is_host(machine)) { 149 + if (perf_event__get_comm_ids(pid, event->comm.comm, 150 + sizeof(event->comm.comm), 151 + tgid, 
ppid) != 0) { 152 + return -1; 153 + } 154 + } else { 155 + *tgid = machine->pid; 156 + } 157 + 158 + if (*tgid < 0) 159 + return -1; 160 + 161 + event->comm.pid = *tgid; 162 + event->comm.header.type = PERF_RECORD_COMM; 163 + 164 + size = strlen(event->comm.comm) + 1; 165 + size = PERF_ALIGN(size, sizeof(u64)); 166 + memset(event->comm.comm + size, 0, machine->id_hdr_size); 167 + event->comm.header.size = (sizeof(event->comm) - 168 + (sizeof(event->comm.comm) - size) + 169 + machine->id_hdr_size); 170 + event->comm.tid = pid; 171 + 172 + return 0; 173 + } 174 + 175 + pid_t perf_event__synthesize_comm(struct perf_tool *tool, 176 + union perf_event *event, pid_t pid, 177 + perf_event__handler_t process, 178 + struct machine *machine) 179 + { 180 + pid_t tgid, ppid; 181 + 182 + if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0) 183 + return -1; 184 + 185 + if (perf_tool__process_synth_event(tool, event, machine, process) != 0) 186 + return -1; 187 + 188 + return tgid; 189 + } 190 + 191 + static void perf_event__get_ns_link_info(pid_t pid, const char *ns, 192 + struct perf_ns_link_info *ns_link_info) 193 + { 194 + struct stat64 st; 195 + char proc_ns[128]; 196 + 197 + sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns); 198 + if (stat64(proc_ns, &st) == 0) { 199 + ns_link_info->dev = st.st_dev; 200 + ns_link_info->ino = st.st_ino; 201 + } 202 + } 203 + 204 + int perf_event__synthesize_namespaces(struct perf_tool *tool, 205 + union perf_event *event, 206 + pid_t pid, pid_t tgid, 207 + perf_event__handler_t process, 208 + struct machine *machine) 209 + { 210 + u32 idx; 211 + struct perf_ns_link_info *ns_link_info; 212 + 213 + if (!tool || !tool->namespace_events) 214 + return 0; 215 + 216 + memset(&event->namespaces, 0, (sizeof(event->namespaces) + 217 + (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) + 218 + machine->id_hdr_size)); 219 + 220 + event->namespaces.pid = tgid; 221 + event->namespaces.tid = pid; 222 + 223 + event->namespaces.nr_namespaces = 
NR_NAMESPACES; 224 + 225 + ns_link_info = event->namespaces.link_info; 226 + 227 + for (idx = 0; idx < event->namespaces.nr_namespaces; idx++) 228 + perf_event__get_ns_link_info(pid, perf_ns__name(idx), 229 + &ns_link_info[idx]); 230 + 231 + event->namespaces.header.type = PERF_RECORD_NAMESPACES; 232 + 233 + event->namespaces.header.size = (sizeof(event->namespaces) + 234 + (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) + 235 + machine->id_hdr_size); 236 + 237 + if (perf_tool__process_synth_event(tool, event, machine, process) != 0) 238 + return -1; 239 + 240 + return 0; 241 + } 242 + 243 + static int perf_event__synthesize_fork(struct perf_tool *tool, 244 + union perf_event *event, 245 + pid_t pid, pid_t tgid, pid_t ppid, 246 + perf_event__handler_t process, 247 + struct machine *machine) 248 + { 249 + memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size); 250 + 251 + /* 252 + * for main thread set parent to ppid from status file. For other 253 + * threads set parent pid to main thread. 
ie., assume main thread 254 + * spawns all threads in a process 255 + */ 256 + if (tgid == pid) { 257 + event->fork.ppid = ppid; 258 + event->fork.ptid = ppid; 259 + } else { 260 + event->fork.ppid = tgid; 261 + event->fork.ptid = tgid; 262 + } 263 + event->fork.pid = tgid; 264 + event->fork.tid = pid; 265 + event->fork.header.type = PERF_RECORD_FORK; 266 + event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC; 267 + 268 + event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size); 269 + 270 + if (perf_tool__process_synth_event(tool, event, machine, process) != 0) 271 + return -1; 272 + 273 + return 0; 274 + } 275 + 276 + int perf_event__synthesize_mmap_events(struct perf_tool *tool, 277 + union perf_event *event, 278 + pid_t pid, pid_t tgid, 279 + perf_event__handler_t process, 280 + struct machine *machine, 281 + bool mmap_data) 282 + { 283 + char filename[PATH_MAX]; 284 + FILE *fp; 285 + unsigned long long t; 286 + bool truncation = false; 287 + unsigned long long timeout = proc_map_timeout * 1000000ULL; 288 + int rc = 0; 289 + const char *hugetlbfs_mnt = hugetlbfs__mountpoint(); 290 + int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0; 291 + 292 + if (machine__is_default_guest(machine)) 293 + return 0; 294 + 295 + snprintf(filename, sizeof(filename), "%s/proc/%d/task/%d/maps", 296 + machine->root_dir, pid, pid); 297 + 298 + fp = fopen(filename, "r"); 299 + if (fp == NULL) { 300 + /* 301 + * We raced with a task exiting - just return: 302 + */ 303 + pr_debug("couldn't open %s\n", filename); 304 + return -1; 305 + } 306 + 307 + event->header.type = PERF_RECORD_MMAP2; 308 + t = rdclock(); 309 + 310 + while (1) { 311 + char bf[BUFSIZ]; 312 + char prot[5]; 313 + char execname[PATH_MAX]; 314 + char anonstr[] = "//anon"; 315 + unsigned int ino; 316 + size_t size; 317 + ssize_t n; 318 + 319 + if (fgets(bf, sizeof(bf), fp) == NULL) 320 + break; 321 + 322 + if ((rdclock() - t) > timeout) { 323 + pr_warning("Reading %s time out. 
" 324 + "You may want to increase " 325 + "the time limit by --proc-map-timeout\n", 326 + filename); 327 + truncation = true; 328 + goto out; 329 + } 330 + 331 + /* ensure null termination since stack will be reused. */ 332 + strcpy(execname, ""); 333 + 334 + /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */ 335 + n = sscanf(bf, "%"PRI_lx64"-%"PRI_lx64" %s %"PRI_lx64" %x:%x %u %[^\n]\n", 336 + &event->mmap2.start, &event->mmap2.len, prot, 337 + &event->mmap2.pgoff, &event->mmap2.maj, 338 + &event->mmap2.min, 339 + &ino, execname); 340 + 341 + /* 342 + * Anon maps don't have the execname. 343 + */ 344 + if (n < 7) 345 + continue; 346 + 347 + event->mmap2.ino = (u64)ino; 348 + 349 + /* 350 + * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c 351 + */ 352 + if (machine__is_host(machine)) 353 + event->header.misc = PERF_RECORD_MISC_USER; 354 + else 355 + event->header.misc = PERF_RECORD_MISC_GUEST_USER; 356 + 357 + /* map protection and flags bits */ 358 + event->mmap2.prot = 0; 359 + event->mmap2.flags = 0; 360 + if (prot[0] == 'r') 361 + event->mmap2.prot |= PROT_READ; 362 + if (prot[1] == 'w') 363 + event->mmap2.prot |= PROT_WRITE; 364 + if (prot[2] == 'x') 365 + event->mmap2.prot |= PROT_EXEC; 366 + 367 + if (prot[3] == 's') 368 + event->mmap2.flags |= MAP_SHARED; 369 + else 370 + event->mmap2.flags |= MAP_PRIVATE; 371 + 372 + if (prot[2] != 'x') { 373 + if (!mmap_data || prot[0] != 'r') 374 + continue; 375 + 376 + event->header.misc |= PERF_RECORD_MISC_MMAP_DATA; 377 + } 378 + 379 + out: 380 + if (truncation) 381 + event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT; 382 + 383 + if (!strcmp(execname, "")) 384 + strcpy(execname, anonstr); 385 + 386 + if (hugetlbfs_mnt_len && 387 + !strncmp(execname, hugetlbfs_mnt, hugetlbfs_mnt_len)) { 388 + strcpy(execname, anonstr); 389 + event->mmap2.flags |= MAP_HUGETLB; 390 + } 391 + 392 + size = strlen(execname) + 1; 393 + memcpy(event->mmap2.filename, execname, size); 394 + size = 
PERF_ALIGN(size, sizeof(u64)); 395 + event->mmap2.len -= event->mmap.start; 396 + event->mmap2.header.size = (sizeof(event->mmap2) - 397 + (sizeof(event->mmap2.filename) - size)); 398 + memset(event->mmap2.filename + size, 0, machine->id_hdr_size); 399 + event->mmap2.header.size += machine->id_hdr_size; 400 + event->mmap2.pid = tgid; 401 + event->mmap2.tid = pid; 402 + 403 + if (perf_tool__process_synth_event(tool, event, machine, process) != 0) { 404 + rc = -1; 405 + break; 406 + } 407 + 408 + if (truncation) 409 + break; 410 + } 411 + 412 + fclose(fp); 413 + return rc; 414 + } 415 + 416 + int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process, 417 + struct machine *machine) 418 + { 419 + int rc = 0; 420 + struct map *pos; 421 + struct maps *maps = machine__kernel_maps(machine); 422 + union perf_event *event = zalloc((sizeof(event->mmap) + 423 + machine->id_hdr_size)); 424 + if (event == NULL) { 425 + pr_debug("Not enough memory synthesizing mmap event " 426 + "for kernel modules\n"); 427 + return -1; 428 + } 429 + 430 + event->header.type = PERF_RECORD_MMAP; 431 + 432 + /* 433 + * kernel uses 0 for user space maps, see kernel/perf_event.c 434 + * __perf_event_mmap 435 + */ 436 + if (machine__is_host(machine)) 437 + event->header.misc = PERF_RECORD_MISC_KERNEL; 438 + else 439 + event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; 440 + 441 + for (pos = maps__first(maps); pos; pos = map__next(pos)) { 442 + size_t size; 443 + 444 + if (!__map__is_kmodule(pos)) 445 + continue; 446 + 447 + size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64)); 448 + event->mmap.header.type = PERF_RECORD_MMAP; 449 + event->mmap.header.size = (sizeof(event->mmap) - 450 + (sizeof(event->mmap.filename) - size)); 451 + memset(event->mmap.filename + size, 0, machine->id_hdr_size); 452 + event->mmap.header.size += machine->id_hdr_size; 453 + event->mmap.start = pos->start; 454 + event->mmap.len = pos->end - pos->start; 455 + event->mmap.pid = 
machine->pid; 456 + 457 + memcpy(event->mmap.filename, pos->dso->long_name, 458 + pos->dso->long_name_len + 1); 459 + if (perf_tool__process_synth_event(tool, event, machine, process) != 0) { 460 + rc = -1; 461 + break; 462 + } 463 + } 464 + 465 + free(event); 466 + return rc; 467 + } 468 + 469 + static int __event__synthesize_thread(union perf_event *comm_event, 470 + union perf_event *mmap_event, 471 + union perf_event *fork_event, 472 + union perf_event *namespaces_event, 473 + pid_t pid, int full, perf_event__handler_t process, 474 + struct perf_tool *tool, struct machine *machine, bool mmap_data) 475 + { 476 + char filename[PATH_MAX]; 477 + DIR *tasks; 478 + struct dirent *dirent; 479 + pid_t tgid, ppid; 480 + int rc = 0; 481 + 482 + /* special case: only send one comm event using passed in pid */ 483 + if (!full) { 484 + tgid = perf_event__synthesize_comm(tool, comm_event, pid, 485 + process, machine); 486 + 487 + if (tgid == -1) 488 + return -1; 489 + 490 + if (perf_event__synthesize_namespaces(tool, namespaces_event, pid, 491 + tgid, process, machine) < 0) 492 + return -1; 493 + 494 + /* 495 + * send mmap only for thread group leader 496 + * see thread__init_map_groups 497 + */ 498 + if (pid == tgid && 499 + perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, 500 + process, machine, mmap_data)) 501 + return -1; 502 + 503 + return 0; 504 + } 505 + 506 + if (machine__is_default_guest(machine)) 507 + return 0; 508 + 509 + snprintf(filename, sizeof(filename), "%s/proc/%d/task", 510 + machine->root_dir, pid); 511 + 512 + tasks = opendir(filename); 513 + if (tasks == NULL) { 514 + pr_debug("couldn't open %s\n", filename); 515 + return 0; 516 + } 517 + 518 + while ((dirent = readdir(tasks)) != NULL) { 519 + char *end; 520 + pid_t _pid; 521 + 522 + _pid = strtol(dirent->d_name, &end, 10); 523 + if (*end) 524 + continue; 525 + 526 + rc = -1; 527 + if (perf_event__prepare_comm(comm_event, _pid, machine, 528 + &tgid, &ppid) != 0) 529 + break; 530 + 531 + 
if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid, 532 + ppid, process, machine) < 0) 533 + break; 534 + 535 + if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid, 536 + tgid, process, machine) < 0) 537 + break; 538 + 539 + /* 540 + * Send the prepared comm event 541 + */ 542 + if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0) 543 + break; 544 + 545 + rc = 0; 546 + if (_pid == pid) { 547 + /* process the parent's maps too */ 548 + rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, 549 + process, machine, mmap_data); 550 + if (rc) 551 + break; 552 + } 553 + } 554 + 555 + closedir(tasks); 556 + return rc; 557 + } 558 + 559 + int perf_event__synthesize_thread_map(struct perf_tool *tool, 560 + struct perf_thread_map *threads, 561 + perf_event__handler_t process, 562 + struct machine *machine, 563 + bool mmap_data) 564 + { 565 + union perf_event *comm_event, *mmap_event, *fork_event; 566 + union perf_event *namespaces_event; 567 + int err = -1, thread, j; 568 + 569 + comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size); 570 + if (comm_event == NULL) 571 + goto out; 572 + 573 + mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size); 574 + if (mmap_event == NULL) 575 + goto out_free_comm; 576 + 577 + fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size); 578 + if (fork_event == NULL) 579 + goto out_free_mmap; 580 + 581 + namespaces_event = malloc(sizeof(namespaces_event->namespaces) + 582 + (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) + 583 + machine->id_hdr_size); 584 + if (namespaces_event == NULL) 585 + goto out_free_fork; 586 + 587 + err = 0; 588 + for (thread = 0; thread < threads->nr; ++thread) { 589 + if (__event__synthesize_thread(comm_event, mmap_event, 590 + fork_event, namespaces_event, 591 + perf_thread_map__pid(threads, thread), 0, 592 + process, tool, machine, 593 + mmap_data)) { 594 + err = -1; 595 + break; 596 + } 597 + 598 + /* 599 + 
* comm.pid is set to thread group id by 600 + * perf_event__synthesize_comm 601 + */ 602 + if ((int) comm_event->comm.pid != perf_thread_map__pid(threads, thread)) { 603 + bool need_leader = true; 604 + 605 + /* is thread group leader in thread_map? */ 606 + for (j = 0; j < threads->nr; ++j) { 607 + if ((int) comm_event->comm.pid == perf_thread_map__pid(threads, j)) { 608 + need_leader = false; 609 + break; 610 + } 611 + } 612 + 613 + /* if not, generate events for it */ 614 + if (need_leader && 615 + __event__synthesize_thread(comm_event, mmap_event, 616 + fork_event, namespaces_event, 617 + comm_event->comm.pid, 0, 618 + process, tool, machine, 619 + mmap_data)) { 620 + err = -1; 621 + break; 622 + } 623 + } 624 + } 625 + free(namespaces_event); 626 + out_free_fork: 627 + free(fork_event); 628 + out_free_mmap: 629 + free(mmap_event); 630 + out_free_comm: 631 + free(comm_event); 632 + out: 633 + return err; 634 + } 635 + 636 + static int __perf_event__synthesize_threads(struct perf_tool *tool, 637 + perf_event__handler_t process, 638 + struct machine *machine, 639 + bool mmap_data, 640 + struct dirent **dirent, 641 + int start, 642 + int num) 643 + { 644 + union perf_event *comm_event, *mmap_event, *fork_event; 645 + union perf_event *namespaces_event; 646 + int err = -1; 647 + char *end; 648 + pid_t pid; 649 + int i; 650 + 651 + comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size); 652 + if (comm_event == NULL) 653 + goto out; 654 + 655 + mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size); 656 + if (mmap_event == NULL) 657 + goto out_free_comm; 658 + 659 + fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size); 660 + if (fork_event == NULL) 661 + goto out_free_mmap; 662 + 663 + namespaces_event = malloc(sizeof(namespaces_event->namespaces) + 664 + (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) + 665 + machine->id_hdr_size); 666 + if (namespaces_event == NULL) 667 + goto out_free_fork; 668 + 669 + for (i = 
start; i < start + num; i++) { 670 + if (!isdigit(dirent[i]->d_name[0])) 671 + continue; 672 + 673 + pid = (pid_t)strtol(dirent[i]->d_name, &end, 10); 674 + /* only interested in proper numerical dirents */ 675 + if (*end) 676 + continue; 677 + /* 678 + * We may race with exiting thread, so don't stop just because 679 + * one thread couldn't be synthesized. 680 + */ 681 + __event__synthesize_thread(comm_event, mmap_event, fork_event, 682 + namespaces_event, pid, 1, process, 683 + tool, machine, mmap_data); 684 + } 685 + err = 0; 686 + 687 + free(namespaces_event); 688 + out_free_fork: 689 + free(fork_event); 690 + out_free_mmap: 691 + free(mmap_event); 692 + out_free_comm: 693 + free(comm_event); 694 + out: 695 + return err; 696 + } 697 + 698 + struct synthesize_threads_arg { 699 + struct perf_tool *tool; 700 + perf_event__handler_t process; 701 + struct machine *machine; 702 + bool mmap_data; 703 + struct dirent **dirent; 704 + int num; 705 + int start; 706 + }; 707 + 708 + static void *synthesize_threads_worker(void *arg) 709 + { 710 + struct synthesize_threads_arg *args = arg; 711 + 712 + __perf_event__synthesize_threads(args->tool, args->process, 713 + args->machine, args->mmap_data, 714 + args->dirent, 715 + args->start, args->num); 716 + return NULL; 717 + } 718 + 719 + int perf_event__synthesize_threads(struct perf_tool *tool, 720 + perf_event__handler_t process, 721 + struct machine *machine, 722 + bool mmap_data, 723 + unsigned int nr_threads_synthesize) 724 + { 725 + struct synthesize_threads_arg *args = NULL; 726 + pthread_t *synthesize_threads = NULL; 727 + char proc_path[PATH_MAX]; 728 + struct dirent **dirent; 729 + int num_per_thread; 730 + int m, n, i, j; 731 + int thread_nr; 732 + int base = 0; 733 + int err = -1; 734 + 735 + 736 + if (machine__is_default_guest(machine)) 737 + return 0; 738 + 739 + snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir); 740 + n = scandir(proc_path, &dirent, 0, alphasort); 741 + if (n < 0) 742 + return 
err; 743 + 744 + if (nr_threads_synthesize == UINT_MAX) 745 + thread_nr = sysconf(_SC_NPROCESSORS_ONLN); 746 + else 747 + thread_nr = nr_threads_synthesize; 748 + 749 + if (thread_nr <= 1) { 750 + err = __perf_event__synthesize_threads(tool, process, 751 + machine, mmap_data, 752 + dirent, base, n); 753 + goto free_dirent; 754 + } 755 + if (thread_nr > n) 756 + thread_nr = n; 757 + 758 + synthesize_threads = calloc(sizeof(pthread_t), thread_nr); 759 + if (synthesize_threads == NULL) 760 + goto free_dirent; 761 + 762 + args = calloc(sizeof(*args), thread_nr); 763 + if (args == NULL) 764 + goto free_threads; 765 + 766 + num_per_thread = n / thread_nr; 767 + m = n % thread_nr; 768 + for (i = 0; i < thread_nr; i++) { 769 + args[i].tool = tool; 770 + args[i].process = process; 771 + args[i].machine = machine; 772 + args[i].mmap_data = mmap_data; 773 + args[i].dirent = dirent; 774 + } 775 + for (i = 0; i < m; i++) { 776 + args[i].num = num_per_thread + 1; 777 + args[i].start = i * args[i].num; 778 + } 779 + if (i != 0) 780 + base = args[i-1].start + args[i-1].num; 781 + for (j = i; j < thread_nr; j++) { 782 + args[j].num = num_per_thread; 783 + args[j].start = base + (j - i) * args[i].num; 784 + } 785 + 786 + for (i = 0; i < thread_nr; i++) { 787 + if (pthread_create(&synthesize_threads[i], NULL, 788 + synthesize_threads_worker, &args[i])) 789 + goto out_join; 790 + } 791 + err = 0; 792 + out_join: 793 + for (i = 0; i < thread_nr; i++) 794 + pthread_join(synthesize_threads[i], NULL); 795 + free(args); 796 + free_threads: 797 + free(synthesize_threads); 798 + free_dirent: 799 + for (i = 0; i < n; i++) 800 + zfree(&dirent[i]); 801 + free(dirent); 802 + 803 + return err; 804 + } 805 + 806 + int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused, 807 + perf_event__handler_t process __maybe_unused, 808 + struct machine *machine __maybe_unused) 809 + { 810 + return 0; 811 + } 812 + 813 + static int __perf_event__synthesize_kernel_mmap(struct 
perf_tool *tool, 814 + perf_event__handler_t process, 815 + struct machine *machine) 816 + { 817 + size_t size; 818 + struct map *map = machine__kernel_map(machine); 819 + struct kmap *kmap; 820 + int err; 821 + union perf_event *event; 822 + 823 + if (map == NULL) 824 + return -1; 825 + 826 + kmap = map__kmap(map); 827 + if (!kmap->ref_reloc_sym) 828 + return -1; 829 + 830 + /* 831 + * We should get this from /sys/kernel/sections/.text, but till that is 832 + * available use this, and after it is use this as a fallback for older 833 + * kernels. 834 + */ 835 + event = zalloc((sizeof(event->mmap) + machine->id_hdr_size)); 836 + if (event == NULL) { 837 + pr_debug("Not enough memory synthesizing mmap event " 838 + "for kernel modules\n"); 839 + return -1; 840 + } 841 + 842 + if (machine__is_host(machine)) { 843 + /* 844 + * kernel uses PERF_RECORD_MISC_USER for user space maps, 845 + * see kernel/perf_event.c __perf_event_mmap 846 + */ 847 + event->header.misc = PERF_RECORD_MISC_KERNEL; 848 + } else { 849 + event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; 850 + } 851 + 852 + size = snprintf(event->mmap.filename, sizeof(event->mmap.filename), 853 + "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1; 854 + size = PERF_ALIGN(size, sizeof(u64)); 855 + event->mmap.header.type = PERF_RECORD_MMAP; 856 + event->mmap.header.size = (sizeof(event->mmap) - 857 + (sizeof(event->mmap.filename) - size) + machine->id_hdr_size); 858 + event->mmap.pgoff = kmap->ref_reloc_sym->addr; 859 + event->mmap.start = map->start; 860 + event->mmap.len = map->end - event->mmap.start; 861 + event->mmap.pid = machine->pid; 862 + 863 + err = perf_tool__process_synth_event(tool, event, machine, process); 864 + free(event); 865 + 866 + return err; 867 + } 868 + 869 + int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, 870 + perf_event__handler_t process, 871 + struct machine *machine) 872 + { 873 + int err; 874 + 875 + err = __perf_event__synthesize_kernel_mmap(tool, process, 
machine); 876 + if (err < 0) 877 + return err; 878 + 879 + return perf_event__synthesize_extra_kmaps(tool, process, machine); 880 + } 881 + 882 + int perf_event__synthesize_thread_map2(struct perf_tool *tool, 883 + struct perf_thread_map *threads, 884 + perf_event__handler_t process, 885 + struct machine *machine) 886 + { 887 + union perf_event *event; 888 + int i, err, size; 889 + 890 + size = sizeof(event->thread_map); 891 + size += threads->nr * sizeof(event->thread_map.entries[0]); 892 + 893 + event = zalloc(size); 894 + if (!event) 895 + return -ENOMEM; 896 + 897 + event->header.type = PERF_RECORD_THREAD_MAP; 898 + event->header.size = size; 899 + event->thread_map.nr = threads->nr; 900 + 901 + for (i = 0; i < threads->nr; i++) { 902 + struct perf_record_thread_map_entry *entry = &event->thread_map.entries[i]; 903 + char *comm = perf_thread_map__comm(threads, i); 904 + 905 + if (!comm) 906 + comm = (char *) ""; 907 + 908 + entry->pid = perf_thread_map__pid(threads, i); 909 + strncpy((char *) &entry->comm, comm, sizeof(entry->comm)); 910 + } 911 + 912 + err = process(tool, event, NULL, machine); 913 + 914 + free(event); 915 + return err; 916 + } 917 + 918 + static void synthesize_cpus(struct cpu_map_entries *cpus, 919 + struct perf_cpu_map *map) 920 + { 921 + int i; 922 + 923 + cpus->nr = map->nr; 924 + 925 + for (i = 0; i < map->nr; i++) 926 + cpus->cpu[i] = map->map[i]; 927 + } 928 + 929 + static void synthesize_mask(struct perf_record_record_cpu_map *mask, 930 + struct perf_cpu_map *map, int max) 931 + { 932 + int i; 933 + 934 + mask->nr = BITS_TO_LONGS(max); 935 + mask->long_size = sizeof(long); 936 + 937 + for (i = 0; i < map->nr; i++) 938 + set_bit(map->map[i], mask->mask); 939 + } 940 + 941 + static size_t cpus_size(struct perf_cpu_map *map) 942 + { 943 + return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16); 944 + } 945 + 946 + static size_t mask_size(struct perf_cpu_map *map, int *max) 947 + { 948 + int i; 949 + 950 + *max = 0; 951 + 952 + for 
(i = 0; i < map->nr; i++) { 953 + /* bit possition of the cpu is + 1 */ 954 + int bit = map->map[i] + 1; 955 + 956 + if (bit > *max) 957 + *max = bit; 958 + } 959 + 960 + return sizeof(struct perf_record_record_cpu_map) + BITS_TO_LONGS(*max) * sizeof(long); 961 + } 962 + 963 + void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max) 964 + { 965 + size_t size_cpus, size_mask; 966 + bool is_dummy = perf_cpu_map__empty(map); 967 + 968 + /* 969 + * Both array and mask data have variable size based 970 + * on the number of cpus and their actual values. 971 + * The size of the 'struct perf_record_cpu_map_data' is: 972 + * 973 + * array = size of 'struct cpu_map_entries' + 974 + * number of cpus * sizeof(u64) 975 + * 976 + * mask = size of 'struct perf_record_record_cpu_map' + 977 + * maximum cpu bit converted to size of longs 978 + * 979 + * and finaly + the size of 'struct perf_record_cpu_map_data'. 980 + */ 981 + size_cpus = cpus_size(map); 982 + size_mask = mask_size(map, max); 983 + 984 + if (is_dummy || (size_cpus < size_mask)) { 985 + *size += size_cpus; 986 + *type = PERF_CPU_MAP__CPUS; 987 + } else { 988 + *size += size_mask; 989 + *type = PERF_CPU_MAP__MASK; 990 + } 991 + 992 + *size += sizeof(struct perf_record_cpu_map_data); 993 + *size = PERF_ALIGN(*size, sizeof(u64)); 994 + return zalloc(*size); 995 + } 996 + 997 + void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data, struct perf_cpu_map *map, 998 + u16 type, int max) 999 + { 1000 + data->type = type; 1001 + 1002 + switch (type) { 1003 + case PERF_CPU_MAP__CPUS: 1004 + synthesize_cpus((struct cpu_map_entries *) data->data, map); 1005 + break; 1006 + case PERF_CPU_MAP__MASK: 1007 + synthesize_mask((struct perf_record_record_cpu_map *)data->data, map, max); 1008 + default: 1009 + break; 1010 + }; 1011 + } 1012 + 1013 + static struct perf_record_cpu_map *cpu_map_event__new(struct perf_cpu_map *map) 1014 + { 1015 + size_t size = sizeof(struct perf_record_cpu_map); 
1016 + struct perf_record_cpu_map *event; 1017 + int max; 1018 + u16 type; 1019 + 1020 + event = cpu_map_data__alloc(map, &size, &type, &max); 1021 + if (!event) 1022 + return NULL; 1023 + 1024 + event->header.type = PERF_RECORD_CPU_MAP; 1025 + event->header.size = size; 1026 + event->data.type = type; 1027 + 1028 + cpu_map_data__synthesize(&event->data, map, type, max); 1029 + return event; 1030 + } 1031 + 1032 + int perf_event__synthesize_cpu_map(struct perf_tool *tool, 1033 + struct perf_cpu_map *map, 1034 + perf_event__handler_t process, 1035 + struct machine *machine) 1036 + { 1037 + struct perf_record_cpu_map *event; 1038 + int err; 1039 + 1040 + event = cpu_map_event__new(map); 1041 + if (!event) 1042 + return -ENOMEM; 1043 + 1044 + err = process(tool, (union perf_event *) event, NULL, machine); 1045 + 1046 + free(event); 1047 + return err; 1048 + } 1049 + 1050 + int perf_event__synthesize_stat_config(struct perf_tool *tool, 1051 + struct perf_stat_config *config, 1052 + perf_event__handler_t process, 1053 + struct machine *machine) 1054 + { 1055 + struct perf_record_stat_config *event; 1056 + int size, i = 0, err; 1057 + 1058 + size = sizeof(*event); 1059 + size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0])); 1060 + 1061 + event = zalloc(size); 1062 + if (!event) 1063 + return -ENOMEM; 1064 + 1065 + event->header.type = PERF_RECORD_STAT_CONFIG; 1066 + event->header.size = size; 1067 + event->nr = PERF_STAT_CONFIG_TERM__MAX; 1068 + 1069 + #define ADD(__term, __val) \ 1070 + event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term; \ 1071 + event->data[i].val = __val; \ 1072 + i++; 1073 + 1074 + ADD(AGGR_MODE, config->aggr_mode) 1075 + ADD(INTERVAL, config->interval) 1076 + ADD(SCALE, config->scale) 1077 + 1078 + WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX, 1079 + "stat config terms unbalanced\n"); 1080 + #undef ADD 1081 + 1082 + err = process(tool, (union perf_event *) event, NULL, machine); 1083 + 1084 + free(event); 1085 + return err; 1086 + } 1087 + 
/*
 * Synthesize a PERF_RECORD_STAT for one (cpu, thread, id) counter reading
 * and feed it to @process. Returns the handler's result.
 */
int perf_event__synthesize_stat(struct perf_tool *tool,
				u32 cpu, u32 thread, u64 id,
				struct perf_counts_values *count,
				perf_event__handler_t process,
				struct machine *machine)
{
	struct perf_record_stat event;

	event.header.type = PERF_RECORD_STAT;
	event.header.size = sizeof(event);
	event.header.misc = 0;

	event.id        = id;
	event.cpu       = cpu;
	event.thread    = thread;
	event.val       = count->val;
	event.ena       = count->ena;
	event.run       = count->run;

	return process(tool, (union perf_event *) &event, NULL, machine);
}

/*
 * Synthesize a PERF_RECORD_STAT_ROUND marking the end of an interval of
 * @type at time @evtime. Returns the handler's result.
 */
int perf_event__synthesize_stat_round(struct perf_tool *tool,
				      u64 evtime, u64 type,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	struct perf_record_stat_round event;

	event.header.type = PERF_RECORD_STAT_ROUND;
	event.header.size = sizeof(event);
	event.header.misc = 0;

	event.time = evtime;
	event.type = type;

	return process(tool, (union perf_event *) &event, NULL, machine);
}

/*
 * Compute the on-disk size of a PERF_RECORD_SAMPLE for @sample given the
 * sample_type bits in @type and @read_format. The per-bit accounting here
 * must stay in the exact order the kernel writes the fields;
 * perf_event__synthesize_sample() below mirrors the same order.
 */
size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, u64 read_format)
{
	size_t sz, result = sizeof(struct perf_record_sample);

	if (type & PERF_SAMPLE_IDENTIFIER)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_IP)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TIME)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ADDR)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_STREAM_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_CPU)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_PERIOD)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_READ) {
		result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			result += sizeof(u64);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		/* +1 for the leading nr word */
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		/* u32 size prefix followed by the raw payload */
		result += sizeof(u32);
		result += sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		/* nr word precedes the entries */
		sz += sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			result += sizeof(u64);
			sz = hweight64(sample->user_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			/* abi == 0 alone marks "no regs" */
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		result += sizeof(u64);
		if (sz) {
			result += sz;
			/* trailing dyn_size word */
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_DATA_SRC)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TRANSACTION)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			result += sizeof(u64);
			sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_PHYS_ADDR)
		result += sizeof(u64);

	return result;
}

/*
 * Serialize @sample into @event->sample.array in the kernel's field order
 * (the same order perf_event__sample_event_size() accounts for). The caller
 * must have sized @event with that function. Always returns 0.
 */
int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format,
				  const struct perf_sample *sample)
{
	__u64 *array;
	size_t sz;
	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		*array = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		/* pack pid/tid into the two u32 halves of one u64 */
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		u.val32[1] = 0;
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			*array = sample->read.group.nr;
		else
			*array = sample->read.one.value;
		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			*array = sample->read.time_enabled;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			*array = sample->read.time_running;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			memcpy(array, sample->read.group.values, sz);
			array = (void *)array + sz;
		} else {
			*array = sample->read.one.id;
			array++;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		/* callchain starts with its own nr word, hence the +1 */
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		memcpy(array, sample->callchain, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		/* u32 size prefix; payload follows unaligned to u64 */
		u.val32[0] = sample->raw_size;
		*array = u.val64;
		array = (void *)array + sizeof(u32);

		memcpy(array, sample->raw_data, sample->raw_size);
		array = (void *)array + sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		memcpy(array, sample->branch_stack, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			*array++ = sample->user_regs.abi;
			sz = hweight64(sample->user_regs.mask) * sizeof(u64);
			memcpy(array, sample->user_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			/* abi 0 == no registers captured */
			*array++ = 0;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		*array++ = sz;
		if (sz) {
			memcpy(array, sample->user_stack.data, sz);
			array = (void *)array + sz;
			/* trailing dyn_size word */
			*array++ = sz;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		*array = sample->weight;
		array++;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		*array = sample->data_src;
		array++;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		*array = sample->transaction;
		array++;
	}

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			*array++ = sample->intr_regs.abi;
			sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
			memcpy(array, sample->intr_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	if (type & PERF_SAMPLE_PHYS_ADDR) {
		*array = sample->phys_addr;
		array++;
	}

	return 0;
}

/*
 * Synthesize PERF_RECORD_ID_INDEX event(s) mapping each sample id of
 * @evlist to its (idx, cpu, tid). The record payload is capped by the u16
 * header size, so the index is flushed to @process in chunks of at most
 * max_nr entries. Returns 0 on success or a negative error.
 */
int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
				    struct evlist *evlist, struct machine *machine)
{
	union perf_event *ev;
	struct evsel *evsel;
	size_t nr = 0, i = 0, sz, max_nr, n;
	int err;

	pr_debug2("Synthesizing id index\n");

	max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) /
		 sizeof(struct id_index_entry);

	evlist__for_each_entry(evlist, evsel)
		nr += evsel->core.ids;

	n = nr > max_nr ? max_nr : nr;
	sz = sizeof(struct perf_record_id_index) + n * sizeof(struct id_index_entry);
	ev = zalloc(sz);
	if (!ev)
		return -ENOMEM;

	ev->id_index.header.type = PERF_RECORD_ID_INDEX;
	ev->id_index.header.size = sz;
	ev->id_index.nr = n;

	evlist__for_each_entry(evlist, evsel) {
		u32 j;

		for (j = 0; j < evsel->core.ids; j++) {
			struct id_index_entry *e;
			struct perf_sample_id *sid;

			/* buffer full: flush this chunk and start refilling */
			if (i >= n) {
				err = process(tool, ev, NULL, machine);
				if (err)
					goto out_err;
				nr -= n;
				i = 0;
			}

			e = &ev->id_index.entries[i++];

			e->id = evsel->core.id[j];

			sid = perf_evlist__id2sid(evlist, e->id);
			if (!sid) {
				free(ev);
				return -ENOENT;
			}

			e->idx = sid->idx;
			e->cpu = sid->cpu;
			e->tid = sid->tid;
		}
	}

	/* final (possibly partial) chunk: shrink header to remaining nr */
	sz = sizeof(struct perf_record_id_index) + nr * sizeof(struct id_index_entry);
	ev->id_index.header.size = sz;
	ev->id_index.nr = nr;

	err = process(tool, ev, NULL, machine);
out_err:
	free(ev);

	return err;
}

/*
 * Synthesize thread events for @machine according to @target: by explicit
 * thread map when tasks were requested, system-wide /proc scan when cpus
 * were requested, nothing when a command is to be launched.
 */
int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct perf_thread_map *threads,
				  perf_event__handler_t process, bool data_mmap,
				  unsigned int nr_threads_synthesize)
{
	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
	else if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process,
						      machine, data_mmap,
						      nr_threads_synthesize);
	/* command specified */
	return 0;
}

/* Convenience wrapper using the default perf_event__process handler. */
int machine__synthesize_threads(struct machine *machine, struct target *target,
				struct perf_thread_map *threads, bool data_mmap,
				unsigned int nr_threads_synthesize)
{
	return __machine__synthesize_threads(machine, NULL, target, threads,
					     perf_event__process, data_mmap,
					     nr_threads_synthesize);
}

/*
 * Allocate a zeroed PERF_RECORD_EVENT_UPDATE of sub-@type for event @id
 * with @size bytes of payload (u64-aligned). Returns NULL on OOM.
 */
static struct perf_record_event_update *event_update_event__new(size_t size, u64 type, u64 id)
{
	struct perf_record_event_update *ev;

	size += sizeof(*ev);
	size  = PERF_ALIGN(size, sizeof(u64));

	ev = zalloc(size);
	if (ev) {
		ev->header.type = PERF_RECORD_EVENT_UPDATE;
		ev->header.size = (u16)size;
		ev->type = type;
		ev->id = id;
	}
	return ev;
}

/*
 * Synthesize an EVENT_UPDATE__UNIT record carrying @evsel's unit string.
 * Returns the handler's result, or -ENOMEM.
 */
int perf_event__synthesize_event_update_unit(struct perf_tool *tool, struct evsel *evsel,
					     perf_event__handler_t process)
{
	size_t size = strlen(evsel->unit);
	struct perf_record_event_update *ev;
	int err;

	ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->core.id[0]);
	if (ev == NULL)
		return -ENOMEM;

	strlcpy(ev->data, evsel->unit, size + 1);
	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}

/*
 * Synthesize an EVENT_UPDATE__SCALE record carrying @evsel's scale factor.
 * Returns the handler's result, or -ENOMEM.
 */
int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evsel *evsel,
					      perf_event__handler_t process)
{
	struct perf_record_event_update *ev;
	struct perf_record_event_update_scale *ev_data;
	int err;

	ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->core.id[0]);
	if (ev == NULL)
		return -ENOMEM;

	ev_data = (struct perf_record_event_update_scale *)ev->data;
	ev_data->scale = evsel->scale;
	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}

/*
 * Synthesize an EVENT_UPDATE__NAME record carrying @evsel's name.
 * Returns the handler's result, or -ENOMEM.
 */
int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evsel *evsel,
					     perf_event__handler_t process)
{
	struct perf_record_event_update *ev;
	size_t len = strlen(evsel->name);
	int err;

	ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->core.id[0]);
	if (ev == NULL)
		return -ENOMEM;

	strlcpy(ev->data, evsel->name, len + 1);
	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}

/*
 * Synthesize an EVENT_UPDATE__CPUS record carrying @evsel's own cpu map.
 * NOTE(review): the remainder of this function lies beyond the visible
 * chunk; only the visible portion is documented here.
 */
int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evsel *evsel,
					     perf_event__handler_t process)
{
	size_t size = sizeof(struct perf_record_event_update);
	struct perf_record_event_update *ev;
	int max, err;
	u16 type;

	if (!evsel->core.own_cpus)
		return 0;

	ev = cpu_map_data__alloc(evsel->core.own_cpus, &size, &type, &max);
	if (!ev)
		return -ENOMEM;

	ev->header.type = PERF_RECORD_EVENT_UPDATE;
	ev->header.size = (u16)size;
	ev->type = PERF_EVENT_UPDATE__CPUS;
	ev->id
= evsel->core.id[0]; 1582 + 1583 + cpu_map_data__synthesize((struct perf_record_cpu_map_data *)ev->data, 1584 + evsel->core.own_cpus, type, max); 1585 + 1586 + err = process(tool, (union perf_event *)ev, NULL, NULL); 1587 + free(ev); 1588 + return err; 1589 + } 1590 + 1591 + int perf_event__synthesize_attrs(struct perf_tool *tool, struct evlist *evlist, 1592 + perf_event__handler_t process) 1593 + { 1594 + struct evsel *evsel; 1595 + int err = 0; 1596 + 1597 + evlist__for_each_entry(evlist, evsel) { 1598 + err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->core.ids, 1599 + evsel->core.id, process); 1600 + if (err) { 1601 + pr_debug("failed to create perf header attribute\n"); 1602 + return err; 1603 + } 1604 + } 1605 + 1606 + return err; 1607 + } 1608 + 1609 + static bool has_unit(struct evsel *evsel) 1610 + { 1611 + return evsel->unit && *evsel->unit; 1612 + } 1613 + 1614 + static bool has_scale(struct evsel *evsel) 1615 + { 1616 + return evsel->scale != 1; 1617 + } 1618 + 1619 + int perf_event__synthesize_extra_attr(struct perf_tool *tool, struct evlist *evsel_list, 1620 + perf_event__handler_t process, bool is_pipe) 1621 + { 1622 + struct evsel *evsel; 1623 + int err; 1624 + 1625 + /* 1626 + * Synthesize other events stuff not carried within 1627 + * attr event - unit, scale, name 1628 + */ 1629 + evlist__for_each_entry(evsel_list, evsel) { 1630 + if (!evsel->supported) 1631 + continue; 1632 + 1633 + /* 1634 + * Synthesize unit and scale only if it's defined. 
1635 + */ 1636 + if (has_unit(evsel)) { 1637 + err = perf_event__synthesize_event_update_unit(tool, evsel, process); 1638 + if (err < 0) { 1639 + pr_err("Couldn't synthesize evsel unit.\n"); 1640 + return err; 1641 + } 1642 + } 1643 + 1644 + if (has_scale(evsel)) { 1645 + err = perf_event__synthesize_event_update_scale(tool, evsel, process); 1646 + if (err < 0) { 1647 + pr_err("Couldn't synthesize evsel evsel.\n"); 1648 + return err; 1649 + } 1650 + } 1651 + 1652 + if (evsel->core.own_cpus) { 1653 + err = perf_event__synthesize_event_update_cpus(tool, evsel, process); 1654 + if (err < 0) { 1655 + pr_err("Couldn't synthesize evsel cpus.\n"); 1656 + return err; 1657 + } 1658 + } 1659 + 1660 + /* 1661 + * Name is needed only for pipe output, 1662 + * perf.data carries event names. 1663 + */ 1664 + if (is_pipe) { 1665 + err = perf_event__synthesize_event_update_name(tool, evsel, process); 1666 + if (err < 0) { 1667 + pr_err("Couldn't synthesize evsel name.\n"); 1668 + return err; 1669 + } 1670 + } 1671 + } 1672 + return 0; 1673 + } 1674 + 1675 + int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *attr, 1676 + u32 ids, u64 *id, perf_event__handler_t process) 1677 + { 1678 + union perf_event *ev; 1679 + size_t size; 1680 + int err; 1681 + 1682 + size = sizeof(struct perf_event_attr); 1683 + size = PERF_ALIGN(size, sizeof(u64)); 1684 + size += sizeof(struct perf_event_header); 1685 + size += ids * sizeof(u64); 1686 + 1687 + ev = zalloc(size); 1688 + 1689 + if (ev == NULL) 1690 + return -ENOMEM; 1691 + 1692 + ev->attr.attr = *attr; 1693 + memcpy(ev->attr.id, id, ids * sizeof(u64)); 1694 + 1695 + ev->attr.header.type = PERF_RECORD_HEADER_ATTR; 1696 + ev->attr.header.size = (u16)size; 1697 + 1698 + if (ev->attr.header.size == size) 1699 + err = process(tool, ev, NULL, NULL); 1700 + else 1701 + err = -E2BIG; 1702 + 1703 + free(ev); 1704 + 1705 + return err; 1706 + } 1707 + 1708 + int perf_event__synthesize_tracing_data(struct perf_tool *tool, int 
fd, struct evlist *evlist, 1709 + perf_event__handler_t process) 1710 + { 1711 + union perf_event ev; 1712 + struct tracing_data *tdata; 1713 + ssize_t size = 0, aligned_size = 0, padding; 1714 + struct feat_fd ff; 1715 + 1716 + /* 1717 + * We are going to store the size of the data followed 1718 + * by the data contents. Since the fd descriptor is a pipe, 1719 + * we cannot seek back to store the size of the data once 1720 + * we know it. Instead we: 1721 + * 1722 + * - write the tracing data to the temp file 1723 + * - get/write the data size to pipe 1724 + * - write the tracing data from the temp file 1725 + * to the pipe 1726 + */ 1727 + tdata = tracing_data_get(&evlist->core.entries, fd, true); 1728 + if (!tdata) 1729 + return -1; 1730 + 1731 + memset(&ev, 0, sizeof(ev)); 1732 + 1733 + ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA; 1734 + size = tdata->size; 1735 + aligned_size = PERF_ALIGN(size, sizeof(u64)); 1736 + padding = aligned_size - size; 1737 + ev.tracing_data.header.size = sizeof(ev.tracing_data); 1738 + ev.tracing_data.size = aligned_size; 1739 + 1740 + process(tool, &ev, NULL, NULL); 1741 + 1742 + /* 1743 + * The put function will copy all the tracing data 1744 + * stored in temp file to the pipe. 
1745 + */ 1746 + tracing_data_put(tdata); 1747 + 1748 + ff = (struct feat_fd){ .fd = fd }; 1749 + if (write_padded(&ff, NULL, 0, padding)) 1750 + return -1; 1751 + 1752 + return aligned_size; 1753 + } 1754 + 1755 + int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 misc, 1756 + perf_event__handler_t process, struct machine *machine) 1757 + { 1758 + union perf_event ev; 1759 + size_t len; 1760 + 1761 + if (!pos->hit) 1762 + return 0; 1763 + 1764 + memset(&ev, 0, sizeof(ev)); 1765 + 1766 + len = pos->long_name_len + 1; 1767 + len = PERF_ALIGN(len, NAME_ALIGN); 1768 + memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id)); 1769 + ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID; 1770 + ev.build_id.header.misc = misc; 1771 + ev.build_id.pid = machine->pid; 1772 + ev.build_id.header.size = sizeof(ev.build_id) + len; 1773 + memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len); 1774 + 1775 + return process(tool, &ev, NULL, machine); 1776 + } 1777 + 1778 + int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct perf_tool *tool, 1779 + struct evlist *evlist, perf_event__handler_t process, bool attrs) 1780 + { 1781 + int err; 1782 + 1783 + if (attrs) { 1784 + err = perf_event__synthesize_attrs(tool, evlist, process); 1785 + if (err < 0) { 1786 + pr_err("Couldn't synthesize attrs.\n"); 1787 + return err; 1788 + } 1789 + } 1790 + 1791 + err = perf_event__synthesize_extra_attr(tool, evlist, process, attrs); 1792 + err = perf_event__synthesize_thread_map2(tool, evlist->core.threads, process, NULL); 1793 + if (err < 0) { 1794 + pr_err("Couldn't synthesize thread map.\n"); 1795 + return err; 1796 + } 1797 + 1798 + err = perf_event__synthesize_cpu_map(tool, evlist->core.cpus, process, NULL); 1799 + if (err < 0) { 1800 + pr_err("Couldn't synthesize thread map.\n"); 1801 + return err; 1802 + } 1803 + 1804 + err = perf_event__synthesize_stat_config(tool, config, process, NULL); 1805 + if (err < 0) { 
1806 + pr_err("Couldn't synthesize config.\n"); 1807 + return err; 1808 + } 1809 + 1810 + return 0; 1811 + } 1812 + 1813 + int __weak perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused, 1814 + struct perf_tool *tool __maybe_unused, 1815 + perf_event__handler_t process __maybe_unused, 1816 + struct machine *machine __maybe_unused) 1817 + { 1818 + return 0; 1819 + } 1820 + 1821 + extern const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE]; 1822 + 1823 + int perf_event__synthesize_features(struct perf_tool *tool, struct perf_session *session, 1824 + struct evlist *evlist, perf_event__handler_t process) 1825 + { 1826 + struct perf_header *header = &session->header; 1827 + struct perf_record_header_feature *fe; 1828 + struct feat_fd ff; 1829 + size_t sz, sz_hdr; 1830 + int feat, ret; 1831 + 1832 + sz_hdr = sizeof(fe->header); 1833 + sz = sizeof(union perf_event); 1834 + /* get a nice alignment */ 1835 + sz = PERF_ALIGN(sz, page_size); 1836 + 1837 + memset(&ff, 0, sizeof(ff)); 1838 + 1839 + ff.buf = malloc(sz); 1840 + if (!ff.buf) 1841 + return -ENOMEM; 1842 + 1843 + ff.size = sz - sz_hdr; 1844 + ff.ph = &session->header; 1845 + 1846 + for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) { 1847 + if (!feat_ops[feat].synthesize) { 1848 + pr_debug("No record header feature for header :%d\n", feat); 1849 + continue; 1850 + } 1851 + 1852 + ff.offset = sizeof(*fe); 1853 + 1854 + ret = feat_ops[feat].write(&ff, evlist); 1855 + if (ret || ff.offset <= (ssize_t)sizeof(*fe)) { 1856 + pr_debug("Error writing feature\n"); 1857 + continue; 1858 + } 1859 + /* ff.buf may have changed due to realloc in do_write() */ 1860 + fe = ff.buf; 1861 + memset(fe, 0, sizeof(*fe)); 1862 + 1863 + fe->feat_id = feat; 1864 + fe->header.type = PERF_RECORD_HEADER_FEATURE; 1865 + fe->header.size = ff.offset; 1866 + 1867 + ret = process(tool, ff.buf, NULL, NULL); 1868 + if (ret) { 1869 + free(ff.buf); 1870 + return ret; 1871 + } 1872 + } 1873 + 1874 
+ /* Send HEADER_LAST_FEATURE mark. */ 1875 + fe = ff.buf; 1876 + fe->feat_id = HEADER_LAST_FEATURE; 1877 + fe->header.type = PERF_RECORD_HEADER_FEATURE; 1878 + fe->header.size = sizeof(*fe); 1879 + 1880 + ret = process(tool, ff.buf, NULL, NULL); 1881 + 1882 + free(ff.buf); 1883 + return ret; 1884 + }
+103
tools/perf/util/synthetic-events.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __PERF_SYNTHETIC_EVENTS_H 3 + #define __PERF_SYNTHETIC_EVENTS_H 4 + 5 + #include <stdbool.h> 6 + #include <sys/types.h> // pid_t 7 + #include <linux/compiler.h> 8 + #include <linux/types.h> 9 + 10 + struct auxtrace_record; 11 + struct dso; 12 + struct evlist; 13 + struct evsel; 14 + struct machine; 15 + struct perf_counts_values; 16 + struct perf_cpu_map; 17 + struct perf_event_attr; 18 + struct perf_event_mmap_page; 19 + struct perf_sample; 20 + struct perf_session; 21 + struct perf_stat_config; 22 + struct perf_thread_map; 23 + struct perf_tool; 24 + struct record_opts; 25 + struct target; 26 + 27 + union perf_event; 28 + 29 + typedef int (*perf_event__handler_t)(struct perf_tool *tool, union perf_event *event, 30 + struct perf_sample *sample, struct machine *machine); 31 + 32 + int perf_event__synthesize_attrs(struct perf_tool *tool, struct evlist *evlist, perf_event__handler_t process); 33 + int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *attr, u32 ids, u64 *id, perf_event__handler_t process); 34 + int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 misc, perf_event__handler_t process, struct machine *machine); 35 + int perf_event__synthesize_cpu_map(struct perf_tool *tool, struct perf_cpu_map *cpus, perf_event__handler_t process, struct machine *machine); 36 + int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evsel *evsel, perf_event__handler_t process); 37 + int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evsel *evsel, perf_event__handler_t process); 38 + int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evsel *evsel, perf_event__handler_t process); 39 + int perf_event__synthesize_event_update_unit(struct perf_tool *tool, struct evsel *evsel, perf_event__handler_t process); 40 + int perf_event__synthesize_extra_attr(struct perf_tool *tool, struct evlist 
*evsel_list, perf_event__handler_t process, bool is_pipe); 41 + int perf_event__synthesize_extra_kmaps(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine); 42 + int perf_event__synthesize_features(struct perf_tool *tool, struct perf_session *session, struct evlist *evlist, perf_event__handler_t process); 43 + int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process, struct evlist *evlist, struct machine *machine); 44 + int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine); 45 + int perf_event__synthesize_mmap_events(struct perf_tool *tool, union perf_event *event, pid_t pid, pid_t tgid, perf_event__handler_t process, struct machine *machine, bool mmap_data); 46 + int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine); 47 + int perf_event__synthesize_namespaces(struct perf_tool *tool, union perf_event *event, pid_t pid, pid_t tgid, perf_event__handler_t process, struct machine *machine); 48 + int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format, const struct perf_sample *sample); 49 + int perf_event__synthesize_stat_config(struct perf_tool *tool, struct perf_stat_config *config, perf_event__handler_t process, struct machine *machine); 50 + int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct perf_tool *tool, struct evlist *evlist, perf_event__handler_t process, bool attrs); 51 + int perf_event__synthesize_stat_round(struct perf_tool *tool, u64 time, u64 type, perf_event__handler_t process, struct machine *machine); 52 + int perf_event__synthesize_stat(struct perf_tool *tool, u32 cpu, u32 thread, u64 id, struct perf_counts_values *count, perf_event__handler_t process, struct machine *machine); 53 + int perf_event__synthesize_thread_map2(struct perf_tool *tool, struct perf_thread_map *threads, perf_event__handler_t process, 
struct machine *machine); 54 + int perf_event__synthesize_thread_map(struct perf_tool *tool, struct perf_thread_map *threads, perf_event__handler_t process, struct machine *machine, bool mmap_data); 55 + int perf_event__synthesize_threads(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine, bool mmap_data, unsigned int nr_threads_synthesize); 56 + int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct evlist *evlist, perf_event__handler_t process); 57 + int perf_event__synth_time_conv(const struct perf_event_mmap_page *pc, struct perf_tool *tool, perf_event__handler_t process, struct machine *machine); 58 + pid_t perf_event__synthesize_comm(struct perf_tool *tool, union perf_event *event, pid_t pid, perf_event__handler_t process, struct machine *machine); 59 + 60 + int perf_tool__process_synth_event(struct perf_tool *tool, union perf_event *event, struct machine *machine, perf_event__handler_t process); 61 + 62 + size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, u64 read_format); 63 + 64 + int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool, 65 + struct target *target, struct perf_thread_map *threads, 66 + perf_event__handler_t process, bool data_mmap, 67 + unsigned int nr_threads_synthesize); 68 + int machine__synthesize_threads(struct machine *machine, struct target *target, 69 + struct perf_thread_map *threads, bool data_mmap, 70 + unsigned int nr_threads_synthesize); 71 + 72 + #ifdef HAVE_AUXTRACE_SUPPORT 73 + int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr, struct perf_tool *tool, 74 + struct perf_session *session, perf_event__handler_t process); 75 + 76 + #else // HAVE_AUXTRACE_SUPPORT 77 + 78 + #include <errno.h> 79 + 80 + static inline int 81 + perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr __maybe_unused, 82 + struct perf_tool *tool __maybe_unused, 83 + struct perf_session *session __maybe_unused, 84 + 
perf_event__handler_t process __maybe_unused) 85 + { 86 + return -EINVAL; 87 + } 88 + #endif // HAVE_AUXTRACE_SUPPORT 89 + 90 + #ifdef HAVE_LIBBPF_SUPPORT 91 + int perf_event__synthesize_bpf_events(struct perf_session *session, perf_event__handler_t process, 92 + struct machine *machine, struct record_opts *opts); 93 + #else // HAVE_LIBBPF_SUPPORT 94 + static inline int perf_event__synthesize_bpf_events(struct perf_session *session __maybe_unused, 95 + perf_event__handler_t process __maybe_unused, 96 + struct machine *machine __maybe_unused, 97 + struct record_opts *opts __maybe_unused) 98 + { 99 + return 0; 100 + } 101 + #endif // HAVE_LIBBPF_SUPPORT 102 + 103 + #endif // __PERF_SYNTHETIC_EVENTS_H
-2
tools/perf/util/target.c
··· 6 6 */ 7 7 8 8 #include "target.h" 9 - #include "util.h" 10 - #include "debug.h" 11 9 12 10 #include <pwd.h> 13 11 #include <stdio.h>
+1 -2
tools/perf/util/top.c
··· 5 5 * Refactored from builtin-top.c, see that files for further copyright notes. 6 6 */ 7 7 8 - #include "cpumap.h" 9 8 #include "event.h" 10 9 #include "evlist.h" 11 10 #include "evsel.h" ··· 71 72 } 72 73 73 74 if (top->evlist->core.nr_entries == 1) { 74 - struct evsel *first = perf_evlist__first(top->evlist); 75 + struct evsel *first = evlist__first(top->evlist); 75 76 ret += SNPRINTF(bf + ret, size - ret, "%" PRIu64 "%s ", 76 77 (uint64_t)first->core.attr.sample_period, 77 78 opts->freq ? "Hz" : "");
+1 -1
tools/perf/util/trace-event-info.c
··· 2 2 /* 3 3 * Copyright (C) 2008,2009, Steven Rostedt <srostedt@redhat.com> 4 4 */ 5 - #include "util.h" 6 5 #include <dirent.h> 7 6 #include <mntent.h> 8 7 #include <stdio.h> ··· 18 19 #include <linux/list.h> 19 20 #include <linux/kernel.h> 20 21 #include <linux/zalloc.h> 22 + #include <internal/lib.h> // page_size 21 23 22 24 #include "trace-event.h" 23 25 #include <api/fs/tracing_path.h>
-1
tools/perf/util/trace-event-read.c
··· 15 15 #include <unistd.h> 16 16 #include <errno.h> 17 17 18 - #include "util.h" 19 18 #include "trace-event.h" 20 19 #include "debug.h" 21 20
-1
tools/perf/util/trace-event.c
··· 14 14 #include <api/fs/fs.h> 15 15 #include "trace-event.h" 16 16 #include "machine.h" 17 - #include "util.h" 18 17 19 18 /* 20 19 * global trace_event object used by trace_event__tp_format
+2 -12
tools/perf/util/tsc.h
··· 4 4 5 5 #include <linux/types.h> 6 6 7 - #include "event.h" 8 - 9 7 struct perf_tsc_conversion { 10 8 u16 time_shift; 11 9 u32 time_mult; 12 10 u64 time_zero; 13 11 }; 12 + 14 13 struct perf_event_mmap_page; 15 14 16 15 int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc, ··· 19 20 u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc); 20 21 u64 rdtsc(void); 21 22 22 - struct perf_event_mmap_page; 23 - struct perf_tool; 24 - struct machine; 25 - 26 - int perf_event__synth_time_conv(const struct perf_event_mmap_page *pc, 27 - struct perf_tool *tool, 28 - perf_event__handler_t process, 29 - struct machine *machine); 30 - 31 - #endif 23 + #endif // __PERF_TSC_H
-1
tools/perf/util/unwind-libdw.c
··· 17 17 #include "event.h" 18 18 #include "perf_regs.h" 19 19 #include "callchain.h" 20 - #include "util.h" 21 20 22 21 static char *debuginfo_path; 23 22
-1
tools/perf/util/unwind-libunwind-local.c
··· 37 37 #include "unwind.h" 38 38 #include "map.h" 39 39 #include "symbol.h" 40 - #include "util.h" 41 40 #include "debug.h" 42 41 #include "asm/bug.h" 43 42 #include "dso.h"
-1
tools/perf/util/usage.c
··· 8 8 * Copyright (C) Linus Torvalds, 2005 9 9 */ 10 10 #include "util.h" 11 - #include "debug.h" 12 11 #include <stdio.h> 13 12 #include <stdlib.h> 14 13 #include <linux/compiler.h>
-136
tools/perf/util/util.c
··· 2 2 #include "util.h" 3 3 #include "debug.h" 4 4 #include "event.h" 5 - #include "namespaces.h" 6 5 #include <api/fs/fs.h> 7 - #include <sys/mman.h> 8 6 #include <sys/stat.h> 9 7 #include <sys/utsname.h> 10 8 #include <dirent.h> ··· 38 40 { 39 41 perf_singlethreaded = false; 40 42 } 41 - 42 - unsigned int page_size; 43 43 44 44 int sysctl_perf_event_max_stack = PERF_MAX_STACK_DEPTH; 45 45 int sysctl_perf_event_max_contexts_per_stack = PERF_MAX_CONTEXTS_PER_STACK; ··· 228 232 out: 229 233 closedir(dir); 230 234 return list; 231 - } 232 - 233 - static int slow_copyfile(const char *from, const char *to, struct nsinfo *nsi) 234 - { 235 - int err = -1; 236 - char *line = NULL; 237 - size_t n; 238 - FILE *from_fp, *to_fp; 239 - struct nscookie nsc; 240 - 241 - nsinfo__mountns_enter(nsi, &nsc); 242 - from_fp = fopen(from, "r"); 243 - nsinfo__mountns_exit(&nsc); 244 - if (from_fp == NULL) 245 - goto out; 246 - 247 - to_fp = fopen(to, "w"); 248 - if (to_fp == NULL) 249 - goto out_fclose_from; 250 - 251 - while (getline(&line, &n, from_fp) > 0) 252 - if (fputs(line, to_fp) == EOF) 253 - goto out_fclose_to; 254 - err = 0; 255 - out_fclose_to: 256 - fclose(to_fp); 257 - free(line); 258 - out_fclose_from: 259 - fclose(from_fp); 260 - out: 261 - return err; 262 - } 263 - 264 - int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64 size) 265 - { 266 - void *ptr; 267 - loff_t pgoff; 268 - 269 - pgoff = off_in & ~(page_size - 1); 270 - off_in -= pgoff; 271 - 272 - ptr = mmap(NULL, off_in + size, PROT_READ, MAP_PRIVATE, ifd, pgoff); 273 - if (ptr == MAP_FAILED) 274 - return -1; 275 - 276 - while (size) { 277 - ssize_t ret = pwrite(ofd, ptr + off_in, size, off_out); 278 - if (ret < 0 && errno == EINTR) 279 - continue; 280 - if (ret <= 0) 281 - break; 282 - 283 - size -= ret; 284 - off_in += ret; 285 - off_out += ret; 286 - } 287 - munmap(ptr, off_in + size); 288 - 289 - return size ? 
-1 : 0; 290 - } 291 - 292 - static int copyfile_mode_ns(const char *from, const char *to, mode_t mode, 293 - struct nsinfo *nsi) 294 - { 295 - int fromfd, tofd; 296 - struct stat st; 297 - int err; 298 - char *tmp = NULL, *ptr = NULL; 299 - struct nscookie nsc; 300 - 301 - nsinfo__mountns_enter(nsi, &nsc); 302 - err = stat(from, &st); 303 - nsinfo__mountns_exit(&nsc); 304 - if (err) 305 - goto out; 306 - err = -1; 307 - 308 - /* extra 'x' at the end is to reserve space for '.' */ 309 - if (asprintf(&tmp, "%s.XXXXXXx", to) < 0) { 310 - tmp = NULL; 311 - goto out; 312 - } 313 - ptr = strrchr(tmp, '/'); 314 - if (!ptr) 315 - goto out; 316 - ptr = memmove(ptr + 1, ptr, strlen(ptr) - 1); 317 - *ptr = '.'; 318 - 319 - tofd = mkstemp(tmp); 320 - if (tofd < 0) 321 - goto out; 322 - 323 - if (fchmod(tofd, mode)) 324 - goto out_close_to; 325 - 326 - if (st.st_size == 0) { /* /proc? do it slowly... */ 327 - err = slow_copyfile(from, tmp, nsi); 328 - goto out_close_to; 329 - } 330 - 331 - nsinfo__mountns_enter(nsi, &nsc); 332 - fromfd = open(from, O_RDONLY); 333 - nsinfo__mountns_exit(&nsc); 334 - if (fromfd < 0) 335 - goto out_close_to; 336 - 337 - err = copyfile_offset(fromfd, 0, tofd, 0, st.st_size); 338 - 339 - close(fromfd); 340 - out_close_to: 341 - close(tofd); 342 - if (!err) 343 - err = link(tmp, to); 344 - unlink(tmp); 345 - out: 346 - free(tmp); 347 - return err; 348 - } 349 - 350 - int copyfile_ns(const char *from, const char *to, struct nsinfo *nsi) 351 - { 352 - return copyfile_mode_ns(from, to, 0755, nsi); 353 - } 354 - 355 - int copyfile_mode(const char *from, const char *to, mode_t mode) 356 - { 357 - return copyfile_mode_ns(from, to, mode, NULL); 358 - } 359 - 360 - int copyfile(const char *from, const char *to) 361 - { 362 - return copyfile_mode(from, to, 0755); 363 235 } 364 236 365 237 size_t hex_width(u64 v)
-8
tools/perf/util/util.h
··· 11 11 #include <stddef.h> 12 12 #include <linux/compiler.h> 13 13 #include <sys/types.h> 14 - #include <internal/lib.h> 15 14 16 15 /* General helper functions */ 17 16 void usage(const char *err) __noreturn; 18 17 void die(const char *err, ...) __noreturn __printf(1, 2); 19 18 20 19 struct dirent; 21 - struct nsinfo; 22 20 struct strlist; 23 21 24 22 int mkdir_p(char *path, mode_t mode); ··· 24 26 int rm_rf_perf_data(const char *path); 25 27 struct strlist *lsdir(const char *name, bool (*filter)(const char *, struct dirent *)); 26 28 bool lsdir_no_dot_filter(const char *name, struct dirent *d); 27 - int copyfile(const char *from, const char *to); 28 - int copyfile_mode(const char *from, const char *to, mode_t mode); 29 - int copyfile_ns(const char *from, const char *to, struct nsinfo *nsi); 30 - int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64 size); 31 29 32 30 size_t hex_width(u64 v); 33 - 34 - extern unsigned int page_size; 35 31 36 32 int sysctl__max_stack(void); 37 33
+1 -1
tools/perf/util/vdso.c
··· 11 11 12 12 #include "vdso.h" 13 13 #include "dso.h" 14 - #include "util.h" 14 + #include <internal/lib.h> 15 15 #include "map.h" 16 16 #include "symbol.h" 17 17 #include "machine.h"
+1 -3
tools/perf/util/zlib.c
··· 7 7 #include <sys/mman.h> 8 8 #include <zlib.h> 9 9 #include <linux/compiler.h> 10 + #include <internal/lib.h> 10 11 11 12 #include "util/compress.h" 12 - #include "util/util.h" 13 - #include "util/debug.h" 14 - 15 13 16 14 #define CHUNK_SIZE 16384 17 15