Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * auxtrace.h: AUX area trace support
4 * Copyright (c) 2013-2015, Intel Corporation.
5 */
6
7#ifndef __PERF_AUXTRACE_H
8#define __PERF_AUXTRACE_H
9
10#include <sys/types.h>
11#include <errno.h>
12#include <stdbool.h>
13#include <stddef.h>
14#include <stdio.h> // FILE
15#include <linux/list.h>
16#include <linux/perf_event.h>
17#include <linux/types.h>
18#include <asm/bitsperlong.h>
19#include <asm/barrier.h>
20
21union perf_event;
22struct perf_session;
23struct evlist;
24struct evsel;
25struct perf_tool;
26struct mmap;
27struct perf_sample;
28struct option;
29struct record_opts;
30struct perf_record_auxtrace_error;
31struct perf_record_auxtrace_info;
32struct events_stats;
33struct perf_pmu;
34
/* Type field of a PERF_RECORD_AUXTRACE_ERROR event. */
enum auxtrace_error_type {
	PERF_AUXTRACE_ERROR_ITRACE  = 1,
	PERF_AUXTRACE_ERROR_MAX
};
39
40/* Auxtrace records must have the same alignment as perf event records */
41#define PERF_AUXTRACE_RECORD_ALIGNMENT 8
42
/* AUX area trace data formats, i.e. which decoder must handle the data. */
enum auxtrace_type {
	PERF_AUXTRACE_UNKNOWN,
	PERF_AUXTRACE_INTEL_PT,
	PERF_AUXTRACE_INTEL_BTS,
	PERF_AUXTRACE_CS_ETM,
	PERF_AUXTRACE_ARM_SPE,
	PERF_AUXTRACE_S390_CPUMSF,
};
51
/* Units of the synthesized 'instructions' events period (itrace 'i' option). */
enum itrace_period_type {
	PERF_ITRACE_PERIOD_INSTRUCTIONS,
	PERF_ITRACE_PERIOD_TICKS,
	PERF_ITRACE_PERIOD_NANOSECS,
};
57
/*
 * Flag bits for the itrace 'e' (errors) and 'd' (debug log) sub-options.
 * Each option letter maps to bit position (letter - 'a').
 */
#define AUXTRACE_ERR_FLG_OVERFLOW	(1 << ('o' - 'a'))
#define AUXTRACE_ERR_FLG_DATA_LOST	(1 << ('l' - 'a'))

#define AUXTRACE_LOG_FLG_ALL_PERF_EVTS	(1 << ('a' - 'a'))
62
/**
 * struct itrace_synth_opts - AUX area tracing synthesis options.
 * @set: indicates whether or not options have been set
 * @default_no_sample: Default to no sampling.
 * @inject: indicates the event (not just the sample) must be fully synthesized
 *          because 'perf inject' will write it out
 * @instructions: whether to synthesize 'instructions' events
 * @branches: whether to synthesize 'branches' events
 *            (branch misses only for Arm SPE)
 * @transactions: whether to synthesize events for transactions
 * @ptwrites: whether to synthesize events for ptwrites
 * @pwr_events: whether to synthesize power events
 * @other_events: whether to synthesize other events recorded due to the use of
 *                aux_output
 * @errors: whether to synthesize decoder error events
 * @dont_decode: whether to skip decoding entirely
 * @log: write a decoding log
 * @calls: limit branch samples to calls (can be combined with @returns)
 * @returns: limit branch samples to returns (can be combined with @calls)
 * @callchain: add callchain to 'instructions' events
 * @add_callchain: add callchain to existing event records
 * @thread_stack: feed branches to the thread_stack
 * @last_branch: add branch context to 'instruction' events
 * @add_last_branch: add branch context to existing event records
 * @flc: whether to synthesize first level cache events
 * @llc: whether to synthesize last level cache events
 * @tlb: whether to synthesize TLB events
 * @remote_access: whether to synthesize remote access events
 * @mem: whether to synthesize memory events
 * @callchain_sz: maximum callchain size
 * @last_branch_sz: branch context size
 * @period: 'instructions' events period
 * @period_type: 'instructions' events period type
 * @initial_skip: skip N events at the beginning.
 * @cpu_bitmap: CPUs for which to synthesize events, or NULL for all
 * @ptime_range: time intervals to trace or NULL
 * @range_num: number of time intervals to trace
 * @error_plus_flags: flags to affect what errors are reported
 * @error_minus_flags: flags to affect what errors are reported
 * @log_plus_flags: flags to affect what is logged
 * @log_minus_flags: flags to affect what is logged
 * @quick: quicker (less detailed) decoding
 *
 * Most of the boolean members correspond one-to-one to the --itrace option
 * letters described in ITRACE_HELP.
 */
struct itrace_synth_opts {
	bool set;
	bool default_no_sample;
	bool inject;
	bool instructions;
	bool branches;
	bool transactions;
	bool ptwrites;
	bool pwr_events;
	bool other_events;
	bool errors;
	bool dont_decode;
	bool log;
	bool calls;
	bool returns;
	bool callchain;
	bool add_callchain;
	bool thread_stack;
	bool last_branch;
	bool add_last_branch;
	bool flc;
	bool llc;
	bool tlb;
	bool remote_access;
	bool mem;
	unsigned int callchain_sz;
	unsigned int last_branch_sz;
	unsigned long long period;
	enum itrace_period_type period_type;
	unsigned long initial_skip;
	unsigned long *cpu_bitmap;
	struct perf_time_interval *ptime_range;
	int range_num;
	unsigned int error_plus_flags;
	unsigned int error_minus_flags;
	unsigned int log_plus_flags;
	unsigned int log_minus_flags;
	unsigned int quick;
};
145
/**
 * struct auxtrace_index_entry - indexes an AUX area tracing event within a
 *                               perf.data file.
 * @file_offset: offset within the perf.data file
 * @sz: size of the event
 */
struct auxtrace_index_entry {
	u64 file_offset;
	u64 sz;
};
156
157#define PERF_AUXTRACE_INDEX_ENTRY_COUNT 256
158
/**
 * struct auxtrace_index - index of AUX area tracing events within a perf.data
 *                         file.
 * @list: linking a number of arrays of entries
 * @nr: number of entries
 * @entries: array of entries (each array holds up to
 *           PERF_AUXTRACE_INDEX_ENTRY_COUNT entries)
 */
struct auxtrace_index {
	struct list_head list;
	size_t nr;
	struct auxtrace_index_entry entries[PERF_AUXTRACE_INDEX_ENTRY_COUNT];
};
171
/**
 * struct auxtrace - session callbacks to allow AUX area data decoding.
 * @process_event: lets the decoder see all session events
 * @process_auxtrace_event: process a PERF_RECORD_AUXTRACE event
 * @queue_data: queue an AUX sample or PERF_RECORD_AUXTRACE event for later
 *              processing
 * @dump_auxtrace_sample: dump AUX area sample data
 * @flush_events: process any remaining data
 * @free_events: free resources associated with event processing
 * @free: free resources associated with the session
 * @evsel_is_auxtrace: return whether the given evsel is an AUX area tracing
 *                     event
 */
struct auxtrace {
	int (*process_event)(struct perf_session *session,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct perf_tool *tool);
	int (*process_auxtrace_event)(struct perf_session *session,
				      union perf_event *event,
				      struct perf_tool *tool);
	int (*queue_data)(struct perf_session *session,
			  struct perf_sample *sample, union perf_event *event,
			  u64 data_offset);
	void (*dump_auxtrace_sample)(struct perf_session *session,
				     struct perf_sample *sample);
	int (*flush_events)(struct perf_session *session,
			    struct perf_tool *tool);
	void (*free_events)(struct perf_session *session);
	void (*free)(struct perf_session *session);
	bool (*evsel_is_auxtrace)(struct perf_session *session,
				  struct evsel *evsel);
};
203
/**
 * struct auxtrace_buffer - a buffer containing AUX area tracing data.
 * @list: buffers are queued in a list held by struct auxtrace_queue
 * @size: size of the buffer in bytes
 * @pid: in per-thread mode, the pid this buffer is associated with
 * @tid: in per-thread mode, the tid this buffer is associated with
 * @cpu: in per-cpu mode, the cpu this buffer is associated with
 * @data: actual buffer data (can be null if the data has not been loaded)
 * @data_offset: file offset at which the buffer can be read
 * @mmap_addr: mmap address at which the buffer can be read
 * @mmap_size: size of the mmap at @mmap_addr
 * @data_needs_freeing: @data was malloc'd so free it when it is no longer
 *                      needed
 * @consecutive: the original data was split up and this buffer is consecutive
 *               to the previous buffer
 * @offset: offset as determined by aux_head / aux_tail members of struct
 *          perf_event_mmap_page
 * @reference: an implementation-specific reference determined when the data is
 *             recorded
 * @buffer_nr: used to number each buffer (see also struct auxtrace_queues
 *             @next_buffer_nr)
 * @use_size: implementation actually only uses this number of bytes
 * @use_data: implementation actually only uses data starting at this address
 */
struct auxtrace_buffer {
	struct list_head list;
	size_t size;
	pid_t pid;
	pid_t tid;
	int cpu;
	void *data;
	off_t data_offset;
	void *mmap_addr;
	size_t mmap_size;
	bool data_needs_freeing;
	bool consecutive;
	u64 offset;
	u64 reference;
	u64 buffer_nr;
	size_t use_size;
	void *use_data;
};
245
/**
 * struct auxtrace_queue - a queue of AUX area tracing data buffers.
 * @head: head of buffer list (linked via struct auxtrace_buffer @list)
 * @tid: in per-thread mode, the tid this queue is associated with
 * @cpu: in per-cpu mode, the cpu this queue is associated with
 * @set: %true once this queue has been dedicated to a specific thread or cpu
 * @priv: implementation-specific data
 */
struct auxtrace_queue {
	struct list_head head;
	pid_t tid;
	int cpu;
	bool set;
	void *priv;
};
261
/**
 * struct auxtrace_queues - an array of AUX area tracing queues.
 * @queue_array: array of queues
 * @nr_queues: number of queues
 * @new_data: set whenever new data is queued
 * @populated: queues have been fully populated using the auxtrace_index
 * @next_buffer_nr: used to number each buffer
 */
struct auxtrace_queues {
	struct auxtrace_queue *queue_array;
	unsigned int nr_queues;
	bool new_data;
	bool populated;
	u64 next_buffer_nr;
};
277
/**
 * struct auxtrace_heap_item - element of struct auxtrace_heap.
 * @queue_nr: queue number
 * @ordinal: value used for sorting (lowest ordinal is top of the heap) expected
 *           to be a timestamp
 */
struct auxtrace_heap_item {
	unsigned int queue_nr;
	u64 ordinal;
};
288
/**
 * struct auxtrace_heap - a heap suitable for sorting AUX area tracing queues.
 *                        A min-heap: the lowest @ordinal is on top.
 * @heap_array: the heap
 * @heap_cnt: the number of elements in the heap
 * @heap_sz: maximum number of elements (grows as needed)
 */
struct auxtrace_heap {
	struct auxtrace_heap_item *heap_array;
	unsigned int heap_cnt;
	unsigned int heap_sz;
};
300
/**
 * struct auxtrace_mmap - records an mmap of the auxtrace buffer.
 * @base: address of mapped area
 * @userpg: pointer to buffer's perf_event_mmap_page
 * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
 * @len: size of mapped area
 * @prev: previous aux_head
 * @idx: index of this mmap
 * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
 *       mmap) otherwise %0
 * @cpu: cpu number for a per-cpu mmap otherwise %-1
 */
struct auxtrace_mmap {
	void *base;
	void *userpg;
	size_t mask;
	size_t len;
	u64 prev;
	int idx;
	pid_t tid;
	int cpu;
};
323
/**
 * struct auxtrace_mmap_params - parameters to set up struct auxtrace_mmap.
 * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
 * @offset: file offset of mapped area
 * @len: size of mapped area
 * @prot: mmap memory protection
 * @idx: index of this mmap
 * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
 *       mmap) otherwise %0
 * @cpu: cpu number for a per-cpu mmap otherwise %-1
 */
struct auxtrace_mmap_params {
	size_t mask;
	off_t offset;
	size_t len;
	int prot;
	int idx;
	pid_t tid;
	int cpu;
};
344
/**
 * struct auxtrace_record - callbacks for recording AUX area data.
 * @recording_options: validate and process recording options
 * @info_priv_size: return the size of the private data in auxtrace_info_event
 * @info_fill: fill-in the private data in auxtrace_info_event
 * @free: free this auxtrace record structure
 * @snapshot_start: starting a snapshot
 * @snapshot_finish: finishing a snapshot
 * @find_snapshot: find data to snapshot within auxtrace mmap
 * @parse_snapshot_options: parse snapshot options
 * @reference: provide a 64-bit reference number for auxtrace_event
 * @read_finish: called after reading from an auxtrace mmap
 * @alignment: alignment (if any) for AUX area data
 * @default_aux_sample_size: default sample size for --aux-sample option
 * @pmu: associated pmu
 * @evlist: selected events list
 */
struct auxtrace_record {
	int (*recording_options)(struct auxtrace_record *itr,
				 struct evlist *evlist,
				 struct record_opts *opts);
	size_t (*info_priv_size)(struct auxtrace_record *itr,
				 struct evlist *evlist);
	int (*info_fill)(struct auxtrace_record *itr,
			 struct perf_session *session,
			 struct perf_record_auxtrace_info *auxtrace_info,
			 size_t priv_size);
	void (*free)(struct auxtrace_record *itr);
	int (*snapshot_start)(struct auxtrace_record *itr);
	int (*snapshot_finish)(struct auxtrace_record *itr);
	int (*find_snapshot)(struct auxtrace_record *itr, int idx,
			     struct auxtrace_mmap *mm, unsigned char *data,
			     u64 *head, u64 *old);
	int (*parse_snapshot_options)(struct auxtrace_record *itr,
				      struct record_opts *opts,
				      const char *str);
	u64 (*reference)(struct auxtrace_record *itr);
	int (*read_finish)(struct auxtrace_record *itr, int idx);
	unsigned int alignment;
	unsigned int default_aux_sample_size;
	struct perf_pmu *pmu;
	struct evlist *evlist;
};
388
/**
 * struct addr_filter - address filter.
 * @list: list node
 * @range: true if it is a range filter
 * @start: true if action is 'filter' or 'start'
 * @action: 'filter', 'start' or 'stop' ('tracestop' is accepted but converted
 *          to 'stop')
 * @sym_from: symbol name for the filter address
 * @sym_to: symbol name that determines the filter size
 * @sym_from_idx: selects n'th from symbols with the same name (0 means global
 *                and less than 0 means symbol must be unique)
 * @sym_to_idx: same as @sym_from_idx but for @sym_to
 * @addr: filter address
 * @size: filter region size (for range filters)
 * @filename: DSO file name or NULL for the kernel
 * @str: allocated string that contains the other string members
 */
struct addr_filter {
	struct list_head list;
	bool range;
	bool start;
	const char *action;
	const char *sym_from;
	const char *sym_to;
	int sym_from_idx;
	int sym_to_idx;
	u64 addr;
	u64 size;
	const char *filename;
	char *str;
};
420
/**
 * struct addr_filters - list of address filters.
 * @head: list of address filters
 * @cnt: number of address filters
 */
struct addr_filters {
	struct list_head head;
	int cnt;
};
430
431struct auxtrace_cache;
432
433#ifdef HAVE_AUXTRACE_SUPPORT
434
/*
 * In snapshot mode the mmapped page is read-only which makes using
 * __sync_val_compare_and_swap() problematic. However, snapshot mode expects
 * the buffer is not updated while the snapshot is made (e.g. Intel PT disables
 * the event) so there is not a race anyway.
 */
static inline u64 auxtrace_mmap__read_snapshot_head(struct auxtrace_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->userpg;
	/* Plain read is sufficient in snapshot mode - see comment above */
	u64 head = READ_ONCE(pc->aux_head);

	/* Ensure all reads are done after we read the head */
	rmb();
	return head;
}
450
/* Read the producer position (aux_head) published by the kernel. */
static inline u64 auxtrace_mmap__read_head(struct auxtrace_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->userpg;
#if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	u64 head = READ_ONCE(pc->aux_head);
#else
	/*
	 * 64-bit value on a 32-bit arch: a compare-and-swap with (0, 0)
	 * yields an atomic read without changing the value.
	 */
	u64 head = __sync_val_compare_and_swap(&pc->aux_head, 0, 0);
#endif

	/* Ensure all reads are done after we read the head */
	rmb();
	return head;
}
464
/* Publish the consumer position (aux_tail) back to the kernel. */
static inline void auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail)
{
	struct perf_event_mmap_page *pc = mm->userpg;
#if BITS_PER_LONG != 64 && defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	u64 old_tail;
#endif

	/* Ensure all reads are done before we write the tail out */
	mb();
#if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	pc->aux_tail = tail;
#else
	/*
	 * 64-bit store on a 32-bit arch: loop with compare-and-swap so the
	 * new tail is stored atomically (a plain store could tear).
	 */
	do {
		old_tail = __sync_val_compare_and_swap(&pc->aux_tail, 0, 0);
	} while (!__sync_bool_compare_and_swap(&pc->aux_tail, old_tail, tail));
#endif
}
482
483int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
484 struct auxtrace_mmap_params *mp,
485 void *userpg, int fd);
486void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
487void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
488 off_t auxtrace_offset,
489 unsigned int auxtrace_pages,
490 bool auxtrace_overwrite);
491void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
492 struct evlist *evlist, int idx,
493 bool per_cpu);
494
495typedef int (*process_auxtrace_t)(struct perf_tool *tool,
496 struct mmap *map,
497 union perf_event *event, void *data1,
498 size_t len1, void *data2, size_t len2);
499
500int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
501 struct perf_tool *tool, process_auxtrace_t fn);
502
503int auxtrace_mmap__read_snapshot(struct mmap *map,
504 struct auxtrace_record *itr,
505 struct perf_tool *tool, process_auxtrace_t fn,
506 size_t snapshot_size);
507
508int auxtrace_queues__init(struct auxtrace_queues *queues);
509int auxtrace_queues__add_event(struct auxtrace_queues *queues,
510 struct perf_session *session,
511 union perf_event *event, off_t data_offset,
512 struct auxtrace_buffer **buffer_ptr);
513struct auxtrace_queue *
514auxtrace_queues__sample_queue(struct auxtrace_queues *queues,
515 struct perf_sample *sample,
516 struct perf_session *session);
517int auxtrace_queues__add_sample(struct auxtrace_queues *queues,
518 struct perf_session *session,
519 struct perf_sample *sample, u64 data_offset,
520 u64 reference);
521void auxtrace_queues__free(struct auxtrace_queues *queues);
522int auxtrace_queues__process_index(struct auxtrace_queues *queues,
523 struct perf_session *session);
524int auxtrace_queue_data(struct perf_session *session, bool samples,
525 bool events);
526struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
527 struct auxtrace_buffer *buffer);
528void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd);
529void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer);
530void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer);
531void auxtrace_buffer__free(struct auxtrace_buffer *buffer);
532
533int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
534 u64 ordinal);
535void auxtrace_heap__pop(struct auxtrace_heap *heap);
536void auxtrace_heap__free(struct auxtrace_heap *heap);
537
/**
 * struct auxtrace_cache_entry - entry in a struct auxtrace_cache.
 * @hash: hash list node (see auxtrace_cache__add() / auxtrace_cache__lookup())
 * @key: key identifying this entry
 */
struct auxtrace_cache_entry {
	struct hlist_node hash;
	u32 key;
};
542
543struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
544 unsigned int limit_percent);
545void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache);
546void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c);
547void auxtrace_cache__free_entry(struct auxtrace_cache *c, void *entry);
548int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
549 struct auxtrace_cache_entry *entry);
550void auxtrace_cache__remove(struct auxtrace_cache *c, u32 key);
551void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key);
552
553struct auxtrace_record *auxtrace_record__init(struct evlist *evlist,
554 int *err);
555
556int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
557 struct record_opts *opts,
558 const char *str);
559int auxtrace_parse_sample_options(struct auxtrace_record *itr,
560 struct evlist *evlist,
561 struct record_opts *opts, const char *str);
562void auxtrace_regroup_aux_output(struct evlist *evlist);
563int auxtrace_record__options(struct auxtrace_record *itr,
564 struct evlist *evlist,
565 struct record_opts *opts);
566size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
567 struct evlist *evlist);
568int auxtrace_record__info_fill(struct auxtrace_record *itr,
569 struct perf_session *session,
570 struct perf_record_auxtrace_info *auxtrace_info,
571 size_t priv_size);
572void auxtrace_record__free(struct auxtrace_record *itr);
573int auxtrace_record__snapshot_start(struct auxtrace_record *itr);
574int auxtrace_record__snapshot_finish(struct auxtrace_record *itr, bool on_exit);
575int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
576 struct auxtrace_mmap *mm,
577 unsigned char *data, u64 *head, u64 *old);
578u64 auxtrace_record__reference(struct auxtrace_record *itr);
579int auxtrace_record__read_finish(struct auxtrace_record *itr, int idx);
580
581int auxtrace_index__auxtrace_event(struct list_head *head, union perf_event *event,
582 off_t file_offset);
583int auxtrace_index__write(int fd, struct list_head *head);
584int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
585 bool needs_swap);
586void auxtrace_index__free(struct list_head *head);
587
588void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
589 int code, int cpu, pid_t pid, pid_t tid, u64 ip,
590 const char *msg, u64 timestamp);
591
592int perf_event__process_auxtrace_info(struct perf_session *session,
593 union perf_event *event);
594s64 perf_event__process_auxtrace(struct perf_session *session,
595 union perf_event *event);
596int perf_event__process_auxtrace_error(struct perf_session *session,
597 union perf_event *event);
598int itrace_parse_synth_opts(const struct option *opt, const char *str,
599 int unset);
600void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
601 bool no_sample);
602
603size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp);
604void perf_session__auxtrace_error_inc(struct perf_session *session,
605 union perf_event *event);
606void events_stats__auxtrace_error_warn(const struct events_stats *stats);
607
608void addr_filters__init(struct addr_filters *filts);
609void addr_filters__exit(struct addr_filters *filts);
610int addr_filters__parse_bare_filter(struct addr_filters *filts,
611 const char *filter);
612int auxtrace_parse_filters(struct evlist *evlist);
613
614int auxtrace__process_event(struct perf_session *session, union perf_event *event,
615 struct perf_sample *sample, struct perf_tool *tool);
616void auxtrace__dump_auxtrace_sample(struct perf_session *session,
617 struct perf_sample *sample);
618int auxtrace__flush_events(struct perf_session *session, struct perf_tool *tool);
619void auxtrace__free_events(struct perf_session *session);
620void auxtrace__free(struct perf_session *session);
621bool auxtrace__evsel_is_auxtrace(struct perf_session *session,
622 struct evsel *evsel);
623
624#define ITRACE_HELP \
625" i[period]: synthesize instructions events\n" \
626" b: synthesize branches events (branch misses for Arm SPE)\n" \
627" c: synthesize branches events (calls only)\n" \
628" r: synthesize branches events (returns only)\n" \
629" x: synthesize transactions events\n" \
630" w: synthesize ptwrite events\n" \
631" p: synthesize power events\n" \
632" o: synthesize other events recorded due to the use\n" \
633" of aux-output (refer to perf record)\n" \
634" e[flags]: synthesize error events\n" \
635" each flag must be preceded by + or -\n" \
636" error flags are: o (overflow)\n" \
637" l (data lost)\n" \
638" d[flags]: create a debug log\n" \
639" each flag must be preceded by + or -\n" \
640" log flags are: a (all perf events)\n" \
641" f: synthesize first level cache events\n" \
642" m: synthesize last level cache events\n" \
643" t: synthesize TLB events\n" \
644" a: synthesize remote access events\n" \
645" g[len]: synthesize a call chain (use with i or x)\n" \
646" G[len]: synthesize a call chain on existing event records\n" \
647" l[len]: synthesize last branch entries (use with i or x)\n" \
648" L[len]: synthesize last branch entries on existing event records\n" \
649" sNUMBER: skip initial number of events\n" \
650" q: quicker (less detailed) decoding\n" \
651" PERIOD[ns|us|ms|i|t]: specify period to sample stream\n" \
652" concatenate multiple options. Default is ibxwpe or cewp\n"
653
654static inline
655void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts,
656 struct perf_time_interval *ptime_range,
657 int range_num)
658{
659 opts->ptime_range = ptime_range;
660 opts->range_num = range_num;
661}
662
663static inline
664void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts)
665{
666 opts->ptime_range = NULL;
667 opts->range_num = 0;
668}
669
670#else
671#include "debug.h"
672
/* Stub: AUX area tracing compiled out - no recorder, but not an error. */
static inline struct auxtrace_record *
auxtrace_record__init(struct evlist *evlist __maybe_unused,
		      int *err)
{
	*err = 0;
	return NULL;
}
680
/* Stub: nothing to free without AUX area tracing support. */
static inline
void auxtrace_record__free(struct auxtrace_record *itr __maybe_unused)
{
}
685
/* Stub: no AUX area recording options to process; report success. */
static inline
int auxtrace_record__options(struct auxtrace_record *itr __maybe_unused,
			     struct evlist *evlist __maybe_unused,
			     struct record_opts *opts __maybe_unused)
{
	return 0;
}
693
694#define perf_event__process_auxtrace_info 0
695#define perf_event__process_auxtrace 0
696#define perf_event__process_auxtrace_error 0
697
/* Stub: no auxtrace error statistics to update. */
static inline
void perf_session__auxtrace_error_inc(struct perf_session *session
				      __maybe_unused,
				      union perf_event *event
				      __maybe_unused)
{
}
705
/* Stub: no auxtrace errors to warn about. */
static inline
void events_stats__auxtrace_error_warn(const struct events_stats *stats
				       __maybe_unused)
{
}
711
/* Stub: --itrace cannot be used without AUX area tracing support. */
static inline
int itrace_parse_synth_opts(const struct option *opt __maybe_unused,
			    const char *str __maybe_unused,
			    int unset __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}
720
/* Stub: reject snapshot options unless none were given. */
static inline
int auxtrace_parse_snapshot_options(struct auxtrace_record *itr __maybe_unused,
				    struct record_opts *opts __maybe_unused,
				    const char *str)
{
	if (!str)
		return 0;
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}
731
/* Stub: reject AUX sample options unless none were given. */
static inline
int auxtrace_parse_sample_options(struct auxtrace_record *itr __maybe_unused,
				  struct evlist *evlist __maybe_unused,
				  struct record_opts *opts __maybe_unused,
				  const char *str)
{
	if (!str)
		return 0;
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}
743
/* Stub: no aux_output events to regroup. */
static inline
void auxtrace_regroup_aux_output(struct evlist *evlist __maybe_unused)
{
}
748
/* Stub: no decoder to feed; report success. */
static inline
int auxtrace__process_event(struct perf_session *session __maybe_unused,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample __maybe_unused,
			    struct perf_tool *tool __maybe_unused)
{
	return 0;
}
757
/* Stub: no AUX sample data to dump. */
static inline
void auxtrace__dump_auxtrace_sample(struct perf_session *session __maybe_unused,
				    struct perf_sample *sample __maybe_unused)
{
}
763
/* Stub: nothing to flush; report success. */
static inline
int auxtrace__flush_events(struct perf_session *session __maybe_unused,
			   struct perf_tool *tool __maybe_unused)
{
	return 0;
}
770
/* Stub: no event resources to free. */
static inline
void auxtrace__free_events(struct perf_session *session __maybe_unused)
{
}
775
/* Stub: auxtrace caches are never created without auxtrace support. */
static inline
void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache __maybe_unused)
{
}
780
/* Stub: no session auxtrace resources to free. */
static inline
void auxtrace__free(struct perf_session *session __maybe_unused)
{
}
785
/* Stub: an auxtrace index cannot be written; report error. */
static inline
int auxtrace_index__write(int fd __maybe_unused,
			  struct list_head *head __maybe_unused)
{
	return -EINVAL;
}
792
/* Stub: an auxtrace index cannot be processed; report error. */
static inline
int auxtrace_index__process(int fd __maybe_unused,
			    u64 size __maybe_unused,
			    struct perf_session *session __maybe_unused,
			    bool needs_swap __maybe_unused)
{
	return -EINVAL;
}
801
/* Stub: no index entries to free. */
static inline
void auxtrace_index__free(struct list_head *head __maybe_unused)
{
}
806
/* Stub: no evsel can be an AUX area tracing event. */
static inline
bool auxtrace__evsel_is_auxtrace(struct perf_session *session __maybe_unused,
				 struct evsel *evsel __maybe_unused)
{
	return false;
}
813
/* Stub: no address filters to parse; report success. */
static inline
int auxtrace_parse_filters(struct evlist *evlist __maybe_unused)
{
	return 0;
}
819
820int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
821 struct auxtrace_mmap_params *mp,
822 void *userpg, int fd);
823void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
824void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
825 off_t auxtrace_offset,
826 unsigned int auxtrace_pages,
827 bool auxtrace_overwrite);
828void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
829 struct evlist *evlist, int idx,
830 bool per_cpu);
831
832#define ITRACE_HELP ""
833
/* Stub: time ranges are ignored without AUX area tracing support. */
static inline
void itrace_synth_opts__set_time_range(struct itrace_synth_opts *opts
				       __maybe_unused,
				       struct perf_time_interval *ptime_range
				       __maybe_unused,
				       int range_num __maybe_unused)
{
}
842
/* Stub: nothing to clear. */
static inline
void itrace_synth_opts__clear_time_range(struct itrace_synth_opts *opts
					 __maybe_unused)
{
}
848
849#endif
850
851#endif