Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

libperf: Move 'idx' from tools/perf to perf_evsel::idx

Move evsel::idx to perf_evsel::idx, so we can move the group interface
to libperf.

Committer notes:

Fixup evsel->idx usage in tools/perf/util/bpf_counter_cgroup.c, that
appeared in my local tree.

Also fixed up these:

$ find tools/perf/ -name "*.[ch]" | xargs grep 'evsel->idx'
tools/perf/ui/gtk/annotate.c: evsel->idx + i);
tools/perf/ui/gtk/annotate.c: evsel->idx);
$

These were caught by running 'make -C tools/perf build-test'.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Requested-by: Shunsuke Nakamura <nakamura.shun@fujitsu.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lore.kernel.org/lkml/20210706151704.73662-3-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

authored by

Jiri Olsa and committed by
Arnaldo Carvalho de Melo
38fe0e01 3d970601

+59 -58
+1
tools/lib/perf/evlist.c
··· 66 66 void perf_evlist__add(struct perf_evlist *evlist, 67 67 struct perf_evsel *evsel) 68 68 { 69 + evsel->idx = evlist->nr_entries; 69 70 list_add_tail(&evsel->node, &evlist->entries); 70 71 evlist->nr_entries += 1; 71 72 __perf_evlist__propagate_maps(evlist, evsel);
+4 -2
tools/lib/perf/evsel.c
··· 18 18 #include <sys/ioctl.h> 19 19 #include <sys/mman.h> 20 20 21 - void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr) 21 + void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr, 22 + int idx) 22 23 { 23 24 INIT_LIST_HEAD(&evsel->node); 24 25 evsel->attr = *attr; 26 + evsel->idx = idx; 25 27 } 26 28 27 29 struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr) ··· 31 29 struct perf_evsel *evsel = zalloc(sizeof(*evsel)); 32 30 33 31 if (evsel != NULL) 34 - perf_evsel__init(evsel, attr); 32 + perf_evsel__init(evsel, attr, 0); 35 33 36 34 return evsel; 37 35 }
+3 -1
tools/lib/perf/include/internal/evsel.h
··· 49 49 /* parse modifier helper */ 50 50 int nr_members; 51 51 bool system_wide; 52 + int idx; 52 53 }; 53 54 54 - void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr); 55 + void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr, 56 + int idx); 55 57 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads); 56 58 void perf_evsel__close_fd(struct perf_evsel *evsel); 57 59 void perf_evsel__free_fd(struct perf_evsel *evsel);
+2 -2
tools/perf/arch/x86/util/iostat.c
··· 322 322 } 323 323 324 324 evlist__for_each_entry(evl, evsel) { 325 - evsel->priv = list->rps[evsel->idx / metrics_count]; 325 + evsel->priv = list->rps[evsel->core.idx / metrics_count]; 326 326 } 327 327 list->nr_entries = 0; 328 328 err: ··· 428 428 { 429 429 double iostat_value = 0; 430 430 u64 prev_count_val = 0; 431 - const char *iostat_metric = iostat_metric_by_idx(evsel->idx); 431 + const char *iostat_metric = iostat_metric_by_idx(evsel->core.idx); 432 432 u8 die = ((struct iio_root_port *)evsel->priv)->die; 433 433 struct perf_counts_values *count = perf_counts(evsel->counts, die, 0); 434 434
+2 -2
tools/perf/builtin-diff.c
··· 1031 1031 continue; 1032 1032 1033 1033 es_base = evsel_streams__entry(data_base->evlist_streams, 1034 - evsel_base->idx); 1034 + evsel_base->core.idx); 1035 1035 if (!es_base) 1036 1036 return -1; 1037 1037 1038 1038 es_pair = evsel_streams__entry(data_pair->evlist_streams, 1039 - evsel_pair->idx); 1039 + evsel_pair->core.idx); 1040 1040 if (!es_pair) 1041 1041 return -1; 1042 1042
+2 -2
tools/perf/builtin-report.c
··· 332 332 const char *name = evsel__name(evsel); 333 333 int err = perf_read_values_add_value(&rep->show_threads_values, 334 334 event->read.pid, event->read.tid, 335 - evsel->idx, 335 + evsel->core.idx, 336 336 name, 337 337 event->read.value); 338 338 ··· 666 666 evlist__for_each_entry(rep->session->evlist, pos) { 667 667 struct hists *hists = evsel__hists(pos); 668 668 669 - if (pos->idx == 0) 669 + if (pos->core.idx == 0) 670 670 hists->symbol_filter_str = rep->symbol_filter_str; 671 671 672 672 hists->socket_filter = rep->socket_filter;
+4 -4
tools/perf/builtin-top.c
··· 264 264 265 265 if (top->evlist->enabled) { 266 266 if (top->zero) 267 - symbol__annotate_zero_histogram(symbol, top->sym_evsel->idx); 267 + symbol__annotate_zero_histogram(symbol, top->sym_evsel->core.idx); 268 268 else 269 - symbol__annotate_decay_histogram(symbol, top->sym_evsel->idx); 269 + symbol__annotate_decay_histogram(symbol, top->sym_evsel->core.idx); 270 270 } 271 271 if (more != 0) 272 272 printf("%d lines not displayed, maybe increase display entries [e]\n", more); ··· 530 530 fprintf(stderr, "\nAvailable events:"); 531 531 532 532 evlist__for_each_entry(top->evlist, top->sym_evsel) 533 - fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, evsel__name(top->sym_evsel)); 533 + fprintf(stderr, "\n\t%d %s", top->sym_evsel->core.idx, evsel__name(top->sym_evsel)); 534 534 535 535 prompt_integer(&counter, "Enter details event counter"); 536 536 ··· 541 541 break; 542 542 } 543 543 evlist__for_each_entry(top->evlist, top->sym_evsel) 544 - if (top->sym_evsel->idx == counter) 544 + if (top->sym_evsel->core.idx == counter) 545 545 break; 546 546 } else 547 547 top->sym_evsel = evlist__first(top->evlist);
+3 -3
tools/perf/tests/evsel-roundtrip-name.c
··· 44 44 45 45 for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { 46 46 __evsel__hw_cache_type_op_res_name(type, op, i, name, sizeof(name)); 47 - if (evsel->idx != idx) 47 + if (evsel->core.idx != idx) 48 48 continue; 49 49 50 50 ++idx; ··· 84 84 85 85 err = 0; 86 86 evlist__for_each_entry(evlist, evsel) { 87 - if (strcmp(evsel__name(evsel), names[evsel->idx / distance])) { 87 + if (strcmp(evsel__name(evsel), names[evsel->core.idx / distance])) { 88 88 --err; 89 - pr_debug("%s != %s\n", evsel__name(evsel), names[evsel->idx / distance]); 89 + pr_debug("%s != %s\n", evsel__name(evsel), names[evsel->core.idx / distance]); 90 90 } 91 91 } 92 92
+4 -4
tools/perf/tests/mmap-basic.c
··· 139 139 " doesn't map to an evsel\n", sample.id); 140 140 goto out_delete_evlist; 141 141 } 142 - nr_events[evsel->idx]++; 142 + nr_events[evsel->core.idx]++; 143 143 perf_mmap__consume(&md->core); 144 144 } 145 145 perf_mmap__read_done(&md->core); ··· 147 147 out_init: 148 148 err = 0; 149 149 evlist__for_each_entry(evlist, evsel) { 150 - if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) { 150 + if (nr_events[evsel->core.idx] != expected_nr_events[evsel->core.idx]) { 151 151 pr_debug("expected %d %s events, got %d\n", 152 - expected_nr_events[evsel->idx], 153 - evsel__name(evsel), nr_events[evsel->idx]); 152 + expected_nr_events[evsel->core.idx], 153 + evsel__name(evsel), nr_events[evsel->core.idx]); 154 154 err = -1; 155 155 goto out_delete_evlist; 156 156 }
+1 -1
tools/perf/ui/browsers/annotate.c
··· 749 749 hbt->timer(hbt->arg); 750 750 751 751 if (delay_secs != 0) { 752 - symbol__annotate_decay_histogram(sym, evsel->idx); 752 + symbol__annotate_decay_histogram(sym, evsel->core.idx); 753 753 hists__scnprintf_title(hists, title, sizeof(title)); 754 754 annotate_browser__show(&browser->b, title, help); 755 755 }
+2 -2
tools/perf/ui/gtk/annotate.c
··· 135 135 ret += perf_gtk__get_percent(s + ret, 136 136 sizeof(s) - ret, 137 137 sym, pos, 138 - evsel->idx + i); 138 + evsel->core.idx + i); 139 139 ret += scnprintf(s + ret, sizeof(s) - ret, " "); 140 140 } 141 141 } else { 142 142 ret = perf_gtk__get_percent(s, sizeof(s), sym, pos, 143 - evsel->idx); 143 + evsel->core.idx); 144 144 } 145 145 146 146 if (ret)
+4 -4
tools/perf/util/annotate.c
··· 961 961 if (sym == NULL) 962 962 return 0; 963 963 src = symbol__hists(sym, evsel->evlist->core.nr_entries); 964 - return src ? __symbol__inc_addr_samples(ms, src, evsel->idx, addr, sample) : 0; 964 + return src ? __symbol__inc_addr_samples(ms, src, evsel->core.idx, addr, sample) : 0; 965 965 } 966 966 967 967 static int symbol__account_cycles(u64 addr, u64 start, ··· 2159 2159 2160 2160 BUG_ON(i >= al->data_nr); 2161 2161 2162 - sym_hist = annotation__histogram(notes, evsel->idx); 2162 + sym_hist = annotation__histogram(notes, evsel->core.idx); 2163 2163 data = &al->data[i++]; 2164 2164 2165 2165 calc_percent(sym_hist, hists, data, al->offset, end); ··· 2340 2340 static void symbol__annotate_hits(struct symbol *sym, struct evsel *evsel) 2341 2341 { 2342 2342 struct annotation *notes = symbol__annotation(sym); 2343 - struct sym_hist *h = annotation__histogram(notes, evsel->idx); 2343 + struct sym_hist *h = annotation__histogram(notes, evsel->core.idx); 2344 2344 u64 len = symbol__size(sym), offset; 2345 2345 2346 2346 for (offset = 0; offset < len; ++offset) ··· 2373 2373 const char *d_filename; 2374 2374 const char *evsel_name = evsel__name(evsel); 2375 2375 struct annotation *notes = symbol__annotation(sym); 2376 - struct sym_hist *h = annotation__histogram(notes, evsel->idx); 2376 + struct sym_hist *h = annotation__histogram(notes, evsel->core.idx); 2377 2377 struct annotation_line *pos, *queue = NULL; 2378 2378 u64 start = map__rip_2objdump(map, sym->start); 2379 2379 int printed = 2, queue_len = 0, addr_fmt_width;
+6 -6
tools/perf/util/bpf_counter_cgroup.c
··· 124 124 map_fd = bpf_map__fd(skel->maps.events); 125 125 for (cpu = 0; cpu < nr_cpus; cpu++) { 126 126 int fd = FD(evsel, cpu); 127 - __u32 idx = evsel->idx * total_cpus + 127 + __u32 idx = evsel->core.idx * total_cpus + 128 128 evlist->core.all_cpus->map[cpu]; 129 129 130 130 err = bpf_map_update_elem(map_fd, &idx, &fd, ··· 221 221 222 222 static int bperf_cgrp__enable(struct evsel *evsel) 223 223 { 224 - if (evsel->idx) 224 + if (evsel->core.idx) 225 225 return 0; 226 226 227 227 bperf_cgrp__sync_counters(evsel->evlist); ··· 232 232 233 233 static int bperf_cgrp__disable(struct evsel *evsel) 234 234 { 235 - if (evsel->idx) 235 + if (evsel->core.idx) 236 236 return 0; 237 237 238 238 bperf_cgrp__sync_counters(evsel->evlist); ··· 251 251 int reading_map_fd, err = 0; 252 252 __u32 idx; 253 253 254 - if (evsel->idx) 254 + if (evsel->core.idx) 255 255 return 0; 256 256 257 257 bperf_cgrp__sync_counters(evsel->evlist); ··· 263 263 reading_map_fd = bpf_map__fd(skel->maps.cgrp_readings); 264 264 265 265 evlist__for_each_entry(evlist, evsel) { 266 - idx = evsel->idx; 266 + idx = evsel->core.idx; 267 267 err = bpf_map_lookup_elem(reading_map_fd, &idx, values); 268 268 if (err) { 269 269 pr_err("bpf map lookup falied: idx=%u, event=%s, cgrp=%s\n", ··· 288 288 289 289 static int bperf_cgrp__destroy(struct evsel *evsel) 290 290 { 291 - if (evsel->idx) 291 + if (evsel->core.idx) 292 292 return 0; 293 293 294 294 bperf_cgroup_bpf__destroy(skel);
+4 -6
tools/perf/util/evlist.c
··· 165 165 166 166 void evlist__add(struct evlist *evlist, struct evsel *entry) 167 167 { 168 - entry->evlist = evlist; 169 - entry->idx = evlist->core.nr_entries; 170 - entry->tracking = !entry->idx; 171 - 172 168 perf_evlist__add(&evlist->core, &entry->core); 169 + entry->evlist = evlist; 170 + entry->tracking = !entry->core.idx; 173 171 174 172 if (evlist->core.nr_entries == 1) 175 173 evlist__set_id_pos(evlist); ··· 230 232 leader = list_entry(list->next, struct evsel, core.node); 231 233 evsel = list_entry(list->prev, struct evsel, core.node); 232 234 233 - leader->core.nr_members = evsel->idx - leader->idx + 1; 235 + leader->core.nr_members = evsel->core.idx - leader->core.idx + 1; 234 236 235 237 __evlist__for_each_entry(list, evsel) { 236 238 evsel->leader = leader; ··· 2135 2137 struct evsel *evsel; 2136 2138 2137 2139 evlist__for_each_entry(evlist, evsel) { 2138 - if (evsel->idx == idx) 2140 + if (evsel->core.idx == idx) 2139 2141 return evsel; 2140 2142 } 2141 2143 return NULL;
+1 -2
tools/perf/util/evsel.c
··· 239 239 void evsel__init(struct evsel *evsel, 240 240 struct perf_event_attr *attr, int idx) 241 241 { 242 - perf_evsel__init(&evsel->core, attr); 243 - evsel->idx = idx; 242 + perf_evsel__init(&evsel->core, attr, idx); 244 243 evsel->tracking = !idx; 245 244 evsel->leader = evsel; 246 245 evsel->unit = "";
+1 -2
tools/perf/util/evsel.h
··· 49 49 struct perf_evsel core; 50 50 struct evlist *evlist; 51 51 off_t id_offset; 52 - int idx; 53 52 int id_pos; 54 53 int is_pos; 55 54 unsigned int sample_size; ··· 405 406 406 407 static inline int evsel__group_idx(struct evsel *evsel) 407 408 { 408 - return evsel->idx - evsel->leader->idx; 409 + return evsel->core.idx - evsel->leader->core.idx; 409 410 } 410 411 411 412 /* Iterates group WITHOUT the leader. */
+5 -5
tools/perf/util/header.c
··· 789 789 evlist__for_each_entry(evlist, evsel) { 790 790 if (evsel__is_group_leader(evsel) && evsel->core.nr_members > 1) { 791 791 const char *name = evsel->group_name ?: "{anon_group}"; 792 - u32 leader_idx = evsel->idx; 792 + u32 leader_idx = evsel->core.idx; 793 793 u32 nr_members = evsel->core.nr_members; 794 794 795 795 ret = do_write_string(ff, name); ··· 1844 1844 msz = sz; 1845 1845 1846 1846 for (i = 0, evsel = events; i < nre; evsel++, i++) { 1847 - evsel->idx = i; 1847 + evsel->core.idx = i; 1848 1848 1849 1849 /* 1850 1850 * must read entire on-file attr struct to ··· 2379 2379 struct evsel *evsel; 2380 2380 2381 2381 evlist__for_each_entry(evlist, evsel) { 2382 - if (evsel->idx == idx) 2382 + if (evsel->core.idx == idx) 2383 2383 return evsel; 2384 2384 } 2385 2385 ··· 2393 2393 if (!event->name) 2394 2394 return; 2395 2395 2396 - evsel = evlist__find_by_index(evlist, event->idx); 2396 + evsel = evlist__find_by_index(evlist, event->core.idx); 2397 2397 if (!evsel) 2398 2398 return; 2399 2399 ··· 2739 2739 2740 2740 i = nr = 0; 2741 2741 evlist__for_each_entry(session->evlist, evsel) { 2742 - if (evsel->idx == (int) desc[i].leader_idx) { 2742 + if (evsel->core.idx == (int) desc[i].leader_idx) { 2743 2743 evsel->leader = evsel; 2744 2744 /* {anon_group} is a dummy name */ 2745 2745 if (strcmp(desc[i].name, "{anon_group}")) {
+7 -7
tools/perf/util/metricgroup.c
··· 219 219 if (has_constraint && ev->weak_group) 220 220 continue; 221 221 /* Ignore event if already used and merging is disabled. */ 222 - if (metric_no_merge && test_bit(ev->idx, evlist_used)) 222 + if (metric_no_merge && test_bit(ev->core.idx, evlist_used)) 223 223 continue; 224 224 if (!has_constraint && ev->leader != current_leader) { 225 225 /* ··· 269 269 for (i = 0; i < idnum; i++) { 270 270 ev = metric_events[i]; 271 271 /* Don't free the used events. */ 272 - set_bit(ev->idx, evlist_used); 272 + set_bit(ev->core.idx, evlist_used); 273 273 /* 274 274 * The metric leader points to the identically named event in 275 275 * metric_events. ··· 291 291 evsel_same_pmu_or_none(ev->leader, metric_events[i]->leader)) 292 292 break; 293 293 if (!strcmp(metric_events[i]->name, ev->name)) { 294 - set_bit(ev->idx, evlist_used); 294 + set_bit(ev->core.idx, evlist_used); 295 295 ev->metric_leader = metric_events[i]; 296 296 } 297 297 } ··· 391 391 } 392 392 393 393 evlist__for_each_entry_safe(perf_evlist, tmp, evsel) { 394 - if (!test_bit(evsel->idx, evlist_used)) { 394 + if (!test_bit(evsel->core.idx, evlist_used)) { 395 395 evlist__remove(perf_evlist, evsel); 396 396 evsel__delete(evsel); 397 397 } ··· 1312 1312 nd = rblist__entry(old_metric_events, i); 1313 1313 old_me = container_of(nd, struct metric_event, nd); 1314 1314 1315 - evsel = evlist__find_evsel(evlist, old_me->evsel->idx); 1315 + evsel = evlist__find_evsel(evlist, old_me->evsel->core.idx); 1316 1316 if (!evsel) 1317 1317 return -EINVAL; 1318 1318 new_me = metricgroup__lookup(new_metric_events, evsel, true); ··· 1320 1320 return -ENOMEM; 1321 1321 1322 1322 pr_debug("copying metric event for cgroup '%s': %s (idx=%d)\n", 1323 - cgrp ? cgrp->name : "root", evsel->name, evsel->idx); 1323 + cgrp ? 
cgrp->name : "root", evsel->name, evsel->core.idx); 1324 1324 1325 1325 list_for_each_entry(old_expr, &old_me->head, nd) { 1326 1326 new_expr = malloc(sizeof(*new_expr)); ··· 1363 1363 /* copy evsel in the same position */ 1364 1364 for (idx = 0; idx < nr; idx++) { 1365 1365 evsel = old_expr->metric_events[idx]; 1366 - evsel = evlist__find_evsel(evlist, evsel->idx); 1366 + evsel = evlist__find_evsel(evlist, evsel->core.idx); 1367 1367 if (evsel == NULL) { 1368 1368 free(new_expr->metric_events); 1369 1369 free(new_expr->metric_refs);
+1 -1
tools/perf/util/parse-events.c
··· 1740 1740 1741 1741 leader = list_first_entry(list, struct evsel, core.node); 1742 1742 evsel = list_last_entry(list, struct evsel, core.node); 1743 - total_members = evsel->idx - leader->idx + 1; 1743 + total_members = evsel->core.idx - leader->core.idx + 1; 1744 1744 1745 1745 leaders = calloc(total_members, sizeof(uintptr_t)); 1746 1746 if (WARN_ON(!leaders))
+1 -1
tools/perf/util/python.c
··· 1032 1032 1033 1033 Py_INCREF(pevsel); 1034 1034 evsel = &((struct pyrf_evsel *)pevsel)->evsel; 1035 - evsel->idx = evlist->core.nr_entries; 1035 + evsel->core.idx = evlist->core.nr_entries; 1036 1036 evlist__add(evlist, evsel); 1037 1037 1038 1038 return Py_BuildValue("i", evlist->core.nr_entries);
+1 -1
tools/perf/util/stream.c
··· 139 139 140 140 hists__output_resort(hists, NULL); 141 141 init_hot_callchain(hists, &es[i]); 142 - es[i].evsel_idx = pos->idx; 142 + es[i].evsel_idx = pos->core.idx; 143 143 i++; 144 144 } 145 145