Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf tools: Pass struct perf_hpp_fmt to its callbacks

Currently the ->cmp, ->collapse and ->sort callbacks don't pass the
corresponding fmt. But it'll be needed by upcoming changes in the
perf diff command.

Suggested-by: Jiri Olsa <jolsa@kernel.org>
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1420677949-6719-6-git-send-email-namhyung@kernel.org
[ fix build by passing perf_hpp_fmt pointer to hist_entry__cmp_ methods ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

authored by

Namhyung Kim and committed by
Arnaldo Carvalho de Melo
87bbdf76 ff21cef6

+61 -18
+10 -5
tools/perf/builtin-diff.c
··· 554 554 } 555 555 556 556 static int64_t 557 - hist_entry__cmp_nop(struct hist_entry *left __maybe_unused, 557 + hist_entry__cmp_nop(struct perf_hpp_fmt *fmt __maybe_unused, 558 + struct hist_entry *left __maybe_unused, 558 559 struct hist_entry *right __maybe_unused) 559 560 { 560 561 return 0; 561 562 } 562 563 563 564 static int64_t 564 - hist_entry__cmp_baseline(struct hist_entry *left, struct hist_entry *right) 565 + hist_entry__cmp_baseline(struct perf_hpp_fmt *fmt __maybe_unused, 566 + struct hist_entry *left, struct hist_entry *right) 565 567 { 566 568 if (sort_compute) 567 569 return 0; ··· 574 572 } 575 573 576 574 static int64_t 577 - hist_entry__cmp_delta(struct hist_entry *left, struct hist_entry *right) 575 + hist_entry__cmp_delta(struct perf_hpp_fmt *fmt __maybe_unused, 576 + struct hist_entry *left, struct hist_entry *right) 578 577 { 579 578 return hist_entry__cmp_compute(right, left, COMPUTE_DELTA); 580 579 } 581 580 582 581 static int64_t 583 - hist_entry__cmp_ratio(struct hist_entry *left, struct hist_entry *right) 582 + hist_entry__cmp_ratio(struct perf_hpp_fmt *fmt __maybe_unused, 583 + struct hist_entry *left, struct hist_entry *right) 584 584 { 585 585 return hist_entry__cmp_compute(right, left, COMPUTE_RATIO); 586 586 } 587 587 588 588 static int64_t 589 - hist_entry__cmp_wdiff(struct hist_entry *left, struct hist_entry *right) 589 + hist_entry__cmp_wdiff(struct perf_hpp_fmt *fmt __maybe_unused, 590 + struct hist_entry *left, struct hist_entry *right) 590 591 { 591 592 return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF); 592 593 }
+8 -4
tools/perf/ui/hist.c
··· 285 285 } 286 286 287 287 #define __HPP_SORT_FN(_type, _field) \ 288 - static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b) \ 288 + static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \ 289 + struct hist_entry *a, struct hist_entry *b) \ 289 290 { \ 290 291 return __hpp__sort(a, b, he_get_##_field); \ 291 292 } ··· 313 312 } 314 313 315 314 #define __HPP_SORT_ACC_FN(_type, _field) \ 316 - static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b) \ 315 + static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \ 316 + struct hist_entry *a, struct hist_entry *b) \ 317 317 { \ 318 318 return __hpp__sort_acc(a, b, he_get_acc_##_field); \ 319 319 } ··· 333 331 } 334 332 335 333 #define __HPP_SORT_RAW_FN(_type, _field) \ 336 - static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b) \ 334 + static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \ 335 + struct hist_entry *a, struct hist_entry *b) \ 337 336 { \ 338 337 return __hpp__sort(a, b, he_get_raw_##_field); \ 339 338 } ··· 364 361 HPP_RAW_FNS(samples, nr_events) 365 362 HPP_RAW_FNS(period, period) 366 363 367 - static int64_t hpp__nop_cmp(struct hist_entry *a __maybe_unused, 364 + static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused, 365 + struct hist_entry *a __maybe_unused, 368 366 struct hist_entry *b __maybe_unused) 369 367 { 370 368 return 0;
+3 -3
tools/perf/util/hist.c
··· 913 913 if (perf_hpp__should_skip(fmt)) 914 914 continue; 915 915 916 - cmp = fmt->cmp(left, right); 916 + cmp = fmt->cmp(fmt, left, right); 917 917 if (cmp) 918 918 break; 919 919 } ··· 931 931 if (perf_hpp__should_skip(fmt)) 932 932 continue; 933 933 934 - cmp = fmt->collapse(left, right); 934 + cmp = fmt->collapse(fmt, left, right); 935 935 if (cmp) 936 936 break; 937 937 } ··· 1061 1061 if (perf_hpp__should_skip(fmt)) 1062 1062 continue; 1063 1063 1064 - cmp = fmt->sort(a, b); 1064 + cmp = fmt->sort(fmt, a, b); 1065 1065 if (cmp) 1066 1066 break; 1067 1067 }
+6 -3
tools/perf/util/hist.h
··· 195 195 struct hist_entry *he); 196 196 int (*entry)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, 197 197 struct hist_entry *he); 198 - int64_t (*cmp)(struct hist_entry *a, struct hist_entry *b); 199 - int64_t (*collapse)(struct hist_entry *a, struct hist_entry *b); 200 - int64_t (*sort)(struct hist_entry *a, struct hist_entry *b); 198 + int64_t (*cmp)(struct perf_hpp_fmt *fmt, 199 + struct hist_entry *a, struct hist_entry *b); 200 + int64_t (*collapse)(struct perf_hpp_fmt *fmt, 201 + struct hist_entry *a, struct hist_entry *b); 202 + int64_t (*sort)(struct perf_hpp_fmt *fmt, 203 + struct hist_entry *a, struct hist_entry *b); 201 204 202 205 struct list_head list; 203 206 struct list_head sort_list;
+34 -3
tools/perf/util/sort.c
··· 1304 1304 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len); 1305 1305 } 1306 1306 1307 + static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt, 1308 + struct hist_entry *a, struct hist_entry *b) 1309 + { 1310 + struct hpp_sort_entry *hse; 1311 + 1312 + hse = container_of(fmt, struct hpp_sort_entry, hpp); 1313 + return hse->se->se_cmp(a, b); 1314 + } 1315 + 1316 + static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt, 1317 + struct hist_entry *a, struct hist_entry *b) 1318 + { 1319 + struct hpp_sort_entry *hse; 1320 + int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *); 1321 + 1322 + hse = container_of(fmt, struct hpp_sort_entry, hpp); 1323 + collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp; 1324 + return collapse_fn(a, b); 1325 + } 1326 + 1327 + static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt, 1328 + struct hist_entry *a, struct hist_entry *b) 1329 + { 1330 + struct hpp_sort_entry *hse; 1331 + int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *); 1332 + 1333 + hse = container_of(fmt, struct hpp_sort_entry, hpp); 1334 + sort_fn = hse->se->se_sort ?: hse->se->se_cmp; 1335 + return sort_fn(a, b); 1336 + } 1337 + 1307 1338 static struct hpp_sort_entry * 1308 1339 __sort_dimension__alloc_hpp(struct sort_dimension *sd) 1309 1340 { ··· 1353 1322 hse->hpp.entry = __sort__hpp_entry; 1354 1323 hse->hpp.color = NULL; 1355 1324 1356 - hse->hpp.cmp = sd->entry->se_cmp; 1357 - hse->hpp.collapse = sd->entry->se_collapse ? : sd->entry->se_cmp; 1358 - hse->hpp.sort = sd->entry->se_sort ? : hse->hpp.collapse; 1325 + hse->hpp.cmp = __sort__hpp_cmp; 1326 + hse->hpp.collapse = __sort__hpp_collapse; 1327 + hse->hpp.sort = __sort__hpp_sort; 1359 1328 1360 1329 INIT_LIST_HEAD(&hse->hpp.list); 1361 1330 INIT_LIST_HEAD(&hse->hpp.sort_list);