Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf kvm: Use histograms list to replace cached list

The perf kvm tool defines its own cached list, which is managed with an
RB tree; histograms also provide an RB tree to manage data entries. Since now we
have introduced histograms in the tool, it's not necessary to use the
self defined list and we can directly use histograms list to manage
KVM events.

This patch changes to use histograms list to track KVM events, and it
invokes the common function hists__output_resort_cb() to sort result,
this also gives us the flexibility to easily extend to more sorting keywords.

Once the histograms list is supported, the cached list becomes redundant,
so remove the relevant code for it.

Committer notes:

kvm_hists__reinit() is only used by functions enclosed in:

#if defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)

So do it with this new function as well.

Signed-off-by: Leo Yan <leo.yan@linaro.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.g.garry@oracle.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-arm-kernel@lists.infradead.org
Link: https://lore.kernel.org/r/20230315145112.186603-2-leo.yan@linaro.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

authored by

Leo Yan and committed by
Arnaldo Carvalho de Melo
f57a6414 41f1138e

+93 -99
+93 -92
tools/perf/builtin-kvm.c
··· 323 323 perf_hpp_list__init(&kvm_hists.list); 324 324 return kvm_hpp_list__parse(&kvm_hists.list, NULL, "ev_name"); 325 325 } 326 + 327 + static int kvm_hists__reinit(const char *output, const char *sort) 328 + { 329 + perf_hpp__reset_output_field(&kvm_hists.list); 330 + return kvm_hpp_list__parse(&kvm_hists.list, output, sort); 331 + } 326 332 #endif // defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT) 327 333 328 334 static const char *get_filename_for_perf_kvm(void) ··· 428 422 struct kvm_event *last_event; 429 423 }; 430 424 431 - 432 - static void init_kvm_event_record(struct perf_kvm_stat *kvm) 433 - { 434 - unsigned int i; 435 - 436 - for (i = 0; i < EVENTS_CACHE_SIZE; i++) 437 - INIT_LIST_HEAD(&kvm->kvm_events_cache[i]); 438 - } 439 - 440 425 #ifdef HAVE_TIMERFD_SUPPORT 441 - static void clear_events_cache_stats(struct list_head *kvm_events_cache) 426 + static void clear_events_cache_stats(void) 442 427 { 443 - struct list_head *head; 428 + struct rb_root_cached *root; 429 + struct rb_node *nd; 444 430 struct kvm_event *event; 445 - unsigned int i; 446 - int j; 431 + int i; 447 432 448 - for (i = 0; i < EVENTS_CACHE_SIZE; i++) { 449 - head = &kvm_events_cache[i]; 450 - list_for_each_entry(event, head, hash_entry) { 451 - /* reset stats for event */ 452 - event->total.time = 0; 453 - init_stats(&event->total.stats); 433 + if (hists__has(&kvm_hists.hists, need_collapse)) 434 + root = &kvm_hists.hists.entries_collapsed; 435 + else 436 + root = kvm_hists.hists.entries_in; 454 437 455 - for (j = 0; j < event->max_vcpu; ++j) { 456 - event->vcpu[j].time = 0; 457 - init_stats(&event->vcpu[j].stats); 458 - } 438 + for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) { 439 + struct hist_entry *he; 440 + 441 + he = rb_entry(nd, struct hist_entry, rb_node_in); 442 + event = container_of(he, struct kvm_event, he); 443 + 444 + /* reset stats for event */ 445 + event->total.time = 0; 446 + init_stats(&event->total.stats); 447 + 448 + for (i = 0; i < 
event->max_vcpu; ++i) { 449 + event->vcpu[i].time = 0; 450 + init_stats(&event->vcpu[i].stats); 459 451 } 460 452 } 461 453 } 462 454 #endif 463 - 464 - static int kvm_events_hash_fn(u64 key) 465 - { 466 - return key & (EVENTS_CACHE_SIZE - 1); 467 - } 468 455 469 456 static bool kvm_event_expand(struct kvm_event *event, int vcpu_id) 470 457 { ··· 484 485 return true; 485 486 } 486 487 487 - static struct kvm_event *kvm_alloc_init_event(struct perf_kvm_stat *kvm, 488 - struct event_key *key, 489 - struct perf_sample *sample __maybe_unused) 488 + static void *kvm_he_zalloc(size_t size) 490 489 { 491 - struct kvm_event *event; 490 + struct kvm_event *kvm_ev; 492 491 493 - event = zalloc(sizeof(*event)); 494 - if (!event) { 495 - pr_err("Not enough memory\n"); 492 + kvm_ev = zalloc(size + sizeof(*kvm_ev)); 493 + if (!kvm_ev) 496 494 return NULL; 497 - } 498 495 499 - event->perf_kvm = kvm; 500 - event->key = *key; 501 - init_stats(&event->total.stats); 502 - return event; 496 + init_stats(&kvm_ev->total.stats); 497 + hists__inc_nr_samples(&kvm_hists.hists, 0); 498 + return &kvm_ev->he; 503 499 } 500 + 501 + static void kvm_he_free(void *he) 502 + { 503 + struct kvm_event *kvm_ev; 504 + 505 + free(((struct hist_entry *)he)->kvm_info); 506 + kvm_ev = container_of(he, struct kvm_event, he); 507 + free(kvm_ev); 508 + } 509 + 510 + static struct hist_entry_ops kvm_ev_entry_ops = { 511 + .new = kvm_he_zalloc, 512 + .free = kvm_he_free, 513 + }; 504 514 505 515 static struct kvm_event *find_create_kvm_event(struct perf_kvm_stat *kvm, 506 516 struct event_key *key, 507 517 struct perf_sample *sample) 508 518 { 509 519 struct kvm_event *event; 510 - struct list_head *head; 520 + struct hist_entry *he; 521 + struct kvm_info *ki; 511 522 512 523 BUG_ON(key->key == INVALID_KEY); 513 524 514 - head = &kvm->kvm_events_cache[kvm_events_hash_fn(key->key)]; 515 - list_for_each_entry(event, head, hash_entry) { 516 - if (event->key.key == key->key && event->key.info == key->info) 517 - 
return event; 525 + ki = zalloc(sizeof(*ki)); 526 + if (!ki) { 527 + pr_err("Failed to allocate kvm info\n"); 528 + return NULL; 518 529 } 519 530 520 - event = kvm_alloc_init_event(kvm, key, sample); 521 - if (!event) 531 + kvm->events_ops->decode_key(kvm, key, ki->name); 532 + he = hists__add_entry_ops(&kvm_hists.hists, &kvm_ev_entry_ops, 533 + &kvm->al, NULL, NULL, NULL, ki, sample, true); 534 + if (he == NULL) { 535 + pr_err("Failed to allocate hist entry\n"); 536 + free(ki); 522 537 return NULL; 538 + } 523 539 524 - list_add(&event->hash_entry, head); 540 + event = container_of(he, struct kvm_event, he); 541 + if (!event->perf_kvm) { 542 + event->perf_kvm = kvm; 543 + event->key = *key; 544 + } 545 + 525 546 return event; 526 547 } 527 548 ··· 774 755 return false; 775 756 } 776 757 777 - static void insert_to_result(struct rb_root *result, struct kvm_event *event, 778 - key_cmp_fun bigger, int vcpu) 779 - { 780 - struct rb_node **rb = &result->rb_node; 781 - struct rb_node *parent = NULL; 782 - struct kvm_event *p; 783 - 784 - while (*rb) { 785 - p = container_of(*rb, struct kvm_event, rb); 786 - parent = *rb; 787 - 788 - if (bigger(event, p, vcpu) > 0) 789 - rb = &(*rb)->rb_left; 790 - else 791 - rb = &(*rb)->rb_right; 792 - } 793 - 794 - rb_link_node(&event->rb, parent, rb); 795 - rb_insert_color(&event->rb, result); 796 - } 797 - 798 758 static bool event_is_valid(struct kvm_event *event, int vcpu) 799 759 { 800 760 return !!get_event_count(event, vcpu); 801 761 } 802 762 803 - static void sort_result(struct perf_kvm_stat *kvm) 763 + static int filter_cb(struct hist_entry *he, void *arg __maybe_unused) 804 764 { 805 - unsigned int i; 806 - int vcpu = kvm->trace_vcpu; 807 765 struct kvm_event *event; 766 + struct perf_kvm_stat *perf_kvm; 808 767 809 - for (i = 0; i < EVENTS_CACHE_SIZE; i++) { 810 - list_for_each_entry(event, &kvm->kvm_events_cache[i], hash_entry) { 811 - if (event_is_valid(event, vcpu)) { 812 - insert_to_result(&kvm->result, event, 813 - 
kvm->compare, vcpu); 814 - } 815 - } 816 - } 768 + event = container_of(he, struct kvm_event, he); 769 + perf_kvm = event->perf_kvm; 770 + if (!event_is_valid(event, perf_kvm->trace_vcpu)) 771 + he->filtered = 1; 772 + else 773 + he->filtered = 0; 774 + return 0; 817 775 } 818 776 819 - /* returns left most element of result, and erase it */ 820 - static struct kvm_event *pop_from_result(struct rb_root *result) 777 + static void sort_result(struct perf_kvm_stat *kvm) 821 778 { 822 - struct rb_node *node = rb_first(result); 779 + const char *output_columns = "ev_name,sample,time,max_t,min_t,mean_t"; 823 780 824 - if (!node) 825 - return NULL; 826 - 827 - rb_erase(node, result); 828 - return container_of(node, struct kvm_event, rb); 781 + kvm_hists__reinit(output_columns, kvm->sort_key); 782 + hists__collapse_resort(&kvm_hists.hists, NULL); 783 + hists__output_resort_cb(&kvm_hists.hists, NULL, filter_cb); 829 784 } 830 785 831 786 static void print_vcpu_info(struct perf_kvm_stat *kvm) ··· 842 849 char decode[KVM_EVENT_NAME_LEN]; 843 850 struct kvm_event *event; 844 851 int vcpu = kvm->trace_vcpu; 852 + struct rb_node *nd; 845 853 846 854 if (kvm->live) { 847 855 puts(CONSOLE_CLEAR); ··· 861 867 pr_info("%16s ", "Avg time"); 862 868 pr_info("\n\n"); 863 869 864 - while ((event = pop_from_result(&kvm->result))) { 870 + for (nd = rb_first_cached(&kvm_hists.hists.entries); nd; nd = rb_next(nd)) { 871 + struct hist_entry *he; 865 872 u64 ecount, etime, max, min; 866 873 874 + he = rb_entry(nd, struct hist_entry, rb_node); 875 + if (he->filtered) 876 + continue; 877 + 878 + event = container_of(he, struct kvm_event, he); 867 879 ecount = get_event_count(event, vcpu); 868 880 etime = get_event_time(event, vcpu); 869 881 max = get_event_max(event, vcpu); ··· 1146 1146 sort_result(kvm); 1147 1147 print_result(kvm); 1148 1148 1149 + /* Reset sort list to "ev_name" */ 1150 + kvm_hists__reinit(NULL, "ev_name"); 1151 + 1149 1152 /* reset counts */ 1150 - 
clear_events_cache_stats(kvm->kvm_events_cache); 1153 + clear_events_cache_stats(); 1151 1154 kvm->total_count = 0; 1152 1155 kvm->total_time = 0; 1153 1156 kvm->lost_events = 0; ··· 1206 1203 } 1207 1204 1208 1205 set_term_quiet_input(&save); 1209 - init_kvm_event_record(kvm); 1210 1206 1211 1207 kvm_hists__init(); 1212 1208 ··· 1401 1399 if (!register_kvm_events_ops(kvm)) 1402 1400 goto exit; 1403 1401 1404 - init_kvm_event_record(kvm); 1405 1402 setup_pager(); 1406 1403 1407 1404 kvm_hists__init();
-7
tools/perf/util/kvm-stat.h
··· 36 36 37 37 struct kvm_event { 38 38 struct list_head hash_entry; 39 - struct rb_node rb; 40 39 41 40 struct perf_kvm_stat *perf_kvm; 42 41 struct event_key key; ··· 80 81 const char *reason; 81 82 }; 82 83 83 - #define EVENTS_BITS 12 84 - #define EVENTS_CACHE_SIZE (1UL << EVENTS_BITS) 85 - 86 84 struct perf_kvm_stat { 87 85 struct perf_tool tool; 88 86 struct record_opts opts; ··· 99 103 100 104 struct kvm_events_ops *events_ops; 101 105 key_cmp_fun compare; 102 - struct list_head kvm_events_cache[EVENTS_CACHE_SIZE]; 103 106 104 107 u64 total_time; 105 108 u64 total_count; ··· 106 111 u64 duration; 107 112 108 113 struct intlist *pid_list; 109 - 110 - struct rb_root result; 111 114 112 115 int timerfd; 113 116 unsigned int display_time;