Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf util: Use cached rbtree for rblists

At the cost of an extra pointer, we can avoid the O(logN) cost of
finding the first element in the tree (smallest node), which is
something required for any of the strlist or intlist traversals
(XXX_for_each_entry()). There are a number of users in perf of these
(particularly strlists), including probes and buildid.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: http://lkml.kernel.org/r/20181206191819.30182-5-dave@stgolabs.net
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

authored by

Davidlohr Bueso and committed by
Arnaldo Carvalho de Melo
ca227029 55ecd631

+24 -16
+1 -1
tools/perf/util/intlist.h
··· 45 45 /* For intlist iteration */ 46 46 static inline struct int_node *intlist__first(struct intlist *ilist) 47 47 { 48 - struct rb_node *rn = rb_first(&ilist->rblist.entries); 48 + struct rb_node *rn = rb_first_cached(&ilist->rblist.entries); 49 49 return rn ? rb_entry(rn, struct int_node, rb_node) : NULL; 50 50 } 51 51 static inline struct int_node *intlist__next(struct int_node *in)
+1 -1
tools/perf/util/metricgroup.c
··· 352 352 else if (metrics && !raw) 353 353 printf("\nMetrics:\n\n"); 354 354 355 - for (node = rb_first(&groups.entries); node; node = next) { 355 + for (node = rb_first_cached(&groups.entries); node; node = next) { 356 356 struct mep *me = container_of(node, struct mep, nd); 357 357 358 358 if (metricgroups)
+1 -1
tools/perf/util/rb_resort.h
··· 140 140 141 141 /* For 'struct intlist' */ 142 142 #define DECLARE_RESORT_RB_INTLIST(__name, __ilist) \ 143 - DECLARE_RESORT_RB(__name)(&__ilist->rblist.entries, \ 143 + DECLARE_RESORT_RB(__name)(&__ilist->rblist.entries.rb_root, \ 144 144 __ilist->rblist.nr_entries) 145 145 146 146 /* For 'struct machine->threads' */
+18 -10
tools/perf/util/rblist.c
··· 13 13 14 14 int rblist__add_node(struct rblist *rblist, const void *new_entry) 15 15 { 16 - struct rb_node **p = &rblist->entries.rb_node; 16 + struct rb_node **p = &rblist->entries.rb_root.rb_node; 17 17 struct rb_node *parent = NULL, *new_node; 18 + bool leftmost = true; 18 19 19 20 while (*p != NULL) { 20 21 int rc; ··· 25 24 rc = rblist->node_cmp(parent, new_entry); 26 25 if (rc > 0) 27 26 p = &(*p)->rb_left; 28 - else if (rc < 0) 27 + else if (rc < 0) { 29 28 p = &(*p)->rb_right; 29 + leftmost = false; 30 + } 30 31 else 31 32 return -EEXIST; 32 33 } ··· 38 35 return -ENOMEM; 39 36 40 37 rb_link_node(new_node, parent, p); 41 - rb_insert_color(new_node, &rblist->entries); 38 + rb_insert_color_cached(new_node, &rblist->entries, leftmost); 42 39 ++rblist->nr_entries; 43 40 44 41 return 0; ··· 46 43 47 44 void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node) 48 45 { 49 - rb_erase(rb_node, &rblist->entries); 46 + rb_erase_cached(rb_node, &rblist->entries); 50 47 --rblist->nr_entries; 51 48 rblist->node_delete(rblist, rb_node); 52 49 } ··· 55 52 const void *entry, 56 53 bool create) 57 54 { 58 - struct rb_node **p = &rblist->entries.rb_node; 55 + struct rb_node **p = &rblist->entries.rb_root.rb_node; 59 56 struct rb_node *parent = NULL, *new_node = NULL; 57 + bool leftmost = true; 60 58 61 59 while (*p != NULL) { 62 60 int rc; ··· 67 63 rc = rblist->node_cmp(parent, entry); 68 64 if (rc > 0) 69 65 p = &(*p)->rb_left; 70 - else if (rc < 0) 66 + else if (rc < 0) { 71 67 p = &(*p)->rb_right; 68 + leftmost = false; 69 + } 72 70 else 73 71 return parent; 74 72 } ··· 79 73 new_node = rblist->node_new(rblist, entry); 80 74 if (new_node) { 81 75 rb_link_node(new_node, parent, p); 82 - rb_insert_color(new_node, &rblist->entries); 76 + rb_insert_color_cached(new_node, 77 + &rblist->entries, leftmost); 83 78 ++rblist->nr_entries; 84 79 } 85 80 } ··· 101 94 void rblist__init(struct rblist *rblist) 102 95 { 103 96 if (rblist != NULL) { 104 - rblist->entries = RB_ROOT; 97 + rblist->entries = RB_ROOT_CACHED; 105 98 rblist->nr_entries = 0; 106 99 } 107 100 ··· 110 103 111 104 void rblist__exit(struct rblist *rblist) 112 105 { 113 - struct rb_node *pos, *next = rb_first(&rblist->entries); 106 + struct rb_node *pos, *next = rb_first_cached(&rblist->entries); 114 107 115 108 while (next) { 116 109 pos = next; ··· 131 124 { 132 125 struct rb_node *node; 133 126 134 - for (node = rb_first(&rblist->entries); node; node = rb_next(node)) { 127 + for (node = rb_first_cached(&rblist->entries); node; 128 + node = rb_next(node)) { 135 129 if (!idx--) 136 130 return node; 137 131 }
+1 -1
tools/perf/util/rblist.h
··· 20 20 */ 21 21 22 22 struct rblist { 23 - struct rb_root entries; 23 + struct rb_root_cached entries; 24 24 unsigned int nr_entries; 25 25 26 26 int (*node_cmp)(struct rb_node *rbn, const void *entry);
+1 -1
tools/perf/util/stat-shadow.c
··· 168 168 struct rb_node *pos, *next; 169 169 170 170 rblist = &st->value_list; 171 - next = rb_first(&rblist->entries); 171 + next = rb_first_cached(&rblist->entries); 172 172 while (next) { 173 173 pos = next; 174 174 next = rb_next(pos);
+1 -1
tools/perf/util/strlist.h
··· 57 57 /* For strlist iteration */ 58 58 static inline struct str_node *strlist__first(struct strlist *slist) 59 59 { 60 - struct rb_node *rn = rb_first(&slist->rblist.entries); 60 + struct rb_node *rn = rb_first_cached(&slist->rblist.entries); 61 61 return rn ? rb_entry(rn, struct str_node, rb_node) : NULL; 62 62 } 63 63 static inline struct str_node *strlist__next(struct str_node *sn)