Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'perf-core-for-mingo-5.0-20190126' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

BPF:

Song Liu:

- Fix synthesized PERF_RECORD_KSYMBOL/BPF_EVENT

Arnaldo Carvalho de Melo:

- Add bpf_map() helper, to make BPF map declarations more compact and
allow for BTF annotations to be made transparently.

perf script python:

Tony Jones:

- Remove explicit shebangs.

- Fix the PYTHON=python3 builds.

Core:

Davidlohr Bueso:

- Update rbtree implementation, getting it closer to the kernel one.

- Use cached rbtrees.

Arnaldo Carvalho de Melo:

- Remove some needless headers from .c and .h files fixing up the fallout,
to reduce build time when changes are made to .h files.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>

+810 -531
+46 -6
tools/include/linux/rbtree.h
··· 43 43 struct rb_node *rb_node; 44 44 }; 45 45 46 + /* 47 + * Leftmost-cached rbtrees. 48 + * 49 + * We do not cache the rightmost node based on footprint 50 + * size vs number of potential users that could benefit 51 + * from O(1) rb_last(). Just not worth it, users that want 52 + * this feature can always implement the logic explicitly. 53 + * Furthermore, users that want to cache both pointers may 54 + * find it a bit asymmetric, but that's ok. 55 + */ 56 + struct rb_root_cached { 57 + struct rb_root rb_root; 58 + struct rb_node *rb_leftmost; 59 + }; 46 60 47 61 #define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3)) 48 62 49 63 #define RB_ROOT (struct rb_root) { NULL, } 64 + #define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL } 50 65 #define rb_entry(ptr, type, member) container_of(ptr, type, member) 51 66 52 - #define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL) 67 + #define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL) 53 68 54 69 /* 'empty' nodes are nodes that are known not to be inserted in an rbtree */ 55 70 #define RB_EMPTY_NODE(node) \ ··· 83 68 extern struct rb_node *rb_first(const struct rb_root *); 84 69 extern struct rb_node *rb_last(const struct rb_root *); 85 70 71 + extern void rb_insert_color_cached(struct rb_node *, 72 + struct rb_root_cached *, bool); 73 + extern void rb_erase_cached(struct rb_node *node, struct rb_root_cached *); 74 + /* Same as rb_first(), but O(1) */ 75 + #define rb_first_cached(root) (root)->rb_leftmost 76 + 86 77 /* Postorder iteration - always visit the parent after its children */ 87 78 extern struct rb_node *rb_first_postorder(const struct rb_root *); 88 79 extern struct rb_node *rb_next_postorder(const struct rb_node *); ··· 96 75 /* Fast replacement of a single node without remove/rebalance/add/rebalance */ 97 76 extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, 98 77 struct rb_root *root); 78 + extern void rb_replace_node_cached(struct rb_node *victim, 
struct rb_node *new, 79 + struct rb_root_cached *root); 99 80 100 81 static inline void rb_link_node(struct rb_node *node, struct rb_node *parent, 101 82 struct rb_node **rb_link) ··· 113 90 ____ptr ? rb_entry(____ptr, type, member) : NULL; \ 114 91 }) 115 92 116 - 117 - /* 118 - * Handy for checking that we are not deleting an entry that is 119 - * already in a list, found in block/{blk-throttle,cfq-iosched}.c, 120 - * probably should be moved to lib/rbtree.c... 93 + /** 94 + * rbtree_postorder_for_each_entry_safe - iterate in post-order over rb_root of 95 + * given type allowing the backing memory of @pos to be invalidated 96 + * 97 + * @pos: the 'type *' to use as a loop cursor. 98 + * @n: another 'type *' to use as temporary storage 99 + * @root: 'rb_root *' of the rbtree. 100 + * @field: the name of the rb_node field within 'type'. 101 + * 102 + * rbtree_postorder_for_each_entry_safe() provides a similar guarantee as 103 + * list_for_each_entry_safe() and allows the iteration to continue independent 104 + * of changes to @pos by the body of the loop. 105 + * 106 + * Note, however, that it cannot handle other modifications that re-order the 107 + * rbtree it is iterating over. This includes calling rb_erase() on @pos, as 108 + * rb_erase() may rebalance the tree, causing us to miss some nodes. 121 109 */ 110 + #define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \ 111 + for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \ 112 + pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \ 113 + typeof(*pos), field); 1; }); \ 114 + pos = n) 115 + 122 116 static inline void rb_erase_init(struct rb_node *n, struct rb_root *root) 123 117 { 124 118 rb_erase(n, root);
+48 -12
tools/include/linux/rbtree_augmented.h
··· 44 44 void (*rotate)(struct rb_node *old, struct rb_node *new); 45 45 }; 46 46 47 - extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root, 47 + extern void __rb_insert_augmented(struct rb_node *node, 48 + struct rb_root *root, 49 + bool newleft, struct rb_node **leftmost, 48 50 void (*augment_rotate)(struct rb_node *old, struct rb_node *new)); 49 51 /* 50 52 * Fixup the rbtree and update the augmented information when rebalancing. ··· 62 60 rb_insert_augmented(struct rb_node *node, struct rb_root *root, 63 61 const struct rb_augment_callbacks *augment) 64 62 { 65 - __rb_insert_augmented(node, root, augment->rotate); 63 + __rb_insert_augmented(node, root, false, NULL, augment->rotate); 64 + } 65 + 66 + static inline void 67 + rb_insert_augmented_cached(struct rb_node *node, 68 + struct rb_root_cached *root, bool newleft, 69 + const struct rb_augment_callbacks *augment) 70 + { 71 + __rb_insert_augmented(node, &root->rb_root, 72 + newleft, &root->rb_leftmost, augment->rotate); 66 73 } 67 74 68 75 #define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \ ··· 104 93 old->rbaugmented = rbcompute(old); \ 105 94 } \ 106 95 rbstatic const struct rb_augment_callbacks rbname = { \ 107 - rbname ## _propagate, rbname ## _copy, rbname ## _rotate \ 96 + .propagate = rbname ## _propagate, \ 97 + .copy = rbname ## _copy, \ 98 + .rotate = rbname ## _rotate \ 108 99 }; 109 100 110 101 ··· 139 126 { 140 127 if (parent) { 141 128 if (parent->rb_left == old) 142 - parent->rb_left = new; 129 + WRITE_ONCE(parent->rb_left, new); 143 130 else 144 - parent->rb_right = new; 131 + WRITE_ONCE(parent->rb_right, new); 145 132 } else 146 - root->rb_node = new; 133 + WRITE_ONCE(root->rb_node, new); 147 134 } 148 135 149 136 extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root, ··· 151 138 152 139 static __always_inline struct rb_node * 153 140 __rb_erase_augmented(struct rb_node *node, struct rb_root *root, 141 + struct rb_node **leftmost, 
154 142 const struct rb_augment_callbacks *augment) 155 143 { 156 - struct rb_node *child = node->rb_right, *tmp = node->rb_left; 144 + struct rb_node *child = node->rb_right; 145 + struct rb_node *tmp = node->rb_left; 157 146 struct rb_node *parent, *rebalance; 158 147 unsigned long pc; 148 + 149 + if (leftmost && node == *leftmost) 150 + *leftmost = rb_next(node); 159 151 160 152 if (!tmp) { 161 153 /* ··· 188 170 tmp = parent; 189 171 } else { 190 172 struct rb_node *successor = child, *child2; 173 + 191 174 tmp = child->rb_left; 192 175 if (!tmp) { 193 176 /* ··· 202 183 */ 203 184 parent = successor; 204 185 child2 = successor->rb_right; 186 + 205 187 augment->copy(node, successor); 206 188 } else { 207 189 /* ··· 224 204 successor = tmp; 225 205 tmp = tmp->rb_left; 226 206 } while (tmp); 227 - parent->rb_left = child2 = successor->rb_right; 228 - successor->rb_right = child; 207 + child2 = successor->rb_right; 208 + WRITE_ONCE(parent->rb_left, child2); 209 + WRITE_ONCE(successor->rb_right, child); 229 210 rb_set_parent(child, successor); 211 + 230 212 augment->copy(node, successor); 231 213 augment->propagate(parent, successor); 232 214 } 233 215 234 - successor->rb_left = tmp = node->rb_left; 216 + tmp = node->rb_left; 217 + WRITE_ONCE(successor->rb_left, tmp); 235 218 rb_set_parent(tmp, successor); 236 219 237 220 pc = node->__rb_parent_color; 238 221 tmp = __rb_parent(pc); 239 222 __rb_change_child(node, successor, tmp, root); 223 + 240 224 if (child2) { 241 225 successor->__rb_parent_color = pc; 242 226 rb_set_parent_color(child2, parent, RB_BLACK); ··· 261 237 rb_erase_augmented(struct rb_node *node, struct rb_root *root, 262 238 const struct rb_augment_callbacks *augment) 263 239 { 264 - struct rb_node *rebalance = __rb_erase_augmented(node, root, augment); 240 + struct rb_node *rebalance = __rb_erase_augmented(node, root, 241 + NULL, augment); 265 242 if (rebalance) 266 243 __rb_erase_color(rebalance, root, augment->rotate); 267 244 } 268 245 269 - 
#endif /* _TOOLS_LINUX_RBTREE_AUGMENTED_H */ 246 + static __always_inline void 247 + rb_erase_augmented_cached(struct rb_node *node, struct rb_root_cached *root, 248 + const struct rb_augment_callbacks *augment) 249 + { 250 + struct rb_node *rebalance = __rb_erase_augmented(node, &root->rb_root, 251 + &root->rb_leftmost, 252 + augment); 253 + if (rebalance) 254 + __rb_erase_color(rebalance, &root->rb_root, augment->rotate); 255 + } 256 + 257 + #endif /* _TOOLS_LINUX_RBTREE_AUGMENTED_H */
+135 -43
tools/lib/rbtree.c
··· 22 22 */ 23 23 24 24 #include <linux/rbtree_augmented.h> 25 + #include <linux/export.h> 25 26 26 27 /* 27 28 * red-black trees properties: http://en.wikipedia.org/wiki/Rbtree ··· 42 41 * We shall indicate color with case, where black nodes are uppercase and red 43 42 * nodes will be lowercase. Unknown color nodes shall be drawn as red within 44 43 * parentheses and have some accompanying text comment. 44 + */ 45 + 46 + /* 47 + * Notes on lockless lookups: 48 + * 49 + * All stores to the tree structure (rb_left and rb_right) must be done using 50 + * WRITE_ONCE(). And we must not inadvertently cause (temporary) loops in the 51 + * tree structure as seen in program order. 52 + * 53 + * These two requirements will allow lockless iteration of the tree -- not 54 + * correct iteration mind you, tree rotations are not atomic so a lookup might 55 + * miss entire subtrees. 56 + * 57 + * But they do guarantee that any such traversal will only see valid elements 58 + * and that it will indeed complete -- does not get stuck in a loop. 59 + * 60 + * It also guarantees that if the lookup returns an element it is the 'correct' 61 + * one. But not returning an element does _NOT_ mean it's not present. 62 + * 63 + * NOTE: 64 + * 65 + * Stores to __rb_parent_color are not important for simple lookups so those 66 + * are left undone as of now. Nor did I check for loops involving parent 67 + * pointers. 45 68 */ 46 69 47 70 static inline void rb_set_black(struct rb_node *rb) ··· 95 70 96 71 static __always_inline void 97 72 __rb_insert(struct rb_node *node, struct rb_root *root, 73 + bool newleft, struct rb_node **leftmost, 98 74 void (*augment_rotate)(struct rb_node *old, struct rb_node *new)) 99 75 { 100 76 struct rb_node *parent = rb_red_parent(node), *gparent, *tmp; 101 77 78 + if (newleft) 79 + *leftmost = node; 80 + 102 81 while (true) { 103 82 /* 104 - * Loop invariant: node is red 105 - * 106 - * If there is a black parent, we are done. 
107 - * Otherwise, take some corrective action as we don't 108 - * want a red root or two consecutive red nodes. 83 + * Loop invariant: node is red. 109 84 */ 110 - if (!parent) { 85 + if (unlikely(!parent)) { 86 + /* 87 + * The inserted node is root. Either this is the 88 + * first node, or we recursed at Case 1 below and 89 + * are no longer violating 4). 90 + */ 111 91 rb_set_parent_color(node, NULL, RB_BLACK); 112 92 break; 113 - } else if (rb_is_black(parent)) 93 + } 94 + 95 + /* 96 + * If there is a black parent, we are done. 97 + * Otherwise, take some corrective action as, 98 + * per 4), we don't want a red root or two 99 + * consecutive red nodes. 100 + */ 101 + if(rb_is_black(parent)) 114 102 break; 115 103 116 104 gparent = rb_red_parent(parent); ··· 132 94 if (parent != tmp) { /* parent == gparent->rb_left */ 133 95 if (tmp && rb_is_red(tmp)) { 134 96 /* 135 - * Case 1 - color flips 97 + * Case 1 - node's uncle is red (color flips). 136 98 * 137 99 * G g 138 100 * / \ / \ ··· 155 117 tmp = parent->rb_right; 156 118 if (node == tmp) { 157 119 /* 158 - * Case 2 - left rotate at parent 120 + * Case 2 - node's uncle is black and node is 121 + * the parent's right child (left rotate at parent). 159 122 * 160 123 * G G 161 124 * / \ / \ ··· 167 128 * This still leaves us in violation of 4), the 168 129 * continuation into Case 3 will fix that. 169 130 */ 170 - parent->rb_right = tmp = node->rb_left; 171 - node->rb_left = parent; 131 + tmp = node->rb_left; 132 + WRITE_ONCE(parent->rb_right, tmp); 133 + WRITE_ONCE(node->rb_left, parent); 172 134 if (tmp) 173 135 rb_set_parent_color(tmp, parent, 174 136 RB_BLACK); ··· 180 140 } 181 141 182 142 /* 183 - * Case 3 - right rotate at gparent 143 + * Case 3 - node's uncle is black and node is 144 + * the parent's left child (right rotate at gparent). 
184 145 * 185 146 * G P 186 147 * / \ / \ ··· 189 148 * / \ 190 149 * n U 191 150 */ 192 - gparent->rb_left = tmp; /* == parent->rb_right */ 193 - parent->rb_right = gparent; 151 + WRITE_ONCE(gparent->rb_left, tmp); /* == parent->rb_right */ 152 + WRITE_ONCE(parent->rb_right, gparent); 194 153 if (tmp) 195 154 rb_set_parent_color(tmp, gparent, RB_BLACK); 196 155 __rb_rotate_set_parents(gparent, parent, root, RB_RED); ··· 211 170 tmp = parent->rb_left; 212 171 if (node == tmp) { 213 172 /* Case 2 - right rotate at parent */ 214 - parent->rb_left = tmp = node->rb_right; 215 - node->rb_right = parent; 173 + tmp = node->rb_right; 174 + WRITE_ONCE(parent->rb_left, tmp); 175 + WRITE_ONCE(node->rb_right, parent); 216 176 if (tmp) 217 177 rb_set_parent_color(tmp, parent, 218 178 RB_BLACK); ··· 224 182 } 225 183 226 184 /* Case 3 - left rotate at gparent */ 227 - gparent->rb_right = tmp; /* == parent->rb_left */ 228 - parent->rb_left = gparent; 185 + WRITE_ONCE(gparent->rb_right, tmp); /* == parent->rb_left */ 186 + WRITE_ONCE(parent->rb_left, gparent); 229 187 if (tmp) 230 188 rb_set_parent_color(tmp, gparent, RB_BLACK); 231 189 __rb_rotate_set_parents(gparent, parent, root, RB_RED); ··· 265 223 * / \ / \ 266 224 * Sl Sr N Sl 267 225 */ 268 - parent->rb_right = tmp1 = sibling->rb_left; 269 - sibling->rb_left = parent; 226 + tmp1 = sibling->rb_left; 227 + WRITE_ONCE(parent->rb_right, tmp1); 228 + WRITE_ONCE(sibling->rb_left, parent); 270 229 rb_set_parent_color(tmp1, parent, RB_BLACK); 271 230 __rb_rotate_set_parents(parent, sibling, root, 272 231 RB_RED); ··· 311 268 * 312 269 * (p) (p) 313 270 * / \ / \ 314 - * N S --> N Sl 271 + * N S --> N sl 315 272 * / \ \ 316 - * sl Sr s 273 + * sl Sr S 317 274 * \ 318 275 * Sr 276 + * 277 + * Note: p might be red, and then both 278 + * p and sl are red after rotation(which 279 + * breaks property 4). 
This is fixed in 280 + * Case 4 (in __rb_rotate_set_parents() 281 + * which set sl the color of p 282 + * and set p RB_BLACK) 283 + * 284 + * (p) (sl) 285 + * / \ / \ 286 + * N sl --> P S 287 + * \ / \ 288 + * S N Sr 289 + * \ 290 + * Sr 319 291 */ 320 - sibling->rb_left = tmp1 = tmp2->rb_right; 321 - tmp2->rb_right = sibling; 322 - parent->rb_right = tmp2; 292 + tmp1 = tmp2->rb_right; 293 + WRITE_ONCE(sibling->rb_left, tmp1); 294 + WRITE_ONCE(tmp2->rb_right, sibling); 295 + WRITE_ONCE(parent->rb_right, tmp2); 323 296 if (tmp1) 324 297 rb_set_parent_color(tmp1, sibling, 325 298 RB_BLACK); ··· 355 296 * / \ / \ 356 297 * (sl) sr N (sl) 357 298 */ 358 - parent->rb_right = tmp2 = sibling->rb_left; 359 - sibling->rb_left = parent; 299 + tmp2 = sibling->rb_left; 300 + WRITE_ONCE(parent->rb_right, tmp2); 301 + WRITE_ONCE(sibling->rb_left, parent); 360 302 rb_set_parent_color(tmp1, sibling, RB_BLACK); 361 303 if (tmp2) 362 304 rb_set_parent(tmp2, parent); ··· 369 309 sibling = parent->rb_left; 370 310 if (rb_is_red(sibling)) { 371 311 /* Case 1 - right rotate at parent */ 372 - parent->rb_left = tmp1 = sibling->rb_right; 373 - sibling->rb_right = parent; 312 + tmp1 = sibling->rb_right; 313 + WRITE_ONCE(parent->rb_left, tmp1); 314 + WRITE_ONCE(sibling->rb_right, parent); 374 315 rb_set_parent_color(tmp1, parent, RB_BLACK); 375 316 __rb_rotate_set_parents(parent, sibling, root, 376 317 RB_RED); ··· 395 334 } 396 335 break; 397 336 } 398 - /* Case 3 - right rotate at sibling */ 399 - sibling->rb_right = tmp1 = tmp2->rb_left; 400 - tmp2->rb_left = sibling; 401 - parent->rb_left = tmp2; 337 + /* Case 3 - left rotate at sibling */ 338 + tmp1 = tmp2->rb_left; 339 + WRITE_ONCE(sibling->rb_right, tmp1); 340 + WRITE_ONCE(tmp2->rb_left, sibling); 341 + WRITE_ONCE(parent->rb_left, tmp2); 402 342 if (tmp1) 403 343 rb_set_parent_color(tmp1, sibling, 404 344 RB_BLACK); ··· 407 345 tmp1 = sibling; 408 346 sibling = tmp2; 409 347 } 410 - /* Case 4 - left rotate at parent + color flips */ 
411 - parent->rb_left = tmp2 = sibling->rb_right; 412 - sibling->rb_right = parent; 348 + /* Case 4 - right rotate at parent + color flips */ 349 + tmp2 = sibling->rb_right; 350 + WRITE_ONCE(parent->rb_left, tmp2); 351 + WRITE_ONCE(sibling->rb_right, parent); 413 352 rb_set_parent_color(tmp1, sibling, RB_BLACK); 414 353 if (tmp2) 415 354 rb_set_parent(tmp2, parent); ··· 441 378 static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {} 442 379 443 380 static const struct rb_augment_callbacks dummy_callbacks = { 444 - dummy_propagate, dummy_copy, dummy_rotate 381 + .propagate = dummy_propagate, 382 + .copy = dummy_copy, 383 + .rotate = dummy_rotate 445 384 }; 446 385 447 386 void rb_insert_color(struct rb_node *node, struct rb_root *root) 448 387 { 449 - __rb_insert(node, root, dummy_rotate); 388 + __rb_insert(node, root, false, NULL, dummy_rotate); 450 389 } 451 390 452 391 void rb_erase(struct rb_node *node, struct rb_root *root) 453 392 { 454 393 struct rb_node *rebalance; 455 - rebalance = __rb_erase_augmented(node, root, &dummy_callbacks); 394 + rebalance = __rb_erase_augmented(node, root, 395 + NULL, &dummy_callbacks); 456 396 if (rebalance) 457 397 ____rb_erase_color(rebalance, root, dummy_rotate); 398 + } 399 + 400 + void rb_insert_color_cached(struct rb_node *node, 401 + struct rb_root_cached *root, bool leftmost) 402 + { 403 + __rb_insert(node, &root->rb_root, leftmost, 404 + &root->rb_leftmost, dummy_rotate); 405 + } 406 + 407 + void rb_erase_cached(struct rb_node *node, struct rb_root_cached *root) 408 + { 409 + struct rb_node *rebalance; 410 + rebalance = __rb_erase_augmented(node, &root->rb_root, 411 + &root->rb_leftmost, &dummy_callbacks); 412 + if (rebalance) 413 + ____rb_erase_color(rebalance, &root->rb_root, dummy_rotate); 458 414 } 459 415 460 416 /* ··· 484 402 */ 485 403 486 404 void __rb_insert_augmented(struct rb_node *node, struct rb_root *root, 405 + bool newleft, struct rb_node **leftmost, 487 406 void 
(*augment_rotate)(struct rb_node *old, struct rb_node *new)) 488 407 { 489 - __rb_insert(node, root, augment_rotate); 408 + __rb_insert(node, root, newleft, leftmost, augment_rotate); 490 409 } 491 410 492 411 /* ··· 581 498 { 582 499 struct rb_node *parent = rb_parent(victim); 583 500 501 + /* Copy the pointers/colour from the victim to the replacement */ 502 + *new = *victim; 503 + 584 504 /* Set the surrounding nodes to point to the replacement */ 585 - __rb_change_child(victim, new, parent, root); 586 505 if (victim->rb_left) 587 506 rb_set_parent(victim->rb_left, new); 588 507 if (victim->rb_right) 589 508 rb_set_parent(victim->rb_right, new); 509 + __rb_change_child(victim, new, parent, root); 510 + } 590 511 591 - /* Copy the pointers/colour from the victim to the replacement */ 592 - *new = *victim; 512 + void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new, 513 + struct rb_root_cached *root) 514 + { 515 + rb_replace_node(victim, new, &root->rb_root); 516 + 517 + if (root->rb_leftmost == victim) 518 + root->rb_leftmost = new; 593 519 } 594 520 595 521 static struct rb_node *rb_left_deepest_node(const struct rb_node *node)
+2 -2
tools/perf/Makefile.perf
··· 863 863 $(call QUIET_INSTALL, python-scripts) \ 864 864 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'; \ 865 865 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'; \ 866 - $(INSTALL) scripts/python/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'; \ 867 - $(INSTALL) scripts/python/*.py -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python'; \ 866 + $(INSTALL) scripts/python/Perf-Trace-Util/lib/Perf/Trace/* -m 644 -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'; \ 867 + $(INSTALL) scripts/python/*.py -m 644 -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python'; \ 868 868 $(INSTALL) scripts/python/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin' 869 869 endif 870 870 $(call QUIET_INSTALL, perf_completion-script) \
+2 -2
tools/perf/builtin-annotate.c
··· 227 227 * the DSO? 228 228 */ 229 229 if (al->sym != NULL) { 230 - rb_erase(&al->sym->rb_node, 230 + rb_erase_cached(&al->sym->rb_node, 231 231 &al->map->dso->symbols); 232 232 symbol__delete(al->sym); 233 233 dso__reset_find_symbol_cache(al->map->dso); ··· 305 305 struct perf_evsel *evsel, 306 306 struct perf_annotate *ann) 307 307 { 308 - struct rb_node *nd = rb_first(&hists->entries), *next; 308 + struct rb_node *nd = rb_first_cached(&hists->entries), *next; 309 309 int key = K_RIGHT; 310 310 311 311 while (nd) {
+3 -3
tools/perf/builtin-c2c.c
··· 2088 2088 2089 2089 static int hists__iterate_cb(struct hists *hists, hists__resort_cb_t cb) 2090 2090 { 2091 - struct rb_node *next = rb_first(&hists->entries); 2091 + struct rb_node *next = rb_first_cached(&hists->entries); 2092 2092 int ret = 0; 2093 2093 2094 2094 while (next) { ··· 2215 2215 if (WARN_ONCE(ret, "failed to setup sort entries\n")) 2216 2216 return; 2217 2217 2218 - nd = rb_first(&c2c.hists.hists.entries); 2218 + nd = rb_first_cached(&c2c.hists.hists.entries); 2219 2219 2220 2220 for (; nd; nd = rb_next(nd)) { 2221 2221 struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node); ··· 2283 2283 static void c2c_browser__update_nr_entries(struct hist_browser *hb) 2284 2284 { 2285 2285 u64 nr_entries = 0; 2286 - struct rb_node *nd = rb_first(&hb->hists->entries); 2286 + struct rb_node *nd = rb_first_cached(&hb->hists->entries); 2287 2287 2288 2288 while (nd) { 2289 2289 struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
+5 -5
tools/perf/builtin-diff.c
··· 429 429 430 430 static void hists__baseline_only(struct hists *hists) 431 431 { 432 - struct rb_root *root; 432 + struct rb_root_cached *root; 433 433 struct rb_node *next; 434 434 435 435 if (hists__has(hists, need_collapse)) ··· 437 437 else 438 438 root = hists->entries_in; 439 439 440 - next = rb_first(root); 440 + next = rb_first_cached(root); 441 441 while (next != NULL) { 442 442 struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node_in); 443 443 444 444 next = rb_next(&he->rb_node_in); 445 445 if (!hist_entry__next_pair(he)) { 446 - rb_erase(&he->rb_node_in, root); 446 + rb_erase_cached(&he->rb_node_in, root); 447 447 hist_entry__delete(he); 448 448 } 449 449 } ··· 451 451 452 452 static void hists__precompute(struct hists *hists) 453 453 { 454 - struct rb_root *root; 454 + struct rb_root_cached *root; 455 455 struct rb_node *next; 456 456 457 457 if (hists__has(hists, need_collapse)) ··· 459 459 else 460 460 root = hists->entries_in; 461 461 462 - next = rb_first(root); 462 + next = rb_first_cached(root); 463 463 while (next != NULL) { 464 464 struct hist_entry *he, *pair; 465 465 struct data__file *d;
+1
tools/perf/builtin-probe.c
··· 32 32 33 33 #include "perf.h" 34 34 #include "builtin.h" 35 + #include "namespaces.h" 35 36 #include "util/util.h" 36 37 #include "util/strlist.h" 37 38 #include "util/strfilter.h"
+2 -1
tools/perf/builtin-report.c
··· 753 753 for (i = 0; i < THREADS__TABLE_SIZE; i++) { 754 754 struct threads *threads = &machine->threads[i]; 755 755 756 - for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) { 756 + for (nd = rb_first_cached(&threads->entries); nd; 757 + nd = rb_next(nd)) { 757 758 task = tasks + itask++; 758 759 759 760 task->thread = rb_entry(nd, struct thread, rb_node);
+25 -20
tools/perf/builtin-sched.c
··· 213 213 u64 all_runtime; 214 214 u64 all_count; 215 215 u64 cpu_last_switched[MAX_CPUS]; 216 - struct rb_root atom_root, sorted_atom_root, merged_atom_root; 216 + struct rb_root_cached atom_root, sorted_atom_root, merged_atom_root; 217 217 struct list_head sort_list, cmp_pid; 218 218 bool force; 219 219 bool skip_merge; ··· 271 271 struct idle_thread_runtime { 272 272 struct thread_runtime tr; 273 273 struct thread *last_thread; 274 - struct rb_root sorted_root; 274 + struct rb_root_cached sorted_root; 275 275 struct callchain_root callchain; 276 276 struct callchain_cursor cursor; 277 277 }; ··· 950 950 } 951 951 952 952 static struct work_atoms * 953 - thread_atoms_search(struct rb_root *root, struct thread *thread, 953 + thread_atoms_search(struct rb_root_cached *root, struct thread *thread, 954 954 struct list_head *sort_list) 955 955 { 956 - struct rb_node *node = root->rb_node; 956 + struct rb_node *node = root->rb_root.rb_node; 957 957 struct work_atoms key = { .thread = thread }; 958 958 959 959 while (node) { ··· 976 976 } 977 977 978 978 static void 979 - __thread_latency_insert(struct rb_root *root, struct work_atoms *data, 979 + __thread_latency_insert(struct rb_root_cached *root, struct work_atoms *data, 980 980 struct list_head *sort_list) 981 981 { 982 - struct rb_node **new = &(root->rb_node), *parent = NULL; 982 + struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL; 983 + bool leftmost = true; 983 984 984 985 while (*new) { 985 986 struct work_atoms *this; ··· 993 992 994 993 if (cmp > 0) 995 994 new = &((*new)->rb_left); 996 - else 995 + else { 997 996 new = &((*new)->rb_right); 997 + leftmost = false; 998 + } 998 999 } 999 1000 1000 1001 rb_link_node(&data->node, parent, new); 1001 - rb_insert_color(&data->node, root); 1002 + rb_insert_color_cached(&data->node, root, leftmost); 1002 1003 } 1003 1004 1004 1005 static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread) ··· 1450 1447 static void 
perf_sched__sort_lat(struct perf_sched *sched) 1451 1448 { 1452 1449 struct rb_node *node; 1453 - struct rb_root *root = &sched->atom_root; 1450 + struct rb_root_cached *root = &sched->atom_root; 1454 1451 again: 1455 1452 for (;;) { 1456 1453 struct work_atoms *data; 1457 - node = rb_first(root); 1454 + node = rb_first_cached(root); 1458 1455 if (!node) 1459 1456 break; 1460 1457 1461 - rb_erase(node, root); 1458 + rb_erase_cached(node, root); 1462 1459 data = rb_entry(node, struct work_atoms, node); 1463 1460 __thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list); 1464 1461 } ··· 2765 2762 return ret; 2766 2763 } 2767 2764 2768 - static size_t timehist_print_idlehist_callchain(struct rb_root *root) 2765 + static size_t timehist_print_idlehist_callchain(struct rb_root_cached *root) 2769 2766 { 2770 2767 size_t ret = 0; 2771 2768 FILE *fp = stdout; 2772 2769 struct callchain_node *chain; 2773 - struct rb_node *rb_node = rb_first(root); 2770 + struct rb_node *rb_node = rb_first_cached(root); 2774 2771 2775 2772 printf(" %16s %8s %s\n", "Idle time (msec)", "Count", "Callchains"); 2776 2773 printf(" %.16s %.8s %.50s\n", graph_dotted_line, graph_dotted_line, ··· 2871 2868 if (itr == NULL) 2872 2869 continue; 2873 2870 2874 - callchain_param.sort(&itr->sorted_root, &itr->callchain, 2871 + callchain_param.sort(&itr->sorted_root.rb_root, &itr->callchain, 2875 2872 0, &callchain_param); 2876 2873 2877 2874 printf(" CPU %2d:", i); ··· 3077 3074 } 3078 3075 } 3079 3076 3080 - static void __merge_work_atoms(struct rb_root *root, struct work_atoms *data) 3077 + static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *data) 3081 3078 { 3082 - struct rb_node **new = &(root->rb_node), *parent = NULL; 3079 + struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL; 3083 3080 struct work_atoms *this; 3084 3081 const char *comm = thread__comm_str(data->thread), *this_comm; 3082 + bool leftmost = true; 3085 3083 3086 3084 while (*new) 
{ 3087 3085 int cmp; ··· 3096 3092 new = &((*new)->rb_left); 3097 3093 } else if (cmp < 0) { 3098 3094 new = &((*new)->rb_right); 3095 + leftmost = false; 3099 3096 } else { 3100 3097 this->num_merged++; 3101 3098 this->total_runtime += data->total_runtime; ··· 3114 3109 3115 3110 data->num_merged++; 3116 3111 rb_link_node(&data->node, parent, new); 3117 - rb_insert_color(&data->node, root); 3112 + rb_insert_color_cached(&data->node, root, leftmost); 3118 3113 } 3119 3114 3120 3115 static void perf_sched__merge_lat(struct perf_sched *sched) ··· 3125 3120 if (sched->skip_merge) 3126 3121 return; 3127 3122 3128 - while ((node = rb_first(&sched->atom_root))) { 3129 - rb_erase(node, &sched->atom_root); 3123 + while ((node = rb_first_cached(&sched->atom_root))) { 3124 + rb_erase_cached(node, &sched->atom_root); 3130 3125 data = rb_entry(node, struct work_atoms, node); 3131 3126 __merge_work_atoms(&sched->merged_atom_root, data); 3132 3127 } ··· 3148 3143 printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | Maximum delay at |\n"); 3149 3144 printf(" -----------------------------------------------------------------------------------------------------------------\n"); 3150 3145 3151 - next = rb_first(&sched->sorted_atom_root); 3146 + next = rb_first_cached(&sched->sorted_atom_root); 3152 3147 3153 3148 while (next) { 3154 3149 struct work_atoms *work_list;
+1 -1
tools/perf/builtin-top.c
··· 367 367 if (p) 368 368 *p = 0; 369 369 370 - next = rb_first(&hists->entries); 370 + next = rb_first_cached(&hists->entries); 371 371 while (next) { 372 372 n = rb_entry(next, struct hist_entry, rb_node); 373 373 if (n->ms.sym && !strcmp(buf, n->ms.sym->name)) {
+2 -1
tools/perf/builtin-trace.c
··· 3854 3854 goto init_augmented_syscall_tp; 3855 3855 } 3856 3856 3857 - if (strcmp(perf_evsel__name(evsel), "raw_syscalls:sys_enter") == 0) { 3857 + if (trace.syscalls.events.augmented->priv == NULL && 3858 + strstr(perf_evsel__name(evsel), "syscalls:sys_enter")) { 3858 3859 struct perf_evsel *augmented = trace.syscalls.events.augmented; 3859 3860 if (perf_evsel__init_augmented_syscall_tp(augmented, evsel) || 3860 3861 perf_evsel__init_augmented_syscall_tp_args(augmented))
+2 -12
tools/perf/examples/bpf/augmented_raw_syscalls.c
··· 18 18 #include <pid_filter.h> 19 19 20 20 /* bpf-output associated map */ 21 - struct bpf_map SEC("maps") __augmented_syscalls__ = { 22 - .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY, 23 - .key_size = sizeof(int), 24 - .value_size = sizeof(u32), 25 - .max_entries = __NR_CPUS__, 26 - }; 21 + bpf_map(__augmented_syscalls__, PERF_EVENT_ARRAY, int, u32, __NR_CPUS__); 27 22 28 23 struct syscall { 29 24 bool enabled; 30 25 }; 31 26 32 - struct bpf_map SEC("maps") syscalls = { 33 - .type = BPF_MAP_TYPE_ARRAY, 34 - .key_size = sizeof(int), 35 - .value_size = sizeof(struct syscall), 36 - .max_entries = 512, 37 - }; 27 + bpf_map(syscalls, ARRAY, int, struct syscall, 512); 38 28 39 29 struct syscall_enter_args { 40 30 unsigned long long common_tp_fields;
+2 -6
tools/perf/examples/bpf/augmented_syscalls.c
··· 19 19 #include <stdio.h> 20 20 #include <linux/socket.h> 21 21 22 - struct bpf_map SEC("maps") __augmented_syscalls__ = { 23 - .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY, 24 - .key_size = sizeof(int), 25 - .value_size = sizeof(u32), 26 - .max_entries = __NR_CPUS__, 27 - }; 22 + /* bpf-output associated map */ 23 + bpf_map(__augmented_syscalls__, PERF_EVENT_ARRAY, int, u32, __NR_CPUS__); 28 24 29 25 struct syscall_exit_args { 30 26 unsigned long long common_tp_fields;
+2 -6
tools/perf/examples/bpf/etcsnoop.c
··· 21 21 22 22 #include <stdio.h> 23 23 24 - struct bpf_map SEC("maps") __augmented_syscalls__ = { 25 - .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY, 26 - .key_size = sizeof(int), 27 - .value_size = sizeof(u32), 28 - .max_entries = __NR_CPUS__, 29 - }; 24 + /* bpf-output associated map */ 25 + bpf_map(__augmented_syscalls__, PERF_EVENT_ARRAY, int, u32, __NR_CPUS__); 30 26 31 27 struct augmented_filename { 32 28 int size;
+9 -7
tools/perf/include/bpf/bpf.h
··· 18 18 unsigned int numa_node; 19 19 }; 20 20 21 + #define bpf_map(name, _type, type_key, type_val, _max_entries) \ 22 + struct bpf_map SEC("maps") name = { \ 23 + .type = BPF_MAP_TYPE_##_type, \ 24 + .key_size = sizeof(type_key), \ 25 + .value_size = sizeof(type_val), \ 26 + .max_entries = _max_entries, \ 27 + } 28 + 21 29 /* 22 30 * FIXME: this should receive .max_entries as a parameter, as careful 23 31 * tuning of these limits is needed to avoid hitting limits that ··· 34 26 * For the current need, 'perf trace --filter-pids', 64 should 35 27 * be good enough, but this surely needs to be revisited. 36 28 */ 37 - #define pid_map(name, value_type) \ 38 - struct bpf_map SEC("maps") name = { \ 39 - .type = BPF_MAP_TYPE_HASH, \ 40 - .key_size = sizeof(pid_t), \ 41 - .value_size = sizeof(value_type), \ 42 - .max_entries = 64, \ 43 - } 29 + #define pid_map(name, value_type) bpf_map(name, HASH, pid_t, value_type, 64) 44 30 45 31 static int (*bpf_map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags) = (void *)BPF_FUNC_map_update_elem; 46 32 static void *(*bpf_map_lookup_elem)(struct bpf_map *map, void *key) = (void *)BPF_FUNC_map_lookup_elem;
-1
tools/perf/scripts/python/exported-sql-viewer.py
··· 1 - #!/usr/bin/python2 2 1 # SPDX-License-Identifier: GPL-2.0 3 2 # exported-sql-viewer.py: view data from sql database 4 3 # Copyright (c) 2014-2018, Intel Corporation.
-2
tools/perf/scripts/python/sched-migration.py
··· 1 - #!/usr/bin/python 2 - # 3 1 # Cpu task migration overview toy 4 2 # 5 3 # Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
-1
tools/perf/scripts/python/stat-cpi.py
··· 1 - #!/usr/bin/env python 2 1 # SPDX-License-Identifier: GPL-2.0 3 2 4 3 data = {}
+19 -14
tools/perf/tests/attr.py
··· 1 - #! /usr/bin/python 2 1 # SPDX-License-Identifier: GPL-2.0 2 + 3 + from __future__ import print_function 3 4 4 5 import os 5 6 import sys ··· 9 8 import tempfile 10 9 import logging 11 10 import shutil 12 - import ConfigParser 11 + 12 + try: 13 + import configparser 14 + except ImportError: 15 + import ConfigParser as configparser 13 16 14 17 def data_equal(a, b): 15 18 # Allow multiple values in assignment separated by '|' ··· 105 100 def equal(self, other): 106 101 for t in Event.terms: 107 102 log.debug(" [%s] %s %s" % (t, self[t], other[t])); 108 - if not self.has_key(t) or not other.has_key(t): 103 + if t not in self or t not in other: 109 104 return False 110 105 if not data_equal(self[t], other[t]): 111 106 return False 112 107 return True 113 108 114 109 def optional(self): 115 - if self.has_key('optional') and self['optional'] == '1': 110 + if 'optional' in self and self['optional'] == '1': 116 111 return True 117 112 return False 118 113 119 114 def diff(self, other): 120 115 for t in Event.terms: 121 - if not self.has_key(t) or not other.has_key(t): 116 + if t not in self or t not in other: 122 117 continue 123 118 if not data_equal(self[t], other[t]): 124 119 log.warning("expected %s=%s, got %s" % (t, self[t], other[t])) ··· 139 134 # - expected values assignments 140 135 class Test(object): 141 136 def __init__(self, path, options): 142 - parser = ConfigParser.SafeConfigParser() 137 + parser = configparser.SafeConfigParser() 143 138 parser.read(path) 144 139 145 140 log.warning("running '%s'" % path) ··· 198 193 return True 199 194 200 195 def load_events(self, path, events): 201 - parser_event = ConfigParser.SafeConfigParser() 196 + parser_event = configparser.SafeConfigParser() 202 197 parser_event.read(path) 203 198 204 199 # The event record section header contains 'event' word, ··· 212 207 # Read parent event if there's any 213 208 if (':' in section): 214 209 base = section[section.index(':') + 1:] 215 - parser_base = 
ConfigParser.SafeConfigParser() 210 + parser_base = configparser.SafeConfigParser() 216 211 parser_base.read(self.test_dir + '/' + base) 217 212 base_items = parser_base.items('event') 218 213 ··· 327 322 for f in glob.glob(options.test_dir + '/' + options.test): 328 323 try: 329 324 Test(f, options).run() 330 - except Unsup, obj: 325 + except Unsup as obj: 331 326 log.warning("unsupp %s" % obj.getMsg()) 332 - except Notest, obj: 327 + except Notest as obj: 333 328 log.warning("skipped %s" % obj.getMsg()) 334 329 335 330 def setup_log(verbose): ··· 368 363 parser.add_option("-p", "--perf", 369 364 action="store", type="string", dest="perf") 370 365 parser.add_option("-v", "--verbose", 371 - action="count", dest="verbose") 366 + default=0, action="count", dest="verbose") 372 367 373 368 options, args = parser.parse_args() 374 369 if args: ··· 378 373 setup_log(options.verbose) 379 374 380 375 if not options.test_dir: 381 - print 'FAILED no -d option specified' 376 + print('FAILED no -d option specified') 382 377 sys.exit(-1) 383 378 384 379 if not options.test: ··· 387 382 try: 388 383 run_tests(options) 389 384 390 - except Fail, obj: 391 - print "FAILED %s" % obj.getMsg(); 385 + except Fail as obj: 386 + print("FAILED %s" % obj.getMsg()) 392 387 sys.exit(-1) 393 388 394 389 sys.exit(0)
+4 -4
tools/perf/tests/hists_common.c
··· 161 161 void print_hists_in(struct hists *hists) 162 162 { 163 163 int i = 0; 164 - struct rb_root *root; 164 + struct rb_root_cached *root; 165 165 struct rb_node *node; 166 166 167 167 if (hists__has(hists, need_collapse)) ··· 170 170 root = hists->entries_in; 171 171 172 172 pr_info("----- %s --------\n", __func__); 173 - node = rb_first(root); 173 + node = rb_first_cached(root); 174 174 while (node) { 175 175 struct hist_entry *he; 176 176 ··· 191 191 void print_hists_out(struct hists *hists) 192 192 { 193 193 int i = 0; 194 - struct rb_root *root; 194 + struct rb_root_cached *root; 195 195 struct rb_node *node; 196 196 197 197 root = &hists->entries; 198 198 199 199 pr_info("----- %s --------\n", __func__); 200 - node = rb_first(root); 200 + node = rb_first_cached(root); 201 201 while (node) { 202 202 struct hist_entry *he; 203 203
+7 -7
tools/perf/tests/hists_cumulate.c
··· 125 125 static void del_hist_entries(struct hists *hists) 126 126 { 127 127 struct hist_entry *he; 128 - struct rb_root *root_in; 129 - struct rb_root *root_out; 128 + struct rb_root_cached *root_in; 129 + struct rb_root_cached *root_out; 130 130 struct rb_node *node; 131 131 132 132 if (hists__has(hists, need_collapse)) ··· 136 136 137 137 root_out = &hists->entries; 138 138 139 - while (!RB_EMPTY_ROOT(root_out)) { 140 - node = rb_first(root_out); 139 + while (!RB_EMPTY_ROOT(&root_out->rb_root)) { 140 + node = rb_first_cached(root_out); 141 141 142 142 he = rb_entry(node, struct hist_entry, rb_node); 143 - rb_erase(node, root_out); 144 - rb_erase(&he->rb_node_in, root_in); 143 + rb_erase_cached(node, root_out); 144 + rb_erase_cached(&he->rb_node_in, root_in); 145 145 hist_entry__delete(he); 146 146 } 147 147 } ··· 198 198 print_hists_out(hists); 199 199 } 200 200 201 - root = &hists->entries; 201 + root = &hists->entries.rb_root; 202 202 for (node = rb_first(root), i = 0; 203 203 node && (he = rb_entry(node, struct hist_entry, rb_node)); 204 204 node = rb_next(node), i++) {
+4 -4
tools/perf/tests/hists_link.c
··· 142 142 static int __validate_match(struct hists *hists) 143 143 { 144 144 size_t count = 0; 145 - struct rb_root *root; 145 + struct rb_root_cached *root; 146 146 struct rb_node *node; 147 147 148 148 /* ··· 153 153 else 154 154 root = hists->entries_in; 155 155 156 - node = rb_first(root); 156 + node = rb_first_cached(root); 157 157 while (node) { 158 158 struct hist_entry *he; 159 159 ··· 192 192 size_t count = 0; 193 193 size_t count_pair = 0; 194 194 size_t count_dummy = 0; 195 - struct rb_root *root; 195 + struct rb_root_cached *root; 196 196 struct rb_node *node; 197 197 198 198 /* ··· 205 205 else 206 206 root = hists->entries_in; 207 207 208 - node = rb_first(root); 208 + node = rb_first_cached(root); 209 209 while (node) { 210 210 struct hist_entry *he; 211 211
+16 -16
tools/perf/tests/hists_output.c
··· 91 91 static void del_hist_entries(struct hists *hists) 92 92 { 93 93 struct hist_entry *he; 94 - struct rb_root *root_in; 95 - struct rb_root *root_out; 94 + struct rb_root_cached *root_in; 95 + struct rb_root_cached *root_out; 96 96 struct rb_node *node; 97 97 98 98 if (hists__has(hists, need_collapse)) ··· 102 102 103 103 root_out = &hists->entries; 104 104 105 - while (!RB_EMPTY_ROOT(root_out)) { 106 - node = rb_first(root_out); 105 + while (!RB_EMPTY_ROOT(&root_out->rb_root)) { 106 + node = rb_first_cached(root_out); 107 107 108 108 he = rb_entry(node, struct hist_entry, rb_node); 109 - rb_erase(node, root_out); 110 - rb_erase(&he->rb_node_in, root_in); 109 + rb_erase_cached(node, root_out); 110 + rb_erase_cached(&he->rb_node_in, root_in); 111 111 hist_entry__delete(he); 112 112 } 113 113 } ··· 126 126 int err; 127 127 struct hists *hists = evsel__hists(evsel); 128 128 struct hist_entry *he; 129 - struct rb_root *root; 129 + struct rb_root_cached *root; 130 130 struct rb_node *node; 131 131 132 132 field_order = NULL; ··· 162 162 } 163 163 164 164 root = &hists->entries; 165 - node = rb_first(root); 165 + node = rb_first_cached(root); 166 166 he = rb_entry(node, struct hist_entry, rb_node); 167 167 TEST_ASSERT_VAL("Invalid hist entry", 168 168 !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") && ··· 228 228 int err; 229 229 struct hists *hists = evsel__hists(evsel); 230 230 struct hist_entry *he; 231 - struct rb_root *root; 231 + struct rb_root_cached *root; 232 232 struct rb_node *node; 233 233 234 234 field_order = "overhead,cpu"; ··· 262 262 } 263 263 264 264 root = &hists->entries; 265 - node = rb_first(root); 265 + node = rb_first_cached(root); 266 266 he = rb_entry(node, struct hist_entry, rb_node); 267 267 TEST_ASSERT_VAL("Invalid hist entry", 268 268 CPU(he) == 1 && PID(he) == 100 && he->stat.period == 300); ··· 284 284 int err; 285 285 struct hists *hists = evsel__hists(evsel); 286 286 struct hist_entry *he; 287 - struct rb_root *root; 287 + 
struct rb_root_cached *root; 288 288 struct rb_node *node; 289 289 290 290 field_order = "comm,overhead,dso"; ··· 316 316 } 317 317 318 318 root = &hists->entries; 319 - node = rb_first(root); 319 + node = rb_first_cached(root); 320 320 he = rb_entry(node, struct hist_entry, rb_node); 321 321 TEST_ASSERT_VAL("Invalid hist entry", 322 322 !strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") && ··· 358 358 int err; 359 359 struct hists *hists = evsel__hists(evsel); 360 360 struct hist_entry *he; 361 - struct rb_root *root; 361 + struct rb_root_cached *root; 362 362 struct rb_node *node; 363 363 364 364 field_order = "dso,sym,comm,overhead,dso"; ··· 394 394 } 395 395 396 396 root = &hists->entries; 397 - node = rb_first(root); 397 + node = rb_first_cached(root); 398 398 he = rb_entry(node, struct hist_entry, rb_node); 399 399 TEST_ASSERT_VAL("Invalid hist entry", 400 400 !strcmp(DSO(he), "perf") && !strcmp(SYM(he), "cmd_record") && ··· 460 460 int err; 461 461 struct hists *hists = evsel__hists(evsel); 462 462 struct hist_entry *he; 463 - struct rb_root *root; 463 + struct rb_root_cached *root; 464 464 struct rb_node *node; 465 465 466 466 field_order = "cpu,pid,comm,dso,sym"; ··· 497 497 } 498 498 499 499 root = &hists->entries; 500 - node = rb_first(root); 500 + node = rb_first_cached(root); 501 501 he = rb_entry(node, struct hist_entry, rb_node); 502 502 503 503 TEST_ASSERT_VAL("Invalid hist entry",
+8 -8
tools/perf/ui/browsers/hists.c
··· 49 49 struct hists *hists = browser->hists; 50 50 int unfolded_rows = 0; 51 51 52 - for (nd = rb_first(&hists->entries); 52 + for (nd = rb_first_cached(&hists->entries); 53 53 (nd = hists__filter_entries(nd, browser->min_pcnt)) != NULL; 54 54 nd = rb_hierarchy_next(nd)) { 55 55 struct hist_entry *he = ··· 267 267 if (he->has_no_entry) 268 268 return 1; 269 269 270 - node = rb_first(&he->hroot_out); 270 + node = rb_first_cached(&he->hroot_out); 271 271 while (node) { 272 272 float percent; 273 273 ··· 372 372 he->has_children = !RB_EMPTY_ROOT(&he->sorted_chain); 373 373 callchain__init_have_children(&he->sorted_chain); 374 374 } else { 375 - he->has_children = !RB_EMPTY_ROOT(&he->hroot_out); 375 + he->has_children = !RB_EMPTY_ROOT(&he->hroot_out.rb_root); 376 376 } 377 377 378 378 he->init_have_children = true; ··· 508 508 struct hist_entry *child; 509 509 int n = 0; 510 510 511 - for (nd = rb_first(&he->hroot_out); nd; nd = rb_next(nd)) { 511 + for (nd = rb_first_cached(&he->hroot_out); nd; nd = rb_next(nd)) { 512 512 child = rb_entry(nd, struct hist_entry, rb_node); 513 513 percent = hist_entry__get_percent_limit(child); 514 514 if (!child->filtered && percent >= hb->min_pcnt) ··· 566 566 struct rb_node *nd; 567 567 struct hist_entry *he; 568 568 569 - nd = rb_first(&browser->hists->entries); 569 + nd = rb_first_cached(&browser->hists->entries); 570 570 while (nd) { 571 571 he = rb_entry(nd, struct hist_entry, rb_node); 572 572 ··· 1738 1738 struct hist_browser *hb; 1739 1739 1740 1740 hb = container_of(browser, struct hist_browser, b); 1741 - browser->top = rb_first(&hb->hists->entries); 1741 + browser->top = rb_first_cached(&hb->hists->entries); 1742 1742 } 1743 1743 } 1744 1744 ··· 2649 2649 static void hist_browser__update_nr_entries(struct hist_browser *hb) 2650 2650 { 2651 2651 u64 nr_entries = 0; 2652 - struct rb_node *nd = rb_first(&hb->hists->entries); 2652 + struct rb_node *nd = rb_first_cached(&hb->hists->entries); 2653 2653 2654 2654 if 
(hb->min_pcnt == 0 && !symbol_conf.report_hierarchy) { 2655 2655 hb->nr_non_filtered_entries = hb->hists->nr_non_filtered_entries; ··· 2669 2669 double percent) 2670 2670 { 2671 2671 struct hist_entry *he; 2672 - struct rb_node *nd = rb_first(&hb->hists->entries); 2672 + struct rb_node *nd = rb_first_cached(&hb->hists->entries); 2673 2673 u64 total = hists__total_period(hb->hists); 2674 2674 u64 min_callchain_hits = total * (percent / 100); 2675 2675
+1
tools/perf/ui/browsers/map.c
··· 6 6 #include <linux/bitops.h> 7 7 #include "../../util/util.h" 8 8 #include "../../util/debug.h" 9 + #include "../../util/map.h" 9 10 #include "../../util/symbol.h" 10 11 #include "../browser.h" 11 12 #include "../helpline.h"
+1
tools/perf/ui/gtk/annotate.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 #include "gtk.h" 3 + #include "util/sort.h" 3 4 #include "util/debug.h" 4 5 #include "util/annotate.h" 5 6 #include "util/evsel.h"
+3 -3
tools/perf/ui/gtk/hists.c
··· 353 353 354 354 g_object_unref(GTK_TREE_MODEL(store)); 355 355 356 - for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { 356 + for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) { 357 357 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); 358 358 GtkTreeIter iter; 359 359 u64 total = hists__total_period(h->hists); ··· 401 401 } 402 402 403 403 static void perf_gtk__add_hierarchy_entries(struct hists *hists, 404 - struct rb_root *root, 404 + struct rb_root_cached *root, 405 405 GtkTreeStore *store, 406 406 GtkTreeIter *parent, 407 407 struct perf_hpp *hpp, ··· 415 415 u64 total = hists__total_period(hists); 416 416 int size; 417 417 418 - for (node = rb_first(root); node; node = rb_next(node)) { 418 + for (node = rb_first_cached(root); node; node = rb_next(node)) { 419 419 GtkTreeIter iter; 420 420 float percent; 421 421 char *bf;
+2 -1
tools/perf/ui/stdio/hist.c
··· 788 788 789 789 indent = hists__overhead_width(hists) + 4; 790 790 791 - for (nd = rb_first(&hists->entries); nd; nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) { 791 + for (nd = rb_first_cached(&hists->entries); nd; 792 + nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) { 792 793 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); 793 794 float percent; 794 795
+1
tools/perf/util/annotate.c
··· 9 9 10 10 #include <errno.h> 11 11 #include <inttypes.h> 12 + #include <libgen.h> 12 13 #include "util.h" 13 14 #include "ui/ui.h" 14 15 #include "sort.h"
+11 -3
tools/perf/util/annotate.h
··· 4 4 5 5 #include <stdbool.h> 6 6 #include <stdint.h> 7 + #include <stdio.h> 7 8 #include <linux/types.h> 8 - #include "symbol.h" 9 - #include "hist.h" 10 - #include "sort.h" 11 9 #include <linux/list.h> 12 10 #include <linux/rbtree.h> 13 11 #include <pthread.h> 14 12 #include <asm/bug.h> 13 + #include "symbol_conf.h" 15 14 15 + struct hist_browser_timer; 16 + struct hist_entry; 16 17 struct ins_ops; 18 + struct map; 19 + struct map_symbol; 20 + struct addr_map_symbol; 21 + struct option; 22 + struct perf_sample; 23 + struct perf_evsel; 24 + struct symbol; 17 25 18 26 struct ins { 19 27 const char *name;
+2
tools/perf/util/block-range.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 #include "block-range.h" 3 3 #include "annotate.h" 4 + #include <assert.h> 5 + #include <stdlib.h> 4 6 5 7 struct { 6 8 struct rb_root root;
+5 -1
tools/perf/util/block-range.h
··· 2 2 #ifndef __PERF_BLOCK_RANGE_H 3 3 #define __PERF_BLOCK_RANGE_H 4 4 5 - #include "symbol.h" 5 + #include <stdbool.h> 6 + #include <linux/rbtree.h> 7 + #include <linux/types.h> 8 + 9 + struct symbol; 6 10 7 11 /* 8 12 * struct block_range - non-overlapping parts of basic blocks
+8 -2
tools/perf/util/bpf-event.c
··· 7 7 #include "bpf-event.h" 8 8 #include "debug.h" 9 9 #include "symbol.h" 10 + #include "machine.h" 10 11 11 12 #define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr)) 12 13 ··· 150 149 *ksymbol_event = (struct ksymbol_event){ 151 150 .header = { 152 151 .type = PERF_RECORD_KSYMBOL, 153 - .size = sizeof(struct ksymbol_event), 152 + .size = offsetof(struct ksymbol_event, name), 154 153 }, 155 154 .addr = prog_addrs[i], 156 155 .len = prog_lens[i], ··· 179 178 180 179 ksymbol_event->header.size += PERF_ALIGN(name_len + 1, 181 180 sizeof(u64)); 181 + 182 + memset((void *)event + event->header.size, 0, machine->id_hdr_size); 183 + event->header.size += machine->id_hdr_size; 182 184 err = perf_tool__process_synth_event(tool, event, 183 185 machine, process); 184 186 } ··· 198 194 .id = info.id, 199 195 }; 200 196 memcpy(bpf_event->tag, prog_tags[i], BPF_TAG_SIZE); 197 + memset((void *)event + event->header.size, 0, machine->id_hdr_size); 198 + event->header.size += machine->id_hdr_size; 201 199 err = perf_tool__process_synth_event(tool, event, 202 200 machine, process); 203 201 } ··· 223 217 int err; 224 218 int fd; 225 219 226 - event = malloc(sizeof(event->bpf_event) + KSYM_NAME_LEN); 220 + event = malloc(sizeof(event->bpf_event) + KSYM_NAME_LEN + machine->id_hdr_size); 227 221 if (!event) 228 222 return -1; 229 223 while (true) {
+24 -3
tools/perf/util/branch.h
··· 1 1 #ifndef _PERF_BRANCH_H 2 2 #define _PERF_BRANCH_H 1 3 3 4 + #include <stdio.h> 4 5 #include <stdint.h> 5 - #include "../perf.h" 6 + #include <linux/perf_event.h> 7 + #include <linux/types.h> 8 + 9 + struct branch_flags { 10 + u64 mispred:1; 11 + u64 predicted:1; 12 + u64 in_tx:1; 13 + u64 abort:1; 14 + u64 cycles:16; 15 + u64 type:4; 16 + u64 reserved:40; 17 + }; 18 + 19 + struct branch_entry { 20 + u64 from; 21 + u64 to; 22 + struct branch_flags flags; 23 + }; 24 + 25 + struct branch_stack { 26 + u64 nr; 27 + struct branch_entry entries[0]; 28 + }; 6 29 7 30 struct branch_type_stat { 8 31 bool branch_to; ··· 35 12 u64 cross_4k; 36 13 u64 cross_2m; 37 14 }; 38 - 39 - struct branch_flags; 40 15 41 16 void branch_type_count(struct branch_type_stat *st, struct branch_flags *flags, 42 17 u64 from, u64 to);
+9 -4
tools/perf/util/build-id.c
··· 15 15 #include <sys/types.h> 16 16 #include "build-id.h" 17 17 #include "event.h" 18 + #include "namespaces.h" 18 19 #include "symbol.h" 19 20 #include "thread.h" 20 21 #include <linux/kernel.h> ··· 364 363 if (err) 365 364 return err; 366 365 367 - for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) { 366 + for (nd = rb_first_cached(&session->machines.guests); nd; 367 + nd = rb_next(nd)) { 368 368 struct machine *pos = rb_entry(nd, struct machine, rb_node); 369 369 err = machine__write_buildid_table(pos, fd); 370 370 if (err) ··· 398 396 if (err) 399 397 return err; 400 398 401 - for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) { 399 + for (nd = rb_first_cached(&session->machines.guests); nd; 400 + nd = rb_next(nd)) { 402 401 struct machine *pos = rb_entry(nd, struct machine, rb_node); 403 402 404 403 err = machine__hit_all_dsos(pos); ··· 852 849 853 850 ret = machine__cache_build_ids(&session->machines.host); 854 851 855 - for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) { 852 + for (nd = rb_first_cached(&session->machines.guests); nd; 853 + nd = rb_next(nd)) { 856 854 struct machine *pos = rb_entry(nd, struct machine, rb_node); 857 855 ret |= machine__cache_build_ids(pos); 858 856 } ··· 870 866 struct rb_node *nd; 871 867 bool ret = machine__read_build_ids(&session->machines.host, with_hits); 872 868 873 - for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) { 869 + for (nd = rb_first_cached(&session->machines.guests); nd; 870 + nd = rb_next(nd)) { 874 871 struct machine *pos = rb_entry(nd, struct machine, rb_node); 875 872 ret |= machine__read_build_ids(pos, with_hits); 876 873 }
+2 -1
tools/perf/util/build-id.h
··· 6 6 #define SBUILD_ID_SIZE (BUILD_ID_SIZE * 2 + 1) 7 7 8 8 #include "tool.h" 9 - #include "namespaces.h" 10 9 #include <linux/types.h> 10 + 11 + struct nsinfo; 11 12 12 13 extern struct perf_tool build_id__mark_dso_hit_ops; 13 14 struct dso;
-1
tools/perf/util/callchain.h
··· 2 2 #ifndef __PERF_CALLCHAIN_H 3 3 #define __PERF_CALLCHAIN_H 4 4 5 - #include "../perf.h" 6 5 #include <linux/list.h> 7 6 #include <linux/rbtree.h> 8 7 #include "event.h"
+1
tools/perf/util/color.h
··· 3 3 #define __PERF_COLOR_H 4 4 5 5 #include <stdio.h> 6 + #include <stdarg.h> 6 7 7 8 /* "\033[1;38;5;2xx;48;5;2xxm\0" is 23 bytes */ 8 9 #define COLOR_MAXLEN 24
+1
tools/perf/util/comm.c
··· 6 6 #include <stdio.h> 7 7 #include <string.h> 8 8 #include <linux/refcount.h> 9 + #include <linux/rbtree.h> 9 10 #include "rwsem.h" 10 11 11 12 struct comm_str {
+2 -2
tools/perf/util/comm.h
··· 2 2 #ifndef __PERF_COMM_H 3 3 #define __PERF_COMM_H 4 4 5 - #include "../perf.h" 6 - #include <linux/rbtree.h> 7 5 #include <linux/list.h> 6 + #include <linux/types.h> 7 + #include <stdbool.h> 8 8 9 9 struct comm_str; 10 10
+6 -4
tools/perf/util/dso.c
··· 8 8 #include <unistd.h> 9 9 #include <errno.h> 10 10 #include <fcntl.h> 11 + #include <libgen.h> 11 12 #include "compress.h" 13 + #include "namespaces.h" 12 14 #include "path.h" 13 15 #include "symbol.h" 14 16 #include "srcline.h" ··· 1197 1195 strcpy(dso->name, name); 1198 1196 dso__set_long_name(dso, dso->name, false); 1199 1197 dso__set_short_name(dso, dso->name, false); 1200 - dso->symbols = dso->symbol_names = RB_ROOT; 1198 + dso->symbols = dso->symbol_names = RB_ROOT_CACHED; 1201 1199 dso->data.cache = RB_ROOT; 1202 - dso->inlined_nodes = RB_ROOT; 1203 - dso->srclines = RB_ROOT; 1200 + dso->inlined_nodes = RB_ROOT_CACHED; 1201 + dso->srclines = RB_ROOT_CACHED; 1204 1202 dso->data.fd = -1; 1205 1203 dso->data.status = DSO_DATA_STATUS_UNKNOWN; 1206 1204 dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND; ··· 1469 1467 ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ? "" : "NOT "); 1470 1468 ret += dso__fprintf_buildid(dso, fp); 1471 1469 ret += fprintf(fp, ")\n"); 1472 - for (nd = rb_first(&dso->symbols); nd; nd = rb_next(nd)) { 1470 + for (nd = rb_first_cached(&dso->symbols); nd; nd = rb_next(nd)) { 1473 1471 struct symbol *pos = rb_entry(nd, struct symbol, rb_node); 1474 1472 ret += symbol__fprintf(pos, fp); 1475 1473 }
+9 -7
tools/perf/util/dso.h
··· 7 7 #include <linux/rbtree.h> 8 8 #include <sys/types.h> 9 9 #include <stdbool.h> 10 + #include <stdio.h> 10 11 #include "rwsem.h" 11 12 #include <linux/bitops.h> 12 - #include "map.h" 13 - #include "namespaces.h" 14 13 #include "build-id.h" 14 + 15 + struct machine; 16 + struct map; 15 17 16 18 enum dso_binary_type { 17 19 DSO_BINARY_TYPE__KALLSYMS = 0, ··· 141 139 struct list_head node; 142 140 struct rb_node rb_node; /* rbtree node sorted by long name */ 143 141 struct rb_root *root; /* root of rbtree that rb_node is in */ 144 - struct rb_root symbols; 145 - struct rb_root symbol_names; 146 - struct rb_root inlined_nodes; 147 - struct rb_root srclines; 142 + struct rb_root_cached symbols; 143 + struct rb_root_cached symbol_names; 144 + struct rb_root_cached inlined_nodes; 145 + struct rb_root_cached srclines; 148 146 struct { 149 147 u64 addr; 150 148 struct symbol *symbol; ··· 236 234 237 235 static inline bool dso__has_symbols(const struct dso *dso) 238 236 { 239 - return !RB_EMPTY_ROOT(&dso->symbols); 237 + return !RB_EMPTY_ROOT(&dso->symbols.rb_root); 240 238 } 241 239 242 240 bool dso__sorted_by_name(const struct dso *dso);
+1 -20
tools/perf/util/event.h
··· 161 161 u64 ips[0]; 162 162 }; 163 163 164 - struct branch_flags { 165 - u64 mispred:1; 166 - u64 predicted:1; 167 - u64 in_tx:1; 168 - u64 abort:1; 169 - u64 cycles:16; 170 - u64 type:4; 171 - u64 reserved:40; 172 - }; 173 - 174 - struct branch_entry { 175 - u64 from; 176 - u64 to; 177 - struct branch_flags flags; 178 - }; 179 - 180 - struct branch_stack { 181 - u64 nr; 182 - struct branch_entry entries[0]; 183 - }; 164 + struct branch_stack; 184 165 185 166 enum { 186 167 PERF_IP_FLAG_BRANCH = 1ULL << 0,
+112 -87
tools/perf/util/hist.c
··· 209 209 210 210 void hists__output_recalc_col_len(struct hists *hists, int max_rows) 211 211 { 212 - struct rb_node *next = rb_first(&hists->entries); 212 + struct rb_node *next = rb_first_cached(&hists->entries); 213 213 struct hist_entry *n; 214 214 int row = 0; 215 215 ··· 296 296 297 297 if (!he->leaf) { 298 298 struct hist_entry *child; 299 - struct rb_node *node = rb_first(&he->hroot_out); 299 + struct rb_node *node = rb_first_cached(&he->hroot_out); 300 300 while (node) { 301 301 child = rb_entry(node, struct hist_entry, rb_node); 302 302 node = rb_next(node); ··· 311 311 312 312 static void hists__delete_entry(struct hists *hists, struct hist_entry *he) 313 313 { 314 - struct rb_root *root_in; 315 - struct rb_root *root_out; 314 + struct rb_root_cached *root_in; 315 + struct rb_root_cached *root_out; 316 316 317 317 if (he->parent_he) { 318 318 root_in = &he->parent_he->hroot_in; ··· 325 325 root_out = &hists->entries; 326 326 } 327 327 328 - rb_erase(&he->rb_node_in, root_in); 329 - rb_erase(&he->rb_node, root_out); 328 + rb_erase_cached(&he->rb_node_in, root_in); 329 + rb_erase_cached(&he->rb_node, root_out); 330 330 331 331 --hists->nr_entries; 332 332 if (!he->filtered) ··· 337 337 338 338 void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel) 339 339 { 340 - struct rb_node *next = rb_first(&hists->entries); 340 + struct rb_node *next = rb_first_cached(&hists->entries); 341 341 struct hist_entry *n; 342 342 343 343 while (next) { ··· 353 353 354 354 void hists__delete_entries(struct hists *hists) 355 355 { 356 - struct rb_node *next = rb_first(&hists->entries); 356 + struct rb_node *next = rb_first_cached(&hists->entries); 357 357 struct hist_entry *n; 358 358 359 359 while (next) { ··· 435 435 } 436 436 INIT_LIST_HEAD(&he->pairs.node); 437 437 thread__get(he->thread); 438 - he->hroot_in = RB_ROOT; 439 - he->hroot_out = RB_ROOT; 438 + he->hroot_in = RB_ROOT_CACHED; 439 + he->hroot_out = RB_ROOT_CACHED; 440 440 441 441 if 
(!symbol_conf.report_hierarchy) 442 442 he->leaf = true; ··· 513 513 int64_t cmp; 514 514 u64 period = entry->stat.period; 515 515 u64 weight = entry->stat.weight; 516 + bool leftmost = true; 516 517 517 - p = &hists->entries_in->rb_node; 518 + p = &hists->entries_in->rb_root.rb_node; 518 519 519 520 while (*p != NULL) { 520 521 parent = *p; ··· 558 557 559 558 if (cmp < 0) 560 559 p = &(*p)->rb_left; 561 - else 560 + else { 562 561 p = &(*p)->rb_right; 562 + leftmost = false; 563 + } 563 564 } 564 565 565 566 he = hist_entry__new(entry, sample_self); ··· 573 570 hists->nr_entries++; 574 571 575 572 rb_link_node(&he->rb_node_in, parent, p); 576 - rb_insert_color(&he->rb_node_in, hists->entries_in); 573 + rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost); 577 574 out: 578 575 if (sample_self) 579 576 he_stat__add_cpumode_period(&he->stat, al->cpumode, period); ··· 1282 1279 } 1283 1280 1284 1281 static struct hist_entry *hierarchy_insert_entry(struct hists *hists, 1285 - struct rb_root *root, 1282 + struct rb_root_cached *root, 1286 1283 struct hist_entry *he, 1287 1284 struct hist_entry *parent_he, 1288 1285 struct perf_hpp_list *hpp_list) 1289 1286 { 1290 - struct rb_node **p = &root->rb_node; 1287 + struct rb_node **p = &root->rb_root.rb_node; 1291 1288 struct rb_node *parent = NULL; 1292 1289 struct hist_entry *iter, *new; 1293 1290 struct perf_hpp_fmt *fmt; 1294 1291 int64_t cmp; 1292 + bool leftmost = true; 1295 1293 1296 1294 while (*p != NULL) { 1297 1295 parent = *p; ··· 1312 1308 1313 1309 if (cmp < 0) 1314 1310 p = &parent->rb_left; 1315 - else 1311 + else { 1316 1312 p = &parent->rb_right; 1313 + leftmost = false; 1314 + } 1317 1315 } 1318 1316 1319 1317 new = hist_entry__new(he, true); ··· 1349 1343 } 1350 1344 1351 1345 rb_link_node(&new->rb_node_in, parent, p); 1352 - rb_insert_color(&new->rb_node_in, root); 1346 + rb_insert_color_cached(&new->rb_node_in, root, leftmost); 1353 1347 return new; 1354 1348 } 1355 1349 1356 1350 static 
int hists__hierarchy_insert_entry(struct hists *hists, 1357 - struct rb_root *root, 1351 + struct rb_root_cached *root, 1358 1352 struct hist_entry *he) 1359 1353 { 1360 1354 struct perf_hpp_list_node *node; ··· 1401 1395 } 1402 1396 1403 1397 static int hists__collapse_insert_entry(struct hists *hists, 1404 - struct rb_root *root, 1398 + struct rb_root_cached *root, 1405 1399 struct hist_entry *he) 1406 1400 { 1407 - struct rb_node **p = &root->rb_node; 1401 + struct rb_node **p = &root->rb_root.rb_node; 1408 1402 struct rb_node *parent = NULL; 1409 1403 struct hist_entry *iter; 1410 1404 int64_t cmp; 1405 + bool leftmost = true; 1411 1406 1412 1407 if (symbol_conf.report_hierarchy) 1413 1408 return hists__hierarchy_insert_entry(hists, root, he); ··· 1439 1432 1440 1433 if (cmp < 0) 1441 1434 p = &(*p)->rb_left; 1442 - else 1435 + else { 1443 1436 p = &(*p)->rb_right; 1437 + leftmost = false; 1438 + } 1444 1439 } 1445 1440 hists->nr_entries++; 1446 1441 1447 1442 rb_link_node(&he->rb_node_in, parent, p); 1448 - rb_insert_color(&he->rb_node_in, root); 1443 + rb_insert_color_cached(&he->rb_node_in, root, leftmost); 1449 1444 return 1; 1450 1445 } 1451 1446 1452 - struct rb_root *hists__get_rotate_entries_in(struct hists *hists) 1447 + struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists) 1453 1448 { 1454 - struct rb_root *root; 1449 + struct rb_root_cached *root; 1455 1450 1456 1451 pthread_mutex_lock(&hists->lock); 1457 1452 ··· 1476 1467 1477 1468 int hists__collapse_resort(struct hists *hists, struct ui_progress *prog) 1478 1469 { 1479 - struct rb_root *root; 1470 + struct rb_root_cached *root; 1480 1471 struct rb_node *next; 1481 1472 struct hist_entry *n; 1482 1473 int ret; ··· 1488 1479 1489 1480 root = hists__get_rotate_entries_in(hists); 1490 1481 1491 - next = rb_first(root); 1482 + next = rb_first_cached(root); 1492 1483 1493 1484 while (next) { 1494 1485 if (session_done()) ··· 1496 1487 n = rb_entry(next, struct hist_entry, 
rb_node_in); 1497 1488 next = rb_next(&n->rb_node_in); 1498 1489 1499 - rb_erase(&n->rb_node_in, root); 1490 + rb_erase_cached(&n->rb_node_in, root); 1500 1491 ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n); 1501 1492 if (ret < 0) 1502 1493 return -1; ··· 1567 1558 struct rb_node *node; 1568 1559 struct hist_entry *he; 1569 1560 1570 - node = rb_first(&hists->entries); 1561 + node = rb_first_cached(&hists->entries); 1571 1562 1572 1563 hists->stats.total_period = 0; 1573 1564 hists->stats.total_non_filtered_period = 0; ··· 1587 1578 } 1588 1579 } 1589 1580 1590 - static void hierarchy_insert_output_entry(struct rb_root *root, 1581 + static void hierarchy_insert_output_entry(struct rb_root_cached *root, 1591 1582 struct hist_entry *he) 1592 1583 { 1593 - struct rb_node **p = &root->rb_node; 1584 + struct rb_node **p = &root->rb_root.rb_node; 1594 1585 struct rb_node *parent = NULL; 1595 1586 struct hist_entry *iter; 1596 1587 struct perf_hpp_fmt *fmt; 1588 + bool leftmost = true; 1597 1589 1598 1590 while (*p != NULL) { 1599 1591 parent = *p; ··· 1602 1592 1603 1593 if (hist_entry__sort(he, iter) > 0) 1604 1594 p = &parent->rb_left; 1605 - else 1595 + else { 1606 1596 p = &parent->rb_right; 1597 + leftmost = false; 1598 + } 1607 1599 } 1608 1600 1609 1601 rb_link_node(&he->rb_node, parent, p); 1610 - rb_insert_color(&he->rb_node, root); 1602 + rb_insert_color_cached(&he->rb_node, root, leftmost); 1611 1603 1612 1604 /* update column width of dynamic entry */ 1613 1605 perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) { ··· 1620 1608 1621 1609 static void hists__hierarchy_output_resort(struct hists *hists, 1622 1610 struct ui_progress *prog, 1623 - struct rb_root *root_in, 1624 - struct rb_root *root_out, 1611 + struct rb_root_cached *root_in, 1612 + struct rb_root_cached *root_out, 1625 1613 u64 min_callchain_hits, 1626 1614 bool use_callchain) 1627 1615 { 1628 1616 struct rb_node *node; 1629 1617 struct hist_entry *he; 1630 1618 1631 - 
*root_out = RB_ROOT; 1632 - node = rb_first(root_in); 1619 + *root_out = RB_ROOT_CACHED; 1620 + node = rb_first_cached(root_in); 1633 1621 1634 1622 while (node) { 1635 1623 he = rb_entry(node, struct hist_entry, rb_node_in); ··· 1672 1660 } 1673 1661 } 1674 1662 1675 - static void __hists__insert_output_entry(struct rb_root *entries, 1663 + static void __hists__insert_output_entry(struct rb_root_cached *entries, 1676 1664 struct hist_entry *he, 1677 1665 u64 min_callchain_hits, 1678 1666 bool use_callchain) 1679 1667 { 1680 - struct rb_node **p = &entries->rb_node; 1668 + struct rb_node **p = &entries->rb_root.rb_node; 1681 1669 struct rb_node *parent = NULL; 1682 1670 struct hist_entry *iter; 1683 1671 struct perf_hpp_fmt *fmt; 1672 + bool leftmost = true; 1684 1673 1685 1674 if (use_callchain) { 1686 1675 if (callchain_param.mode == CHAIN_GRAPH_REL) { ··· 1702 1689 1703 1690 if (hist_entry__sort(he, iter) > 0) 1704 1691 p = &(*p)->rb_left; 1705 - else 1692 + else { 1706 1693 p = &(*p)->rb_right; 1694 + leftmost = false; 1695 + } 1707 1696 } 1708 1697 1709 1698 rb_link_node(&he->rb_node, parent, p); 1710 - rb_insert_color(&he->rb_node, entries); 1699 + rb_insert_color_cached(&he->rb_node, entries, leftmost); 1711 1700 1712 1701 perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) { 1713 1702 if (perf_hpp__is_dynamic_entry(fmt) && ··· 1721 1706 static void output_resort(struct hists *hists, struct ui_progress *prog, 1722 1707 bool use_callchain, hists__resort_cb_t cb) 1723 1708 { 1724 - struct rb_root *root; 1709 + struct rb_root_cached *root; 1725 1710 struct rb_node *next; 1726 1711 struct hist_entry *n; 1727 1712 u64 callchain_total; ··· 1751 1736 else 1752 1737 root = hists->entries_in; 1753 1738 1754 - next = rb_first(root); 1755 - hists->entries = RB_ROOT; 1739 + next = rb_first_cached(root); 1740 + hists->entries = RB_ROOT_CACHED; 1756 1741 1757 1742 while (next) { 1758 1743 n = rb_entry(next, struct hist_entry, rb_node_in); ··· 1813 1798 struct 
hist_entry *he = rb_entry(node, struct hist_entry, rb_node); 1814 1799 1815 1800 while (can_goto_child(he, HMD_NORMAL)) { 1816 - node = rb_last(&he->hroot_out); 1801 + node = rb_last(&he->hroot_out.rb_root); 1817 1802 he = rb_entry(node, struct hist_entry, rb_node); 1818 1803 } 1819 1804 return node; ··· 1824 1809 struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node); 1825 1810 1826 1811 if (can_goto_child(he, hmd)) 1827 - node = rb_first(&he->hroot_out); 1812 + node = rb_first_cached(&he->hroot_out); 1828 1813 else 1829 1814 node = rb_next(node); 1830 1815 ··· 1862 1847 if (he->leaf) 1863 1848 return false; 1864 1849 1865 - node = rb_first(&he->hroot_out); 1850 + node = rb_first_cached(&he->hroot_out); 1866 1851 child = rb_entry(node, struct hist_entry, rb_node); 1867 1852 1868 1853 while (node && child->filtered) { ··· 1980 1965 hists__reset_filter_stats(hists); 1981 1966 hists__reset_col_len(hists); 1982 1967 1983 - for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { 1968 + for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) { 1984 1969 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); 1985 1970 1986 1971 if (filter(hists, h)) ··· 1990 1975 } 1991 1976 } 1992 1977 1993 - static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he) 1978 + static void resort_filtered_entry(struct rb_root_cached *root, 1979 + struct hist_entry *he) 1994 1980 { 1995 - struct rb_node **p = &root->rb_node; 1981 + struct rb_node **p = &root->rb_root.rb_node; 1996 1982 struct rb_node *parent = NULL; 1997 1983 struct hist_entry *iter; 1998 - struct rb_root new_root = RB_ROOT; 1984 + struct rb_root_cached new_root = RB_ROOT_CACHED; 1999 1985 struct rb_node *nd; 1986 + bool leftmost = true; 2000 1987 2001 1988 while (*p != NULL) { 2002 1989 parent = *p; ··· 2006 1989 2007 1990 if (hist_entry__sort(he, iter) > 0) 2008 1991 p = &(*p)->rb_left; 2009 - else 1992 + else { 2010 1993 p = &(*p)->rb_right; 1994 + leftmost = false; 
1995 + } 2011 1996 } 2012 1997 2013 1998 rb_link_node(&he->rb_node, parent, p); 2014 - rb_insert_color(&he->rb_node, root); 1999 + rb_insert_color_cached(&he->rb_node, root, leftmost); 2015 2000 2016 2001 if (he->leaf || he->filtered) 2017 2002 return; 2018 2003 2019 - nd = rb_first(&he->hroot_out); 2004 + nd = rb_first_cached(&he->hroot_out); 2020 2005 while (nd) { 2021 2006 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); 2022 2007 2023 2008 nd = rb_next(nd); 2024 - rb_erase(&h->rb_node, &he->hroot_out); 2009 + rb_erase_cached(&h->rb_node, &he->hroot_out); 2025 2010 2026 2011 resort_filtered_entry(&new_root, h); 2027 2012 } ··· 2034 2015 static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg) 2035 2016 { 2036 2017 struct rb_node *nd; 2037 - struct rb_root new_root = RB_ROOT; 2018 + struct rb_root_cached new_root = RB_ROOT_CACHED; 2038 2019 2039 2020 hists->stats.nr_non_filtered_samples = 0; 2040 2021 2041 2022 hists__reset_filter_stats(hists); 2042 2023 hists__reset_col_len(hists); 2043 2024 2044 - nd = rb_first(&hists->entries); 2025 + nd = rb_first_cached(&hists->entries); 2045 2026 while (nd) { 2046 2027 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); 2047 2028 int ret; ··· 2085 2066 * resort output after applying a new filter since filter in a lower 2086 2067 * hierarchy can change periods in a upper hierarchy. 
2087 2068 */ 2088 - nd = rb_first(&hists->entries); 2069 + nd = rb_first_cached(&hists->entries); 2089 2070 while (nd) { 2090 2071 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); 2091 2072 2092 2073 nd = rb_next(nd); 2093 - rb_erase(&h->rb_node, &hists->entries); 2074 + rb_erase_cached(&h->rb_node, &hists->entries); 2094 2075 2095 2076 resort_filtered_entry(&new_root, h); 2096 2077 } ··· 2159 2140 static struct hist_entry *hists__add_dummy_entry(struct hists *hists, 2160 2141 struct hist_entry *pair) 2161 2142 { 2162 - struct rb_root *root; 2143 + struct rb_root_cached *root; 2163 2144 struct rb_node **p; 2164 2145 struct rb_node *parent = NULL; 2165 2146 struct hist_entry *he; 2166 2147 int64_t cmp; 2148 + bool leftmost = true; 2167 2149 2168 2150 if (hists__has(hists, need_collapse)) 2169 2151 root = &hists->entries_collapsed; 2170 2152 else 2171 2153 root = hists->entries_in; 2172 2154 2173 - p = &root->rb_node; 2155 + p = &root->rb_root.rb_node; 2174 2156 2175 2157 while (*p != NULL) { 2176 2158 parent = *p; ··· 2184 2164 2185 2165 if (cmp < 0) 2186 2166 p = &(*p)->rb_left; 2187 - else 2167 + else { 2188 2168 p = &(*p)->rb_right; 2169 + leftmost = false; 2170 + } 2189 2171 } 2190 2172 2191 2173 he = hist_entry__new(pair, true); ··· 2197 2175 if (symbol_conf.cumulate_callchain) 2198 2176 memset(he->stat_acc, 0, sizeof(he->stat)); 2199 2177 rb_link_node(&he->rb_node_in, parent, p); 2200 - rb_insert_color(&he->rb_node_in, root); 2178 + rb_insert_color_cached(&he->rb_node_in, root, leftmost); 2201 2179 hists__inc_stats(hists, he); 2202 2180 he->dummy = true; 2203 2181 } ··· 2206 2184 } 2207 2185 2208 2186 static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists, 2209 - struct rb_root *root, 2187 + struct rb_root_cached *root, 2210 2188 struct hist_entry *pair) 2211 2189 { 2212 2190 struct rb_node **p; 2213 2191 struct rb_node *parent = NULL; 2214 2192 struct hist_entry *he; 2215 2193 struct perf_hpp_fmt *fmt; 2194 + bool leftmost = 
true; 2216 2195 2217 - p = &root->rb_node; 2196 + p = &root->rb_root.rb_node; 2218 2197 while (*p != NULL) { 2219 2198 int64_t cmp = 0; 2220 2199 ··· 2232 2209 2233 2210 if (cmp < 0) 2234 2211 p = &parent->rb_left; 2235 - else 2212 + else { 2236 2213 p = &parent->rb_right; 2214 + leftmost = false; 2215 + } 2237 2216 } 2238 2217 2239 2218 he = hist_entry__new(pair, true); 2240 2219 if (he) { 2241 2220 rb_link_node(&he->rb_node_in, parent, p); 2242 - rb_insert_color(&he->rb_node_in, root); 2221 + rb_insert_color_cached(&he->rb_node_in, root, leftmost); 2243 2222 2244 2223 he->dummy = true; 2245 2224 he->hists = hists; ··· 2258 2233 struct rb_node *n; 2259 2234 2260 2235 if (hists__has(hists, need_collapse)) 2261 - n = hists->entries_collapsed.rb_node; 2236 + n = hists->entries_collapsed.rb_root.rb_node; 2262 2237 else 2263 - n = hists->entries_in->rb_node; 2238 + n = hists->entries_in->rb_root.rb_node; 2264 2239 2265 2240 while (n) { 2266 2241 struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in); ··· 2277 2252 return NULL; 2278 2253 } 2279 2254 2280 - static struct hist_entry *hists__find_hierarchy_entry(struct rb_root *root, 2255 + static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root, 2281 2256 struct hist_entry *he) 2282 2257 { 2283 - struct rb_node *n = root->rb_node; 2258 + struct rb_node *n = root->rb_root.rb_node; 2284 2259 2285 2260 while (n) { 2286 2261 struct hist_entry *iter; ··· 2305 2280 return NULL; 2306 2281 } 2307 2282 2308 - static void hists__match_hierarchy(struct rb_root *leader_root, 2309 - struct rb_root *other_root) 2283 + static void hists__match_hierarchy(struct rb_root_cached *leader_root, 2284 + struct rb_root_cached *other_root) 2310 2285 { 2311 2286 struct rb_node *nd; 2312 2287 struct hist_entry *pos, *pair; 2313 2288 2314 - for (nd = rb_first(leader_root); nd; nd = rb_next(nd)) { 2289 + for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) { 2315 2290 pos = rb_entry(nd, struct 
hist_entry, rb_node_in); 2316 2291 pair = hists__find_hierarchy_entry(other_root, pos); 2317 2292 ··· 2327 2302 */ 2328 2303 void hists__match(struct hists *leader, struct hists *other) 2329 2304 { 2330 - struct rb_root *root; 2305 + struct rb_root_cached *root; 2331 2306 struct rb_node *nd; 2332 2307 struct hist_entry *pos, *pair; 2333 2308 ··· 2342 2317 else 2343 2318 root = leader->entries_in; 2344 2319 2345 - for (nd = rb_first(root); nd; nd = rb_next(nd)) { 2320 + for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) { 2346 2321 pos = rb_entry(nd, struct hist_entry, rb_node_in); 2347 2322 pair = hists__find_entry(other, pos); 2348 2323 ··· 2353 2328 2354 2329 static int hists__link_hierarchy(struct hists *leader_hists, 2355 2330 struct hist_entry *parent, 2356 - struct rb_root *leader_root, 2357 - struct rb_root *other_root) 2331 + struct rb_root_cached *leader_root, 2332 + struct rb_root_cached *other_root) 2358 2333 { 2359 2334 struct rb_node *nd; 2360 2335 struct hist_entry *pos, *leader; 2361 2336 2362 - for (nd = rb_first(other_root); nd; nd = rb_next(nd)) { 2337 + for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) { 2363 2338 pos = rb_entry(nd, struct hist_entry, rb_node_in); 2364 2339 2365 2340 if (hist_entry__has_pairs(pos)) { ··· 2402 2377 */ 2403 2378 int hists__link(struct hists *leader, struct hists *other) 2404 2379 { 2405 - struct rb_root *root; 2380 + struct rb_root_cached *root; 2406 2381 struct rb_node *nd; 2407 2382 struct hist_entry *pos, *pair; 2408 2383 ··· 2418 2393 else 2419 2394 root = other->entries_in; 2420 2395 2421 - for (nd = rb_first(root); nd; nd = rb_next(nd)) { 2396 + for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) { 2422 2397 pos = rb_entry(nd, struct hist_entry, rb_node_in); 2423 2398 2424 2399 if (!hist_entry__has_pairs(pos)) { ··· 2591 2566 int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list) 2592 2567 { 2593 2568 memset(hists, 0, sizeof(*hists)); 2594 - hists->entries_in_array[0] = 
hists->entries_in_array[1] = RB_ROOT; 2569 + hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED; 2595 2570 hists->entries_in = &hists->entries_in_array[0]; 2596 - hists->entries_collapsed = RB_ROOT; 2597 - hists->entries = RB_ROOT; 2571 + hists->entries_collapsed = RB_ROOT_CACHED; 2572 + hists->entries = RB_ROOT_CACHED; 2598 2573 pthread_mutex_init(&hists->lock, NULL); 2599 2574 hists->socket_filter = -1; 2600 2575 hists->hpp_list = hpp_list; ··· 2602 2577 return 0; 2603 2578 } 2604 2579 2605 - static void hists__delete_remaining_entries(struct rb_root *root) 2580 + static void hists__delete_remaining_entries(struct rb_root_cached *root) 2606 2581 { 2607 2582 struct rb_node *node; 2608 2583 struct hist_entry *he; 2609 2584 2610 - while (!RB_EMPTY_ROOT(root)) { 2611 - node = rb_first(root); 2612 - rb_erase(node, root); 2585 + while (!RB_EMPTY_ROOT(&root->rb_root)) { 2586 + node = rb_first_cached(root); 2587 + rb_erase_cached(node, root); 2613 2588 2614 2589 he = rb_entry(node, struct hist_entry, rb_node_in); 2615 2590 hist_entry__delete(he);
+5 -5
tools/perf/util/hist.h
··· 70 70 struct dso; 71 71 72 72 struct hists { 73 - struct rb_root entries_in_array[2]; 74 - struct rb_root *entries_in; 75 - struct rb_root entries; 76 - struct rb_root entries_collapsed; 73 + struct rb_root_cached entries_in_array[2]; 74 + struct rb_root_cached *entries_in; 75 + struct rb_root_cached entries; 76 + struct rb_root_cached entries_collapsed; 77 77 u64 nr_entries; 78 78 u64 nr_non_filtered_entries; 79 79 u64 callchain_period; ··· 230 230 int hists__init(void); 231 231 int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list); 232 232 233 - struct rb_root *hists__get_rotate_entries_in(struct hists *hists); 233 + struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists); 234 234 235 235 struct perf_hpp { 236 236 char *buf;
+1 -1
tools/perf/util/intlist.h
··· 45 45 /* For intlist iteration */ 46 46 static inline struct int_node *intlist__first(struct intlist *ilist) 47 47 { 48 - struct rb_node *rn = rb_first(&ilist->rblist.entries); 48 + struct rb_node *rn = rb_first_cached(&ilist->rblist.entries); 49 49 return rn ? rb_entry(rn, struct int_node, rb_node) : NULL; 50 50 } 51 51 static inline struct int_node *intlist__next(struct int_node *in)
+1
tools/perf/util/jitdump.c
··· 2 2 #include <sys/sysmacros.h> 3 3 #include <sys/types.h> 4 4 #include <errno.h> 5 + #include <libgen.h> 5 6 #include <stdio.h> 6 7 #include <stdlib.h> 7 8 #include <string.h>
+31 -22
tools/perf/util/machine.c
··· 42 42 43 43 for (i = 0; i < THREADS__TABLE_SIZE; i++) { 44 44 struct threads *threads = &machine->threads[i]; 45 - threads->entries = RB_ROOT; 45 + threads->entries = RB_ROOT_CACHED; 46 46 init_rwsem(&threads->lock); 47 47 threads->nr = 0; 48 48 INIT_LIST_HEAD(&threads->dead); ··· 180 180 for (i = 0; i < THREADS__TABLE_SIZE; i++) { 181 181 struct threads *threads = &machine->threads[i]; 182 182 down_write(&threads->lock); 183 - nd = rb_first(&threads->entries); 183 + nd = rb_first_cached(&threads->entries); 184 184 while (nd) { 185 185 struct thread *t = rb_entry(nd, struct thread, rb_node); 186 186 ··· 223 223 void machines__init(struct machines *machines) 224 224 { 225 225 machine__init(&machines->host, "", HOST_KERNEL_ID); 226 - machines->guests = RB_ROOT; 226 + machines->guests = RB_ROOT_CACHED; 227 227 } 228 228 229 229 void machines__exit(struct machines *machines) ··· 235 235 struct machine *machines__add(struct machines *machines, pid_t pid, 236 236 const char *root_dir) 237 237 { 238 - struct rb_node **p = &machines->guests.rb_node; 238 + struct rb_node **p = &machines->guests.rb_root.rb_node; 239 239 struct rb_node *parent = NULL; 240 240 struct machine *pos, *machine = malloc(sizeof(*machine)); 241 + bool leftmost = true; 241 242 242 243 if (machine == NULL) 243 244 return NULL; ··· 253 252 pos = rb_entry(parent, struct machine, rb_node); 254 253 if (pid < pos->pid) 255 254 p = &(*p)->rb_left; 256 - else 255 + else { 257 256 p = &(*p)->rb_right; 257 + leftmost = false; 258 + } 258 259 } 259 260 260 261 rb_link_node(&machine->rb_node, parent, p); 261 - rb_insert_color(&machine->rb_node, &machines->guests); 262 + rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost); 262 263 263 264 return machine; 264 265 } ··· 271 268 272 269 machines->host.comm_exec = comm_exec; 273 270 274 - for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { 271 + for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) { 275 272 struct 
machine *machine = rb_entry(nd, struct machine, rb_node); 276 273 277 274 machine->comm_exec = comm_exec; ··· 280 277 281 278 struct machine *machines__find(struct machines *machines, pid_t pid) 282 279 { 283 - struct rb_node **p = &machines->guests.rb_node; 280 + struct rb_node **p = &machines->guests.rb_root.rb_node; 284 281 struct rb_node *parent = NULL; 285 282 struct machine *machine; 286 283 struct machine *default_machine = NULL; ··· 343 340 { 344 341 struct rb_node *nd; 345 342 346 - for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { 343 + for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) { 347 344 struct machine *pos = rb_entry(nd, struct machine, rb_node); 348 345 process(pos, data); 349 346 } ··· 356 353 357 354 machines->host.id_hdr_size = id_hdr_size; 358 355 359 - for (node = rb_first(&machines->guests); node; node = rb_next(node)) { 356 + for (node = rb_first_cached(&machines->guests); node; 357 + node = rb_next(node)) { 360 358 machine = rb_entry(node, struct machine, rb_node); 361 359 machine->id_hdr_size = id_hdr_size; 362 360 } ··· 470 466 pid_t pid, pid_t tid, 471 467 bool create) 472 468 { 473 - struct rb_node **p = &threads->entries.rb_node; 469 + struct rb_node **p = &threads->entries.rb_root.rb_node; 474 470 struct rb_node *parent = NULL; 475 471 struct thread *th; 472 + bool leftmost = true; 476 473 477 474 th = threads__get_last_match(threads, machine, pid, tid); 478 475 if (th) ··· 491 486 492 487 if (tid < th->tid) 493 488 p = &(*p)->rb_left; 494 - else 489 + else { 495 490 p = &(*p)->rb_right; 491 + leftmost = false; 492 + } 496 493 } 497 494 498 495 if (!create) ··· 503 496 th = thread__new(pid, tid); 504 497 if (th != NULL) { 505 498 rb_link_node(&th->rb_node, parent, p); 506 - rb_insert_color(&th->rb_node, &threads->entries); 499 + rb_insert_color_cached(&th->rb_node, &threads->entries, leftmost); 507 500 508 501 /* 509 502 * We have to initialize map_groups separately ··· 514 507 * leader and that would 
screwed the rb tree. 515 508 */ 516 509 if (thread__init_map_groups(th, machine)) { 517 - rb_erase_init(&th->rb_node, &threads->entries); 510 + rb_erase_cached(&th->rb_node, &threads->entries); 518 511 RB_CLEAR_NODE(&th->rb_node); 519 512 thread__put(th); 520 513 return NULL; ··· 805 798 struct rb_node *nd; 806 799 size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp); 807 800 808 - for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { 801 + for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) { 809 802 struct machine *pos = rb_entry(nd, struct machine, rb_node); 810 803 ret += __dsos__fprintf(&pos->dsos.head, fp); 811 804 } ··· 825 818 struct rb_node *nd; 826 819 size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm); 827 820 828 - for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { 821 + for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) { 829 822 struct machine *pos = rb_entry(nd, struct machine, rb_node); 830 823 ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm); 831 824 } ··· 865 858 866 859 ret = fprintf(fp, "Threads: %u\n", threads->nr); 867 860 868 - for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) { 861 + for (nd = rb_first_cached(&threads->entries); nd; 862 + nd = rb_next(nd)) { 869 863 struct thread *pos = rb_entry(nd, struct thread, rb_node); 870 864 871 865 ret += thread__fprintf(pos, fp); ··· 1169 1161 1170 1162 void machines__destroy_kernel_maps(struct machines *machines) 1171 1163 { 1172 - struct rb_node *next = rb_first(&machines->guests); 1164 + struct rb_node *next = rb_first_cached(&machines->guests); 1173 1165 1174 1166 machine__destroy_kernel_maps(&machines->host); 1175 1167 ··· 1177 1169 struct machine *pos = rb_entry(next, struct machine, rb_node); 1178 1170 1179 1171 next = rb_next(&pos->rb_node); 1180 - rb_erase(&pos->rb_node, &machines->guests); 1172 + rb_erase_cached(&pos->rb_node, &machines->guests); 1181 1173 machine__delete(pos); 1182 
1174 } 1183 1175 } ··· 1742 1734 BUG_ON(refcount_read(&th->refcnt) == 0); 1743 1735 if (lock) 1744 1736 down_write(&threads->lock); 1745 - rb_erase_init(&th->rb_node, &threads->entries); 1737 + rb_erase_cached(&th->rb_node, &threads->entries); 1746 1738 RB_CLEAR_NODE(&th->rb_node); 1747 1739 --threads->nr; 1748 1740 /* ··· 2519 2511 2520 2512 for (i = 0; i < THREADS__TABLE_SIZE; i++) { 2521 2513 threads = &machine->threads[i]; 2522 - for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) { 2514 + for (nd = rb_first_cached(&threads->entries); nd; 2515 + nd = rb_next(nd)) { 2523 2516 thread = rb_entry(nd, struct thread, rb_node); 2524 2517 rc = fn(thread, priv); 2525 2518 if (rc != 0) ··· 2547 2538 if (rc != 0) 2548 2539 return rc; 2549 2540 2550 - for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) { 2541 + for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) { 2551 2542 struct machine *machine = rb_entry(nd, struct machine, rb_node); 2552 2543 2553 2544 rc = machine__for_each_thread(machine, fn, priv);
+6 -6
tools/perf/util/machine.h
··· 29 29 #define THREADS__TABLE_SIZE (1 << THREADS__TABLE_BITS) 30 30 31 31 struct threads { 32 - struct rb_root entries; 33 - struct rw_semaphore lock; 34 - unsigned int nr; 35 - struct list_head dead; 36 - struct thread *last_match; 32 + struct rb_root_cached entries; 33 + struct rw_semaphore lock; 34 + unsigned int nr; 35 + struct list_head dead; 36 + struct thread *last_match; 37 37 }; 38 38 39 39 struct machine { ··· 140 140 141 141 struct machines { 142 142 struct machine host; 143 - struct rb_root guests; 143 + struct rb_root_cached guests; 144 144 }; 145 145 146 146 void machines__init(struct machines *machines);
+4 -4
tools/perf/util/map.c
··· 286 286 287 287 void map__fixup_start(struct map *map) 288 288 { 289 - struct rb_root *symbols = &map->dso->symbols; 290 - struct rb_node *nd = rb_first(symbols); 289 + struct rb_root_cached *symbols = &map->dso->symbols; 290 + struct rb_node *nd = rb_first_cached(symbols); 291 291 if (nd != NULL) { 292 292 struct symbol *sym = rb_entry(nd, struct symbol, rb_node); 293 293 map->start = sym->start; ··· 296 296 297 297 void map__fixup_end(struct map *map) 298 298 { 299 - struct rb_root *symbols = &map->dso->symbols; 300 - struct rb_node *nd = rb_last(symbols); 299 + struct rb_root_cached *symbols = &map->dso->symbols; 300 + struct rb_node *nd = rb_last(&symbols->rb_root); 301 301 if (nd != NULL) { 302 302 struct symbol *sym = rb_entry(nd, struct symbol, rb_node); 303 303 map->end = sym->end;
+1 -1
tools/perf/util/metricgroup.c
··· 352 352 else if (metrics && !raw) 353 353 printf("\nMetrics:\n\n"); 354 354 355 - for (node = rb_first(&groups.entries); node; node = next) { 355 + for (node = rb_first_cached(&groups.entries); node; node = next) { 356 356 struct mep *me = container_of(node, struct mep, nd); 357 357 358 358 if (metricgroups)
+3 -1
tools/perf/util/probe-event.c
··· 35 35 36 36 #include "util.h" 37 37 #include "event.h" 38 + #include "namespaces.h" 38 39 #include "strlist.h" 39 40 #include "strfilter.h" 40 41 #include "debug.h" ··· 3529 3528 /* Show all (filtered) symbols */ 3530 3529 setup_pager(); 3531 3530 3532 - for (nd = rb_first(&map->dso->symbol_names); nd; nd = rb_next(nd)) { 3531 + for (nd = rb_first_cached(&map->dso->symbol_names); nd; 3532 + nd = rb_next(nd)) { 3533 3533 struct symbol_name_rb_node *pos = rb_entry(nd, struct symbol_name_rb_node, rb_node); 3534 3534 3535 3535 if (strfilter__compare(_filter, pos->sym.name))
+3 -2
tools/perf/util/probe-event.h
··· 4 4 5 5 #include <linux/compiler.h> 6 6 #include <stdbool.h> 7 - #include "intlist.h" 8 - #include "namespaces.h" 7 + 8 + struct intlist; 9 + struct nsinfo; 9 10 10 11 /* Probe related configurations */ 11 12 struct probe_conf {
+1
tools/perf/util/probe-file.c
··· 20 20 #include <sys/types.h> 21 21 #include <sys/uio.h> 22 22 #include <unistd.h> 23 + #include "namespaces.h" 23 24 #include "util.h" 24 25 #include "event.h" 25 26 #include "strlist.h"
+4 -4
tools/perf/util/rb_resort.h
··· 140 140 141 141 /* For 'struct intlist' */ 142 142 #define DECLARE_RESORT_RB_INTLIST(__name, __ilist) \ 143 - DECLARE_RESORT_RB(__name)(&__ilist->rblist.entries, \ 143 + DECLARE_RESORT_RB(__name)(&__ilist->rblist.entries.rb_root, \ 144 144 __ilist->rblist.nr_entries) 145 145 146 146 /* For 'struct machine->threads' */ 147 - #define DECLARE_RESORT_RB_MACHINE_THREADS(__name, __machine, hash_bucket) \ 148 - DECLARE_RESORT_RB(__name)(&__machine->threads[hash_bucket].entries, \ 149 - __machine->threads[hash_bucket].nr) 147 + #define DECLARE_RESORT_RB_MACHINE_THREADS(__name, __machine, hash_bucket) \ 148 + DECLARE_RESORT_RB(__name)(&__machine->threads[hash_bucket].entries.rb_root, \ 149 + __machine->threads[hash_bucket].nr) 150 150 151 151 #endif /* _PERF_RESORT_RB_H_ */
+18 -10
tools/perf/util/rblist.c
··· 13 13 14 14 int rblist__add_node(struct rblist *rblist, const void *new_entry) 15 15 { 16 - struct rb_node **p = &rblist->entries.rb_node; 16 + struct rb_node **p = &rblist->entries.rb_root.rb_node; 17 17 struct rb_node *parent = NULL, *new_node; 18 + bool leftmost = true; 18 19 19 20 while (*p != NULL) { 20 21 int rc; ··· 25 24 rc = rblist->node_cmp(parent, new_entry); 26 25 if (rc > 0) 27 26 p = &(*p)->rb_left; 28 - else if (rc < 0) 27 + else if (rc < 0) { 29 28 p = &(*p)->rb_right; 29 + leftmost = false; 30 + } 30 31 else 31 32 return -EEXIST; 32 33 } ··· 38 35 return -ENOMEM; 39 36 40 37 rb_link_node(new_node, parent, p); 41 - rb_insert_color(new_node, &rblist->entries); 38 + rb_insert_color_cached(new_node, &rblist->entries, leftmost); 42 39 ++rblist->nr_entries; 43 40 44 41 return 0; ··· 46 43 47 44 void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node) 48 45 { 49 - rb_erase(rb_node, &rblist->entries); 46 + rb_erase_cached(rb_node, &rblist->entries); 50 47 --rblist->nr_entries; 51 48 rblist->node_delete(rblist, rb_node); 52 49 } ··· 55 52 const void *entry, 56 53 bool create) 57 54 { 58 - struct rb_node **p = &rblist->entries.rb_node; 55 + struct rb_node **p = &rblist->entries.rb_root.rb_node; 59 56 struct rb_node *parent = NULL, *new_node = NULL; 57 + bool leftmost = true; 60 58 61 59 while (*p != NULL) { 62 60 int rc; ··· 67 63 rc = rblist->node_cmp(parent, entry); 68 64 if (rc > 0) 69 65 p = &(*p)->rb_left; 70 - else if (rc < 0) 66 + else if (rc < 0) { 71 67 p = &(*p)->rb_right; 68 + leftmost = false; 69 + } 72 70 else 73 71 return parent; 74 72 } ··· 79 73 new_node = rblist->node_new(rblist, entry); 80 74 if (new_node) { 81 75 rb_link_node(new_node, parent, p); 82 - rb_insert_color(new_node, &rblist->entries); 76 + rb_insert_color_cached(new_node, 77 + &rblist->entries, leftmost); 83 78 ++rblist->nr_entries; 84 79 } 85 80 } ··· 101 94 void rblist__init(struct rblist *rblist) 102 95 { 103 96 if (rblist != NULL) { 104 - rblist->entries 
= RB_ROOT; 97 + rblist->entries = RB_ROOT_CACHED; 105 98 rblist->nr_entries = 0; 106 99 } 107 100 ··· 110 103 111 104 void rblist__exit(struct rblist *rblist) 112 105 { 113 - struct rb_node *pos, *next = rb_first(&rblist->entries); 106 + struct rb_node *pos, *next = rb_first_cached(&rblist->entries); 114 107 115 108 while (next) { 116 109 pos = next; ··· 131 124 { 132 125 struct rb_node *node; 133 126 134 - for (node = rb_first(&rblist->entries); node; node = rb_next(node)) { 127 + for (node = rb_first_cached(&rblist->entries); node; 128 + node = rb_next(node)) { 135 129 if (!idx--) 136 130 return node; 137 131 }
+1 -1
tools/perf/util/rblist.h
··· 20 20 */ 21 21 22 22 struct rblist { 23 - struct rb_root entries; 23 + struct rb_root_cached entries; 24 24 unsigned int nr_entries; 25 25 26 26 int (*node_cmp)(struct rb_node *rbn, const void *entry);
+10 -5
tools/perf/util/scripting-engines/trace-event-python.c
··· 733 733 Py_FatalError("couldn't create Python dictionary"); 734 734 735 735 pydict_set_item_string_decref(dict, "ev_name", _PyUnicode_FromString(perf_evsel__name(evsel))); 736 - pydict_set_item_string_decref(dict, "attr", _PyUnicode_FromStringAndSize( 737 - (const char *)&evsel->attr, sizeof(evsel->attr))); 736 + pydict_set_item_string_decref(dict, "attr", _PyBytes_FromStringAndSize((const char *)&evsel->attr, sizeof(evsel->attr))); 738 737 739 738 pydict_set_item_string_decref(dict_sample, "pid", 740 739 _PyLong_FromLong(sample->pid)); ··· 1493 1494 static int python_start_script(const char *script, int argc, const char **argv) 1494 1495 { 1495 1496 struct tables *tables = &tables_global; 1497 + PyMODINIT_FUNC (*initfunc)(void); 1496 1498 #if PY_MAJOR_VERSION < 3 1497 1499 const char **command_line; 1498 1500 #else 1499 1501 wchar_t **command_line; 1500 1502 #endif 1501 - char buf[PATH_MAX]; 1503 + /* 1504 + * Use a non-const name variable to cope with python 2.6's 1505 + * PyImport_AppendInittab prototype 1506 + */ 1507 + char buf[PATH_MAX], name[19] = "perf_trace_context"; 1502 1508 int i, err = 0; 1503 1509 FILE *fp; 1504 1510 1505 1511 #if PY_MAJOR_VERSION < 3 1512 + initfunc = initperf_trace_context; 1506 1513 command_line = malloc((argc + 1) * sizeof(const char *)); 1507 1514 command_line[0] = script; 1508 1515 for (i = 1; i < argc + 1; i++) 1509 1516 command_line[i] = argv[i - 1]; 1510 1517 #else 1518 + initfunc = PyInit_perf_trace_context; 1511 1519 command_line = malloc((argc + 1) * sizeof(wchar_t *)); 1512 1520 command_line[0] = Py_DecodeLocale(script, NULL); 1513 1521 for (i = 1; i < argc + 1; i++) 1514 1522 command_line[i] = Py_DecodeLocale(argv[i - 1], NULL); 1515 1523 #endif 1516 1524 1525 + PyImport_AppendInittab(name, initfunc); 1517 1526 Py_Initialize(); 1518 1527 1519 1528 #if PY_MAJOR_VERSION < 3 1520 - initperf_trace_context(); 1521 1529 PySys_SetArgv(argc + 1, (char **)command_line); 1522 1530 #else 1523 - PyInit_perf_trace_context(); 1524 
1531 PySys_SetArgv(argc + 1, command_line); 1525 1532 #endif 1526 1533
-2
tools/perf/util/setup.py
··· 1 - #!/usr/bin/python 2 - 3 1 from os import getenv 4 2 from subprocess import Popen, PIPE 5 3 from re import sub
+2 -2
tools/perf/util/sort.h
··· 145 145 union { 146 146 /* this is for hierarchical entry structure */ 147 147 struct { 148 - struct rb_root hroot_in; 149 - struct rb_root hroot_out; 148 + struct rb_root_cached hroot_in; 149 + struct rb_root_cached hroot_out; 150 150 }; /* non-leaf entries */ 151 151 struct rb_root sorted_chain; /* leaf entry has callchains */ 152 152 };
+25 -18
tools/perf/util/srcline.c
··· 594 594 struct rb_node rb_node; 595 595 }; 596 596 597 - void srcline__tree_insert(struct rb_root *tree, u64 addr, char *srcline) 597 + void srcline__tree_insert(struct rb_root_cached *tree, u64 addr, char *srcline) 598 598 { 599 - struct rb_node **p = &tree->rb_node; 599 + struct rb_node **p = &tree->rb_root.rb_node; 600 600 struct rb_node *parent = NULL; 601 601 struct srcline_node *i, *node; 602 + bool leftmost = true; 602 603 603 604 node = zalloc(sizeof(struct srcline_node)); 604 605 if (!node) { ··· 615 614 i = rb_entry(parent, struct srcline_node, rb_node); 616 615 if (addr < i->addr) 617 616 p = &(*p)->rb_left; 618 - else 617 + else { 619 618 p = &(*p)->rb_right; 619 + leftmost = false; 620 + } 620 621 } 621 622 rb_link_node(&node->rb_node, parent, p); 622 - rb_insert_color(&node->rb_node, tree); 623 + rb_insert_color_cached(&node->rb_node, tree, leftmost); 623 624 } 624 625 625 - char *srcline__tree_find(struct rb_root *tree, u64 addr) 626 + char *srcline__tree_find(struct rb_root_cached *tree, u64 addr) 626 627 { 627 - struct rb_node *n = tree->rb_node; 628 + struct rb_node *n = tree->rb_root.rb_node; 628 629 629 630 while (n) { 630 631 struct srcline_node *i = rb_entry(n, struct srcline_node, ··· 643 640 return NULL; 644 641 } 645 642 646 - void srcline__tree_delete(struct rb_root *tree) 643 + void srcline__tree_delete(struct rb_root_cached *tree) 647 644 { 648 645 struct srcline_node *pos; 649 - struct rb_node *next = rb_first(tree); 646 + struct rb_node *next = rb_first_cached(tree); 650 647 651 648 while (next) { 652 649 pos = rb_entry(next, struct srcline_node, rb_node); 653 650 next = rb_next(&pos->rb_node); 654 - rb_erase(&pos->rb_node, tree); 651 + rb_erase_cached(&pos->rb_node, tree); 655 652 free_srcline(pos->srcline); 656 653 zfree(&pos); 657 654 } ··· 685 682 free(node); 686 683 } 687 684 688 - void inlines__tree_insert(struct rb_root *tree, struct inline_node *inlines) 685 + void inlines__tree_insert(struct rb_root_cached *tree, 686 + 
struct inline_node *inlines) 689 687 { 690 - struct rb_node **p = &tree->rb_node; 688 + struct rb_node **p = &tree->rb_root.rb_node; 691 689 struct rb_node *parent = NULL; 692 690 const u64 addr = inlines->addr; 693 691 struct inline_node *i; 692 + bool leftmost = true; 694 693 695 694 while (*p != NULL) { 696 695 parent = *p; 697 696 i = rb_entry(parent, struct inline_node, rb_node); 698 697 if (addr < i->addr) 699 698 p = &(*p)->rb_left; 700 - else 699 + else { 701 700 p = &(*p)->rb_right; 701 + leftmost = false; 702 + } 702 703 } 703 704 rb_link_node(&inlines->rb_node, parent, p); 704 - rb_insert_color(&inlines->rb_node, tree); 705 + rb_insert_color_cached(&inlines->rb_node, tree, leftmost); 705 706 } 706 707 707 - struct inline_node *inlines__tree_find(struct rb_root *tree, u64 addr) 708 + struct inline_node *inlines__tree_find(struct rb_root_cached *tree, u64 addr) 708 709 { 709 - struct rb_node *n = tree->rb_node; 710 + struct rb_node *n = tree->rb_root.rb_node; 710 711 711 712 while (n) { 712 713 struct inline_node *i = rb_entry(n, struct inline_node, ··· 727 720 return NULL; 728 721 } 729 722 730 - void inlines__tree_delete(struct rb_root *tree) 723 + void inlines__tree_delete(struct rb_root_cached *tree) 731 724 { 732 725 struct inline_node *pos; 733 - struct rb_node *next = rb_first(tree); 726 + struct rb_node *next = rb_first_cached(tree); 734 727 735 728 while (next) { 736 729 pos = rb_entry(next, struct inline_node, rb_node); 737 730 next = rb_next(&pos->rb_node); 738 - rb_erase(&pos->rb_node, tree); 731 + rb_erase_cached(&pos->rb_node, tree); 739 732 inline_node__delete(pos); 740 733 } 741 734 }
+7 -6
tools/perf/util/srcline.h
··· 19 19 char *get_srcline_split(struct dso *dso, u64 addr, unsigned *line); 20 20 21 21 /* insert the srcline into the DSO, which will take ownership */ 22 - void srcline__tree_insert(struct rb_root *tree, u64 addr, char *srcline); 22 + void srcline__tree_insert(struct rb_root_cached *tree, u64 addr, char *srcline); 23 23 /* find previously inserted srcline */ 24 - char *srcline__tree_find(struct rb_root *tree, u64 addr); 24 + char *srcline__tree_find(struct rb_root_cached *tree, u64 addr); 25 25 /* delete all srclines within the tree */ 26 - void srcline__tree_delete(struct rb_root *tree); 26 + void srcline__tree_delete(struct rb_root_cached *tree); 27 27 28 28 #define SRCLINE_UNKNOWN ((char *) "??:0") 29 29 ··· 46 46 void inline_node__delete(struct inline_node *node); 47 47 48 48 /* insert the inline node list into the DSO, which will take ownership */ 49 - void inlines__tree_insert(struct rb_root *tree, struct inline_node *inlines); 49 + void inlines__tree_insert(struct rb_root_cached *tree, 50 + struct inline_node *inlines); 50 51 /* find previously inserted inline node list */ 51 - struct inline_node *inlines__tree_find(struct rb_root *tree, u64 addr); 52 + struct inline_node *inlines__tree_find(struct rb_root_cached *tree, u64 addr); 52 53 /* delete all nodes within the tree of inline_node s */ 53 - void inlines__tree_delete(struct rb_root *tree); 54 + void inlines__tree_delete(struct rb_root_cached *tree); 54 55 55 56 #endif /* PERF_SRCLINE_H */
+1
tools/perf/util/stat-display.c
··· 2 2 #include <inttypes.h> 3 3 #include <linux/time64.h> 4 4 #include <math.h> 5 + #include "color.h" 5 6 #include "evlist.h" 6 7 #include "evsel.h" 7 8 #include "stat.h"
+1 -1
tools/perf/util/stat-shadow.c
··· 168 168 struct rb_node *pos, *next; 169 169 170 170 rblist = &st->value_list; 171 - next = rb_first(&rblist->entries); 171 + next = rb_first_cached(&rblist->entries); 172 172 while (next) { 173 173 pos = next; 174 174 next = rb_next(pos);
+1 -1
tools/perf/util/strlist.h
··· 57 57 /* For strlist iteration */ 58 58 static inline struct str_node *strlist__first(struct strlist *slist) 59 59 { 60 - struct rb_node *rn = rb_first(&slist->rblist.entries); 60 + struct rb_node *rn = rb_first_cached(&slist->rblist.entries); 61 61 return rn ? rb_entry(rn, struct str_node, rb_node) : NULL; 62 62 } 63 63 static inline struct str_node *strlist__next(struct str_node *sn)
+1
tools/perf/util/symbol-minimal.c
··· 3 3 #include "util.h" 4 4 5 5 #include <errno.h> 6 + #include <unistd.h> 6 7 #include <stdio.h> 7 8 #include <fcntl.h> 8 9 #include <string.h>
+47 -40
tools/perf/util/symbol.c
··· 163 163 return arch__choose_best_symbol(syma, symb); 164 164 } 165 165 166 - void symbols__fixup_duplicate(struct rb_root *symbols) 166 + void symbols__fixup_duplicate(struct rb_root_cached *symbols) 167 167 { 168 168 struct rb_node *nd; 169 169 struct symbol *curr, *next; ··· 171 171 if (symbol_conf.allow_aliases) 172 172 return; 173 173 174 - nd = rb_first(symbols); 174 + nd = rb_first_cached(symbols); 175 175 176 176 while (nd) { 177 177 curr = rb_entry(nd, struct symbol, rb_node); ··· 186 186 continue; 187 187 188 188 if (choose_best_symbol(curr, next) == SYMBOL_A) { 189 - rb_erase(&next->rb_node, symbols); 189 + rb_erase_cached(&next->rb_node, symbols); 190 190 symbol__delete(next); 191 191 goto again; 192 192 } else { 193 193 nd = rb_next(&curr->rb_node); 194 - rb_erase(&curr->rb_node, symbols); 194 + rb_erase_cached(&curr->rb_node, symbols); 195 195 symbol__delete(curr); 196 196 } 197 197 } 198 198 } 199 199 200 - void symbols__fixup_end(struct rb_root *symbols) 200 + void symbols__fixup_end(struct rb_root_cached *symbols) 201 201 { 202 - struct rb_node *nd, *prevnd = rb_first(symbols); 202 + struct rb_node *nd, *prevnd = rb_first_cached(symbols); 203 203 struct symbol *curr, *prev; 204 204 205 205 if (prevnd == NULL) ··· 282 282 free(((void *)sym) - symbol_conf.priv_size); 283 283 } 284 284 285 - void symbols__delete(struct rb_root *symbols) 285 + void symbols__delete(struct rb_root_cached *symbols) 286 286 { 287 287 struct symbol *pos; 288 - struct rb_node *next = rb_first(symbols); 288 + struct rb_node *next = rb_first_cached(symbols); 289 289 290 290 while (next) { 291 291 pos = rb_entry(next, struct symbol, rb_node); 292 292 next = rb_next(&pos->rb_node); 293 - rb_erase(&pos->rb_node, symbols); 293 + rb_erase_cached(&pos->rb_node, symbols); 294 294 symbol__delete(pos); 295 295 } 296 296 } 297 297 298 - void __symbols__insert(struct rb_root *symbols, struct symbol *sym, bool kernel) 298 + void __symbols__insert(struct rb_root_cached *symbols, 299 + 
struct symbol *sym, bool kernel) 299 300 { 300 - struct rb_node **p = &symbols->rb_node; 301 + struct rb_node **p = &symbols->rb_root.rb_node; 301 302 struct rb_node *parent = NULL; 302 303 const u64 ip = sym->start; 303 304 struct symbol *s; 305 + bool leftmost = true; 304 306 305 307 if (kernel) { 306 308 const char *name = sym->name; ··· 320 318 s = rb_entry(parent, struct symbol, rb_node); 321 319 if (ip < s->start) 322 320 p = &(*p)->rb_left; 323 - else 321 + else { 324 322 p = &(*p)->rb_right; 323 + leftmost = false; 324 + } 325 325 } 326 326 rb_link_node(&sym->rb_node, parent, p); 327 - rb_insert_color(&sym->rb_node, symbols); 327 + rb_insert_color_cached(&sym->rb_node, symbols, leftmost); 328 328 } 329 329 330 - void symbols__insert(struct rb_root *symbols, struct symbol *sym) 330 + void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym) 331 331 { 332 332 __symbols__insert(symbols, sym, false); 333 333 } 334 334 335 - static struct symbol *symbols__find(struct rb_root *symbols, u64 ip) 335 + static struct symbol *symbols__find(struct rb_root_cached *symbols, u64 ip) 336 336 { 337 337 struct rb_node *n; 338 338 339 339 if (symbols == NULL) 340 340 return NULL; 341 341 342 - n = symbols->rb_node; 342 + n = symbols->rb_root.rb_node; 343 343 344 344 while (n) { 345 345 struct symbol *s = rb_entry(n, struct symbol, rb_node); ··· 357 353 return NULL; 358 354 } 359 355 360 - static struct symbol *symbols__first(struct rb_root *symbols) 356 + static struct symbol *symbols__first(struct rb_root_cached *symbols) 361 357 { 362 - struct rb_node *n = rb_first(symbols); 358 + struct rb_node *n = rb_first_cached(symbols); 363 359 364 360 if (n) 365 361 return rb_entry(n, struct symbol, rb_node); ··· 367 363 return NULL; 368 364 } 369 365 370 - static struct symbol *symbols__last(struct rb_root *symbols) 366 + static struct symbol *symbols__last(struct rb_root_cached *symbols) 371 367 { 372 - struct rb_node *n = rb_last(symbols); 368 + struct rb_node *n = 
rb_last(&symbols->rb_root); 373 369 374 370 if (n) 375 371 return rb_entry(n, struct symbol, rb_node); ··· 387 383 return NULL; 388 384 } 389 385 390 - static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym) 386 + static void symbols__insert_by_name(struct rb_root_cached *symbols, struct symbol *sym) 391 387 { 392 - struct rb_node **p = &symbols->rb_node; 388 + struct rb_node **p = &symbols->rb_root.rb_node; 393 389 struct rb_node *parent = NULL; 394 390 struct symbol_name_rb_node *symn, *s; 391 + bool leftmost = true; 395 392 396 393 symn = container_of(sym, struct symbol_name_rb_node, sym); 397 394 ··· 401 396 s = rb_entry(parent, struct symbol_name_rb_node, rb_node); 402 397 if (strcmp(sym->name, s->sym.name) < 0) 403 398 p = &(*p)->rb_left; 404 - else 399 + else { 405 400 p = &(*p)->rb_right; 401 + leftmost = false; 402 + } 406 403 } 407 404 rb_link_node(&symn->rb_node, parent, p); 408 - rb_insert_color(&symn->rb_node, symbols); 405 + rb_insert_color_cached(&symn->rb_node, symbols, leftmost); 409 406 } 410 407 411 - static void symbols__sort_by_name(struct rb_root *symbols, 412 - struct rb_root *source) 408 + static void symbols__sort_by_name(struct rb_root_cached *symbols, 409 + struct rb_root_cached *source) 413 410 { 414 411 struct rb_node *nd; 415 412 416 - for (nd = rb_first(source); nd; nd = rb_next(nd)) { 413 + for (nd = rb_first_cached(source); nd; nd = rb_next(nd)) { 417 414 struct symbol *pos = rb_entry(nd, struct symbol, rb_node); 418 415 symbols__insert_by_name(symbols, pos); 419 416 } ··· 438 431 return arch__compare_symbol_names(name, str); 439 432 } 440 433 441 - static struct symbol *symbols__find_by_name(struct rb_root *symbols, 434 + static struct symbol *symbols__find_by_name(struct rb_root_cached *symbols, 442 435 const char *name, 443 436 enum symbol_tag_include includes) 444 437 { ··· 448 441 if (symbols == NULL) 449 442 return NULL; 450 443 451 - n = symbols->rb_node; 444 + n = symbols->rb_root.rb_node; 452 445 
453 446 while (n) { 454 447 int cmp; ··· 651 644 { 652 645 struct symbol *sym; 653 646 struct dso *dso = arg; 654 - struct rb_root *root = &dso->symbols; 647 + struct rb_root_cached *root = &dso->symbols; 655 648 656 649 if (!symbol_type__filter(type)) 657 650 return 0; ··· 688 681 struct map *curr_map; 689 682 struct symbol *pos; 690 683 int count = 0; 691 - struct rb_root old_root = dso->symbols; 692 - struct rb_root *root = &dso->symbols; 693 - struct rb_node *next = rb_first(root); 684 + struct rb_root_cached old_root = dso->symbols; 685 + struct rb_root_cached *root = &dso->symbols; 686 + struct rb_node *next = rb_first_cached(root); 694 687 695 688 if (!kmaps) 696 689 return -1; 697 690 698 - *root = RB_ROOT; 691 + *root = RB_ROOT_CACHED; 699 692 700 693 while (next) { 701 694 char *module; ··· 703 696 pos = rb_entry(next, struct symbol, rb_node); 704 697 next = rb_next(&pos->rb_node); 705 698 706 - rb_erase_init(&pos->rb_node, &old_root); 707 - 699 + rb_erase_cached(&pos->rb_node, &old_root); 700 + RB_CLEAR_NODE(&pos->rb_node); 708 701 module = strchr(pos->name, '\t'); 709 702 if (module) 710 703 *module = '\0'; ··· 741 734 struct map *curr_map = initial_map; 742 735 struct symbol *pos; 743 736 int count = 0, moved = 0; 744 - struct rb_root *root = &dso->symbols; 745 - struct rb_node *next = rb_first(root); 737 + struct rb_root_cached *root = &dso->symbols; 738 + struct rb_node *next = rb_first_cached(root); 746 739 int kernel_range = 0; 747 740 bool x86_64; 748 741 ··· 856 849 } 857 850 add_symbol: 858 851 if (curr_map != initial_map) { 859 - rb_erase(&pos->rb_node, root); 852 + rb_erase_cached(&pos->rb_node, root); 860 853 symbols__insert(&curr_map->dso->symbols, pos); 861 854 ++moved; 862 855 } else ··· 864 857 865 858 continue; 866 859 discard_symbol: 867 - rb_erase(&pos->rb_node, root); 860 + rb_erase_cached(&pos->rb_node, root); 868 861 symbol__delete(pos); 869 862 } 870 863
+13 -75
tools/perf/util/symbol.h
··· 5 5 #include <linux/types.h> 6 6 #include <stdbool.h> 7 7 #include <stdint.h> 8 - #include "map.h" 9 - #include "../perf.h" 10 8 #include <linux/list.h> 11 9 #include <linux/rbtree.h> 12 10 #include <stdio.h> 13 - #include <byteswap.h> 14 - #include <libgen.h> 15 - #include "build-id.h" 16 - #include "event.h" 11 + #include "branch.h" 17 12 #include "path.h" 13 + #include "symbol_conf.h" 18 14 19 15 #ifdef HAVE_LIBELF_SUPPORT 20 16 #include <libelf.h> ··· 19 23 #include <elf.h> 20 24 21 25 #include "dso.h" 26 + 27 + struct map; 28 + struct map_groups; 29 + struct option; 22 30 23 31 /* 24 32 * libelf 0.8.x and earlier do not support ELF_C_READ_MMAP; ··· 68 68 }; 69 69 70 70 void symbol__delete(struct symbol *sym); 71 - void symbols__delete(struct rb_root *symbols); 71 + void symbols__delete(struct rb_root_cached *symbols); 72 72 73 73 /* symbols__for_each_entry - iterate over symbols (rb_root) 74 74 * ··· 77 77 * @nd: the 'struct rb_node *' to use as a temporary storage 78 78 */ 79 79 #define symbols__for_each_entry(symbols, pos, nd) \ 80 - for (nd = rb_first(symbols); \ 80 + for (nd = rb_first_cached(symbols); \ 81 81 nd && (pos = rb_entry(nd, struct symbol, rb_node)); \ 82 82 nd = rb_next(nd)) 83 83 ··· 88 88 89 89 struct strlist; 90 90 struct intlist; 91 - 92 - struct symbol_conf { 93 - unsigned short priv_size; 94 - bool try_vmlinux_path, 95 - init_annotation, 96 - force, 97 - ignore_vmlinux, 98 - ignore_vmlinux_buildid, 99 - show_kernel_path, 100 - use_modules, 101 - allow_aliases, 102 - sort_by_name, 103 - show_nr_samples, 104 - show_total_period, 105 - use_callchain, 106 - cumulate_callchain, 107 - show_branchflag_count, 108 - exclude_other, 109 - show_cpu_utilization, 110 - initialized, 111 - kptr_restrict, 112 - event_group, 113 - demangle, 114 - demangle_kernel, 115 - filter_relative, 116 - show_hist_headers, 117 - branch_callstack, 118 - has_filter, 119 - show_ref_callgraph, 120 - hide_unresolved, 121 - raw_trace, 122 - report_hierarchy, 123 - 
inline_name; 124 - const char *vmlinux_name, 125 - *kallsyms_name, 126 - *source_prefix, 127 - *field_sep, 128 - *graph_function; 129 - const char *default_guest_vmlinux_name, 130 - *default_guest_kallsyms, 131 - *default_guest_modules; 132 - const char *guestmount; 133 - const char *dso_list_str, 134 - *comm_list_str, 135 - *pid_list_str, 136 - *tid_list_str, 137 - *sym_list_str, 138 - *col_width_list_str, 139 - *bt_stop_list_str; 140 - struct strlist *dso_list, 141 - *comm_list, 142 - *sym_list, 143 - *dso_from_list, 144 - *dso_to_list, 145 - *sym_from_list, 146 - *sym_to_list, 147 - *bt_stop_list; 148 - struct intlist *pid_list, 149 - *tid_list; 150 - const char *symfs; 151 - }; 152 - 153 - extern struct symbol_conf symbol_conf; 154 91 155 92 struct symbol_name_rb_node { 156 93 struct rb_node rb_node; ··· 247 310 248 311 char *dso__demangle_sym(struct dso *dso, int kmodule, const char *elf_name); 249 312 250 - void __symbols__insert(struct rb_root *symbols, struct symbol *sym, bool kernel); 251 - void symbols__insert(struct rb_root *symbols, struct symbol *sym); 252 - void symbols__fixup_duplicate(struct rb_root *symbols); 253 - void symbols__fixup_end(struct rb_root *symbols); 313 + void __symbols__insert(struct rb_root_cached *symbols, struct symbol *sym, 314 + bool kernel); 315 + void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym); 316 + void symbols__fixup_duplicate(struct rb_root_cached *symbols); 317 + void symbols__fixup_end(struct rb_root_cached *symbols); 254 318 void map_groups__fixup_end(struct map_groups *mg); 255 319 256 320 typedef int (*mapfn_t)(u64 start, u64 len, u64 pgoff, void *data);
+73
tools/perf/util/symbol_conf.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __PERF_SYMBOL_CONF 3 + #define __PERF_SYMBOL_CONF 1 4 + 5 + #include <stdbool.h> 6 + 7 + struct strlist; 8 + struct intlist; 9 + 10 + struct symbol_conf { 11 + unsigned short priv_size; 12 + bool try_vmlinux_path, 13 + init_annotation, 14 + force, 15 + ignore_vmlinux, 16 + ignore_vmlinux_buildid, 17 + show_kernel_path, 18 + use_modules, 19 + allow_aliases, 20 + sort_by_name, 21 + show_nr_samples, 22 + show_total_period, 23 + use_callchain, 24 + cumulate_callchain, 25 + show_branchflag_count, 26 + exclude_other, 27 + show_cpu_utilization, 28 + initialized, 29 + kptr_restrict, 30 + event_group, 31 + demangle, 32 + demangle_kernel, 33 + filter_relative, 34 + show_hist_headers, 35 + branch_callstack, 36 + has_filter, 37 + show_ref_callgraph, 38 + hide_unresolved, 39 + raw_trace, 40 + report_hierarchy, 41 + inline_name; 42 + const char *vmlinux_name, 43 + *kallsyms_name, 44 + *source_prefix, 45 + *field_sep, 46 + *graph_function; 47 + const char *default_guest_vmlinux_name, 48 + *default_guest_kallsyms, 49 + *default_guest_modules; 50 + const char *guestmount; 51 + const char *dso_list_str, 52 + *comm_list_str, 53 + *pid_list_str, 54 + *tid_list_str, 55 + *sym_list_str, 56 + *col_width_list_str, 57 + *bt_stop_list_str; 58 + struct strlist *dso_list, 59 + *comm_list, 60 + *sym_list, 61 + *dso_from_list, 62 + *dso_to_list, 63 + *sym_from_list, 64 + *sym_to_list, 65 + *bt_stop_list; 66 + struct intlist *pid_list, 67 + *tid_list; 68 + const char *symfs; 69 + }; 70 + 71 + extern struct symbol_conf symbol_conf; 72 + 73 + #endif // __PERF_SYMBOL_CONF
+2 -1
tools/perf/util/symbol_fprintf.c
··· 3 3 #include <inttypes.h> 4 4 #include <stdio.h> 5 5 6 + #include "map.h" 6 7 #include "symbol.h" 7 8 8 9 size_t symbol__fprintf(struct symbol *sym, FILE *fp) ··· 65 64 struct rb_node *nd; 66 65 struct symbol_name_rb_node *pos; 67 66 68 - for (nd = rb_first(&dso->symbol_names); nd; nd = rb_next(nd)) { 67 + for (nd = rb_first_cached(&dso->symbol_names); nd; nd = rb_next(nd)) { 69 68 pos = rb_entry(nd, struct symbol_name_rb_node, rb_node); 70 69 fprintf(fp, "%s\n", pos->sym.name); 71 70 }
+1
tools/perf/util/thread.h
··· 13 13 #include <intlist.h> 14 14 #include "rwsem.h" 15 15 16 + struct namespaces_event; 16 17 struct thread_stack; 17 18 struct unwind_libunwind_ops; 18 19
+1
tools/perf/util/util.c
··· 2 2 #include "../perf.h" 3 3 #include "util.h" 4 4 #include "debug.h" 5 + #include "namespaces.h" 5 6 #include <api/fs/fs.h> 6 7 #include <sys/mman.h> 7 8 #include <sys/stat.h>