Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf machine: Move machine's threads into its own abstraction

Move thread_rb_node into the machine.c file. This hides the
implementation of threads from the rest of the code allowing for it to
be refactored.

Locking discipline is tightened up in this change. As the lock is now
encapsulated in threads, the findnew function requires holding it (as
it already did in machine). Rather than do conditionals with locks
based on whether the thread should be created (which could potentially
be error prone with a read lock matched with a write unlock), have a
separate threads__find that won't create the thread and only holds the
read lock. This effectively duplicates the findnew logic, with the
existing findnew logic only operating under a write lock assuming
creation is necessary as a previous find failed. The creation may
still fail with the write lock due to another thread. The duplication
is removed in a later patch that delegates the implementation to a
hashtable.

Signed-off-by: Ian Rogers <irogers@google.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: Yang Jihong <yangjihong1@huawei.com>
Cc: Oliver Upton <oliver.upton@linux.dev>
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20240301053646.1449657-5-irogers@google.com

authored by

Ian Rogers and committed by
Namhyung Kim
d436f90a 45ac4960

+250 -210
+2 -2
tools/perf/util/bpf_lock_contention.c
··· 328 328 329 329 /* do not update idle comm which contains CPU number */ 330 330 if (pid) { 331 - struct thread *t = __machine__findnew_thread(machine, /*pid=*/-1, pid); 331 + struct thread *t = machine__findnew_thread(machine, /*pid=*/-1, pid); 332 332 333 333 if (t == NULL) 334 334 return name; ··· 422 422 account_end_timestamp(con); 423 423 424 424 if (con->aggr_mode == LOCK_AGGR_TASK) { 425 - struct thread *idle = __machine__findnew_thread(machine, 425 + struct thread *idle = machine__findnew_thread(machine, 426 426 /*pid=*/0, 427 427 /*tid=*/0); 428 428 thread__set_comm(idle, "swapper", /*timestamp=*/0);
+230 -192
tools/perf/util/machine.c
··· 43 43 #include <linux/string.h> 44 44 #include <linux/zalloc.h> 45 45 46 - static void __machine__remove_thread(struct machine *machine, struct thread_rb_node *nd, 47 - struct thread *th, bool lock); 46 + struct thread_rb_node { 47 + struct rb_node rb_node; 48 + struct thread *thread; 49 + }; 50 + 51 + static struct threads_table_entry *threads__table(struct threads *threads, pid_t tid) 52 + { 53 + /* Cast it to handle tid == -1 */ 54 + return &threads->table[(unsigned int)tid % THREADS__TABLE_SIZE]; 55 + } 48 56 49 57 static struct dso *machine__kernel_dso(struct machine *machine) 50 58 { ··· 66 58 init_rwsem(&dsos->lock); 67 59 } 68 60 69 - static void machine__threads_init(struct machine *machine) 61 + void threads__init(struct threads *threads) 70 62 { 71 - int i; 63 + for (int i = 0; i < THREADS__TABLE_SIZE; i++) { 64 + struct threads_table_entry *table = &threads->table[i]; 72 65 73 - for (i = 0; i < THREADS__TABLE_SIZE; i++) { 74 - struct threads *threads = &machine->threads[i]; 75 - threads->entries = RB_ROOT_CACHED; 76 - init_rwsem(&threads->lock); 77 - threads->nr = 0; 78 - threads->last_match = NULL; 66 + table->entries = RB_ROOT_CACHED; 67 + init_rwsem(&table->lock); 68 + table->nr = 0; 69 + table->last_match = NULL; 79 70 } 80 - } 81 - 82 - static int thread_rb_node__cmp_tid(const void *key, const struct rb_node *nd) 83 - { 84 - int to_find = (int) *((pid_t *)key); 85 - 86 - return to_find - (int)thread__tid(rb_entry(nd, struct thread_rb_node, rb_node)->thread); 87 - } 88 - 89 - static struct thread_rb_node *thread_rb_node__find(const struct thread *th, 90 - struct rb_root *tree) 91 - { 92 - pid_t to_find = thread__tid(th); 93 - struct rb_node *nd = rb_find(&to_find, tree, thread_rb_node__cmp_tid); 94 - 95 - return rb_entry(nd, struct thread_rb_node, rb_node); 96 71 } 97 72 98 73 static int machine__set_mmap_name(struct machine *machine) ··· 111 120 RB_CLEAR_NODE(&machine->rb_node); 112 121 dsos__init(&machine->dsos); 113 122 114 - 
machine__threads_init(machine); 123 + threads__init(&machine->threads); 115 124 116 125 machine->vdso_info = NULL; 117 126 machine->env = NULL; ··· 210 219 exit_rwsem(&dsos->lock); 211 220 } 212 221 213 - void machine__delete_threads(struct machine *machine) 214 - { 215 - struct rb_node *nd; 216 - int i; 222 + static void __threads_table_entry__set_last_match(struct threads_table_entry *table, 223 + struct thread *th); 217 224 218 - for (i = 0; i < THREADS__TABLE_SIZE; i++) { 219 - struct threads *threads = &machine->threads[i]; 220 - down_write(&threads->lock); 221 - nd = rb_first_cached(&threads->entries); 225 + void threads__remove_all_threads(struct threads *threads) 226 + { 227 + for (int i = 0; i < THREADS__TABLE_SIZE; i++) { 228 + struct threads_table_entry *table = &threads->table[i]; 229 + struct rb_node *nd; 230 + 231 + down_write(&table->lock); 232 + __threads_table_entry__set_last_match(table, NULL); 233 + nd = rb_first_cached(&table->entries); 222 234 while (nd) { 223 235 struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node); 224 236 225 237 nd = rb_next(nd); 226 - __machine__remove_thread(machine, trb, trb->thread, false); 238 + thread__put(trb->thread); 239 + rb_erase_cached(&trb->rb_node, &table->entries); 240 + RB_CLEAR_NODE(&trb->rb_node); 241 + --table->nr; 242 + 243 + free(trb); 227 244 } 228 - up_write(&threads->lock); 245 + assert(table->nr == 0); 246 + up_write(&table->lock); 247 + } 248 + } 249 + 250 + void machine__delete_threads(struct machine *machine) 251 + { 252 + threads__remove_all_threads(&machine->threads); 253 + } 254 + 255 + void threads__exit(struct threads *threads) 256 + { 257 + threads__remove_all_threads(threads); 258 + for (int i = 0; i < THREADS__TABLE_SIZE; i++) { 259 + struct threads_table_entry *table = &threads->table[i]; 260 + 261 + exit_rwsem(&table->lock); 229 262 } 230 263 } 231 264 232 265 void machine__exit(struct machine *machine) 233 266 { 234 - int i; 235 - 236 267 if (machine == NULL) 237 
268 return; 238 269 ··· 267 254 zfree(&machine->current_tid); 268 255 zfree(&machine->kallsyms_filename); 269 256 270 - machine__delete_threads(machine); 271 - for (i = 0; i < THREADS__TABLE_SIZE; i++) { 272 - struct threads *threads = &machine->threads[i]; 273 - 274 - exit_rwsem(&threads->lock); 275 - } 257 + threads__exit(&machine->threads); 276 258 } 277 259 278 260 void machine__delete(struct machine *machine) ··· 534 526 if (thread__pid(th) == thread__tid(th)) 535 527 return; 536 528 537 - leader = __machine__findnew_thread(machine, thread__pid(th), thread__pid(th)); 529 + leader = machine__findnew_thread(machine, thread__pid(th), thread__pid(th)); 538 530 if (!leader) 539 531 goto out_err; 540 532 ··· 573 565 * so most of the time we dont have to look up 574 566 * the full rbtree: 575 567 */ 576 - static struct thread* 577 - __threads__get_last_match(struct threads *threads, struct machine *machine, 578 - int pid, int tid) 568 + static struct thread *__threads_table_entry__get_last_match(struct threads_table_entry *table, 569 + pid_t tid) 579 570 { 580 - struct thread *th; 571 + struct thread *th, *res = NULL; 581 572 582 - th = threads->last_match; 573 + th = table->last_match; 583 574 if (th != NULL) { 584 - if (thread__tid(th) == tid) { 585 - machine__update_thread_pid(machine, th, pid); 586 - return thread__get(th); 587 - } 588 - thread__put(threads->last_match); 589 - threads->last_match = NULL; 575 + if (thread__tid(th) == tid) 576 + res = thread__get(th); 590 577 } 591 - 592 - return NULL; 578 + return res; 593 579 } 594 580 595 - static struct thread* 596 - threads__get_last_match(struct threads *threads, struct machine *machine, 597 - int pid, int tid) 581 + static void __threads_table_entry__set_last_match(struct threads_table_entry *table, 582 + struct thread *th) 598 583 { 599 - struct thread *th = NULL; 600 - 601 - if (perf_singlethreaded) 602 - th = __threads__get_last_match(threads, machine, pid, tid); 603 - 604 - return th; 584 + 
thread__put(table->last_match); 585 + table->last_match = thread__get(th); 605 586 } 606 587 607 - static void 608 - __threads__set_last_match(struct threads *threads, struct thread *th) 588 + static void threads_table_entry__set_last_match(struct threads_table_entry *table, 589 + struct thread *th) 609 590 { 610 - thread__put(threads->last_match); 611 - threads->last_match = thread__get(th); 591 + down_write(&table->lock); 592 + __threads_table_entry__set_last_match(table, th); 593 + up_write(&table->lock); 612 594 } 613 595 614 - static void 615 - threads__set_last_match(struct threads *threads, struct thread *th) 596 + struct thread *threads__find(struct threads *threads, pid_t tid) 616 597 { 617 - if (perf_singlethreaded) 618 - __threads__set_last_match(threads, th); 598 + struct threads_table_entry *table = threads__table(threads, tid); 599 + struct rb_node **p; 600 + struct thread *res = NULL; 601 + 602 + down_read(&table->lock); 603 + res = __threads_table_entry__get_last_match(table, tid); 604 + if (res) 605 + return res; 606 + 607 + p = &table->entries.rb_root.rb_node; 608 + while (*p != NULL) { 609 + struct rb_node *parent = *p; 610 + struct thread *th = rb_entry(parent, struct thread_rb_node, rb_node)->thread; 611 + 612 + if (thread__tid(th) == tid) { 613 + res = thread__get(th); 614 + break; 615 + } 616 + 617 + if (tid < thread__tid(th)) 618 + p = &(*p)->rb_left; 619 + else 620 + p = &(*p)->rb_right; 621 + } 622 + up_read(&table->lock); 623 + if (res) 624 + threads_table_entry__set_last_match(table, res); 625 + return res; 619 626 } 620 627 621 - /* 622 - * Caller must eventually drop thread->refcnt returned with a successful 623 - * lookup/new thread inserted. 
624 - */ 625 - static struct thread *____machine__findnew_thread(struct machine *machine, 626 - struct threads *threads, 627 - pid_t pid, pid_t tid, 628 - bool create) 628 + struct thread *threads__findnew(struct threads *threads, pid_t pid, pid_t tid, bool *created) 629 629 { 630 - struct rb_node **p = &threads->entries.rb_root.rb_node; 630 + struct threads_table_entry *table = threads__table(threads, tid); 631 + struct rb_node **p; 631 632 struct rb_node *parent = NULL; 632 - struct thread *th; 633 + struct thread *res = NULL; 633 634 struct thread_rb_node *nd; 634 635 bool leftmost = true; 635 636 636 - th = threads__get_last_match(threads, machine, pid, tid); 637 - if (th) 638 - return th; 639 - 637 + *created = false; 638 + down_write(&table->lock); 639 + p = &table->entries.rb_root.rb_node; 640 640 while (*p != NULL) { 641 + struct thread *th; 642 + 641 643 parent = *p; 642 644 th = rb_entry(parent, struct thread_rb_node, rb_node)->thread; 643 645 644 646 if (thread__tid(th) == tid) { 645 - threads__set_last_match(threads, th); 646 - machine__update_thread_pid(machine, th, pid); 647 - return thread__get(th); 647 + __threads_table_entry__set_last_match(table, th); 648 + res = thread__get(th); 649 + goto out_unlock; 648 650 } 649 651 650 652 if (tid < thread__tid(th)) ··· 664 646 leftmost = false; 665 647 } 666 648 } 649 + nd = malloc(sizeof(*nd)); 650 + if (nd == NULL) 651 + goto out_unlock; 652 + res = thread__new(pid, tid); 653 + if (!res) 654 + free(nd); 655 + else { 656 + *created = true; 657 + nd->thread = thread__get(res); 658 + rb_link_node(&nd->rb_node, parent, p); 659 + rb_insert_color_cached(&nd->rb_node, &table->entries, leftmost); 660 + ++table->nr; 661 + __threads_table_entry__set_last_match(table, res); 662 + } 663 + out_unlock: 664 + up_write(&table->lock); 665 + return res; 666 + } 667 + 668 + /* 669 + * Caller must eventually drop thread->refcnt returned with a successful 670 + * lookup/new thread inserted. 
671 + */ 672 + static struct thread *__machine__findnew_thread(struct machine *machine, 673 + pid_t pid, 674 + pid_t tid, 675 + bool create) 676 + { 677 + struct thread *th = threads__find(&machine->threads, tid); 678 + bool created; 679 + 680 + if (th) { 681 + machine__update_thread_pid(machine, th, pid); 682 + return th; 683 + } 667 684 668 685 if (!create) 669 686 return NULL; 670 687 671 - th = thread__new(pid, tid); 672 - if (th == NULL) 673 - return NULL; 688 + th = threads__findnew(&machine->threads, pid, tid, &created); 689 + if (created) { 690 + /* 691 + * We have to initialize maps separately after rb tree is 692 + * updated. 693 + * 694 + * The reason is that we call machine__findnew_thread within 695 + * thread__init_maps to find the thread leader and that would 696 + * screwed the rb tree. 697 + */ 698 + if (thread__init_maps(th, machine)) { 699 + pr_err("Thread init failed thread %d\n", pid); 700 + threads__remove(&machine->threads, th); 701 + thread__put(th); 702 + return NULL; 703 + } 704 + } else 705 + machine__update_thread_pid(machine, th, pid); 674 706 675 - nd = malloc(sizeof(*nd)); 676 - if (nd == NULL) { 677 - thread__put(th); 678 - return NULL; 679 - } 680 - nd->thread = th; 681 - 682 - rb_link_node(&nd->rb_node, parent, p); 683 - rb_insert_color_cached(&nd->rb_node, &threads->entries, leftmost); 684 - /* 685 - * We have to initialize maps separately after rb tree is updated. 686 - * 687 - * The reason is that we call machine__findnew_thread within 688 - * thread__init_maps to find the thread leader and that would screwed 689 - * the rb tree. 
690 - */ 691 - if (thread__init_maps(th, machine)) { 692 - pr_err("Thread init failed thread %d\n", pid); 693 - rb_erase_cached(&nd->rb_node, &threads->entries); 694 - RB_CLEAR_NODE(&nd->rb_node); 695 - free(nd); 696 - thread__put(th); 697 - return NULL; 698 - } 699 - /* 700 - * It is now in the rbtree, get a ref 701 - */ 702 - threads__set_last_match(threads, th); 703 - ++threads->nr; 704 - 705 - return thread__get(th); 706 - } 707 - 708 - struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid) 709 - { 710 - return ____machine__findnew_thread(machine, machine__threads(machine, tid), pid, tid, true); 711 - } 712 - 713 - struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, 714 - pid_t tid) 715 - { 716 - struct threads *threads = machine__threads(machine, tid); 717 - struct thread *th; 718 - 719 - down_write(&threads->lock); 720 - th = __machine__findnew_thread(machine, pid, tid); 721 - up_write(&threads->lock); 722 707 return th; 708 + } 709 + 710 + struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid) 711 + { 712 + return __machine__findnew_thread(machine, pid, tid, /*create=*/true); 723 713 } 724 714 725 715 struct thread *machine__find_thread(struct machine *machine, pid_t pid, 726 716 pid_t tid) 727 717 { 728 - struct threads *threads = machine__threads(machine, tid); 729 - struct thread *th; 730 - 731 - down_read(&threads->lock); 732 - th = ____machine__findnew_thread(machine, threads, pid, tid, false); 733 - up_read(&threads->lock); 734 - return th; 718 + return __machine__findnew_thread(machine, pid, tid, /*create=*/false); 735 719 } 736 720 737 721 /* ··· 1147 1127 return 0; 1148 1128 } 1149 1129 1150 - static size_t machine__threads_nr(const struct machine *machine) 1130 + size_t threads__nr(struct threads *threads) 1151 1131 { 1152 1132 size_t nr = 0; 1153 1133 1154 - for (int i = 0; i < THREADS__TABLE_SIZE; i++) 1155 - nr += machine->threads[i].nr; 1134 + for (int i = 0; 
i < THREADS__TABLE_SIZE; i++) { 1135 + struct threads_table_entry *table = &threads->table[i]; 1156 1136 1137 + down_read(&table->lock); 1138 + nr += table->nr; 1139 + up_read(&table->lock); 1140 + } 1157 1141 return nr; 1158 1142 } 1159 1143 ··· 1167 1143 .fp = fp, 1168 1144 .printed = 0, 1169 1145 }; 1170 - size_t ret = fprintf(fp, "Threads: %zu\n", machine__threads_nr(machine)); 1146 + size_t ret = fprintf(fp, "Threads: %zu\n", threads__nr(&machine->threads)); 1171 1147 1172 1148 machine__for_each_thread(machine, machine_fprintf_cb, &args); 1173 1149 return ret + args.printed; ··· 2093 2069 return 0; 2094 2070 } 2095 2071 2096 - static void __machine__remove_thread(struct machine *machine, struct thread_rb_node *nd, 2097 - struct thread *th, bool lock) 2072 + void threads__remove(struct threads *threads, struct thread *thread) 2098 2073 { 2099 - struct threads *threads = machine__threads(machine, thread__tid(th)); 2074 + struct rb_node **p; 2075 + struct threads_table_entry *table = threads__table(threads, thread__tid(thread)); 2076 + pid_t tid = thread__tid(thread); 2100 2077 2101 - if (!nd) 2102 - nd = thread_rb_node__find(th, &threads->entries.rb_root); 2078 + down_write(&table->lock); 2079 + if (table->last_match && RC_CHK_EQUAL(table->last_match, thread)) 2080 + __threads_table_entry__set_last_match(table, NULL); 2103 2081 2104 - if (threads->last_match && RC_CHK_EQUAL(threads->last_match, th)) 2105 - threads__set_last_match(threads, NULL); 2082 + p = &table->entries.rb_root.rb_node; 2083 + while (*p != NULL) { 2084 + struct rb_node *parent = *p; 2085 + struct thread_rb_node *nd = rb_entry(parent, struct thread_rb_node, rb_node); 2086 + struct thread *th = nd->thread; 2106 2087 2107 - if (lock) 2108 - down_write(&threads->lock); 2088 + if (RC_CHK_EQUAL(th, thread)) { 2089 + thread__put(nd->thread); 2090 + rb_erase_cached(&nd->rb_node, &table->entries); 2091 + RB_CLEAR_NODE(&nd->rb_node); 2092 + --table->nr; 2093 + free(nd); 2094 + break; 2095 + } 2109 2096 
2110 - BUG_ON(refcount_read(thread__refcnt(th)) == 0); 2111 - 2112 - thread__put(nd->thread); 2113 - rb_erase_cached(&nd->rb_node, &threads->entries); 2114 - RB_CLEAR_NODE(&nd->rb_node); 2115 - --threads->nr; 2116 - 2117 - free(nd); 2118 - 2119 - if (lock) 2120 - up_write(&threads->lock); 2097 + if (tid < thread__tid(th)) 2098 + p = &(*p)->rb_left; 2099 + else 2100 + p = &(*p)->rb_right; 2101 + } 2102 + up_write(&table->lock); 2121 2103 } 2122 2104 2123 2105 void machine__remove_thread(struct machine *machine, struct thread *th) 2124 2106 { 2125 - return __machine__remove_thread(machine, NULL, th, true); 2107 + return threads__remove(&machine->threads, th); 2126 2108 } 2127 2109 2128 2110 int machine__process_fork_event(struct machine *machine, union perf_event *event, ··· 3258 3228 return ret; 3259 3229 } 3260 3230 3231 + int threads__for_each_thread(struct threads *threads, 3232 + int (*fn)(struct thread *thread, void *data), 3233 + void *data) 3234 + { 3235 + for (int i = 0; i < THREADS__TABLE_SIZE; i++) { 3236 + struct threads_table_entry *table = &threads->table[i]; 3237 + struct rb_node *nd; 3238 + 3239 + down_read(&table->lock); 3240 + for (nd = rb_first_cached(&table->entries); nd; nd = rb_next(nd)) { 3241 + struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node); 3242 + int rc = fn(trb->thread, data); 3243 + 3244 + if (rc != 0) { 3245 + up_read(&table->lock); 3246 + return rc; 3247 + } 3248 + } 3249 + up_read(&table->lock); 3250 + } 3251 + return 0; 3252 + 3253 + } 3254 + 3261 3255 int machine__for_each_thread(struct machine *machine, 3262 3256 int (*fn)(struct thread *thread, void *p), 3263 3257 void *priv) 3264 3258 { 3265 - struct threads *threads; 3266 - struct rb_node *nd; 3267 - int rc = 0; 3268 - int i; 3269 - 3270 - for (i = 0; i < THREADS__TABLE_SIZE; i++) { 3271 - threads = &machine->threads[i]; 3272 - for (nd = rb_first_cached(&threads->entries); nd; 3273 - nd = rb_next(nd)) { 3274 - struct thread_rb_node *trb = rb_entry(nd, 
struct thread_rb_node, rb_node); 3275 - 3276 - rc = fn(trb->thread, priv); 3277 - if (rc != 0) 3278 - return rc; 3279 - } 3280 - } 3281 - return rc; 3259 + return threads__for_each_thread(&machine->threads, fn, priv); 3282 3260 } 3283 3261 3284 3262 int machines__for_each_thread(struct machines *machines,
+17 -9
tools/perf/util/machine.h
··· 31 31 #define THREADS__TABLE_BITS 8 32 32 #define THREADS__TABLE_SIZE (1 << THREADS__TABLE_BITS) 33 33 34 - struct threads { 34 + struct threads_table_entry { 35 35 struct rb_root_cached entries; 36 36 struct rw_semaphore lock; 37 37 unsigned int nr; 38 38 struct thread *last_match; 39 39 }; 40 + 41 + struct threads { 42 + struct threads_table_entry table[THREADS__TABLE_SIZE]; 43 + }; 44 + 45 + void threads__init(struct threads *threads); 46 + void threads__exit(struct threads *threads); 47 + size_t threads__nr(struct threads *threads); 48 + struct thread *threads__find(struct threads *threads, pid_t tid); 49 + struct thread *threads__findnew(struct threads *threads, pid_t pid, pid_t tid, bool *created); 50 + void threads__remove_all_threads(struct threads *threads); 51 + void threads__remove(struct threads *threads, struct thread *thread); 52 + int threads__for_each_thread(struct threads *threads, 53 + int (*fn)(struct thread *thread, void *data), 54 + void *data); 40 55 41 56 struct machine { 42 57 struct rb_node rb_node; ··· 63 48 char *root_dir; 64 49 char *mmap_name; 65 50 char *kallsyms_filename; 66 - struct threads threads[THREADS__TABLE_SIZE]; 51 + struct threads threads; 67 52 struct vdso_info *vdso_info; 68 53 struct perf_env *env; 69 54 struct dsos dsos; ··· 83 68 struct machines *machines; 84 69 bool trampolines_mapped; 85 70 }; 86 - 87 - static inline struct threads *machine__threads(struct machine *machine, pid_t tid) 88 - { 89 - /* Cast it to handle tid == -1 */ 90 - return &machine->threads[(unsigned int)tid % THREADS__TABLE_SIZE]; 91 - } 92 71 93 72 /* 94 73 * The main kernel (vmlinux) map ··· 229 220 bool machine__normalized_is(struct machine *machine, const char *arch); 230 221 int machine__nr_cpus_avail(struct machine *machine); 231 222 232 - struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid); 233 223 struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid); 234 224 235 225 
struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename, struct dso_id *id);
+1 -1
tools/perf/util/thread.c
··· 26 26 if (pid == thread__tid(thread) || pid == -1) { 27 27 thread__set_maps(thread, maps__new(machine)); 28 28 } else { 29 - struct thread *leader = __machine__findnew_thread(machine, pid, pid); 29 + struct thread *leader = machine__findnew_thread(machine, pid, pid); 30 30 31 31 if (leader) { 32 32 thread__set_maps(thread, maps__get(thread__maps(leader)));
-6
tools/perf/util/thread.h
··· 3 3 #define __PERF_THREAD_H 4 4 5 5 #include <linux/refcount.h> 6 - #include <linux/rbtree.h> 7 6 #include <linux/list.h> 8 7 #include <stdio.h> 9 8 #include <unistd.h> ··· 26 27 struct list_head free_lists; 27 28 struct perf_sample prev_sample; 28 29 struct callchain_cursor_node *prev_lbr_cursor; 29 - }; 30 - 31 - struct thread_rb_node { 32 - struct rb_node rb_node; 33 - struct thread *thread; 34 30 }; 35 31 36 32 DECLARE_RC_STRUCT(thread) {