Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf tools: Rename struct thread_map to struct perf_thread_map

Rename struct thread_map to struct perf_thread_map, so it could be part
of libperf.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexey Budankov <alexey.budankov@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20190721112506.12306-4-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

Authored by Jiri Olsa; committed by Arnaldo Carvalho de Melo.
Commit: 9749b90e  Parent: f854839b

+102 -102
+1 -1
tools/perf/arch/x86/tests/perf-time-to-tsc.c
··· 49 49 }, 50 50 .sample_time = true, 51 51 }; 52 - struct thread_map *threads = NULL; 52 + struct perf_thread_map *threads = NULL; 53 53 struct perf_cpu_map *cpus = NULL; 54 54 struct perf_evlist *evlist = NULL; 55 55 struct perf_evsel *evsel = NULL;
+1 -1
tools/perf/builtin-record.c
··· 1047 1047 static int record__synthesize_workload(struct record *rec, bool tail) 1048 1048 { 1049 1049 int err; 1050 - struct thread_map *thread_map; 1050 + struct perf_thread_map *thread_map; 1051 1051 1052 1052 if (rec->opts.tail_synthesize != tail) 1053 1053 return 0;
+2 -2
tools/perf/builtin-sched.c
··· 159 159 DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS); 160 160 int *comp_cpus; 161 161 bool comp; 162 - struct thread_map *color_pids; 162 + struct perf_thread_map *color_pids; 163 163 const char *color_pids_str; 164 164 struct perf_cpu_map *color_cpus; 165 165 const char *color_cpus_str; ··· 3195 3195 3196 3196 static int setup_color_pids(struct perf_sched *sched) 3197 3197 { 3198 - struct thread_map *map; 3198 + struct perf_thread_map *map; 3199 3199 3200 3200 if (!sched->map.color_pids_str) 3201 3201 return 0;
+1 -1
tools/perf/builtin-script.c
··· 1628 1628 bool allocated; 1629 1629 bool per_event_dump; 1630 1630 struct perf_cpu_map *cpus; 1631 - struct thread_map *threads; 1631 + struct perf_thread_map *threads; 1632 1632 int name_width; 1633 1633 const char *time_str; 1634 1634 struct perf_time_interval *ptime_range;
+2 -2
tools/perf/builtin-stat.c
··· 165 165 struct perf_tool tool; 166 166 bool maps_allocated; 167 167 struct perf_cpu_map *cpus; 168 - struct thread_map *threads; 168 + struct perf_thread_map *threads; 169 169 enum aggr_mode aggr_mode; 170 170 }; 171 171 ··· 395 395 } 396 396 397 397 static bool is_target_alive(struct target *_target, 398 - struct thread_map *threads) 398 + struct perf_thread_map *threads) 399 399 { 400 400 struct stat st; 401 401 int i;
+1 -1
tools/perf/tests/code-reading.c
··· 552 552 struct state state = { 553 553 .done_cnt = 0, 554 554 }; 555 - struct thread_map *threads = NULL; 555 + struct perf_thread_map *threads = NULL; 556 556 struct perf_cpu_map *cpus = NULL; 557 557 struct perf_evlist *evlist = NULL; 558 558 struct perf_evsel *evsel = NULL;
+2 -2
tools/perf/tests/event-times.c
··· 57 57 static int attach__current_disabled(struct perf_evlist *evlist) 58 58 { 59 59 struct perf_evsel *evsel = perf_evlist__last(evlist); 60 - struct thread_map *threads; 60 + struct perf_thread_map *threads; 61 61 int err; 62 62 63 63 pr_debug("attaching to current thread as disabled\n"); ··· 83 83 static int attach__current_enabled(struct perf_evlist *evlist) 84 84 { 85 85 struct perf_evsel *evsel = perf_evlist__last(evlist); 86 - struct thread_map *threads; 86 + struct perf_thread_map *threads; 87 87 int err; 88 88 89 89 pr_debug("attaching to current thread as enabled\n");
+1 -1
tools/perf/tests/keep-tracking.c
··· 65 65 .uses_mmap = true, 66 66 }, 67 67 }; 68 - struct thread_map *threads = NULL; 68 + struct perf_thread_map *threads = NULL; 69 69 struct perf_cpu_map *cpus = NULL; 70 70 struct perf_evlist *evlist = NULL; 71 71 struct perf_evsel *evsel = NULL;
+1 -1
tools/perf/tests/mmap-basic.c
··· 27 27 { 28 28 int err = -1; 29 29 union perf_event *event; 30 - struct thread_map *threads; 30 + struct perf_thread_map *threads; 31 31 struct perf_cpu_map *cpus; 32 32 struct perf_evlist *evlist; 33 33 cpu_set_t cpu_set;
+1 -1
tools/perf/tests/mmap-thread-lookup.c
··· 138 138 139 139 static int synth_process(struct machine *machine) 140 140 { 141 - struct thread_map *map; 141 + struct perf_thread_map *map; 142 142 int err; 143 143 144 144 map = thread_map__new_by_pid(getpid());
+1 -1
tools/perf/tests/openat-syscall-all-cpus.c
··· 24 24 struct perf_evsel *evsel; 25 25 unsigned int nr_openat_calls = 111, i; 26 26 cpu_set_t cpu_set; 27 - struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX); 27 + struct perf_thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX); 28 28 char sbuf[STRERR_BUFSIZE]; 29 29 char errbuf[BUFSIZ]; 30 30
+1 -1
tools/perf/tests/openat-syscall.c
··· 16 16 int err = -1, fd; 17 17 struct perf_evsel *evsel; 18 18 unsigned int nr_openat_calls = 111, i; 19 - struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX); 19 + struct perf_thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX); 20 20 char sbuf[STRERR_BUFSIZE]; 21 21 char errbuf[BUFSIZ]; 22 22
+1 -1
tools/perf/tests/sw-clock.c
··· 38 38 .freq = 1, 39 39 }; 40 40 struct perf_cpu_map *cpus; 41 - struct thread_map *threads; 41 + struct perf_thread_map *threads; 42 42 struct perf_mmap *md; 43 43 44 44 attr.sample_freq = 500;
+1 -1
tools/perf/tests/switch-tracking.c
··· 327 327 .uses_mmap = true, 328 328 }, 329 329 }; 330 - struct thread_map *threads = NULL; 330 + struct perf_thread_map *threads = NULL; 331 331 struct perf_cpu_map *cpus = NULL; 332 332 struct perf_evlist *evlist = NULL; 333 333 struct perf_evsel *evsel, *cpu_clocks_evsel, *cycles_evsel;
+1 -1
tools/perf/tests/task-exit.c
··· 46 46 const char *argv[] = { "true", NULL }; 47 47 char sbuf[STRERR_BUFSIZE]; 48 48 struct perf_cpu_map *cpus; 49 - struct thread_map *threads; 49 + struct perf_thread_map *threads; 50 50 struct perf_mmap *md; 51 51 52 52 signal(SIGCHLD, sig_handler);
+4 -4
tools/perf/tests/thread-map.c
··· 13 13 14 14 int test__thread_map(struct test *test __maybe_unused, int subtest __maybe_unused) 15 15 { 16 - struct thread_map *map; 16 + struct perf_thread_map *map; 17 17 18 18 TEST_ASSERT_VAL("failed to set process name", 19 19 !prctl(PR_SET_NAME, NAMEUL, 0, 0, 0)); ··· 57 57 struct machine *machine __maybe_unused) 58 58 { 59 59 struct thread_map_event *map = &event->thread_map; 60 - struct thread_map *threads; 60 + struct perf_thread_map *threads; 61 61 62 62 TEST_ASSERT_VAL("wrong nr", map->nr == 1); 63 63 TEST_ASSERT_VAL("wrong pid", map->entries[0].pid == (u64) getpid()); ··· 80 80 81 81 int test__thread_map_synthesize(struct test *test __maybe_unused, int subtest __maybe_unused) 82 82 { 83 - struct thread_map *threads; 83 + struct perf_thread_map *threads; 84 84 85 85 TEST_ASSERT_VAL("failed to set process name", 86 86 !prctl(PR_SET_NAME, NAMEUL, 0, 0, 0)); ··· 99 99 100 100 int test__thread_map_remove(struct test *test __maybe_unused, int subtest __maybe_unused) 101 101 { 102 - struct thread_map *threads; 102 + struct perf_thread_map *threads; 103 103 char *str; 104 104 int i; 105 105
+3 -3
tools/perf/util/event.c
··· 616 616 } 617 617 618 618 int perf_event__synthesize_thread_map(struct perf_tool *tool, 619 - struct thread_map *threads, 619 + struct perf_thread_map *threads, 620 620 perf_event__handler_t process, 621 621 struct machine *machine, 622 622 bool mmap_data) ··· 972 972 } 973 973 974 974 int perf_event__synthesize_thread_map2(struct perf_tool *tool, 975 - struct thread_map *threads, 975 + struct perf_thread_map *threads, 976 976 perf_event__handler_t process, 977 977 struct machine *machine) 978 978 { ··· 1377 1377 1378 1378 size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp) 1379 1379 { 1380 - struct thread_map *threads = thread_map__new_event(&event->thread_map); 1380 + struct perf_thread_map *threads = thread_map__new_event(&event->thread_map); 1381 1381 size_t ret; 1382 1382 1383 1383 ret = fprintf(fp, " nr: ");
+3 -3
tools/perf/util/event.h
··· 674 674 void perf_event__print_totals(void); 675 675 676 676 struct perf_tool; 677 - struct thread_map; 677 + struct perf_thread_map; 678 678 struct perf_cpu_map; 679 679 struct perf_stat_config; 680 680 struct perf_counts_values; ··· 685 685 struct machine *machine); 686 686 687 687 int perf_event__synthesize_thread_map(struct perf_tool *tool, 688 - struct thread_map *threads, 688 + struct perf_thread_map *threads, 689 689 perf_event__handler_t process, 690 690 struct machine *machine, bool mmap_data); 691 691 int perf_event__synthesize_thread_map2(struct perf_tool *tool, 692 - struct thread_map *threads, 692 + struct perf_thread_map *threads, 693 693 perf_event__handler_t process, 694 694 struct machine *machine); 695 695 int perf_event__synthesize_cpu_map(struct perf_tool *tool,
+5 -5
tools/perf/util/evlist.c
··· 42 42 #define SID(e, x, y) xyarray__entry(e->sample_id, x, y) 43 43 44 44 void perf_evlist__init(struct perf_evlist *evlist, struct perf_cpu_map *cpus, 45 - struct thread_map *threads) 45 + struct perf_thread_map *threads) 46 46 { 47 47 int i; 48 48 ··· 1013 1013 { 1014 1014 struct perf_evsel *evsel; 1015 1015 const struct perf_cpu_map *cpus = evlist->cpus; 1016 - const struct thread_map *threads = evlist->threads; 1016 + const struct perf_thread_map *threads = evlist->threads; 1017 1017 /* 1018 1018 * Delay setting mp.prot: set it before calling perf_mmap__mmap. 1019 1019 * Its value is decided by evsel's write_backward. ··· 1059 1059 { 1060 1060 bool all_threads = (target->per_thread && target->system_wide); 1061 1061 struct perf_cpu_map *cpus; 1062 - struct thread_map *threads; 1062 + struct perf_thread_map *threads; 1063 1063 1064 1064 /* 1065 1065 * If specify '-a' and '--per-thread' to perf record, perf record ··· 1105 1105 } 1106 1106 1107 1107 void perf_evlist__set_maps(struct perf_evlist *evlist, struct perf_cpu_map *cpus, 1108 - struct thread_map *threads) 1108 + struct perf_thread_map *threads) 1109 1109 { 1110 1110 /* 1111 1111 * Allow for the possibility that one or another of the maps isn't being ··· 1359 1359 static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist) 1360 1360 { 1361 1361 struct perf_cpu_map *cpus; 1362 - struct thread_map *threads; 1362 + struct perf_thread_map *threads; 1363 1363 int err = -ENOMEM; 1364 1364 1365 1365 /*
+3 -3
tools/perf/util/evlist.h
··· 44 44 struct fdarray pollfd; 45 45 struct perf_mmap *mmap; 46 46 struct perf_mmap *overwrite_mmap; 47 - struct thread_map *threads; 47 + struct perf_thread_map *threads; 48 48 struct perf_cpu_map *cpus; 49 49 struct perf_evsel *selected; 50 50 struct events_stats stats; ··· 69 69 struct perf_evlist *perf_evlist__new_default(void); 70 70 struct perf_evlist *perf_evlist__new_dummy(void); 71 71 void perf_evlist__init(struct perf_evlist *evlist, struct perf_cpu_map *cpus, 72 - struct thread_map *threads); 72 + struct perf_thread_map *threads); 73 73 void perf_evlist__exit(struct perf_evlist *evlist); 74 74 void perf_evlist__delete(struct perf_evlist *evlist); 75 75 ··· 195 195 struct perf_evsel *evsel); 196 196 197 197 void perf_evlist__set_maps(struct perf_evlist *evlist, struct perf_cpu_map *cpus, 198 - struct thread_map *threads); 198 + struct perf_thread_map *threads); 199 199 int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target); 200 200 int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel); 201 201
+5 -5
tools/perf/util/evsel.c
··· 1743 1743 1744 1744 static bool ignore_missing_thread(struct perf_evsel *evsel, 1745 1745 int nr_cpus, int cpu, 1746 - struct thread_map *threads, 1746 + struct perf_thread_map *threads, 1747 1747 int thread, int err) 1748 1748 { 1749 1749 pid_t ignore_pid = thread_map__pid(threads, thread); ··· 1826 1826 } 1827 1827 1828 1828 int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus, 1829 - struct thread_map *threads) 1829 + struct perf_thread_map *threads) 1830 1830 { 1831 1831 int cpu, thread, nthreads; 1832 1832 unsigned long flags = PERF_FLAG_FD_CLOEXEC; ··· 1849 1849 } 1850 1850 1851 1851 if (threads == NULL) { 1852 - static struct thread_map *empty_thread_map; 1852 + static struct perf_thread_map *empty_thread_map; 1853 1853 1854 1854 if (empty_thread_map == NULL) { 1855 1855 empty_thread_map = thread_map__new_by_tid(-1); ··· 2090 2090 } 2091 2091 2092 2092 int perf_evsel__open_per_thread(struct perf_evsel *evsel, 2093 - struct thread_map *threads) 2093 + struct perf_thread_map *threads) 2094 2094 { 2095 2095 return perf_evsel__open(evsel, NULL, threads); 2096 2096 } ··· 3065 3065 int perf_evsel__store_ids(struct perf_evsel *evsel, struct perf_evlist *evlist) 3066 3066 { 3067 3067 struct perf_cpu_map *cpus = evsel->cpus; 3068 - struct thread_map *threads = evsel->threads; 3068 + struct perf_thread_map *threads = evsel->threads; 3069 3069 3070 3070 if (perf_evsel__alloc_id(evsel, cpus->nr, threads->nr)) 3071 3071 return -ENOMEM;
+3 -3
tools/perf/util/evsel.h
··· 126 126 void *handler; 127 127 struct perf_cpu_map *cpus; 128 128 struct perf_cpu_map *own_cpus; 129 - struct thread_map *threads; 129 + struct perf_thread_map *threads; 130 130 unsigned int sample_size; 131 131 int id_pos; 132 132 int is_pos; ··· 302 302 int perf_evsel__open_per_cpu(struct perf_evsel *evsel, 303 303 struct perf_cpu_map *cpus); 304 304 int perf_evsel__open_per_thread(struct perf_evsel *evsel, 305 - struct thread_map *threads); 305 + struct perf_thread_map *threads); 306 306 int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus, 307 - struct thread_map *threads); 307 + struct perf_thread_map *threads); 308 308 void perf_evsel__close(struct perf_evsel *evsel); 309 309 310 310 struct perf_sample;
+1 -1
tools/perf/util/machine.c
··· 2599 2599 } 2600 2600 2601 2601 int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool, 2602 - struct target *target, struct thread_map *threads, 2602 + struct target *target, struct perf_thread_map *threads, 2603 2603 perf_event__handler_t process, bool data_mmap, 2604 2604 unsigned int nr_threads_synthesize) 2605 2605 {
+2 -2
tools/perf/util/machine.h
··· 251 251 void *priv); 252 252 253 253 int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool, 254 - struct target *target, struct thread_map *threads, 254 + struct target *target, struct perf_thread_map *threads, 255 255 perf_event__handler_t process, bool data_mmap, 256 256 unsigned int nr_threads_synthesize); 257 257 static inline 258 258 int machine__synthesize_threads(struct machine *machine, struct target *target, 259 - struct thread_map *threads, bool data_mmap, 259 + struct perf_thread_map *threads, bool data_mmap, 260 260 unsigned int nr_threads_synthesize) 261 261 { 262 262 return __machine__synthesize_threads(machine, NULL, target, threads,
+1 -1
tools/perf/util/parse-events.c
··· 2313 2313 .config = config, 2314 2314 .disabled = 1, 2315 2315 }; 2316 - struct thread_map *tmap = thread_map__new_by_tid(0); 2316 + struct perf_thread_map *tmap = thread_map__new_by_tid(0); 2317 2317 2318 2318 if (tmap == NULL) 2319 2319 return false;
+3 -3
tools/perf/util/python.c
··· 605 605 struct pyrf_thread_map { 606 606 PyObject_HEAD 607 607 608 - struct thread_map *threads; 608 + struct perf_thread_map *threads; 609 609 }; 610 610 611 611 static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads, ··· 797 797 { 798 798 struct perf_evsel *evsel = &pevsel->evsel; 799 799 struct perf_cpu_map *cpus = NULL; 800 - struct thread_map *threads = NULL; 800 + struct perf_thread_map *threads = NULL; 801 801 PyObject *pcpus = NULL, *pthreads = NULL; 802 802 int group = 0, inherit = 0; 803 803 static char *kwlist[] = { "cpus", "threads", "group", "inherit", NULL }; ··· 866 866 { 867 867 PyObject *pcpus = NULL, *pthreads = NULL; 868 868 struct perf_cpu_map *cpus; 869 - struct thread_map *threads; 869 + struct perf_thread_map *threads; 870 870 871 871 if (!PyArg_ParseTuple(args, "OO", &pcpus, &pthreads)) 872 872 return -1;
+1 -1
tools/perf/util/scripting-engines/trace-event-python.c
··· 1392 1392 static void python_process_stat(struct perf_stat_config *config, 1393 1393 struct perf_evsel *counter, u64 tstamp) 1394 1394 { 1395 - struct thread_map *threads = counter->threads; 1395 + struct perf_thread_map *threads = counter->threads; 1396 1396 struct perf_cpu_map *cpus = counter->cpus; 1397 1397 int cpu, thread; 1398 1398
+30 -30
tools/perf/util/thread_map.c
··· 28 28 return 1; 29 29 } 30 30 31 - static void thread_map__reset(struct thread_map *map, int start, int nr) 31 + static void thread_map__reset(struct perf_thread_map *map, int start, int nr) 32 32 { 33 33 size_t size = (nr - start) * sizeof(map->map[0]); 34 34 ··· 36 36 map->err_thread = -1; 37 37 } 38 38 39 - static struct thread_map *thread_map__realloc(struct thread_map *map, int nr) 39 + static struct perf_thread_map *thread_map__realloc(struct perf_thread_map *map, int nr) 40 40 { 41 41 size_t size = sizeof(*map) + sizeof(map->map[0]) * nr; 42 42 int start = map ? map->nr : 0; ··· 53 53 54 54 #define thread_map__alloc(__nr) thread_map__realloc(NULL, __nr) 55 55 56 - struct thread_map *thread_map__new_by_pid(pid_t pid) 56 + struct perf_thread_map *thread_map__new_by_pid(pid_t pid) 57 57 { 58 - struct thread_map *threads; 58 + struct perf_thread_map *threads; 59 59 char name[256]; 60 60 int items; 61 61 struct dirent **namelist = NULL; ··· 81 81 return threads; 82 82 } 83 83 84 - struct thread_map *thread_map__new_by_tid(pid_t tid) 84 + struct perf_thread_map *thread_map__new_by_tid(pid_t tid) 85 85 { 86 - struct thread_map *threads = thread_map__alloc(1); 86 + struct perf_thread_map *threads = thread_map__alloc(1); 87 87 88 88 if (threads != NULL) { 89 89 thread_map__set_pid(threads, 0, tid); ··· 94 94 return threads; 95 95 } 96 96 97 - static struct thread_map *__thread_map__new_all_cpus(uid_t uid) 97 + static struct perf_thread_map *__thread_map__new_all_cpus(uid_t uid) 98 98 { 99 99 DIR *proc; 100 100 int max_threads = 32, items, i; 101 101 char path[NAME_MAX + 1 + 6]; 102 102 struct dirent *dirent, **namelist = NULL; 103 - struct thread_map *threads = thread_map__alloc(max_threads); 103 + struct perf_thread_map *threads = thread_map__alloc(max_threads); 104 104 105 105 if (threads == NULL) 106 106 goto out; ··· 140 140 } 141 141 142 142 if (grow) { 143 - struct thread_map *tmp; 143 + struct perf_thread_map *tmp; 144 144 145 145 tmp = 
thread_map__realloc(threads, max_threads); 146 146 if (tmp == NULL) ··· 180 180 goto out_closedir; 181 181 } 182 182 183 - struct thread_map *thread_map__new_all_cpus(void) 183 + struct perf_thread_map *thread_map__new_all_cpus(void) 184 184 { 185 185 return __thread_map__new_all_cpus(UINT_MAX); 186 186 } 187 187 188 - struct thread_map *thread_map__new_by_uid(uid_t uid) 188 + struct perf_thread_map *thread_map__new_by_uid(uid_t uid) 189 189 { 190 190 return __thread_map__new_all_cpus(uid); 191 191 } 192 192 193 - struct thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid) 193 + struct perf_thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid) 194 194 { 195 195 if (pid != -1) 196 196 return thread_map__new_by_pid(pid); ··· 201 201 return thread_map__new_by_tid(tid); 202 202 } 203 203 204 - static struct thread_map *thread_map__new_by_pid_str(const char *pid_str) 204 + static struct perf_thread_map *thread_map__new_by_pid_str(const char *pid_str) 205 205 { 206 - struct thread_map *threads = NULL, *nt; 206 + struct perf_thread_map *threads = NULL, *nt; 207 207 char name[256]; 208 208 int items, total_tasks = 0; 209 209 struct dirent **namelist = NULL; ··· 263 263 goto out; 264 264 } 265 265 266 - struct thread_map *thread_map__new_dummy(void) 266 + struct perf_thread_map *thread_map__new_dummy(void) 267 267 { 268 - struct thread_map *threads = thread_map__alloc(1); 268 + struct perf_thread_map *threads = thread_map__alloc(1); 269 269 270 270 if (threads != NULL) { 271 271 thread_map__set_pid(threads, 0, -1); ··· 275 275 return threads; 276 276 } 277 277 278 - struct thread_map *thread_map__new_by_tid_str(const char *tid_str) 278 + struct perf_thread_map *thread_map__new_by_tid_str(const char *tid_str) 279 279 { 280 - struct thread_map *threads = NULL, *nt; 280 + struct perf_thread_map *threads = NULL, *nt; 281 281 int ntasks = 0; 282 282 pid_t tid, prev_tid = INT_MAX; 283 283 char *end_ptr; ··· 324 324 goto out; 325 325 } 326 326 327 - struct 
thread_map *thread_map__new_str(const char *pid, const char *tid, 327 + struct perf_thread_map *thread_map__new_str(const char *pid, const char *tid, 328 328 uid_t uid, bool all_threads) 329 329 { 330 330 if (pid) ··· 339 339 return thread_map__new_by_tid_str(tid); 340 340 } 341 341 342 - static void thread_map__delete(struct thread_map *threads) 342 + static void thread_map__delete(struct perf_thread_map *threads) 343 343 { 344 344 if (threads) { 345 345 int i; ··· 352 352 } 353 353 } 354 354 355 - struct thread_map *thread_map__get(struct thread_map *map) 355 + struct perf_thread_map *thread_map__get(struct perf_thread_map *map) 356 356 { 357 357 if (map) 358 358 refcount_inc(&map->refcnt); 359 359 return map; 360 360 } 361 361 362 - void thread_map__put(struct thread_map *map) 362 + void thread_map__put(struct perf_thread_map *map) 363 363 { 364 364 if (map && refcount_dec_and_test(&map->refcnt)) 365 365 thread_map__delete(map); 366 366 } 367 367 368 - size_t thread_map__fprintf(struct thread_map *threads, FILE *fp) 368 + size_t thread_map__fprintf(struct perf_thread_map *threads, FILE *fp) 369 369 { 370 370 int i; 371 371 size_t printed = fprintf(fp, "%d thread%s: ", ··· 400 400 return err; 401 401 } 402 402 403 - static void comm_init(struct thread_map *map, int i) 403 + static void comm_init(struct perf_thread_map *map, int i) 404 404 { 405 405 pid_t pid = thread_map__pid(map, i); 406 406 char *comm = NULL; ··· 421 421 map->map[i].comm = comm; 422 422 } 423 423 424 - void thread_map__read_comms(struct thread_map *threads) 424 + void thread_map__read_comms(struct perf_thread_map *threads) 425 425 { 426 426 int i; 427 427 ··· 429 429 comm_init(threads, i); 430 430 } 431 431 432 - static void thread_map__copy_event(struct thread_map *threads, 432 + static void thread_map__copy_event(struct perf_thread_map *threads, 433 433 struct thread_map_event *event) 434 434 { 435 435 unsigned i; ··· 444 444 refcount_set(&threads->refcnt, 1); 445 445 } 446 446 447 - struct 
thread_map *thread_map__new_event(struct thread_map_event *event) 447 + struct perf_thread_map *thread_map__new_event(struct thread_map_event *event) 448 448 { 449 - struct thread_map *threads; 449 + struct perf_thread_map *threads; 450 450 451 451 threads = thread_map__alloc(event->nr); 452 452 if (threads) ··· 455 455 return threads; 456 456 } 457 457 458 - bool thread_map__has(struct thread_map *threads, pid_t pid) 458 + bool thread_map__has(struct perf_thread_map *threads, pid_t pid) 459 459 { 460 460 int i; 461 461 ··· 467 467 return false; 468 468 } 469 469 470 - int thread_map__remove(struct thread_map *threads, int idx) 470 + int thread_map__remove(struct perf_thread_map *threads, int idx) 471 471 { 472 472 int i; 473 473
+20 -20
tools/perf/util/thread_map.h
··· 11 11 char *comm; 12 12 }; 13 13 14 - struct thread_map { 14 + struct perf_thread_map { 15 15 refcount_t refcnt; 16 16 int nr; 17 17 int err_thread; ··· 20 20 21 21 struct thread_map_event; 22 22 23 - struct thread_map *thread_map__new_dummy(void); 24 - struct thread_map *thread_map__new_by_pid(pid_t pid); 25 - struct thread_map *thread_map__new_by_tid(pid_t tid); 26 - struct thread_map *thread_map__new_by_uid(uid_t uid); 27 - struct thread_map *thread_map__new_all_cpus(void); 28 - struct thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid); 29 - struct thread_map *thread_map__new_event(struct thread_map_event *event); 23 + struct perf_thread_map *thread_map__new_dummy(void); 24 + struct perf_thread_map *thread_map__new_by_pid(pid_t pid); 25 + struct perf_thread_map *thread_map__new_by_tid(pid_t tid); 26 + struct perf_thread_map *thread_map__new_by_uid(uid_t uid); 27 + struct perf_thread_map *thread_map__new_all_cpus(void); 28 + struct perf_thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid); 29 + struct perf_thread_map *thread_map__new_event(struct thread_map_event *event); 30 30 31 - struct thread_map *thread_map__get(struct thread_map *map); 32 - void thread_map__put(struct thread_map *map); 31 + struct perf_thread_map *thread_map__get(struct perf_thread_map *map); 32 + void thread_map__put(struct perf_thread_map *map); 33 33 34 - struct thread_map *thread_map__new_str(const char *pid, 34 + struct perf_thread_map *thread_map__new_str(const char *pid, 35 35 const char *tid, uid_t uid, bool all_threads); 36 36 37 - struct thread_map *thread_map__new_by_tid_str(const char *tid_str); 37 + struct perf_thread_map *thread_map__new_by_tid_str(const char *tid_str); 38 38 39 - size_t thread_map__fprintf(struct thread_map *threads, FILE *fp); 39 + size_t thread_map__fprintf(struct perf_thread_map *threads, FILE *fp); 40 40 41 - static inline int thread_map__nr(struct thread_map *threads) 41 + static inline int thread_map__nr(struct perf_thread_map 
*threads) 42 42 { 43 43 return threads ? threads->nr : 1; 44 44 } 45 45 46 - static inline pid_t thread_map__pid(struct thread_map *map, int thread) 46 + static inline pid_t thread_map__pid(struct perf_thread_map *map, int thread) 47 47 { 48 48 return map->map[thread].pid; 49 49 } 50 50 51 51 static inline void 52 - thread_map__set_pid(struct thread_map *map, int thread, pid_t pid) 52 + thread_map__set_pid(struct perf_thread_map *map, int thread, pid_t pid) 53 53 { 54 54 map->map[thread].pid = pid; 55 55 } 56 56 57 - static inline char *thread_map__comm(struct thread_map *map, int thread) 57 + static inline char *thread_map__comm(struct perf_thread_map *map, int thread) 58 58 { 59 59 return map->map[thread].comm; 60 60 } 61 61 62 - void thread_map__read_comms(struct thread_map *threads); 63 - bool thread_map__has(struct thread_map *threads, pid_t pid); 64 - int thread_map__remove(struct thread_map *threads, int idx); 62 + void thread_map__read_comms(struct perf_thread_map *threads); 63 + bool thread_map__has(struct perf_thread_map *threads, pid_t pid); 64 + int thread_map__remove(struct perf_thread_map *threads, int idx); 65 65 #endif /* __PERF_THREAD_MAP_H */