Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'perf-urgent-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/urgent

Pull perf fixes from Arnaldo Carvalho de Melo:

* Endianness fixes from Jiri Olsa

* Fixes for make perf tarball

* Fix for DSO name in perf script callchains, from David Ahern

* Segfault fixes for perf top --callchain, from Namhyung Kim

* Minor function result fixes from Srikar Dronamraju

* Add missing 3rd ioctl parameter, from Namhyung Kim

* Fix pager usage in minimal embedded systems, from Avik Sil

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>

+214 -58
+2 -2
include/linux/perf_event.h
··· 555 555 PERF_RECORD_MAX, /* non-ABI */ 556 556 }; 557 557 558 + #define PERF_MAX_STACK_DEPTH 255 559 + 558 560 enum perf_callchain_context { 559 561 PERF_CONTEXT_HV = (__u64)-32, 560 562 PERF_CONTEXT_KERNEL = (__u64)-128, ··· 610 608 #include <linux/atomic.h> 611 609 #include <linux/sysfs.h> 612 610 #include <asm/local.h> 613 - 614 - #define PERF_MAX_STACK_DEPTH 255 615 611 616 612 struct perf_callchain_entry { 617 613 __u64 nr;
-1
kernel/events/core.c
··· 3181 3181 event = event->group_leader; 3182 3182 3183 3183 perf_event_for_each_child(event, func); 3184 - func(event); 3185 3184 list_for_each_entry(sibling, &event->sibling_list, group_entry) 3186 3185 perf_event_for_each_child(sibling, func); 3187 3186 mutex_unlock(&ctx->mutex);
+2
tools/perf/MANIFEST
··· 1 1 tools/perf 2 + tools/scripts 3 + tools/lib/traceevent 2 4 include/linux/const.h 3 5 include/linux/perf_event.h 4 6 include/linux/rbtree.h
+2 -2
tools/perf/builtin-report.c
··· 152 152 153 153 if (symbol_conf.use_callchain) { 154 154 err = callchain_append(he->callchain, 155 - &evsel->hists.callchain_cursor, 155 + &callchain_cursor, 156 156 sample->period); 157 157 if (err) 158 158 return err; ··· 162 162 * so we don't allocated the extra space needed because the stdio 163 163 * code will not use it. 164 164 */ 165 - if (al->sym != NULL && use_browser > 0) { 165 + if (he->ms.sym != NULL && use_browser > 0) { 166 166 struct annotation *notes = symbol__annotation(he->ms.sym); 167 167 168 168 assert(evsel != NULL);
+4 -4
tools/perf/builtin-stat.c
··· 1129 1129 return 0; 1130 1130 1131 1131 if (!evsel_list->nr_entries) { 1132 - if (perf_evlist__add_attrs_array(evsel_list, default_attrs) < 0) 1132 + if (perf_evlist__add_default_attrs(evsel_list, default_attrs) < 0) 1133 1133 return -1; 1134 1134 } 1135 1135 ··· 1139 1139 return 0; 1140 1140 1141 1141 /* Append detailed run extra attributes: */ 1142 - if (perf_evlist__add_attrs_array(evsel_list, detailed_attrs) < 0) 1142 + if (perf_evlist__add_default_attrs(evsel_list, detailed_attrs) < 0) 1143 1143 return -1; 1144 1144 1145 1145 if (detailed_run < 2) 1146 1146 return 0; 1147 1147 1148 1148 /* Append very detailed run extra attributes: */ 1149 - if (perf_evlist__add_attrs_array(evsel_list, very_detailed_attrs) < 0) 1149 + if (perf_evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0) 1150 1150 return -1; 1151 1151 1152 1152 if (detailed_run < 3) 1153 1153 return 0; 1154 1154 1155 1155 /* Append very, very detailed run extra attributes: */ 1156 - return perf_evlist__add_attrs_array(evsel_list, very_very_detailed_attrs); 1156 + return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs); 1157 1157 } 1158 1158 1159 1159 int cmd_stat(int argc, const char **argv, const char *prefix __used)
+1 -1
tools/perf/builtin-top.c
··· 787 787 } 788 788 789 789 if (symbol_conf.use_callchain) { 790 - err = callchain_append(he->callchain, &evsel->hists.callchain_cursor, 790 + err = callchain_append(he->callchain, &callchain_cursor, 791 791 sample->period); 792 792 if (err) 793 793 return;
+4 -3
tools/perf/design.txt
··· 409 409 prctl. When a counter is disabled, it doesn't count or generate 410 410 events but does continue to exist and maintain its count value. 411 411 412 - An individual counter or counter group can be enabled with 412 + An individual counter can be enabled with 413 413 414 - ioctl(fd, PERF_EVENT_IOC_ENABLE); 414 + ioctl(fd, PERF_EVENT_IOC_ENABLE, 0); 415 415 416 416 or disabled with 417 417 418 - ioctl(fd, PERF_EVENT_IOC_DISABLE); 418 + ioctl(fd, PERF_EVENT_IOC_DISABLE, 0); 419 419 420 + For a counter group, pass PERF_IOC_FLAG_GROUP as the third argument. 420 421 Enabling or disabling the leader of a group enables or disables the 421 422 whole group; that is, while the group leader is disabled, none of the 422 423 counters in the group will count. Enabling or disabling a member of a
+1 -1
tools/perf/ui/browsers/annotate.c
··· 668 668 "q/ESC/CTRL+C Exit\n\n" 669 669 "-> Go to target\n" 670 670 "<- Exit\n" 671 - "h Cycle thru hottest instructions\n" 671 + "H Cycle thru hottest instructions\n" 672 672 "j Toggle showing jump to target arrows\n" 673 673 "J Toggle showing number of jump sources on targets\n" 674 674 "n Search next string\n"
+1 -1
tools/perf/util/PERF-VERSION-GEN
··· 12 12 # First check if there is a .git to get the version from git describe 13 13 # otherwise try to get the version from the kernel makefile 14 14 if test -d ../../.git -o -f ../../.git && 15 - VN=$(git describe --abbrev=4 HEAD 2>/dev/null) && 15 + VN=$(git describe --match 'v[0-9].[0-9]*' --abbrev=4 HEAD 2>/dev/null) && 16 16 case "$VN" in 17 17 *$LF*) (exit 1) ;; 18 18 v[0-9]*)
+2
tools/perf/util/callchain.c
··· 18 18 #include "util.h" 19 19 #include "callchain.h" 20 20 21 + __thread struct callchain_cursor callchain_cursor; 22 + 21 23 bool ip_callchain__valid(struct ip_callchain *chain, 22 24 const union perf_event *event) 23 25 {
+2
tools/perf/util/callchain.h
··· 76 76 struct callchain_cursor_node *curr; 77 77 }; 78 78 79 + extern __thread struct callchain_cursor callchain_cursor; 80 + 79 81 static inline void callchain_init(struct callchain_root *root) 80 82 { 81 83 INIT_LIST_HEAD(&root->node.siblings);
+15 -2
tools/perf/util/evlist.c
··· 159 159 return -1; 160 160 } 161 161 162 + int __perf_evlist__add_default_attrs(struct perf_evlist *evlist, 163 + struct perf_event_attr *attrs, size_t nr_attrs) 164 + { 165 + size_t i; 166 + 167 + for (i = 0; i < nr_attrs; i++) 168 + event_attr_init(attrs + i); 169 + 170 + return perf_evlist__add_attrs(evlist, attrs, nr_attrs); 171 + } 172 + 162 173 static int trace_event__id(const char *evname) 163 174 { 164 175 char *filename, *colon; ··· 274 263 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { 275 264 list_for_each_entry(pos, &evlist->entries, node) { 276 265 for (thread = 0; thread < evlist->threads->nr; thread++) 277 - ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_DISABLE); 266 + ioctl(FD(pos, cpu, thread), 267 + PERF_EVENT_IOC_DISABLE, 0); 278 268 } 279 269 } 280 270 } ··· 288 276 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { 289 277 list_for_each_entry(pos, &evlist->entries, node) { 290 278 for (thread = 0; thread < evlist->threads->nr; thread++) 291 - ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_ENABLE); 279 + ioctl(FD(pos, cpu, thread), 280 + PERF_EVENT_IOC_ENABLE, 0); 292 281 } 293 282 } 294 283 }
+4
tools/perf/util/evlist.h
··· 54 54 int perf_evlist__add_default(struct perf_evlist *evlist); 55 55 int perf_evlist__add_attrs(struct perf_evlist *evlist, 56 56 struct perf_event_attr *attrs, size_t nr_attrs); 57 + int __perf_evlist__add_default_attrs(struct perf_evlist *evlist, 58 + struct perf_event_attr *attrs, size_t nr_attrs); 57 59 int perf_evlist__add_tracepoints(struct perf_evlist *evlist, 58 60 const char *tracepoints[], size_t nr_tracepoints); 59 61 int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist, ··· 64 62 65 63 #define perf_evlist__add_attrs_array(evlist, array) \ 66 64 perf_evlist__add_attrs(evlist, array, ARRAY_SIZE(array)) 65 + #define perf_evlist__add_default_attrs(evlist, array) \ 66 + __perf_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array)) 67 67 68 68 #define perf_evlist__add_tracepoints_array(evlist, array) \ 69 69 perf_evlist__add_tracepoints(evlist, array, ARRAY_SIZE(array))
+22 -7
tools/perf/util/evsel.c
··· 494 494 } 495 495 496 496 static int perf_event__parse_id_sample(const union perf_event *event, u64 type, 497 - struct perf_sample *sample) 497 + struct perf_sample *sample, 498 + bool swapped) 498 499 { 499 500 const u64 *array = event->sample.array; 501 + union u64_swap u; 500 502 501 503 array += ((event->header.size - 502 504 sizeof(event->header)) / sizeof(u64)) - 1; 503 505 504 506 if (type & PERF_SAMPLE_CPU) { 505 - u32 *p = (u32 *)array; 506 - sample->cpu = *p; 507 + u.val64 = *array; 508 + if (swapped) { 509 + /* undo swap of u64, then swap on individual u32s */ 510 + u.val64 = bswap_64(u.val64); 511 + u.val32[0] = bswap_32(u.val32[0]); 512 + } 513 + 514 + sample->cpu = u.val32[0]; 507 515 array--; 508 516 } 509 517 ··· 531 523 } 532 524 533 525 if (type & PERF_SAMPLE_TID) { 534 - u32 *p = (u32 *)array; 535 - sample->pid = p[0]; 536 - sample->tid = p[1]; 526 + u.val64 = *array; 527 + if (swapped) { 528 + /* undo swap of u64, then swap on individual u32s */ 529 + u.val64 = bswap_64(u.val64); 530 + u.val32[0] = bswap_32(u.val32[0]); 531 + u.val32[1] = bswap_32(u.val32[1]); 532 + } 533 + 534 + sample->pid = u.val32[0]; 535 + sample->tid = u.val32[1]; 537 536 } 538 537 539 538 return 0; ··· 577 562 if (event->header.type != PERF_RECORD_SAMPLE) { 578 563 if (!sample_id_all) 579 564 return 0; 580 - return perf_event__parse_id_sample(event, type, data); 565 + return perf_event__parse_id_sample(event, type, data, swapped); 581 566 } 582 567 583 568 array = event->sample.array;
+4 -3
tools/perf/util/hist.c
··· 378 378 * collapse the histogram 379 379 */ 380 380 381 - static bool hists__collapse_insert_entry(struct hists *hists, 381 + static bool hists__collapse_insert_entry(struct hists *hists __used, 382 382 struct rb_root *root, 383 383 struct hist_entry *he) 384 384 { ··· 397 397 iter->period += he->period; 398 398 iter->nr_events += he->nr_events; 399 399 if (symbol_conf.use_callchain) { 400 - callchain_cursor_reset(&hists->callchain_cursor); 401 - callchain_merge(&hists->callchain_cursor, iter->callchain, 400 + callchain_cursor_reset(&callchain_cursor); 401 + callchain_merge(&callchain_cursor, 402 + iter->callchain, 402 403 he->callchain); 403 404 } 404 405 hist_entry__free(he);
-2
tools/perf/util/hist.h
··· 67 67 struct events_stats stats; 68 68 u64 event_stream; 69 69 u16 col_len[HISTC_NR_COLS]; 70 - /* Best would be to reuse the session callchain cursor */ 71 - struct callchain_cursor callchain_cursor; 72 70 }; 73 71 74 72 struct hist_entry *__hists__add_entry(struct hists *self,
+4
tools/perf/util/pager.c
··· 57 57 } 58 58 if (!pager) 59 59 pager = getenv("PAGER"); 60 + if (!pager) { 61 + if (!access("/usr/bin/pager", X_OK)) 62 + pager = "/usr/bin/pager"; 63 + } 60 64 if (!pager) 61 65 pager = "less"; 62 66 else if (!*pager || !strcmp(pager, "cat"))
+2 -6
tools/perf/util/probe-event.c
··· 2164 2164 2165 2165 error: 2166 2166 if (kfd >= 0) { 2167 - if (namelist) 2168 - strlist__delete(namelist); 2169 - 2167 + strlist__delete(namelist); 2170 2168 close(kfd); 2171 2169 } 2172 2170 2173 2171 if (ufd >= 0) { 2174 - if (unamelist) 2175 - strlist__delete(unamelist); 2176 - 2172 + strlist__delete(unamelist); 2177 2173 close(ufd); 2178 2174 } 2179 2175
+76 -21
tools/perf/util/session.c
··· 288 288 return bi; 289 289 } 290 290 291 - int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel, 291 + int machine__resolve_callchain(struct machine *self, 292 + struct perf_evsel *evsel __used, 292 293 struct thread *thread, 293 294 struct ip_callchain *chain, 294 295 struct symbol **parent)
··· 298 297 unsigned int i; 299 298 int err; 300 299 301 - callchain_cursor_reset(&evsel->hists.callchain_cursor); 300 + callchain_cursor_reset(&callchain_cursor); 301 + 302 + if (chain->nr > PERF_MAX_STACK_DEPTH) { 303 + pr_warning("corrupted callchain. skipping...\n"); 304 + return 0; 305 + } 302 306 303 307 for (i = 0; i < chain->nr; i++) { 304 308 u64 ip;
··· 323 317 case PERF_CONTEXT_USER: 324 318 cpumode = PERF_RECORD_MISC_USER; break; 325 319 default: 326 - break; 320 + pr_debug("invalid callchain context: " 321 + "%"PRId64"\n", (s64) ip); 322 + /* 323 + * It seems the callchain is corrupted. 324 + * Discard all. 325 + */ 326 + callchain_cursor_reset(&callchain_cursor); 327 + return 0; 327 328 } 328 329 continue; 329 330 }
··· 346 333 break; 347 334 } 348 335 349 - err = callchain_cursor_append(&evsel->hists.callchain_cursor, 336 + err = callchain_cursor_append(&callchain_cursor, 350 337 ip, al.map, al.sym); 351 338 if (err) 352 339 return err;
··· 454 441 } 455 442 } 456 443 457 - static void perf_event__all64_swap(union perf_event *event) 444 + static void swap_sample_id_all(union perf_event *event, void *data) 445 + { 446 + void *end = (void *) event + event->header.size; 447 + int size = end - data; 448 + 449 + BUG_ON(size % sizeof(u64)); 450 + mem_bswap_64(data, size); 451 + } 452 + 453 + static void perf_event__all64_swap(union perf_event *event, 454 + bool sample_id_all __used) 458 455 { 459 456 struct perf_event_header *hdr = &event->header; 460 457 mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr)); 461 458 } 462 459 463 - static void perf_event__comm_swap(union perf_event *event) 460 + static void perf_event__comm_swap(union perf_event *event, bool sample_id_all) 464 461 { 465 462 event->comm.pid = bswap_32(event->comm.pid); 466 463 event->comm.tid = bswap_32(event->comm.tid); 464 + 465 + if (sample_id_all) { 466 + void *data = &event->comm.comm; 467 + 468 + data += ALIGN(strlen(data) + 1, sizeof(u64)); 469 + swap_sample_id_all(event, data); 470 + } 467 471 } 468 472 469 - static void perf_event__mmap_swap(union perf_event *event) 473 + static void perf_event__mmap_swap(union perf_event *event, 474 + bool sample_id_all) 470 475 { 471 476 event->mmap.pid = bswap_32(event->mmap.pid); 472 477 event->mmap.tid = bswap_32(event->mmap.tid); 473 478 event->mmap.start = bswap_64(event->mmap.start); 474 479 event->mmap.len = bswap_64(event->mmap.len); 475 480 event->mmap.pgoff = bswap_64(event->mmap.pgoff); 481 + 482 + if (sample_id_all) { 483 + void *data = &event->mmap.filename; 484 + 485 + data += ALIGN(strlen(data) + 1, sizeof(u64)); 486 + swap_sample_id_all(event, data); 487 + } 476 488 } 477 489 478 - static void perf_event__task_swap(union perf_event *event) 490 + static void perf_event__task_swap(union perf_event *event, bool sample_id_all) 479 491 { 480 492 event->fork.pid = bswap_32(event->fork.pid); 481 493 event->fork.tid = bswap_32(event->fork.tid); 482 494 event->fork.ppid = bswap_32(event->fork.ppid); 483 495 event->fork.ptid = bswap_32(event->fork.ptid); 484 496 event->fork.time = bswap_64(event->fork.time); 497 + 498 + if (sample_id_all) 499 + swap_sample_id_all(event, &event->fork + 1); 485 500 } 486 501 487 - static void perf_event__read_swap(union perf_event *event) 502 + static void perf_event__read_swap(union perf_event *event, bool sample_id_all) 488 503 { 489 504 event->read.pid = bswap_32(event->read.pid); 490 505 event->read.tid = bswap_32(event->read.tid);
··· 520 479 event->read.time_enabled = bswap_64(event->read.time_enabled); 521 480 event->read.time_running = bswap_64(event->read.time_running); 522 481 event->read.id = bswap_64(event->read.id); 482 + 483 + if (sample_id_all) 484 + swap_sample_id_all(event, &event->read + 1); 523 485 } 524 486 525 487 static u8 revbyte(u8 b)
··· 574 530 swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64)); 575 531 } 576 532 577 - static void perf_event__hdr_attr_swap(union perf_event *event) 533 + static void perf_event__hdr_attr_swap(union perf_event *event, 534 + bool sample_id_all __used) 578 535 { 579 536 size_t size; 580 537
··· 586 541 mem_bswap_64(event->attr.id, size); 587 542 } 588 543 589 - static void perf_event__event_type_swap(union perf_event *event) 544 + static void perf_event__event_type_swap(union perf_event *event, 545 + bool sample_id_all __used) 590 546 { 591 547 event->event_type.event_type.event_id = 592 548 bswap_64(event->event_type.event_type.event_id); 593 549 } 594 550 595 - static void perf_event__tracing_data_swap(union perf_event *event) 551 + static void perf_event__tracing_data_swap(union perf_event *event, 552 + bool sample_id_all __used) 596 553 { 597 554 event->tracing_data.size = bswap_32(event->tracing_data.size); 598 555 } 599 556 600 - typedef void (*perf_event__swap_op)(union perf_event *event); 557 + typedef void (*perf_event__swap_op)(union perf_event *event, 558 + bool sample_id_all); 601 559 602 560 static perf_event__swap_op perf_event__swap_ops[] = { 603 561 [PERF_RECORD_MMAP] = perf_event__mmap_swap,
··· 1034 986 } 1035 987 } 1036 988 989 + static void event_swap(union perf_event *event, bool sample_id_all) 990 + { 991 + perf_event__swap_op swap; 992 + 993 + swap = perf_event__swap_ops[event->header.type]; 994 + if (swap) 995 + swap(event, sample_id_all); 996 + } 997 + 1037 998 static int perf_session__process_event(struct perf_session *session, 1038 999 union perf_event *event, 1039 1000 struct perf_tool *tool,
··· 1051 994 struct perf_sample sample; 1052 995 int ret; 1053 996 1054 - if (session->header.needs_swap && 1055 - perf_event__swap_ops[event->header.type]) 1056 - perf_event__swap_ops[event->header.type](event); 997 + if (session->header.needs_swap) 998 + event_swap(event, session->sample_id_all); 1057 999 1058 1000 if (event->header.type >= PERF_RECORD_HEADER_MAX) 1059 1001 return -EINVAL;
··· 1484 1428 int print_sym, int print_dso, int print_symoffset) 1485 1429 { 1486 1430 struct addr_location al; 1487 - struct callchain_cursor *cursor = &evsel->hists.callchain_cursor; 1488 1431 struct callchain_cursor_node *node; 1489 1432 1490 1433 if (perf_event__preprocess_sample(event, machine, &al, sample,
··· 1501 1446 error("Failed to resolve callchain. Skipping\n"); 1502 1447 return; 1503 1448 } 1504 - callchain_cursor_commit(cursor); 1449 + callchain_cursor_commit(&callchain_cursor); 1505 1450 1506 1451 while (1) { 1507 - node = callchain_cursor_current(cursor); 1452 + node = callchain_cursor_current(&callchain_cursor); 1508 1453 if (!node) 1509 1454 break; 1510 1455
··· 1515 1460 } 1516 1461 if (print_dso) { 1517 1462 printf(" ("); 1518 - map__fprintf_dsoname(al.map, stdout); 1463 + map__fprintf_dsoname(node->map, stdout); 1519 1464 printf(")"); 1520 1465 printf("\n"); 1521 1466 1522 1467 1523 - callchain_cursor_advance(cursor); 1468 + callchain_cursor_advance(&callchain_cursor); 1524 1469 } 1525 1470 1526 1471 } else {
+36 -2
tools/perf/util/symbol.c
··· 323 323 dso->sorted_by_name = 0; 324 324 dso->has_build_id = 0; 325 325 dso->kernel = DSO_TYPE_USER; 326 + dso->needs_swap = DSO_SWAP__UNSET; 326 327 INIT_LIST_HEAD(&dso->node); 327 328 }
··· 1157 1156 return -1; 1158 1157 } 1159 1158 1159 + static int dso__swap_init(struct dso *dso, unsigned char eidata) 1160 + { 1161 + static unsigned int const endian = 1; 1162 + 1163 + dso->needs_swap = DSO_SWAP__NO; 1164 + 1165 + switch (eidata) { 1166 + case ELFDATA2LSB: 1167 + /* We are big endian, DSO is little endian. */ 1168 + if (*(unsigned char const *)&endian != 1) 1169 + dso->needs_swap = DSO_SWAP__YES; 1170 + break; 1171 + 1172 + case ELFDATA2MSB: 1173 + /* We are little endian, DSO is big endian. */ 1174 + if (*(unsigned char const *)&endian != 0) 1175 + dso->needs_swap = DSO_SWAP__YES; 1176 + break; 1177 + 1178 + default: 1179 + pr_err("unrecognized DSO data encoding %d\n", eidata); 1180 + return -EINVAL; 1181 + } 1182 + 1183 + return 0; 1184 + } 1185 + 1160 1186 static int dso__load_sym(struct dso *dso, struct map *map, const char *name, 1161 1187 int fd, symbol_filter_t filter, int kmodule, 1162 1188 int want_symtab)
··· 1214 1186 pr_debug("%s: cannot get elf header.\n", __func__); 1215 1187 goto out_elf_end; 1216 1188 } 1189 + 1190 + if (dso__swap_init(dso, ehdr.e_ident[EI_DATA])) 1191 + goto out_elf_end; 1217 1192 1218 1193 /* Always reject images with a mismatched build-id: */ 1219 1194 if (dso->has_build_id) {
··· 1303 1272 if (opdsec && sym.st_shndx == opdidx) { 1304 1273 u32 offset = sym.st_value - opdshdr.sh_addr; 1305 1274 u64 *opd = opddata->d_buf + offset; 1306 - sym.st_value = *opd; 1275 + sym.st_value = DSO__SWAP(dso, u64, *opd); 1307 1276 sym.st_shndx = elf_addr_to_index(elf, sym.st_value); 1308 1277 }
··· 2817 2786 2818 2787 struct map *dso__new_map(const char *name) 2819 2788 { 2789 + struct map *map = NULL; 2820 2790 struct dso *dso = dso__new(name); 2821 - struct map *map = map__new2(0, dso, MAP__FUNCTION); 2791 + 2792 + if (dso) 2793 + map = map__new2(0, dso, MAP__FUNCTION); 2822 2794 2823 2795 return map; 2824 2796 }
+30
tools/perf/util/symbol.h
··· 9 9 #include <linux/list.h> 10 10 #include <linux/rbtree.h> 11 11 #include <stdio.h> 12 + #include <byteswap.h> 12 13 13 14 #ifdef HAVE_CPLUS_DEMANGLE 14 15 extern char *cplus_demangle(const char *, int); ··· 161 160 DSO_TYPE_GUEST_KERNEL 162 161 }; 163 162 163 + enum dso_swap_type { 164 + DSO_SWAP__UNSET, 165 + DSO_SWAP__NO, 166 + DSO_SWAP__YES, 167 + }; 168 + 164 169 struct dso { 165 170 struct list_head node; 166 171 struct rb_root symbols[MAP__NR_TYPES]; 167 172 struct rb_root symbol_names[MAP__NR_TYPES]; 168 173 enum dso_kernel_type kernel; 174 + enum dso_swap_type needs_swap; 169 175 u8 adjust_symbols:1; 170 176 u8 has_build_id:1; 171 177 u8 hit:1; ··· 189 181 u16 short_name_len; 190 182 char name[0]; 191 183 }; 184 + 185 + #define DSO__SWAP(dso, type, val) \ 186 + ({ \ 187 + type ____r = val; \ 188 + BUG_ON(dso->needs_swap == DSO_SWAP__UNSET); \ 189 + if (dso->needs_swap == DSO_SWAP__YES) { \ 190 + switch (sizeof(____r)) { \ 191 + case 2: \ 192 + ____r = bswap_16(val); \ 193 + break; \ 194 + case 4: \ 195 + ____r = bswap_32(val); \ 196 + break; \ 197 + case 8: \ 198 + ____r = bswap_64(val); \ 199 + break; \ 200 + default: \ 201 + BUG_ON(1); \ 202 + } \ 203 + } \ 204 + ____r; \ 205 + }) 192 206 193 207 struct dso *dso__new(const char *name); 194 208 void dso__delete(struct dso *dso);