Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'perf-core-for-mingo-20160506' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

User visible changes:

- Fix ordering of kernel/user entries in 'caller' mode, where the kernel and
user parts were being correctly inverted but kept in place wrt each other,
i.e. 'callee' (k1, k2, u3, u4) became 'caller' (k2, k1, u4, u3) when it
should be 'caller' (u4, u3, k2, k1) (Chris Phlipot)

- In 'perf trace' don't print the raw arg syscall args for a syscall that has
no arguments, like gettid(). This was happening because just checking if
the syscall args list is NULL may mean that there are no args (e.g.: gettid)
or that there is no tracepoint info (e.g.: clone) (Arnaldo Carvalho de Melo)

- Add extra output of counter values with 'perf stat -vv' (Andi Kleen)

Infrastructure changes:

- Expose callchain db export via the python API (Chris Phlipot)

Code reorganization:

- Move some more syscall arg beautifiers from the 'perf trace' main file to
separate files in tools/perf/trace/beauty/, to reduce the main file line
count (Arnaldo Carvalho de Melo)

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>

+575 -348
+8
tools/perf/builtin-stat.c
··· 298 298 return -1; 299 299 } 300 300 } 301 + 302 + if (verbose > 1) { 303 + fprintf(stat_config.output, 304 + "%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n", 305 + perf_evsel__name(counter), 306 + cpu, 307 + count->val, count->ena, count->run); 308 + } 301 309 } 302 310 } 303 311
+10 -155
tools/perf/builtin-trace.c
··· 40 40 41 41 #include <libaudit.h> /* FIXME: Still needed for audit_errno_to_name */ 42 42 #include <stdlib.h> 43 - #include <linux/futex.h> 44 43 #include <linux/err.h> 45 44 #include <linux/seccomp.h> 46 45 #include <linux/filter.h> ··· 400 401 401 402 #define SCA_FLOCK syscall_arg__scnprintf_flock 402 403 403 - static size_t syscall_arg__scnprintf_futex_op(char *bf, size_t size, struct syscall_arg *arg) 404 - { 405 - enum syscall_futex_args { 406 - SCF_UADDR = (1 << 0), 407 - SCF_OP = (1 << 1), 408 - SCF_VAL = (1 << 2), 409 - SCF_TIMEOUT = (1 << 3), 410 - SCF_UADDR2 = (1 << 4), 411 - SCF_VAL3 = (1 << 5), 412 - }; 413 - int op = arg->val; 414 - int cmd = op & FUTEX_CMD_MASK; 415 - size_t printed = 0; 416 - 417 - switch (cmd) { 418 - #define P_FUTEX_OP(n) case FUTEX_##n: printed = scnprintf(bf, size, #n); 419 - P_FUTEX_OP(WAIT); arg->mask |= SCF_VAL3|SCF_UADDR2; break; 420 - P_FUTEX_OP(WAKE); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break; 421 - P_FUTEX_OP(FD); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break; 422 - P_FUTEX_OP(REQUEUE); arg->mask |= SCF_VAL3|SCF_TIMEOUT; break; 423 - P_FUTEX_OP(CMP_REQUEUE); arg->mask |= SCF_TIMEOUT; break; 424 - P_FUTEX_OP(CMP_REQUEUE_PI); arg->mask |= SCF_TIMEOUT; break; 425 - P_FUTEX_OP(WAKE_OP); break; 426 - P_FUTEX_OP(LOCK_PI); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break; 427 - P_FUTEX_OP(UNLOCK_PI); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break; 428 - P_FUTEX_OP(TRYLOCK_PI); arg->mask |= SCF_VAL3|SCF_UADDR2; break; 429 - P_FUTEX_OP(WAIT_BITSET); arg->mask |= SCF_UADDR2; break; 430 - P_FUTEX_OP(WAKE_BITSET); arg->mask |= SCF_UADDR2; break; 431 - P_FUTEX_OP(WAIT_REQUEUE_PI); break; 432 - default: printed = scnprintf(bf, size, "%#x", cmd); break; 433 - } 434 - 435 - if (op & FUTEX_PRIVATE_FLAG) 436 - printed += scnprintf(bf + printed, size - printed, "|PRIV"); 437 - 438 - if (op & FUTEX_CLOCK_REALTIME) 439 - printed += scnprintf(bf + printed, size - printed, "|CLKRT"); 440 - 441 - return printed; 442 - 
} 443 - 444 - #define SCA_FUTEX_OP syscall_arg__scnprintf_futex_op 445 - 446 404 static const char *bpf_cmd[] = { 447 405 "MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM", 448 406 "MAP_GET_NEXT_KEY", "PROG_LOAD", ··· 498 542 499 543 #define SCA_FILENAME syscall_arg__scnprintf_filename 500 544 501 - static size_t syscall_arg__scnprintf_open_flags(char *bf, size_t size, 502 - struct syscall_arg *arg) 503 - { 504 - int printed = 0, flags = arg->val; 505 - 506 - if (!(flags & O_CREAT)) 507 - arg->mask |= 1 << (arg->idx + 1); /* Mask the mode parm */ 508 - 509 - if (flags == 0) 510 - return scnprintf(bf, size, "RDONLY"); 511 - #define P_FLAG(n) \ 512 - if (flags & O_##n) { \ 513 - printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \ 514 - flags &= ~O_##n; \ 515 - } 516 - 517 - P_FLAG(APPEND); 518 - P_FLAG(ASYNC); 519 - P_FLAG(CLOEXEC); 520 - P_FLAG(CREAT); 521 - P_FLAG(DIRECT); 522 - P_FLAG(DIRECTORY); 523 - P_FLAG(EXCL); 524 - P_FLAG(LARGEFILE); 525 - P_FLAG(NOATIME); 526 - P_FLAG(NOCTTY); 527 - #ifdef O_NONBLOCK 528 - P_FLAG(NONBLOCK); 529 - #elif O_NDELAY 530 - P_FLAG(NDELAY); 531 - #endif 532 - #ifdef O_PATH 533 - P_FLAG(PATH); 534 - #endif 535 - P_FLAG(RDWR); 536 - #ifdef O_DSYNC 537 - if ((flags & O_SYNC) == O_SYNC) 538 - printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", "SYNC"); 539 - else { 540 - P_FLAG(DSYNC); 541 - } 542 - #else 543 - P_FLAG(SYNC); 544 - #endif 545 - P_FLAG(TRUNC); 546 - P_FLAG(WRONLY); 547 - #undef P_FLAG 548 - 549 - if (flags) 550 - printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? 
"|" : "", flags); 551 - 552 - return printed; 553 - } 554 - 555 - #define SCA_OPEN_FLAGS syscall_arg__scnprintf_open_flags 556 - 557 545 static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size, 558 546 struct syscall_arg *arg) 559 547 { ··· 520 620 } 521 621 522 622 #define SCA_PIPE_FLAGS syscall_arg__scnprintf_pipe_flags 523 - 524 - static size_t syscall_arg__scnprintf_signum(char *bf, size_t size, struct syscall_arg *arg) 525 - { 526 - int sig = arg->val; 527 - 528 - switch (sig) { 529 - #define P_SIGNUM(n) case SIG##n: return scnprintf(bf, size, #n) 530 - P_SIGNUM(HUP); 531 - P_SIGNUM(INT); 532 - P_SIGNUM(QUIT); 533 - P_SIGNUM(ILL); 534 - P_SIGNUM(TRAP); 535 - P_SIGNUM(ABRT); 536 - P_SIGNUM(BUS); 537 - P_SIGNUM(FPE); 538 - P_SIGNUM(KILL); 539 - P_SIGNUM(USR1); 540 - P_SIGNUM(SEGV); 541 - P_SIGNUM(USR2); 542 - P_SIGNUM(PIPE); 543 - P_SIGNUM(ALRM); 544 - P_SIGNUM(TERM); 545 - P_SIGNUM(CHLD); 546 - P_SIGNUM(CONT); 547 - P_SIGNUM(STOP); 548 - P_SIGNUM(TSTP); 549 - P_SIGNUM(TTIN); 550 - P_SIGNUM(TTOU); 551 - P_SIGNUM(URG); 552 - P_SIGNUM(XCPU); 553 - P_SIGNUM(XFSZ); 554 - P_SIGNUM(VTALRM); 555 - P_SIGNUM(PROF); 556 - P_SIGNUM(WINCH); 557 - P_SIGNUM(IO); 558 - P_SIGNUM(PWR); 559 - P_SIGNUM(SYS); 560 - #ifdef SIGEMT 561 - P_SIGNUM(EMT); 562 - #endif 563 - #ifdef SIGSTKFLT 564 - P_SIGNUM(STKFLT); 565 - #endif 566 - #ifdef SIGSWI 567 - P_SIGNUM(SWI); 568 - #endif 569 - default: break; 570 - } 571 - 572 - return scnprintf(bf, size, "%#x", sig); 573 - } 574 - 575 - #define SCA_SIGNUM syscall_arg__scnprintf_signum 576 623 577 624 #if defined(__i386__) || defined(__x86_64__) 578 625 /* ··· 634 787 .arg_parm = { [arg] = &strarray__##array, } 635 788 636 789 #include "trace/beauty/eventfd.c" 637 - #include "trace/beauty/pid.c" 790 + #include "trace/beauty/futex_op.c" 638 791 #include "trace/beauty/mmap.c" 639 792 #include "trace/beauty/mode_t.c" 640 793 #include "trace/beauty/msg_flags.c" 794 + #include "trace/beauty/open_flags.c" 641 795 #include 
"trace/beauty/perf_event_open.c" 796 + #include "trace/beauty/pid.c" 642 797 #include "trace/beauty/sched_policy.c" 798 + #include "trace/beauty/signum.c" 643 799 #include "trace/beauty/socket_type.c" 644 800 #include "trace/beauty/waitid_options.c" 645 801 ··· 1456 1606 "%ld", val); 1457 1607 } 1458 1608 } 1459 - } else { 1609 + } else if (IS_ERR(sc->tp_format)) { 1610 + /* 1611 + * If we managed to read the tracepoint /format file, then we 1612 + * may end up not having any args, like with gettid(), so only 1613 + * print the raw args when we didn't manage to read it. 1614 + */ 1460 1615 int i = 0; 1461 1616 1462 1617 while (i < 6) {
+30 -17
tools/perf/scripts/python/export-to-postgresql.py
··· 223 223 224 224 perf_db_export_mode = True 225 225 perf_db_export_calls = False 226 + perf_db_export_callchains = False 227 + 226 228 227 229 def usage(): 228 - print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>]" 230 + print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]" 229 231 print >> sys.stderr, "where: columns 'all' or 'branches'" 230 - print >> sys.stderr, " calls 'calls' => create calls table" 232 + print >> sys.stderr, " calls 'calls' => create calls and call_paths table" 233 + print >> sys.stderr, " callchains 'callchains' => create call_paths table" 231 234 raise Exception("Too few arguments") 232 235 233 236 if (len(sys.argv) < 2): ··· 248 245 249 246 branches = (columns == "branches") 250 247 251 - if (len(sys.argv) >= 4): 252 - if (sys.argv[3] == "calls"): 248 + for i in range(3,len(sys.argv)): 249 + if (sys.argv[i] == "calls"): 253 250 perf_db_export_calls = True 251 + elif (sys.argv[i] == "callchains"): 252 + perf_db_export_callchains = True 254 253 else: 255 254 usage() 256 255 ··· 363 358 'transaction bigint,' 364 359 'data_src bigint,' 365 360 'branch_type integer,' 366 - 'in_tx boolean)') 361 + 'in_tx boolean,' 362 + 'call_path_id bigint)') 367 363 368 - if perf_db_export_calls: 364 + if perf_db_export_calls or perf_db_export_callchains: 369 365 do_query(query, 'CREATE TABLE call_paths (' 370 366 'id bigint NOT NULL,' 371 367 'parent_id bigint,' 372 368 'symbol_id bigint,' 373 369 'ip bigint)') 370 + if perf_db_export_calls: 374 371 do_query(query, 'CREATE TABLE calls (' 375 372 'id bigint NOT NULL,' 376 373 'thread_id bigint,' ··· 434 427 '(SELECT tid FROM threads WHERE id = thread_id) AS tid' 435 428 ' FROM comm_threads') 436 429 437 - if perf_db_export_calls: 430 + if perf_db_export_calls or perf_db_export_callchains: 438 431 do_query(query, 'CREATE VIEW call_paths_view AS ' 439 432 'SELECT ' 440 433 'c.id,' ··· 450 443 '(SELECT dso_id FROM 
symbols WHERE id = p.symbol_id) AS parent_dso_id,' 451 444 '(SELECT dso FROM symbols_view WHERE id = p.symbol_id) AS parent_dso_short_name' 452 445 ' FROM call_paths c INNER JOIN call_paths p ON p.id = c.parent_id') 446 + if perf_db_export_calls: 453 447 do_query(query, 'CREATE VIEW calls_view AS ' 454 448 'SELECT ' 455 449 'calls.id,' ··· 548 540 symbol_file = open_output_file("symbol_table.bin") 549 541 branch_type_file = open_output_file("branch_type_table.bin") 550 542 sample_file = open_output_file("sample_table.bin") 551 - if perf_db_export_calls: 543 + if perf_db_export_calls or perf_db_export_callchains: 552 544 call_path_file = open_output_file("call_path_table.bin") 545 + if perf_db_export_calls: 553 546 call_file = open_output_file("call_table.bin") 554 547 555 548 def trace_begin(): ··· 562 553 comm_table(0, "unknown") 563 554 dso_table(0, 0, "unknown", "unknown", "") 564 555 symbol_table(0, 0, 0, 0, 0, "unknown") 565 - sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) 566 - if perf_db_export_calls: 556 + sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) 557 + if perf_db_export_calls or perf_db_export_callchains: 567 558 call_path_table(0, 0, 0, 0) 568 559 569 560 unhandled_count = 0 ··· 579 570 copy_output_file(symbol_file, "symbols") 580 571 copy_output_file(branch_type_file, "branch_types") 581 572 copy_output_file(sample_file, "samples") 582 - if perf_db_export_calls: 573 + if perf_db_export_calls or perf_db_export_callchains: 583 574 copy_output_file(call_path_file, "call_paths") 575 + if perf_db_export_calls: 584 576 copy_output_file(call_file, "calls") 585 577 586 578 print datetime.datetime.today(), "Removing intermediate files..." 
··· 594 584 remove_output_file(symbol_file) 595 585 remove_output_file(branch_type_file) 596 586 remove_output_file(sample_file) 597 - if perf_db_export_calls: 587 + if perf_db_export_calls or perf_db_export_callchains: 598 588 remove_output_file(call_path_file) 589 + if perf_db_export_calls: 599 590 remove_output_file(call_file) 600 591 os.rmdir(output_dir_name) 601 592 print datetime.datetime.today(), "Adding primary keys" ··· 609 598 do_query(query, 'ALTER TABLE symbols ADD PRIMARY KEY (id)') 610 599 do_query(query, 'ALTER TABLE branch_types ADD PRIMARY KEY (id)') 611 600 do_query(query, 'ALTER TABLE samples ADD PRIMARY KEY (id)') 612 - if perf_db_export_calls: 601 + if perf_db_export_calls or perf_db_export_callchains: 613 602 do_query(query, 'ALTER TABLE call_paths ADD PRIMARY KEY (id)') 603 + if perf_db_export_calls: 614 604 do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)') 615 605 616 606 print datetime.datetime.today(), "Adding foreign keys" ··· 634 622 'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id),' 635 623 'ADD CONSTRAINT todsofk FOREIGN KEY (to_dso_id) REFERENCES dsos (id),' 636 624 'ADD CONSTRAINT tosymbolfk FOREIGN KEY (to_symbol_id) REFERENCES symbols (id)') 637 - if perf_db_export_calls: 625 + if perf_db_export_calls or perf_db_export_callchains: 638 626 do_query(query, 'ALTER TABLE call_paths ' 639 627 'ADD CONSTRAINT parentfk FOREIGN KEY (parent_id) REFERENCES call_paths (id),' 640 628 'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id)') 629 + if perf_db_export_calls: 641 630 do_query(query, 'ALTER TABLE calls ' 642 631 'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),' 643 632 'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),' ··· 706 693 value = struct.pack(fmt, 2, 4, branch_type, n, name) 707 694 branch_type_file.write(value) 708 695 709 - def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, 
cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, *x): 696 + def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, call_path_id, *x): 710 697 if branches: 711 - value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiiiB", 17, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 4, branch_type, 1, in_tx) 698 + value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiiiBiq", 18, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 4, branch_type, 1, in_tx, 8, call_path_id) 712 699 else: 713 - value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiqiqiqiqiiiB", 21, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 8, period, 8, weight, 8, transaction, 8, data_src, 4, branch_type, 1, in_tx) 700 + value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiqiqiqiqiiiBiq", 22, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 8, period, 8, weight, 8, transaction, 8, data_src, 4, branch_type, 1, in_tx, 8, call_path_id) 714 701 sample_file.write(value) 715 702 716 703 def call_path_table(cp_id, parent_id, symbol_id, ip, *x):
+44
tools/perf/trace/beauty/futex_op.c
··· 1 + #include <linux/futex.h> 2 + 3 + static size_t syscall_arg__scnprintf_futex_op(char *bf, size_t size, struct syscall_arg *arg) 4 + { 5 + enum syscall_futex_args { 6 + SCF_UADDR = (1 << 0), 7 + SCF_OP = (1 << 1), 8 + SCF_VAL = (1 << 2), 9 + SCF_TIMEOUT = (1 << 3), 10 + SCF_UADDR2 = (1 << 4), 11 + SCF_VAL3 = (1 << 5), 12 + }; 13 + int op = arg->val; 14 + int cmd = op & FUTEX_CMD_MASK; 15 + size_t printed = 0; 16 + 17 + switch (cmd) { 18 + #define P_FUTEX_OP(n) case FUTEX_##n: printed = scnprintf(bf, size, #n); 19 + P_FUTEX_OP(WAIT); arg->mask |= SCF_VAL3|SCF_UADDR2; break; 20 + P_FUTEX_OP(WAKE); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break; 21 + P_FUTEX_OP(FD); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break; 22 + P_FUTEX_OP(REQUEUE); arg->mask |= SCF_VAL3|SCF_TIMEOUT; break; 23 + P_FUTEX_OP(CMP_REQUEUE); arg->mask |= SCF_TIMEOUT; break; 24 + P_FUTEX_OP(CMP_REQUEUE_PI); arg->mask |= SCF_TIMEOUT; break; 25 + P_FUTEX_OP(WAKE_OP); break; 26 + P_FUTEX_OP(LOCK_PI); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break; 27 + P_FUTEX_OP(UNLOCK_PI); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break; 28 + P_FUTEX_OP(TRYLOCK_PI); arg->mask |= SCF_VAL3|SCF_UADDR2; break; 29 + P_FUTEX_OP(WAIT_BITSET); arg->mask |= SCF_UADDR2; break; 30 + P_FUTEX_OP(WAKE_BITSET); arg->mask |= SCF_UADDR2; break; 31 + P_FUTEX_OP(WAIT_REQUEUE_PI); break; 32 + default: printed = scnprintf(bf, size, "%#x", cmd); break; 33 + } 34 + 35 + if (op & FUTEX_PRIVATE_FLAG) 36 + printed += scnprintf(bf + printed, size - printed, "|PRIV"); 37 + 38 + if (op & FUTEX_CLOCK_REALTIME) 39 + printed += scnprintf(bf + printed, size - printed, "|CLKRT"); 40 + 41 + return printed; 42 + } 43 + 44 + #define SCA_FUTEX_OP syscall_arg__scnprintf_futex_op
+56
tools/perf/trace/beauty/open_flags.c
··· 1 + 2 + static size_t syscall_arg__scnprintf_open_flags(char *bf, size_t size, 3 + struct syscall_arg *arg) 4 + { 5 + int printed = 0, flags = arg->val; 6 + 7 + if (!(flags & O_CREAT)) 8 + arg->mask |= 1 << (arg->idx + 1); /* Mask the mode parm */ 9 + 10 + if (flags == 0) 11 + return scnprintf(bf, size, "RDONLY"); 12 + #define P_FLAG(n) \ 13 + if (flags & O_##n) { \ 14 + printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \ 15 + flags &= ~O_##n; \ 16 + } 17 + 18 + P_FLAG(APPEND); 19 + P_FLAG(ASYNC); 20 + P_FLAG(CLOEXEC); 21 + P_FLAG(CREAT); 22 + P_FLAG(DIRECT); 23 + P_FLAG(DIRECTORY); 24 + P_FLAG(EXCL); 25 + P_FLAG(LARGEFILE); 26 + P_FLAG(NOATIME); 27 + P_FLAG(NOCTTY); 28 + #ifdef O_NONBLOCK 29 + P_FLAG(NONBLOCK); 30 + #elif O_NDELAY 31 + P_FLAG(NDELAY); 32 + #endif 33 + #ifdef O_PATH 34 + P_FLAG(PATH); 35 + #endif 36 + P_FLAG(RDWR); 37 + #ifdef O_DSYNC 38 + if ((flags & O_SYNC) == O_SYNC) 39 + printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", "SYNC"); 40 + else { 41 + P_FLAG(DSYNC); 42 + } 43 + #else 44 + P_FLAG(SYNC); 45 + #endif 46 + P_FLAG(TRUNC); 47 + P_FLAG(WRONLY); 48 + #undef P_FLAG 49 + 50 + if (flags) 51 + printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags); 52 + 53 + return printed; 54 + } 55 + 56 + #define SCA_OPEN_FLAGS syscall_arg__scnprintf_open_flags
+53
tools/perf/trace/beauty/signum.c
··· 1 + 2 + static size_t syscall_arg__scnprintf_signum(char *bf, size_t size, struct syscall_arg *arg) 3 + { 4 + int sig = arg->val; 5 + 6 + switch (sig) { 7 + #define P_SIGNUM(n) case SIG##n: return scnprintf(bf, size, #n) 8 + P_SIGNUM(HUP); 9 + P_SIGNUM(INT); 10 + P_SIGNUM(QUIT); 11 + P_SIGNUM(ILL); 12 + P_SIGNUM(TRAP); 13 + P_SIGNUM(ABRT); 14 + P_SIGNUM(BUS); 15 + P_SIGNUM(FPE); 16 + P_SIGNUM(KILL); 17 + P_SIGNUM(USR1); 18 + P_SIGNUM(SEGV); 19 + P_SIGNUM(USR2); 20 + P_SIGNUM(PIPE); 21 + P_SIGNUM(ALRM); 22 + P_SIGNUM(TERM); 23 + P_SIGNUM(CHLD); 24 + P_SIGNUM(CONT); 25 + P_SIGNUM(STOP); 26 + P_SIGNUM(TSTP); 27 + P_SIGNUM(TTIN); 28 + P_SIGNUM(TTOU); 29 + P_SIGNUM(URG); 30 + P_SIGNUM(XCPU); 31 + P_SIGNUM(XFSZ); 32 + P_SIGNUM(VTALRM); 33 + P_SIGNUM(PROF); 34 + P_SIGNUM(WINCH); 35 + P_SIGNUM(IO); 36 + P_SIGNUM(PWR); 37 + P_SIGNUM(SYS); 38 + #ifdef SIGEMT 39 + P_SIGNUM(EMT); 40 + #endif 41 + #ifdef SIGSTKFLT 42 + P_SIGNUM(STKFLT); 43 + #endif 44 + #ifdef SIGSWI 45 + P_SIGNUM(SWI); 46 + #endif 47 + default: break; 48 + } 49 + 50 + return scnprintf(bf, size, "%#x", sig); 51 + } 52 + 53 + #define SCA_SIGNUM syscall_arg__scnprintf_signum
+1
tools/perf/util/Build
··· 74 74 libperf-y += data.o 75 75 libperf-y += tsc.o 76 76 libperf-y += cloexec.o 77 + libperf-y += call-path.o 77 78 libperf-y += thread-stack.o 78 79 libperf-$(CONFIG_AUXTRACE) += auxtrace.o 79 80 libperf-$(CONFIG_AUXTRACE) += intel-pt-decoder/
+122
tools/perf/util/call-path.c
··· 1 + /* 2 + * call-path.c: Manipulate a tree data structure containing function call paths 3 + * Copyright (c) 2014, Intel Corporation. 4 + * 5 + * This program is free software; you can redistribute it and/or modify it 6 + * under the terms and conditions of the GNU General Public License, 7 + * version 2, as published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope it will be useful, but WITHOUT 10 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 + * more details. 13 + * 14 + */ 15 + 16 + #include <linux/rbtree.h> 17 + #include <linux/list.h> 18 + 19 + #include "util.h" 20 + #include "call-path.h" 21 + 22 + static void call_path__init(struct call_path *cp, struct call_path *parent, 23 + struct symbol *sym, u64 ip, bool in_kernel) 24 + { 25 + cp->parent = parent; 26 + cp->sym = sym; 27 + cp->ip = sym ? 0 : ip; 28 + cp->db_id = 0; 29 + cp->in_kernel = in_kernel; 30 + RB_CLEAR_NODE(&cp->rb_node); 31 + cp->children = RB_ROOT; 32 + } 33 + 34 + struct call_path_root *call_path_root__new(void) 35 + { 36 + struct call_path_root *cpr; 37 + 38 + cpr = zalloc(sizeof(struct call_path_root)); 39 + if (!cpr) 40 + return NULL; 41 + call_path__init(&cpr->call_path, NULL, NULL, 0, false); 42 + INIT_LIST_HEAD(&cpr->blocks); 43 + return cpr; 44 + } 45 + 46 + void call_path_root__free(struct call_path_root *cpr) 47 + { 48 + struct call_path_block *pos, *n; 49 + 50 + list_for_each_entry_safe(pos, n, &cpr->blocks, node) { 51 + list_del(&pos->node); 52 + free(pos); 53 + } 54 + free(cpr); 55 + } 56 + 57 + static struct call_path *call_path__new(struct call_path_root *cpr, 58 + struct call_path *parent, 59 + struct symbol *sym, u64 ip, 60 + bool in_kernel) 61 + { 62 + struct call_path_block *cpb; 63 + struct call_path *cp; 64 + size_t n; 65 + 66 + if (cpr->next < cpr->sz) { 67 + cpb = list_last_entry(&cpr->blocks, struct call_path_block, 68 + 
node); 69 + } else { 70 + cpb = zalloc(sizeof(struct call_path_block)); 71 + if (!cpb) 72 + return NULL; 73 + list_add_tail(&cpb->node, &cpr->blocks); 74 + cpr->sz += CALL_PATH_BLOCK_SIZE; 75 + } 76 + 77 + n = cpr->next++ & CALL_PATH_BLOCK_MASK; 78 + cp = &cpb->cp[n]; 79 + 80 + call_path__init(cp, parent, sym, ip, in_kernel); 81 + 82 + return cp; 83 + } 84 + 85 + struct call_path *call_path__findnew(struct call_path_root *cpr, 86 + struct call_path *parent, 87 + struct symbol *sym, u64 ip, u64 ks) 88 + { 89 + struct rb_node **p; 90 + struct rb_node *node_parent = NULL; 91 + struct call_path *cp; 92 + bool in_kernel = ip >= ks; 93 + 94 + if (sym) 95 + ip = 0; 96 + 97 + if (!parent) 98 + return call_path__new(cpr, parent, sym, ip, in_kernel); 99 + 100 + p = &parent->children.rb_node; 101 + while (*p != NULL) { 102 + node_parent = *p; 103 + cp = rb_entry(node_parent, struct call_path, rb_node); 104 + 105 + if (cp->sym == sym && cp->ip == ip) 106 + return cp; 107 + 108 + if (sym < cp->sym || (sym == cp->sym && ip < cp->ip)) 109 + p = &(*p)->rb_left; 110 + else 111 + p = &(*p)->rb_right; 112 + } 113 + 114 + cp = call_path__new(cpr, parent, sym, ip, in_kernel); 115 + if (!cp) 116 + return NULL; 117 + 118 + rb_link_node(&cp->rb_node, node_parent, p); 119 + rb_insert_color(&cp->rb_node, &parent->children); 120 + 121 + return cp; 122 + }
+77
tools/perf/util/call-path.h
··· 1 + /* 2 + * call-path.h: Manipulate a tree data structure containing function call paths 3 + * Copyright (c) 2014, Intel Corporation. 4 + * 5 + * This program is free software; you can redistribute it and/or modify it 6 + * under the terms and conditions of the GNU General Public License, 7 + * version 2, as published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope it will be useful, but WITHOUT 10 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 + * more details. 13 + * 14 + */ 15 + 16 + #ifndef __PERF_CALL_PATH_H 17 + #define __PERF_CALL_PATH_H 18 + 19 + #include <sys/types.h> 20 + 21 + #include <linux/types.h> 22 + #include <linux/rbtree.h> 23 + 24 + /** 25 + * struct call_path - node in list of calls leading to a function call. 26 + * @parent: call path to the parent function call 27 + * @sym: symbol of function called 28 + * @ip: only if sym is null, the ip of the function 29 + * @db_id: id used for db-export 30 + * @in_kernel: whether function is in the kernel 31 + * @rb_node: node in parent's tree of called functions 32 + * @children: tree of call paths of functions called 33 + * 34 + * In combination with the call_return structure, the call_path structure 35 + * defines a context-sensitive call-graph. 36 + */ 37 + struct call_path { 38 + struct call_path *parent; 39 + struct symbol *sym; 40 + u64 ip; 41 + u64 db_id; 42 + bool in_kernel; 43 + struct rb_node rb_node; 44 + struct rb_root children; 45 + }; 46 + 47 + #define CALL_PATH_BLOCK_SHIFT 8 48 + #define CALL_PATH_BLOCK_SIZE (1 << CALL_PATH_BLOCK_SHIFT) 49 + #define CALL_PATH_BLOCK_MASK (CALL_PATH_BLOCK_SIZE - 1) 50 + 51 + struct call_path_block { 52 + struct call_path cp[CALL_PATH_BLOCK_SIZE]; 53 + struct list_head node; 54 + }; 55 + 56 + /** 57 + * struct call_path_root - root of all call paths. 
58 + * @call_path: root call path 59 + * @blocks: list of blocks to store call paths 60 + * @next: next free space 61 + * @sz: number of spaces 62 + */ 63 + struct call_path_root { 64 + struct call_path call_path; 65 + struct list_head blocks; 66 + size_t next; 67 + size_t sz; 68 + }; 69 + 70 + struct call_path_root *call_path_root__new(void); 71 + void call_path_root__free(struct call_path_root *cpr); 72 + 73 + struct call_path *call_path__findnew(struct call_path_root *cpr, 74 + struct call_path *parent, 75 + struct symbol *sym, u64 ip, u64 ks); 76 + 77 + #endif
+85
tools/perf/util/db-export.c
··· 23 23 #include "event.h" 24 24 #include "util.h" 25 25 #include "thread-stack.h" 26 + #include "callchain.h" 27 + #include "call-path.h" 26 28 #include "db-export.h" 27 29 28 30 struct deferred_export { ··· 278 276 return 0; 279 277 } 280 278 279 + static struct call_path *call_path_from_sample(struct db_export *dbe, 280 + struct machine *machine, 281 + struct thread *thread, 282 + struct perf_sample *sample, 283 + struct perf_evsel *evsel) 284 + { 285 + u64 kernel_start = machine__kernel_start(machine); 286 + struct call_path *current = &dbe->cpr->call_path; 287 + enum chain_order saved_order = callchain_param.order; 288 + int err; 289 + 290 + if (!symbol_conf.use_callchain || !sample->callchain) 291 + return NULL; 292 + 293 + /* 294 + * Since the call path tree must be built starting with the root, we 295 + * must use ORDER_CALLER for call chain resolution, in order to process 296 + * the callchain starting with the root node and ending with the leaf. 297 + */ 298 + callchain_param.order = ORDER_CALLER; 299 + err = thread__resolve_callchain(thread, &callchain_cursor, evsel, 300 + sample, NULL, NULL, 301 + sysctl_perf_event_max_stack); 302 + if (err) { 303 + callchain_param.order = saved_order; 304 + return NULL; 305 + } 306 + callchain_cursor_commit(&callchain_cursor); 307 + 308 + while (1) { 309 + struct callchain_cursor_node *node; 310 + struct addr_location al; 311 + u64 dso_db_id = 0, sym_db_id = 0, offset = 0; 312 + 313 + memset(&al, 0, sizeof(al)); 314 + 315 + node = callchain_cursor_current(&callchain_cursor); 316 + if (!node) 317 + break; 318 + /* 319 + * Handle export of symbol and dso for this node by 320 + * constructing an addr_location struct and then passing it to 321 + * db_ids_from_al() to perform the export. 
322 + */ 323 + al.sym = node->sym; 324 + al.map = node->map; 325 + al.machine = machine; 326 + if (al.map) 327 + al.addr = al.map->map_ip(al.map, node->ip); 328 + else 329 + al.addr = node->ip; 330 + 331 + db_ids_from_al(dbe, &al, &dso_db_id, &sym_db_id, &offset); 332 + 333 + /* add node to the call path tree if it doesn't exist */ 334 + current = call_path__findnew(dbe->cpr, current, 335 + al.sym, node->ip, 336 + kernel_start); 337 + 338 + callchain_cursor_advance(&callchain_cursor); 339 + } 340 + 341 + /* Reset the callchain order to its prior value. */ 342 + callchain_param.order = saved_order; 343 + 344 + if (current == &dbe->cpr->call_path) { 345 + /* Bail because the callchain was empty. */ 346 + return NULL; 347 + } 348 + 349 + return current; 350 + } 351 + 281 352 int db_export__branch_type(struct db_export *dbe, u32 branch_type, 282 353 const char *name) 283 354 { ··· 403 328 err = db_ids_from_al(dbe, al, &es.dso_db_id, &es.sym_db_id, &es.offset); 404 329 if (err) 405 330 goto out_put; 331 + 332 + if (dbe->cpr) { 333 + struct call_path *cp = call_path_from_sample(dbe, al->machine, 334 + thread, sample, 335 + evsel); 336 + if (cp) { 337 + db_export__call_path(dbe, cp); 338 + es.call_path_id = cp->db_id; 339 + } 340 + } 406 341 407 342 if ((evsel->attr.sample_type & PERF_SAMPLE_ADDR) && 408 343 sample_addr_correlates_sym(&evsel->attr)) {
+3
tools/perf/util/db-export.h
··· 27 27 struct perf_sample; 28 28 struct addr_location; 29 29 struct call_return_processor; 30 + struct call_path_root; 30 31 struct call_path; 31 32 struct call_return; 32 33 ··· 44 43 u64 addr_dso_db_id; 45 44 u64 addr_sym_db_id; 46 45 u64 addr_offset; /* addr offset from symbol start */ 46 + u64 call_path_id; 47 47 }; 48 48 49 49 struct db_export { ··· 66 64 int (*export_call_return)(struct db_export *dbe, 67 65 struct call_return *cr); 68 66 struct call_return_processor *crp; 67 + struct call_path_root *cpr; 69 68 u64 evsel_last_db_id; 70 69 u64 machine_last_db_id; 71 70 u64 thread_last_db_id;
+41 -15
tools/perf/util/machine.c
··· 1817 1817 int skip_idx = -1; 1818 1818 int first_call = 0; 1819 1819 1820 - callchain_cursor_reset(cursor); 1821 - 1822 1820 if (perf_evsel__has_branch_callstack(evsel)) { 1823 1821 err = resolve_lbr_callchain_sample(thread, cursor, sample, parent, 1824 1822 root_al, max_stack); ··· 1927 1929 entry->map, entry->sym); 1928 1930 } 1929 1931 1930 - int thread__resolve_callchain(struct thread *thread, 1931 - struct callchain_cursor *cursor, 1932 - struct perf_evsel *evsel, 1933 - struct perf_sample *sample, 1934 - struct symbol **parent, 1935 - struct addr_location *root_al, 1936 - int max_stack) 1932 + static int thread__resolve_callchain_unwind(struct thread *thread, 1933 + struct callchain_cursor *cursor, 1934 + struct perf_evsel *evsel, 1935 + struct perf_sample *sample, 1936 + int max_stack) 1937 1937 { 1938 - int ret = thread__resolve_callchain_sample(thread, cursor, evsel, 1939 - sample, parent, 1940 - root_al, max_stack); 1941 - if (ret) 1942 - return ret; 1943 - 1944 1938 /* Can we do dwarf post unwind? 
*/ 1945 1939 if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) && 1946 1940 (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER))) ··· 1945 1955 1946 1956 return unwind__get_entries(unwind_entry, cursor, 1947 1957 thread, sample, max_stack); 1958 + } 1948 1959 1960 + int thread__resolve_callchain(struct thread *thread, 1961 + struct callchain_cursor *cursor, 1962 + struct perf_evsel *evsel, 1963 + struct perf_sample *sample, 1964 + struct symbol **parent, 1965 + struct addr_location *root_al, 1966 + int max_stack) 1967 + { 1968 + int ret = 0; 1969 + 1970 + callchain_cursor_reset(&callchain_cursor); 1971 + 1972 + if (callchain_param.order == ORDER_CALLEE) { 1973 + ret = thread__resolve_callchain_sample(thread, cursor, 1974 + evsel, sample, 1975 + parent, root_al, 1976 + max_stack); 1977 + if (ret) 1978 + return ret; 1979 + ret = thread__resolve_callchain_unwind(thread, cursor, 1980 + evsel, sample, 1981 + max_stack); 1982 + } else { 1983 + ret = thread__resolve_callchain_unwind(thread, cursor, 1984 + evsel, sample, 1985 + max_stack); 1986 + if (ret) 1987 + return ret; 1988 + ret = thread__resolve_callchain_sample(thread, cursor, 1989 + evsel, sample, 1990 + parent, root_al, 1991 + max_stack); 1992 + } 1993 + 1994 + return ret; 1949 1995 } 1950 1996 1951 1997 int machine__for_each_thread(struct machine *machine,
+34 -2
tools/perf/util/scripting-engines/trace-event-python.c
··· 41 41 #include "../thread-stack.h" 42 42 #include "../trace-event.h" 43 43 #include "../machine.h" 44 + #include "../call-path.h" 44 45 #include "thread_map.h" 45 46 #include "cpumap.h" 46 47 #include "stat.h" ··· 682 681 struct tables *tables = container_of(dbe, struct tables, dbe); 683 682 PyObject *t; 684 683 685 - t = tuple_new(21); 684 + t = tuple_new(22); 686 685 687 686 tuple_set_u64(t, 0, es->db_id); 688 687 tuple_set_u64(t, 1, es->evsel->db_id); ··· 705 704 tuple_set_u64(t, 18, es->sample->data_src); 706 705 tuple_set_s32(t, 19, es->sample->flags & PERF_BRANCH_MASK); 707 706 tuple_set_s32(t, 20, !!(es->sample->flags & PERF_IP_FLAG_IN_TX)); 707 + tuple_set_u64(t, 21, es->call_path_id); 708 708 709 709 call_object(tables->sample_handler, t, "sample_table"); 710 710 ··· 1000 998 { 1001 999 const char *perf_db_export_mode = "perf_db_export_mode"; 1002 1000 const char *perf_db_export_calls = "perf_db_export_calls"; 1003 - PyObject *db_export_mode, *db_export_calls; 1001 + const char *perf_db_export_callchains = "perf_db_export_callchains"; 1002 + PyObject *db_export_mode, *db_export_calls, *db_export_callchains; 1004 1003 bool export_calls = false; 1004 + bool export_callchains = false; 1005 1005 int ret; 1006 1006 1007 1007 memset(tables, 0, sizeof(struct tables)); ··· 1020 1016 if (!ret) 1021 1017 return; 1022 1018 1019 + /* handle export calls */ 1023 1020 tables->dbe.crp = NULL; 1024 1021 db_export_calls = PyDict_GetItemString(main_dict, perf_db_export_calls); 1025 1022 if (db_export_calls) { ··· 1035 1030 call_return_processor__new(python_process_call_return, 1036 1031 &tables->dbe); 1037 1032 if (!tables->dbe.crp) 1033 + Py_FatalError("failed to create calls processor"); 1034 + } 1035 + 1036 + /* handle export callchains */ 1037 + tables->dbe.cpr = NULL; 1038 + db_export_callchains = PyDict_GetItemString(main_dict, 1039 + perf_db_export_callchains); 1040 + if (db_export_callchains) { 1041 + ret = PyObject_IsTrue(db_export_callchains); 1042 + if (ret 
== -1) 1043 + handler_call_die(perf_db_export_callchains); 1044 + export_callchains = !!ret; 1045 + } 1046 + 1047 + if (export_callchains) { 1048 + /* 1049 + * Attempt to use the call path root from the call return 1050 + * processor, if the call return processor is in use. Otherwise, 1051 + * we allocate a new call path root. This prevents exporting 1052 + * duplicate call path ids when both are in use simultaneously. 1053 + */ 1054 + if (tables->dbe.crp) 1055 + tables->dbe.cpr = tables->dbe.crp->cpr; 1056 + else 1057 + tables->dbe.cpr = call_path_root__new(); 1058 + 1059 + if (!tables->dbe.cpr) 1038 1060 Py_FatalError("failed to create calls processor"); 1039 1061 } 1040 1062
+1 -138
tools/perf/util/thread-stack.c
··· 22 22 #include "debug.h" 23 23 #include "symbol.h" 24 24 #include "comm.h" 25 + #include "call-path.h" 25 26 #include "thread-stack.h" 26 - 27 - #define CALL_PATH_BLOCK_SHIFT 8 28 - #define CALL_PATH_BLOCK_SIZE (1 << CALL_PATH_BLOCK_SHIFT) 29 - #define CALL_PATH_BLOCK_MASK (CALL_PATH_BLOCK_SIZE - 1) 30 - 31 - struct call_path_block { 32 - struct call_path cp[CALL_PATH_BLOCK_SIZE]; 33 - struct list_head node; 34 - }; 35 - 36 - /** 37 - * struct call_path_root - root of all call paths. 38 - * @call_path: root call path 39 - * @blocks: list of blocks to store call paths 40 - * @next: next free space 41 - * @sz: number of spaces 42 - */ 43 - struct call_path_root { 44 - struct call_path call_path; 45 - struct list_head blocks; 46 - size_t next; 47 - size_t sz; 48 - }; 49 - 50 - /** 51 - * struct call_return_processor - provides a call-back to consume call-return 52 - * information. 53 - * @cpr: call path root 54 - * @process: call-back that accepts call/return information 55 - * @data: anonymous data for call-back 56 - */ 57 - struct call_return_processor { 58 - struct call_path_root *cpr; 59 - int (*process)(struct call_return *cr, void *data); 60 - void *data; 61 - }; 62 27 63 28 #define STACK_GROWTH 2048 64 29 ··· 298 333 299 334 for (i = 1; i < chain->nr; i++) 300 335 chain->ips[i] = thread->ts->stack[thread->ts->cnt - i].ret_addr; 301 - } 302 - 303 - static void call_path__init(struct call_path *cp, struct call_path *parent, 304 - struct symbol *sym, u64 ip, bool in_kernel) 305 - { 306 - cp->parent = parent; 307 - cp->sym = sym; 308 - cp->ip = sym ? 
0 : ip; 309 - cp->db_id = 0; 310 - cp->in_kernel = in_kernel; 311 - RB_CLEAR_NODE(&cp->rb_node); 312 - cp->children = RB_ROOT; 313 - } 314 - 315 - static struct call_path_root *call_path_root__new(void) 316 - { 317 - struct call_path_root *cpr; 318 - 319 - cpr = zalloc(sizeof(struct call_path_root)); 320 - if (!cpr) 321 - return NULL; 322 - call_path__init(&cpr->call_path, NULL, NULL, 0, false); 323 - INIT_LIST_HEAD(&cpr->blocks); 324 - return cpr; 325 - } 326 - 327 - static void call_path_root__free(struct call_path_root *cpr) 328 - { 329 - struct call_path_block *pos, *n; 330 - 331 - list_for_each_entry_safe(pos, n, &cpr->blocks, node) { 332 - list_del(&pos->node); 333 - free(pos); 334 - } 335 - free(cpr); 336 - } 337 - 338 - static struct call_path *call_path__new(struct call_path_root *cpr, 339 - struct call_path *parent, 340 - struct symbol *sym, u64 ip, 341 - bool in_kernel) 342 - { 343 - struct call_path_block *cpb; 344 - struct call_path *cp; 345 - size_t n; 346 - 347 - if (cpr->next < cpr->sz) { 348 - cpb = list_last_entry(&cpr->blocks, struct call_path_block, 349 - node); 350 - } else { 351 - cpb = zalloc(sizeof(struct call_path_block)); 352 - if (!cpb) 353 - return NULL; 354 - list_add_tail(&cpb->node, &cpr->blocks); 355 - cpr->sz += CALL_PATH_BLOCK_SIZE; 356 - } 357 - 358 - n = cpr->next++ & CALL_PATH_BLOCK_MASK; 359 - cp = &cpb->cp[n]; 360 - 361 - call_path__init(cp, parent, sym, ip, in_kernel); 362 - 363 - return cp; 364 - } 365 - 366 - static struct call_path *call_path__findnew(struct call_path_root *cpr, 367 - struct call_path *parent, 368 - struct symbol *sym, u64 ip, u64 ks) 369 - { 370 - struct rb_node **p; 371 - struct rb_node *node_parent = NULL; 372 - struct call_path *cp; 373 - bool in_kernel = ip >= ks; 374 - 375 - if (sym) 376 - ip = 0; 377 - 378 - if (!parent) 379 - return call_path__new(cpr, parent, sym, ip, in_kernel); 380 - 381 - p = &parent->children.rb_node; 382 - while (*p != NULL) { 383 - node_parent = *p; 384 - cp = 
rb_entry(node_parent, struct call_path, rb_node); 385 - 386 - if (cp->sym == sym && cp->ip == ip) 387 - return cp; 388 - 389 - if (sym < cp->sym || (sym == cp->sym && ip < cp->ip)) 390 - p = &(*p)->rb_left; 391 - else 392 - p = &(*p)->rb_right; 393 - } 394 - 395 - cp = call_path__new(cpr, parent, sym, ip, in_kernel); 396 - if (!cp) 397 - return NULL; 398 - 399 - rb_link_node(&cp->rb_node, node_parent, p); 400 - rb_insert_color(&cp->rb_node, &parent->children); 401 - 402 - return cp; 403 336 } 404 337 405 338 struct call_return_processor *
+10 -21
tools/perf/util/thread-stack.h
··· 19 19 #include <sys/types.h> 20 20 21 21 #include <linux/types.h> 22 - #include <linux/rbtree.h> 23 22 24 23 struct thread; 25 24 struct comm; 26 25 struct ip_callchain; 27 26 struct symbol; 28 27 struct dso; 29 - struct call_return_processor; 30 28 struct comm; 31 29 struct perf_sample; 32 30 struct addr_location; 31 + struct call_path; 33 32 34 33 /* 35 34 * Call/Return flags. ··· 68 69 }; 69 70 70 71 /** 71 - * struct call_path - node in list of calls leading to a function call. 72 - * @parent: call path to the parent function call 73 - * @sym: symbol of function called 74 - * @ip: only if sym is null, the ip of the function 75 - * @db_id: id used for db-export 76 - * @in_kernel: whether function is a in the kernel 77 - * @rb_node: node in parent's tree of called functions 78 - * @children: tree of call paths of functions called 79 - * 80 - * In combination with the call_return structure, the call_path structure 81 - * defines a context-sensitve call-graph. 72 + * struct call_return_processor - provides a call-back to consume call-return 73 + * information. 74 + * @cpr: call path root 75 + * @process: call-back that accepts call/return information 76 + * @data: anonymous data for call-back 82 77 */ 83 - struct call_path { 84 - struct call_path *parent; 85 - struct symbol *sym; 86 - u64 ip; 87 - u64 db_id; 88 - bool in_kernel; 89 - struct rb_node rb_node; 90 - struct rb_root children; 78 + struct call_return_processor { 79 + struct call_path_root *cpr; 80 + int (*process)(struct call_return *cr, void *data); 81 + void *data; 91 82 }; 92 83 93 84 int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,