Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'perf/hw-branch-sampling' into perf/core

Merge reason: The 'perf record -b' hardware branch sampling feature is ready for upstream.

Signed-off-by: Ingo Molnar <mingo@elte.hu>

+2019 -245
+4
arch/alpha/kernel/perf_event.c
··· 685 685 { 686 686 int err; 687 687 688 + /* does not support taken branch sampling */ 689 + if (has_branch_stack(event)) 690 + return -EOPNOTSUPP; 691 + 688 692 switch (event->attr.type) { 689 693 case PERF_TYPE_RAW: 690 694 case PERF_TYPE_HARDWARE:
+4
arch/arm/kernel/perf_event.c
··· 539 539 int err = 0; 540 540 atomic_t *active_events = &armpmu->active_events; 541 541 542 + /* does not support taken branch sampling */ 543 + if (has_branch_stack(event)) 544 + return -EOPNOTSUPP; 545 + 542 546 if (armpmu->map_event(event) == -ENOENT) 543 547 return -ENOENT; 544 548
+4
arch/mips/kernel/perf_event_mipsxx.c
··· 606 606 { 607 607 int err = 0; 608 608 609 + /* does not support taken branch sampling */ 610 + if (has_branch_stack(event)) 611 + return -EOPNOTSUPP; 612 + 609 613 switch (event->attr.type) { 610 614 case PERF_TYPE_RAW: 611 615 case PERF_TYPE_HARDWARE:
+4
arch/powerpc/kernel/perf_event.c
··· 1084 1084 if (!ppmu) 1085 1085 return -ENOENT; 1086 1086 1087 + /* does not support taken branch sampling */ 1088 + if (has_branch_stack(event)) 1089 + return -EOPNOTSUPP; 1090 + 1087 1091 switch (event->attr.type) { 1088 1092 case PERF_TYPE_HARDWARE: 1089 1093 ev = event->attr.config;
+4
arch/sh/kernel/perf_event.c
··· 310 310 { 311 311 int err; 312 312 313 + /* does not support taken branch sampling */ 314 + if (has_branch_stack(event)) 315 + return -EOPNOTSUPP; 316 + 313 317 switch (event->attr.type) { 314 318 case PERF_TYPE_RAW: 315 319 case PERF_TYPE_HW_CACHE:
+4
arch/sparc/kernel/perf_event.c
··· 1105 1105 if (atomic_read(&nmi_active) < 0) 1106 1106 return -ENODEV; 1107 1107 1108 + /* does not support taken branch sampling */ 1109 + if (has_branch_stack(event)) 1110 + return -EOPNOTSUPP; 1111 + 1108 1112 switch (attr->type) { 1109 1113 case PERF_TYPE_HARDWARE: 1110 1114 if (attr->config >= sparc_pmu->max_events)
+7
arch/x86/include/asm/msr-index.h
··· 56 56 #define MSR_OFFCORE_RSP_0 0x000001a6 57 57 #define MSR_OFFCORE_RSP_1 0x000001a7 58 58 59 + #define MSR_LBR_SELECT 0x000001c8 60 + #define MSR_LBR_TOS 0x000001c9 61 + #define MSR_LBR_NHM_FROM 0x00000680 62 + #define MSR_LBR_NHM_TO 0x000006c0 63 + #define MSR_LBR_CORE_FROM 0x00000040 64 + #define MSR_LBR_CORE_TO 0x00000060 65 + 59 66 #define MSR_IA32_PEBS_ENABLE 0x000003f1 60 67 #define MSR_IA32_DS_AREA 0x00000600 61 68 #define MSR_IA32_PERF_CAPABILITIES 0x00000345
+78 -7
arch/x86/kernel/cpu/perf_event.c
··· 353 353 return 0; 354 354 } 355 355 356 + /* 357 + * check that branch_sample_type is compatible with 358 + * settings needed for precise_ip > 1 which implies 359 + * using the LBR to capture ALL taken branches at the 360 + * priv levels of the measurement 361 + */ 362 + static inline int precise_br_compat(struct perf_event *event) 363 + { 364 + u64 m = event->attr.branch_sample_type; 365 + u64 b = 0; 366 + 367 + /* must capture all branches */ 368 + if (!(m & PERF_SAMPLE_BRANCH_ANY)) 369 + return 0; 370 + 371 + m &= PERF_SAMPLE_BRANCH_KERNEL | PERF_SAMPLE_BRANCH_USER; 372 + 373 + if (!event->attr.exclude_user) 374 + b |= PERF_SAMPLE_BRANCH_USER; 375 + 376 + if (!event->attr.exclude_kernel) 377 + b |= PERF_SAMPLE_BRANCH_KERNEL; 378 + 379 + /* 380 + * ignore PERF_SAMPLE_BRANCH_HV, not supported on x86 381 + */ 382 + 383 + return m == b; 384 + } 385 + 356 386 int x86_pmu_hw_config(struct perf_event *event) 357 387 { 358 388 if (event->attr.precise_ip) { ··· 399 369 400 370 if (event->attr.precise_ip > precise) 401 371 return -EOPNOTSUPP; 372 + /* 373 + * check that PEBS LBR correction does not conflict with 374 + * whatever the user is asking with attr->branch_sample_type 375 + */ 376 + if (event->attr.precise_ip > 1) { 377 + u64 *br_type = &event->attr.branch_sample_type; 378 + 379 + if (has_branch_stack(event)) { 380 + if (!precise_br_compat(event)) 381 + return -EOPNOTSUPP; 382 + 383 + /* branch_sample_type is compatible */ 384 + 385 + } else { 386 + /* 387 + * user did not specify branch_sample_type 388 + * 389 + * For PEBS fixups, we capture all 390 + * the branches at the priv level of the 391 + * event. 
392 + */ 393 + *br_type = PERF_SAMPLE_BRANCH_ANY; 394 + 395 + if (!event->attr.exclude_user) 396 + *br_type |= PERF_SAMPLE_BRANCH_USER; 397 + 398 + if (!event->attr.exclude_kernel) 399 + *br_type |= PERF_SAMPLE_BRANCH_KERNEL; 400 + } 401 + } 402 402 } 403 403 404 404 /* ··· 485 425 486 426 /* mark unused */ 487 427 event->hw.extra_reg.idx = EXTRA_REG_NONE; 428 + 429 + /* mark not used */ 430 + event->hw.extra_reg.idx = EXTRA_REG_NONE; 431 + event->hw.branch_reg.idx = EXTRA_REG_NONE; 488 432 489 433 return x86_pmu.hw_config(event); 490 434 } ··· 1671 1607 NULL, 1672 1608 }; 1673 1609 1610 + static void x86_pmu_flush_branch_stack(void) 1611 + { 1612 + if (x86_pmu.flush_branch_stack) 1613 + x86_pmu.flush_branch_stack(); 1614 + } 1615 + 1674 1616 static struct pmu pmu = { 1675 - .pmu_enable = x86_pmu_enable, 1676 - .pmu_disable = x86_pmu_disable, 1617 + .pmu_enable = x86_pmu_enable, 1618 + .pmu_disable = x86_pmu_disable, 1677 1619 1678 1620 .attr_groups = x86_pmu_attr_groups, 1679 1621 1680 1622 .event_init = x86_pmu_event_init, 1681 1623 1682 - .add = x86_pmu_add, 1683 - .del = x86_pmu_del, 1684 - .start = x86_pmu_start, 1685 - .stop = x86_pmu_stop, 1686 - .read = x86_pmu_read, 1624 + .add = x86_pmu_add, 1625 + .del = x86_pmu_del, 1626 + .start = x86_pmu_start, 1627 + .stop = x86_pmu_stop, 1628 + .read = x86_pmu_read, 1687 1629 1688 1630 .start_txn = x86_pmu_start_txn, 1689 1631 .cancel_txn = x86_pmu_cancel_txn, 1690 1632 .commit_txn = x86_pmu_commit_txn, 1691 1633 1692 1634 .event_idx = x86_pmu_event_idx, 1635 + .flush_branch_stack = x86_pmu_flush_branch_stack, 1693 1636 }; 1694 1637 1695 1638 void perf_update_user_clock(struct perf_event_mmap_page *userpg, u64 now)
+19
arch/x86/kernel/cpu/perf_event.h
··· 33 33 34 34 EXTRA_REG_RSP_0 = 0, /* offcore_response_0 */ 35 35 EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */ 36 + EXTRA_REG_LBR = 2, /* lbr_select */ 36 37 37 38 EXTRA_REG_MAX /* number of entries needed */ 38 39 }; ··· 131 130 void *lbr_context; 132 131 struct perf_branch_stack lbr_stack; 133 132 struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES]; 133 + struct er_account *lbr_sel; 134 + u64 br_sel; 134 135 135 136 /* 136 137 * Intel host/guest exclude bits ··· 347 344 void (*cpu_starting)(int cpu); 348 345 void (*cpu_dying)(int cpu); 349 346 void (*cpu_dead)(int cpu); 347 + void (*flush_branch_stack)(void); 350 348 351 349 /* 352 350 * Intel Arch Perfmon v2+ ··· 369 365 */ 370 366 unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */ 371 367 int lbr_nr; /* hardware stack size */ 368 + u64 lbr_sel_mask; /* LBR_SELECT valid bits */ 369 + const int *lbr_sel_map; /* lbr_select mappings */ 372 370 373 371 /* 374 372 * Extra registers for events ··· 484 478 485 479 extern struct event_constraint unconstrained; 486 480 481 + static inline bool kernel_ip(unsigned long ip) 482 + { 483 + #ifdef CONFIG_X86_32 484 + return ip > PAGE_OFFSET; 485 + #else 486 + return (long)ip < 0; 487 + #endif 488 + } 489 + 487 490 #ifdef CONFIG_CPU_SUP_AMD 488 491 489 492 int amd_pmu_init(void); ··· 572 557 void intel_pmu_lbr_init_nhm(void); 573 558 574 559 void intel_pmu_lbr_init_atom(void); 560 + 561 + void intel_pmu_lbr_init_snb(void); 562 + 563 + int intel_pmu_setup_lbr_filter(struct perf_event *event); 575 564 576 565 int p4_pmu_init(void); 577 566
+3
arch/x86/kernel/cpu/perf_event_amd.c
··· 139 139 if (ret) 140 140 return ret; 141 141 142 + if (has_branch_stack(event)) 143 + return -EOPNOTSUPP; 144 + 142 145 if (event->attr.exclude_host && event->attr.exclude_guest) 143 146 /* 144 147 * When HO == GO == 1 the hardware treats that as GO == HO == 0
+92 -26
arch/x86/kernel/cpu/perf_event_intel.c
··· 728 728 }, 729 729 }; 730 730 731 + static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event) 732 + { 733 + /* user explicitly requested branch sampling */ 734 + if (has_branch_stack(event)) 735 + return true; 736 + 737 + /* implicit branch sampling to correct PEBS skid */ 738 + if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1) 739 + return true; 740 + 741 + return false; 742 + } 743 + 731 744 static void intel_pmu_disable_all(void) 732 745 { 733 746 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); ··· 895 882 cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx); 896 883 cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx); 897 884 885 + /* 886 + * must disable before any actual event 887 + * because any event may be combined with LBR 888 + */ 889 + if (intel_pmu_needs_lbr_smpl(event)) 890 + intel_pmu_lbr_disable(event); 891 + 898 892 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { 899 893 intel_pmu_disable_fixed(hwc); 900 894 return; ··· 956 936 intel_pmu_enable_bts(hwc->config); 957 937 return; 958 938 } 939 + /* 940 + * must enabled before any actual event 941 + * because any event may be combined with LBR 942 + */ 943 + if (intel_pmu_needs_lbr_smpl(event)) 944 + intel_pmu_lbr_enable(event); 959 945 960 946 if (event->attr.exclude_host) 961 947 cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx); ··· 1084 1058 1085 1059 data.period = event->hw.last_period; 1086 1060 1061 + if (has_branch_stack(event)) 1062 + data.br_stack = &cpuc->lbr_stack; 1063 + 1087 1064 if (perf_event_overflow(event, &data, regs)) 1088 1065 x86_pmu_stop(event, 0); 1089 1066 } ··· 1153 1124 */ 1154 1125 static struct event_constraint * 1155 1126 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc, 1156 - struct perf_event *event) 1127 + struct perf_event *event, 1128 + struct hw_perf_event_extra *reg) 1157 1129 { 1158 1130 struct event_constraint *c = &emptyconstraint; 1159 - struct hw_perf_event_extra *reg = 
&event->hw.extra_reg; 1160 1131 struct er_account *era; 1161 1132 unsigned long flags; 1162 1133 int orig_idx = reg->idx; 1163 1134 1164 1135 /* already allocated shared msr */ 1165 1136 if (reg->alloc) 1166 - return &unconstrained; 1137 + return NULL; /* call x86_get_event_constraint() */ 1167 1138 1168 1139 again: 1169 1140 era = &cpuc->shared_regs->regs[reg->idx]; ··· 1186 1157 reg->alloc = 1; 1187 1158 1188 1159 /* 1189 - * All events using extra_reg are unconstrained. 1190 - * Avoids calling x86_get_event_constraints() 1191 - * 1192 - * Must revisit if extra_reg controlling events 1193 - * ever have constraints. Worst case we go through 1194 - * the regular event constraint table. 1160 + * need to call x86_get_event_constraint() 1161 + * to check if associated event has constraints 1195 1162 */ 1196 - c = &unconstrained; 1163 + c = NULL; 1197 1164 } else if (intel_try_alt_er(event, orig_idx)) { 1198 1165 raw_spin_unlock_irqrestore(&era->lock, flags); 1199 1166 goto again; ··· 1226 1201 intel_shared_regs_constraints(struct cpu_hw_events *cpuc, 1227 1202 struct perf_event *event) 1228 1203 { 1229 - struct event_constraint *c = NULL; 1204 + struct event_constraint *c = NULL, *d; 1205 + struct hw_perf_event_extra *xreg, *breg; 1230 1206 1231 - if (event->hw.extra_reg.idx != EXTRA_REG_NONE) 1232 - c = __intel_shared_reg_get_constraints(cpuc, event); 1233 - 1207 + xreg = &event->hw.extra_reg; 1208 + if (xreg->idx != EXTRA_REG_NONE) { 1209 + c = __intel_shared_reg_get_constraints(cpuc, event, xreg); 1210 + if (c == &emptyconstraint) 1211 + return c; 1212 + } 1213 + breg = &event->hw.branch_reg; 1214 + if (breg->idx != EXTRA_REG_NONE) { 1215 + d = __intel_shared_reg_get_constraints(cpuc, event, breg); 1216 + if (d == &emptyconstraint) { 1217 + __intel_shared_reg_put_constraints(cpuc, xreg); 1218 + c = d; 1219 + } 1220 + } 1234 1221 return c; 1235 1222 } 1236 1223 ··· 1290 1253 reg = &event->hw.extra_reg; 1291 1254 if (reg->idx != EXTRA_REG_NONE) 1292 1255 
__intel_shared_reg_put_constraints(cpuc, reg); 1256 + 1257 + reg = &event->hw.branch_reg; 1258 + if (reg->idx != EXTRA_REG_NONE) 1259 + __intel_shared_reg_put_constraints(cpuc, reg); 1293 1260 } 1294 1261 1295 1262 static void intel_put_event_constraints(struct cpu_hw_events *cpuc, ··· 1334 1293 1335 1294 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); 1336 1295 event->hw.config = alt_config; 1296 + } 1297 + 1298 + if (intel_pmu_needs_lbr_smpl(event)) { 1299 + ret = intel_pmu_setup_lbr_filter(event); 1300 + if (ret) 1301 + return ret; 1337 1302 } 1338 1303 1339 1304 if (event->attr.type != PERF_TYPE_RAW) ··· 1480 1433 { 1481 1434 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); 1482 1435 1483 - if (!x86_pmu.extra_regs) 1436 + if (!(x86_pmu.extra_regs || x86_pmu.lbr_sel_map)) 1484 1437 return NOTIFY_OK; 1485 1438 1486 1439 cpuc->shared_regs = allocate_shared_regs(cpu); ··· 1502 1455 */ 1503 1456 intel_pmu_lbr_reset(); 1504 1457 1505 - if (!cpuc->shared_regs || (x86_pmu.er_flags & ERF_NO_HT_SHARING)) 1458 + cpuc->lbr_sel = NULL; 1459 + 1460 + if (!cpuc->shared_regs) 1506 1461 return; 1507 1462 1508 - for_each_cpu(i, topology_thread_cpumask(cpu)) { 1509 - struct intel_shared_regs *pc; 1463 + if (!(x86_pmu.er_flags & ERF_NO_HT_SHARING)) { 1464 + for_each_cpu(i, topology_thread_cpumask(cpu)) { 1465 + struct intel_shared_regs *pc; 1510 1466 1511 - pc = per_cpu(cpu_hw_events, i).shared_regs; 1512 - if (pc && pc->core_id == core_id) { 1513 - cpuc->kfree_on_online = cpuc->shared_regs; 1514 - cpuc->shared_regs = pc; 1515 - break; 1467 + pc = per_cpu(cpu_hw_events, i).shared_regs; 1468 + if (pc && pc->core_id == core_id) { 1469 + cpuc->kfree_on_online = cpuc->shared_regs; 1470 + cpuc->shared_regs = pc; 1471 + break; 1472 + } 1516 1473 } 1474 + cpuc->shared_regs->core_id = core_id; 1475 + cpuc->shared_regs->refcnt++; 1517 1476 } 1518 1477 1519 - cpuc->shared_regs->core_id = core_id; 1520 - cpuc->shared_regs->refcnt++; 1478 + if (x86_pmu.lbr_sel_map) 1479 + 
cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR]; 1521 1480 } 1522 1481 1523 1482 static void intel_pmu_cpu_dying(int cpu) ··· 1539 1486 } 1540 1487 1541 1488 fini_debug_store_on_cpu(cpu); 1489 + } 1490 + 1491 + static void intel_pmu_flush_branch_stack(void) 1492 + { 1493 + /* 1494 + * Intel LBR does not tag entries with the 1495 + * PID of the current task, then we need to 1496 + * flush it on ctxsw 1497 + * For now, we simply reset it 1498 + */ 1499 + if (x86_pmu.lbr_nr) 1500 + intel_pmu_lbr_reset(); 1542 1501 } 1543 1502 1544 1503 static __initconst const struct x86_pmu intel_pmu = { ··· 1580 1515 .cpu_starting = intel_pmu_cpu_starting, 1581 1516 .cpu_dying = intel_pmu_cpu_dying, 1582 1517 .guest_get_msrs = intel_guest_get_msrs, 1518 + .flush_branch_stack = intel_pmu_flush_branch_stack, 1583 1519 }; 1584 1520 1585 1521 static __init void intel_clovertown_quirk(void) ··· 1811 1745 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, 1812 1746 sizeof(hw_cache_event_ids)); 1813 1747 1814 - intel_pmu_lbr_init_nhm(); 1748 + intel_pmu_lbr_init_snb(); 1815 1749 1816 1750 x86_pmu.event_constraints = intel_snb_event_constraints; 1817 1751 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
+5 -17
arch/x86/kernel/cpu/perf_event_intel_ds.c
··· 3 3 #include <linux/slab.h> 4 4 5 5 #include <asm/perf_event.h> 6 + #include <asm/insn.h> 6 7 7 8 #include "perf_event.h" 8 9 ··· 440 439 hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT; 441 440 442 441 cpuc->pebs_enabled |= 1ULL << hwc->idx; 443 - 444 - if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1) 445 - intel_pmu_lbr_enable(event); 446 442 } 447 443 448 444 void intel_pmu_pebs_disable(struct perf_event *event) ··· 452 454 wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); 453 455 454 456 hwc->config |= ARCH_PERFMON_EVENTSEL_INT; 455 - 456 - if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1) 457 - intel_pmu_lbr_disable(event); 458 457 } 459 458 460 459 void intel_pmu_pebs_enable_all(void) ··· 468 473 469 474 if (cpuc->pebs_enabled) 470 475 wrmsrl(MSR_IA32_PEBS_ENABLE, 0); 471 - } 472 - 473 - #include <asm/insn.h> 474 - 475 - static inline bool kernel_ip(unsigned long ip) 476 - { 477 - #ifdef CONFIG_X86_32 478 - return ip > PAGE_OFFSET; 479 - #else 480 - return (long)ip < 0; 481 - #endif 482 476 } 483 477 484 478 static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) ··· 556 572 * both formats and we don't use the other fields in this 557 573 * routine. 558 574 */ 575 + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 559 576 struct pebs_record_core *pebs = __pebs; 560 577 struct perf_sample_data data; 561 578 struct pt_regs regs; ··· 586 601 regs.flags |= PERF_EFLAGS_EXACT; 587 602 else 588 603 regs.flags &= ~PERF_EFLAGS_EXACT; 604 + 605 + if (has_branch_stack(event)) 606 + data.br_stack = &cpuc->lbr_stack; 589 607 590 608 if (perf_event_overflow(event, &data, &regs)) 591 609 x86_pmu_stop(event, 0);
+505 -21
arch/x86/kernel/cpu/perf_event_intel_lbr.c
··· 3 3 4 4 #include <asm/perf_event.h> 5 5 #include <asm/msr.h> 6 + #include <asm/insn.h> 6 7 7 8 #include "perf_event.h" 8 9 ··· 15 14 }; 16 15 17 16 /* 17 + * Intel LBR_SELECT bits 18 + * Intel Vol3a, April 2011, Section 16.7 Table 16-10 19 + * 20 + * Hardware branch filter (not available on all CPUs) 21 + */ 22 + #define LBR_KERNEL_BIT 0 /* do not capture at ring0 */ 23 + #define LBR_USER_BIT 1 /* do not capture at ring > 0 */ 24 + #define LBR_JCC_BIT 2 /* do not capture conditional branches */ 25 + #define LBR_REL_CALL_BIT 3 /* do not capture relative calls */ 26 + #define LBR_IND_CALL_BIT 4 /* do not capture indirect calls */ 27 + #define LBR_RETURN_BIT 5 /* do not capture near returns */ 28 + #define LBR_IND_JMP_BIT 6 /* do not capture indirect jumps */ 29 + #define LBR_REL_JMP_BIT 7 /* do not capture relative jumps */ 30 + #define LBR_FAR_BIT 8 /* do not capture far branches */ 31 + 32 + #define LBR_KERNEL (1 << LBR_KERNEL_BIT) 33 + #define LBR_USER (1 << LBR_USER_BIT) 34 + #define LBR_JCC (1 << LBR_JCC_BIT) 35 + #define LBR_REL_CALL (1 << LBR_REL_CALL_BIT) 36 + #define LBR_IND_CALL (1 << LBR_IND_CALL_BIT) 37 + #define LBR_RETURN (1 << LBR_RETURN_BIT) 38 + #define LBR_REL_JMP (1 << LBR_REL_JMP_BIT) 39 + #define LBR_IND_JMP (1 << LBR_IND_JMP_BIT) 40 + #define LBR_FAR (1 << LBR_FAR_BIT) 41 + 42 + #define LBR_PLM (LBR_KERNEL | LBR_USER) 43 + 44 + #define LBR_SEL_MASK 0x1ff /* valid bits in LBR_SELECT */ 45 + #define LBR_NOT_SUPP -1 /* LBR filter not supported */ 46 + #define LBR_IGN 0 /* ignored */ 47 + 48 + #define LBR_ANY \ 49 + (LBR_JCC |\ 50 + LBR_REL_CALL |\ 51 + LBR_IND_CALL |\ 52 + LBR_RETURN |\ 53 + LBR_REL_JMP |\ 54 + LBR_IND_JMP |\ 55 + LBR_FAR) 56 + 57 + #define LBR_FROM_FLAG_MISPRED (1ULL << 63) 58 + 59 + #define for_each_branch_sample_type(x) \ 60 + for ((x) = PERF_SAMPLE_BRANCH_USER; \ 61 + (x) < PERF_SAMPLE_BRANCH_MAX; (x) <<= 1) 62 + 63 + /* 64 + * x86control flow change classification 65 + * x86control flow changes include branches, 
interrupts, traps, faults 66 + */ 67 + enum { 68 + X86_BR_NONE = 0, /* unknown */ 69 + 70 + X86_BR_USER = 1 << 0, /* branch target is user */ 71 + X86_BR_KERNEL = 1 << 1, /* branch target is kernel */ 72 + 73 + X86_BR_CALL = 1 << 2, /* call */ 74 + X86_BR_RET = 1 << 3, /* return */ 75 + X86_BR_SYSCALL = 1 << 4, /* syscall */ 76 + X86_BR_SYSRET = 1 << 5, /* syscall return */ 77 + X86_BR_INT = 1 << 6, /* sw interrupt */ 78 + X86_BR_IRET = 1 << 7, /* return from interrupt */ 79 + X86_BR_JCC = 1 << 8, /* conditional */ 80 + X86_BR_JMP = 1 << 9, /* jump */ 81 + X86_BR_IRQ = 1 << 10,/* hw interrupt or trap or fault */ 82 + X86_BR_IND_CALL = 1 << 11,/* indirect calls */ 83 + }; 84 + 85 + #define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL) 86 + 87 + #define X86_BR_ANY \ 88 + (X86_BR_CALL |\ 89 + X86_BR_RET |\ 90 + X86_BR_SYSCALL |\ 91 + X86_BR_SYSRET |\ 92 + X86_BR_INT |\ 93 + X86_BR_IRET |\ 94 + X86_BR_JCC |\ 95 + X86_BR_JMP |\ 96 + X86_BR_IRQ |\ 97 + X86_BR_IND_CALL) 98 + 99 + #define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY) 100 + 101 + #define X86_BR_ANY_CALL \ 102 + (X86_BR_CALL |\ 103 + X86_BR_IND_CALL |\ 104 + X86_BR_SYSCALL |\ 105 + X86_BR_IRQ |\ 106 + X86_BR_INT) 107 + 108 + static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc); 109 + 110 + /* 18 111 * We only support LBR implementations that have FREEZE_LBRS_ON_PMI 19 112 * otherwise it becomes near impossible to get a reliable stack. 20 113 */ ··· 116 21 static void __intel_pmu_lbr_enable(void) 117 22 { 118 23 u64 debugctl; 24 + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 25 + 26 + if (cpuc->lbr_sel) 27 + wrmsrl(MSR_LBR_SELECT, cpuc->lbr_sel->config); 119 28 120 29 rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); 121 30 debugctl |= (DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI); ··· 175 76 * Reset the LBR stack if we changed task context to 176 77 * avoid data leaks. 
177 78 */ 178 - 179 79 if (event->ctx->task && cpuc->lbr_context != event->ctx) { 180 80 intel_pmu_lbr_reset(); 181 81 cpuc->lbr_context = event->ctx; 182 82 } 83 + cpuc->br_sel = event->hw.branch_reg.reg; 183 84 184 85 cpuc->lbr_users++; 185 86 } ··· 194 95 cpuc->lbr_users--; 195 96 WARN_ON_ONCE(cpuc->lbr_users < 0); 196 97 197 - if (cpuc->enabled && !cpuc->lbr_users) 98 + if (cpuc->enabled && !cpuc->lbr_users) { 198 99 __intel_pmu_lbr_disable(); 100 + /* avoid stale pointer */ 101 + cpuc->lbr_context = NULL; 102 + } 199 103 } 200 104 201 105 void intel_pmu_lbr_enable_all(void) ··· 217 115 __intel_pmu_lbr_disable(); 218 116 } 219 117 118 + /* 119 + * TOS = most recently recorded branch 120 + */ 220 121 static inline u64 intel_pmu_lbr_tos(void) 221 122 { 222 123 u64 tos; ··· 247 142 248 143 rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr); 249 144 250 - cpuc->lbr_entries[i].from = msr_lastbranch.from; 251 - cpuc->lbr_entries[i].to = msr_lastbranch.to; 252 - cpuc->lbr_entries[i].flags = 0; 145 + cpuc->lbr_entries[i].from = msr_lastbranch.from; 146 + cpuc->lbr_entries[i].to = msr_lastbranch.to; 147 + cpuc->lbr_entries[i].mispred = 0; 148 + cpuc->lbr_entries[i].predicted = 0; 149 + cpuc->lbr_entries[i].reserved = 0; 253 150 } 254 151 cpuc->lbr_stack.nr = i; 255 152 } 256 - 257 - #define LBR_FROM_FLAG_MISPRED (1ULL << 63) 258 153 259 154 /* 260 155 * Due to lack of segmentation in Linux the effective address (offset) ··· 270 165 271 166 for (i = 0; i < x86_pmu.lbr_nr; i++) { 272 167 unsigned long lbr_idx = (tos - i) & mask; 273 - u64 from, to, flags = 0; 168 + u64 from, to, mis = 0, pred = 0; 274 169 275 170 rdmsrl(x86_pmu.lbr_from + lbr_idx, from); 276 171 rdmsrl(x86_pmu.lbr_to + lbr_idx, to); 277 172 278 173 if (lbr_format == LBR_FORMAT_EIP_FLAGS) { 279 - flags = !!(from & LBR_FROM_FLAG_MISPRED); 174 + mis = !!(from & LBR_FROM_FLAG_MISPRED); 175 + pred = !mis; 280 176 from = (u64)((((s64)from) << 1) >> 1); 281 177 } 282 178 283 - cpuc->lbr_entries[i].from = 
from; 284 - cpuc->lbr_entries[i].to = to; 285 - cpuc->lbr_entries[i].flags = flags; 179 + cpuc->lbr_entries[i].from = from; 180 + cpuc->lbr_entries[i].to = to; 181 + cpuc->lbr_entries[i].mispred = mis; 182 + cpuc->lbr_entries[i].predicted = pred; 183 + cpuc->lbr_entries[i].reserved = 0; 286 184 } 287 185 cpuc->lbr_stack.nr = i; 288 186 } ··· 301 193 intel_pmu_lbr_read_32(cpuc); 302 194 else 303 195 intel_pmu_lbr_read_64(cpuc); 196 + 197 + intel_pmu_lbr_filter(cpuc); 304 198 } 305 199 200 + /* 201 + * SW filter is used: 202 + * - in case there is no HW filter 203 + * - in case the HW filter has errata or limitations 204 + */ 205 + static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event) 206 + { 207 + u64 br_type = event->attr.branch_sample_type; 208 + int mask = 0; 209 + 210 + if (br_type & PERF_SAMPLE_BRANCH_USER) 211 + mask |= X86_BR_USER; 212 + 213 + if (br_type & PERF_SAMPLE_BRANCH_KERNEL) 214 + mask |= X86_BR_KERNEL; 215 + 216 + /* we ignore BRANCH_HV here */ 217 + 218 + if (br_type & PERF_SAMPLE_BRANCH_ANY) 219 + mask |= X86_BR_ANY; 220 + 221 + if (br_type & PERF_SAMPLE_BRANCH_ANY_CALL) 222 + mask |= X86_BR_ANY_CALL; 223 + 224 + if (br_type & PERF_SAMPLE_BRANCH_ANY_RETURN) 225 + mask |= X86_BR_RET | X86_BR_IRET | X86_BR_SYSRET; 226 + 227 + if (br_type & PERF_SAMPLE_BRANCH_IND_CALL) 228 + mask |= X86_BR_IND_CALL; 229 + /* 230 + * stash actual user request into reg, it may 231 + * be used by fixup code for some CPU 232 + */ 233 + event->hw.branch_reg.reg = mask; 234 + } 235 + 236 + /* 237 + * setup the HW LBR filter 238 + * Used only when available, may not be enough to disambiguate 239 + * all branches, may need the help of the SW filter 240 + */ 241 + static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event) 242 + { 243 + struct hw_perf_event_extra *reg; 244 + u64 br_type = event->attr.branch_sample_type; 245 + u64 mask = 0, m; 246 + u64 v; 247 + 248 + for_each_branch_sample_type(m) { 249 + if (!(br_type & m)) 250 + continue; 251 + 252 + v 
= x86_pmu.lbr_sel_map[m]; 253 + if (v == LBR_NOT_SUPP) 254 + return -EOPNOTSUPP; 255 + 256 + if (v != LBR_IGN) 257 + mask |= v; 258 + } 259 + reg = &event->hw.branch_reg; 260 + reg->idx = EXTRA_REG_LBR; 261 + 262 + /* LBR_SELECT operates in suppress mode so invert mask */ 263 + reg->config = ~mask & x86_pmu.lbr_sel_mask; 264 + 265 + return 0; 266 + } 267 + 268 + int intel_pmu_setup_lbr_filter(struct perf_event *event) 269 + { 270 + int ret = 0; 271 + 272 + /* 273 + * no LBR on this PMU 274 + */ 275 + if (!x86_pmu.lbr_nr) 276 + return -EOPNOTSUPP; 277 + 278 + /* 279 + * setup SW LBR filter 280 + */ 281 + intel_pmu_setup_sw_lbr_filter(event); 282 + 283 + /* 284 + * setup HW LBR filter, if any 285 + */ 286 + if (x86_pmu.lbr_sel_map) 287 + ret = intel_pmu_setup_hw_lbr_filter(event); 288 + 289 + return ret; 290 + } 291 + 292 + /* 293 + * return the type of control flow change at address "from" 294 + * intruction is not necessarily a branch (in case of interrupt). 295 + * 296 + * The branch type returned also includes the priv level of the 297 + * target of the control flow change (X86_BR_USER, X86_BR_KERNEL). 298 + * 299 + * If a branch type is unknown OR the instruction cannot be 300 + * decoded (e.g., text page not present), then X86_BR_NONE is 301 + * returned. 302 + */ 303 + static int branch_type(unsigned long from, unsigned long to) 304 + { 305 + struct insn insn; 306 + void *addr; 307 + int bytes, size = MAX_INSN_SIZE; 308 + int ret = X86_BR_NONE; 309 + int ext, to_plm, from_plm; 310 + u8 buf[MAX_INSN_SIZE]; 311 + int is64 = 0; 312 + 313 + to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER; 314 + from_plm = kernel_ip(from) ? 
X86_BR_KERNEL : X86_BR_USER; 315 + 316 + /* 317 + * maybe zero if lbr did not fill up after a reset by the time 318 + * we get a PMU interrupt 319 + */ 320 + if (from == 0 || to == 0) 321 + return X86_BR_NONE; 322 + 323 + if (from_plm == X86_BR_USER) { 324 + /* 325 + * can happen if measuring at the user level only 326 + * and we interrupt in a kernel thread, e.g., idle. 327 + */ 328 + if (!current->mm) 329 + return X86_BR_NONE; 330 + 331 + /* may fail if text not present */ 332 + bytes = copy_from_user_nmi(buf, (void __user *)from, size); 333 + if (bytes != size) 334 + return X86_BR_NONE; 335 + 336 + addr = buf; 337 + } else 338 + addr = (void *)from; 339 + 340 + /* 341 + * decoder needs to know the ABI especially 342 + * on 64-bit systems running 32-bit apps 343 + */ 344 + #ifdef CONFIG_X86_64 345 + is64 = kernel_ip((unsigned long)addr) || !test_thread_flag(TIF_IA32); 346 + #endif 347 + insn_init(&insn, addr, is64); 348 + insn_get_opcode(&insn); 349 + 350 + switch (insn.opcode.bytes[0]) { 351 + case 0xf: 352 + switch (insn.opcode.bytes[1]) { 353 + case 0x05: /* syscall */ 354 + case 0x34: /* sysenter */ 355 + ret = X86_BR_SYSCALL; 356 + break; 357 + case 0x07: /* sysret */ 358 + case 0x35: /* sysexit */ 359 + ret = X86_BR_SYSRET; 360 + break; 361 + case 0x80 ... 0x8f: /* conditional */ 362 + ret = X86_BR_JCC; 363 + break; 364 + default: 365 + ret = X86_BR_NONE; 366 + } 367 + break; 368 + case 0x70 ... 0x7f: /* conditional */ 369 + ret = X86_BR_JCC; 370 + break; 371 + case 0xc2: /* near ret */ 372 + case 0xc3: /* near ret */ 373 + case 0xca: /* far ret */ 374 + case 0xcb: /* far ret */ 375 + ret = X86_BR_RET; 376 + break; 377 + case 0xcf: /* iret */ 378 + ret = X86_BR_IRET; 379 + break; 380 + case 0xcc ... 0xce: /* int */ 381 + ret = X86_BR_INT; 382 + break; 383 + case 0xe8: /* call near rel */ 384 + case 0x9a: /* call far absolute */ 385 + ret = X86_BR_CALL; 386 + break; 387 + case 0xe0 ... 
0xe3: /* loop jmp */ 388 + ret = X86_BR_JCC; 389 + break; 390 + case 0xe9 ... 0xeb: /* jmp */ 391 + ret = X86_BR_JMP; 392 + break; 393 + case 0xff: /* call near absolute, call far absolute ind */ 394 + insn_get_modrm(&insn); 395 + ext = (insn.modrm.bytes[0] >> 3) & 0x7; 396 + switch (ext) { 397 + case 2: /* near ind call */ 398 + case 3: /* far ind call */ 399 + ret = X86_BR_IND_CALL; 400 + break; 401 + case 4: 402 + case 5: 403 + ret = X86_BR_JMP; 404 + break; 405 + } 406 + break; 407 + default: 408 + ret = X86_BR_NONE; 409 + } 410 + /* 411 + * interrupts, traps, faults (and thus ring transition) may 412 + * occur on any instructions. Thus, to classify them correctly, 413 + * we need to first look at the from and to priv levels. If they 414 + * are different and to is in the kernel, then it indicates 415 + * a ring transition. If the from instruction is not a ring 416 + * transition instr (syscall, systenter, int), then it means 417 + * it was a irq, trap or fault. 418 + * 419 + * we have no way of detecting kernel to kernel faults. 420 + */ 421 + if (from_plm == X86_BR_USER && to_plm == X86_BR_KERNEL 422 + && ret != X86_BR_SYSCALL && ret != X86_BR_INT) 423 + ret = X86_BR_IRQ; 424 + 425 + /* 426 + * branch priv level determined by target as 427 + * is done by HW when LBR_SELECT is implemented 428 + */ 429 + if (ret != X86_BR_NONE) 430 + ret |= to_plm; 431 + 432 + return ret; 433 + } 434 + 435 + /* 436 + * implement actual branch filter based on user demand. 437 + * Hardware may not exactly satisfy that request, thus 438 + * we need to inspect opcodes. Mismatched branches are 439 + * discarded. Therefore, the number of branches returned 440 + * in PERF_SAMPLE_BRANCH_STACK sample may vary. 
441 + */ 442 + static void 443 + intel_pmu_lbr_filter(struct cpu_hw_events *cpuc) 444 + { 445 + u64 from, to; 446 + int br_sel = cpuc->br_sel; 447 + int i, j, type; 448 + bool compress = false; 449 + 450 + /* if sampling all branches, then nothing to filter */ 451 + if ((br_sel & X86_BR_ALL) == X86_BR_ALL) 452 + return; 453 + 454 + for (i = 0; i < cpuc->lbr_stack.nr; i++) { 455 + 456 + from = cpuc->lbr_entries[i].from; 457 + to = cpuc->lbr_entries[i].to; 458 + 459 + type = branch_type(from, to); 460 + 461 + /* if type does not correspond, then discard */ 462 + if (type == X86_BR_NONE || (br_sel & type) != type) { 463 + cpuc->lbr_entries[i].from = 0; 464 + compress = true; 465 + } 466 + } 467 + 468 + if (!compress) 469 + return; 470 + 471 + /* remove all entries with from=0 */ 472 + for (i = 0; i < cpuc->lbr_stack.nr; ) { 473 + if (!cpuc->lbr_entries[i].from) { 474 + j = i; 475 + while (++j < cpuc->lbr_stack.nr) 476 + cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j]; 477 + cpuc->lbr_stack.nr--; 478 + if (!cpuc->lbr_entries[i].from) 479 + continue; 480 + } 481 + i++; 482 + } 483 + } 484 + 485 + /* 486 + * Map interface branch filters onto LBR filters 487 + */ 488 + static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = { 489 + [PERF_SAMPLE_BRANCH_ANY] = LBR_ANY, 490 + [PERF_SAMPLE_BRANCH_USER] = LBR_USER, 491 + [PERF_SAMPLE_BRANCH_KERNEL] = LBR_KERNEL, 492 + [PERF_SAMPLE_BRANCH_HV] = LBR_IGN, 493 + [PERF_SAMPLE_BRANCH_ANY_RETURN] = LBR_RETURN | LBR_REL_JMP 494 + | LBR_IND_JMP | LBR_FAR, 495 + /* 496 + * NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches 497 + */ 498 + [PERF_SAMPLE_BRANCH_ANY_CALL] = 499 + LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR, 500 + /* 501 + * NHM/WSM erratum: must include IND_JMP to capture IND_CALL 502 + */ 503 + [PERF_SAMPLE_BRANCH_IND_CALL] = LBR_IND_CALL | LBR_IND_JMP, 504 + }; 505 + 506 + static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = { 507 + [PERF_SAMPLE_BRANCH_ANY] = LBR_ANY, 508 + 
[PERF_SAMPLE_BRANCH_USER] = LBR_USER, 509 + [PERF_SAMPLE_BRANCH_KERNEL] = LBR_KERNEL, 510 + [PERF_SAMPLE_BRANCH_HV] = LBR_IGN, 511 + [PERF_SAMPLE_BRANCH_ANY_RETURN] = LBR_RETURN | LBR_FAR, 512 + [PERF_SAMPLE_BRANCH_ANY_CALL] = LBR_REL_CALL | LBR_IND_CALL 513 + | LBR_FAR, 514 + [PERF_SAMPLE_BRANCH_IND_CALL] = LBR_IND_CALL, 515 + }; 516 + 517 + /* core */ 306 518 void intel_pmu_lbr_init_core(void) 307 519 { 308 520 x86_pmu.lbr_nr = 4; 309 - x86_pmu.lbr_tos = 0x01c9; 310 - x86_pmu.lbr_from = 0x40; 311 - x86_pmu.lbr_to = 0x60; 521 + x86_pmu.lbr_tos = MSR_LBR_TOS; 522 + x86_pmu.lbr_from = MSR_LBR_CORE_FROM; 523 + x86_pmu.lbr_to = MSR_LBR_CORE_TO; 524 + 525 + /* 526 + * SW branch filter usage: 527 + * - compensate for lack of HW filter 528 + */ 529 + pr_cont("4-deep LBR, "); 312 530 } 313 531 532 + /* nehalem/westmere */ 314 533 void intel_pmu_lbr_init_nhm(void) 315 534 { 316 535 x86_pmu.lbr_nr = 16; 317 - x86_pmu.lbr_tos = 0x01c9; 318 - x86_pmu.lbr_from = 0x680; 319 - x86_pmu.lbr_to = 0x6c0; 536 + x86_pmu.lbr_tos = MSR_LBR_TOS; 537 + x86_pmu.lbr_from = MSR_LBR_NHM_FROM; 538 + x86_pmu.lbr_to = MSR_LBR_NHM_TO; 539 + 540 + x86_pmu.lbr_sel_mask = LBR_SEL_MASK; 541 + x86_pmu.lbr_sel_map = nhm_lbr_sel_map; 542 + 543 + /* 544 + * SW branch filter usage: 545 + * - workaround LBR_SEL errata (see above) 546 + * - support syscall, sysret capture. 547 + * That requires LBR_FAR but that means far 548 + * jmp need to be filtered out 549 + */ 550 + pr_cont("16-deep LBR, "); 320 551 } 321 552 553 + /* sandy bridge */ 554 + void intel_pmu_lbr_init_snb(void) 555 + { 556 + x86_pmu.lbr_nr = 16; 557 + x86_pmu.lbr_tos = MSR_LBR_TOS; 558 + x86_pmu.lbr_from = MSR_LBR_NHM_FROM; 559 + x86_pmu.lbr_to = MSR_LBR_NHM_TO; 560 + 561 + x86_pmu.lbr_sel_mask = LBR_SEL_MASK; 562 + x86_pmu.lbr_sel_map = snb_lbr_sel_map; 563 + 564 + /* 565 + * SW branch filter usage: 566 + * - support syscall, sysret capture. 
567 + * That requires LBR_FAR but that means far 568 + * jmp need to be filtered out 569 + */ 570 + pr_cont("16-deep LBR, "); 571 + } 572 + 573 + /* atom */ 322 574 void intel_pmu_lbr_init_atom(void) 323 575 { 576 + /* 577 + * only models starting at stepping 10 seem 578 + * to have an operational LBR which can freeze 579 + * on PMU interrupt 580 + */ 581 + if (boot_cpu_data.x86_mask < 10) { 582 + pr_cont("LBR disabled due to erratum"); 583 + return; 584 + } 585 + 324 586 x86_pmu.lbr_nr = 8; 325 - x86_pmu.lbr_tos = 0x01c9; 326 - x86_pmu.lbr_from = 0x40; 327 - x86_pmu.lbr_to = 0x60; 587 + x86_pmu.lbr_tos = MSR_LBR_TOS; 588 + x86_pmu.lbr_from = MSR_LBR_CORE_FROM; 589 + x86_pmu.lbr_to = MSR_LBR_CORE_TO; 590 + 591 + /* 592 + * SW branch filter usage: 593 + * - compensate for lack of HW filter 594 + */ 595 + pr_cont("8-deep LBR, "); 328 596 }
+77 -5
include/linux/perf_event.h
··· 129 129 PERF_SAMPLE_PERIOD = 1U << 8, 130 130 PERF_SAMPLE_STREAM_ID = 1U << 9, 131 131 PERF_SAMPLE_RAW = 1U << 10, 132 + PERF_SAMPLE_BRANCH_STACK = 1U << 11, 132 133 133 - PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */ 134 + PERF_SAMPLE_MAX = 1U << 12, /* non-ABI */ 134 135 }; 136 + 137 + /* 138 + * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set 139 + * 140 + * If the user does not pass priv level information via branch_sample_type, 141 + * the kernel uses the event's priv level. Branch and event priv levels do 142 + * not have to match. Branch priv level is checked for permissions. 143 + * 144 + * The branch types can be combined, however BRANCH_ANY covers all types 145 + * of branches and therefore it supersedes all the other types. 146 + */ 147 + enum perf_branch_sample_type { 148 + PERF_SAMPLE_BRANCH_USER = 1U << 0, /* user branches */ 149 + PERF_SAMPLE_BRANCH_KERNEL = 1U << 1, /* kernel branches */ 150 + PERF_SAMPLE_BRANCH_HV = 1U << 2, /* hypervisor branches */ 151 + 152 + PERF_SAMPLE_BRANCH_ANY = 1U << 3, /* any branch types */ 153 + PERF_SAMPLE_BRANCH_ANY_CALL = 1U << 4, /* any call branch */ 154 + PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << 5, /* any return branch */ 155 + PERF_SAMPLE_BRANCH_IND_CALL = 1U << 6, /* indirect calls */ 156 + 157 + PERF_SAMPLE_BRANCH_MAX = 1U << 7, /* non-ABI */ 158 + }; 159 + 160 + #define PERF_SAMPLE_BRANCH_PLM_ALL \ 161 + (PERF_SAMPLE_BRANCH_USER|\ 162 + PERF_SAMPLE_BRANCH_KERNEL|\ 163 + PERF_SAMPLE_BRANCH_HV) 135 164 136 165 /* 137 166 * The format of the data returned by read() on a perf event fd, ··· 192 163 }; 193 164 194 165 #define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */ 166 + #define PERF_ATTR_SIZE_VER1 72 /* add: config2 */ 167 + #define PERF_ATTR_SIZE_VER2 80 /* add: branch_sample_type */ 195 168 196 169 /* 197 170 * Hardware event_id to monitor via a performance monitoring event: ··· 271 240 __u64 bp_len; 272 241 __u64 config2; /* extension of config1 */ 273 242 }; 243 + __u64 
branch_sample_type; /* enum branch_sample_type */ 274 244 }; 275 245 276 246 /* ··· 490 458 * 491 459 * { u32 size; 492 460 * char data[size];}&& PERF_SAMPLE_RAW 461 + * 462 + * { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK 493 463 * }; 494 464 */ 495 465 PERF_RECORD_SAMPLE = 9, ··· 564 530 void *data; 565 531 }; 566 532 533 + /* 534 + * single taken branch record layout: 535 + * 536 + * from: source instruction (may not always be a branch insn) 537 + * to: branch target 538 + * mispred: branch target was mispredicted 539 + * predicted: branch target was predicted 540 + * 541 + * support for mispred, predicted is optional. In case it 542 + * is not supported mispred = predicted = 0. 543 + */ 567 544 struct perf_branch_entry { 568 - __u64 from; 569 - __u64 to; 570 - __u64 flags; 545 + __u64 from; 546 + __u64 to; 547 + __u64 mispred:1, /* target mispredicted */ 548 + predicted:1,/* target predicted */ 549 + reserved:62; 571 550 }; 572 551 552 + /* 553 + * branch stack layout: 554 + * nr: number of taken branches stored in entries[] 555 + * 556 + * Note that nr can vary from sample to sample 557 + * branches (to, from) are stored from most recent 558 + * to least recent, i.e., entries[0] contains the most 559 + * recent branch. 560 + */ 573 561 struct perf_branch_stack { 574 562 __u64 nr; 575 563 struct perf_branch_entry entries[0]; ··· 622 566 unsigned long event_base; 623 567 int idx; 624 568 int last_cpu; 569 + 625 570 struct hw_perf_event_extra extra_reg; 571 + struct hw_perf_event_extra branch_reg; 626 572 }; 627 573 struct { /* software */ 628 574 struct hrtimer hrtimer; ··· 748 690 * if no implementation is provided it will default to: event->hw.idx + 1. 
749 691 */ 750 692 int (*event_idx) (struct perf_event *event); /*optional */ 693 + 694 + /* 695 + * flush branch stack on context-switches (needed in cpu-wide mode) 696 + */ 697 + void (*flush_branch_stack) (void); 751 698 }; 752 699 753 700 /** ··· 986 923 u64 parent_gen; 987 924 u64 generation; 988 925 int pin_count; 989 - int nr_cgroups; /* cgroup events present */ 926 + int nr_cgroups; /* cgroup evts */ 927 + int nr_branch_stack; /* branch_stack evt */ 990 928 struct rcu_head rcu_head; 991 929 }; 992 930 ··· 1052 988 extern u64 perf_event_read_value(struct perf_event *event, 1053 989 u64 *enabled, u64 *running); 1054 990 991 + 1055 992 struct perf_sample_data { 1056 993 u64 type; 1057 994 ··· 1072 1007 u64 period; 1073 1008 struct perf_callchain_entry *callchain; 1074 1009 struct perf_raw_record *raw; 1010 + struct perf_branch_stack *br_stack; 1075 1011 }; 1076 1012 1077 1013 static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr) 1078 1014 { 1079 1015 data->addr = addr; 1080 1016 data->raw = NULL; 1017 + data->br_stack = NULL; 1081 1018 } 1082 1019 1083 1020 extern void perf_output_sample(struct perf_output_handle *handle, ··· 1217 1150 (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL) 1218 1151 # define perf_instruction_pointer(regs) instruction_pointer(regs) 1219 1152 #endif 1153 + 1154 + static inline bool has_branch_stack(struct perf_event *event) 1155 + { 1156 + return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK; 1157 + } 1220 1158 1221 1159 extern int perf_output_begin(struct perf_output_handle *handle, 1222 1160 struct perf_event *event, unsigned int size);
+177
kernel/events/core.c
··· 118 118 PERF_FLAG_FD_OUTPUT |\ 119 119 PERF_FLAG_PID_CGROUP) 120 120 121 + /* 122 + * branch priv levels that need permission checks 123 + */ 124 + #define PERF_SAMPLE_BRANCH_PERM_PLM \ 125 + (PERF_SAMPLE_BRANCH_KERNEL |\ 126 + PERF_SAMPLE_BRANCH_HV) 127 + 121 128 enum event_type_t { 122 129 EVENT_FLEXIBLE = 0x1, 123 130 EVENT_PINNED = 0x2, ··· 137 130 */ 138 131 struct static_key_deferred perf_sched_events __read_mostly; 139 132 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events); 133 + static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events); 140 134 141 135 static atomic_t nr_mmap_events __read_mostly; 142 136 static atomic_t nr_comm_events __read_mostly; ··· 889 881 if (is_cgroup_event(event)) 890 882 ctx->nr_cgroups++; 891 883 884 + if (has_branch_stack(event)) 885 + ctx->nr_branch_stack++; 886 + 892 887 list_add_rcu(&event->event_entry, &ctx->event_list); 893 888 if (!ctx->nr_events) 894 889 perf_pmu_rotate_start(ctx->pmu); ··· 1030 1019 if (!ctx->nr_cgroups) 1031 1020 cpuctx->cgrp = NULL; 1032 1021 } 1022 + 1023 + if (has_branch_stack(event)) 1024 + ctx->nr_branch_stack--; 1033 1025 1034 1026 ctx->nr_events--; 1035 1027 if (event->attr.inherit_stat) ··· 2209 2195 } 2210 2196 2211 2197 /* 2198 + * When sampling the branch stack in system-wide, it may be necessary 2199 + * to flush the stack on context switch. This happens when the branch 2200 + * stack does not tag its entries with the pid of the current task. 2201 + * Otherwise it becomes impossible to associate a branch entry with a 2202 + * task. This ambiguity is more likely to appear when the branch stack 2203 + * supports priv level filtering and the user sets it to monitor only 2204 + * at the user level (which could be a useful measurement in system-wide 2205 + * mode). In that case, the risk is high of having a branch stack with 2206 + * branch from multiple tasks. Flushing may mean dropping the existing 2207 + * entries or stashing them somewhere in the PMU specific code layer. 
2208 + * 2209 + * This function provides the context switch callback to the lower code 2210 + * layer. It is invoked ONLY when there is at least one system-wide context 2211 + * with at least one active event using taken branch sampling. 2212 + */ 2213 + static void perf_branch_stack_sched_in(struct task_struct *prev, 2214 + struct task_struct *task) 2215 + { 2216 + struct perf_cpu_context *cpuctx; 2217 + struct pmu *pmu; 2218 + unsigned long flags; 2219 + 2220 + /* no need to flush branch stack if not changing task */ 2221 + if (prev == task) 2222 + return; 2223 + 2224 + local_irq_save(flags); 2225 + 2226 + rcu_read_lock(); 2227 + 2228 + list_for_each_entry_rcu(pmu, &pmus, entry) { 2229 + cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); 2230 + 2231 + /* 2232 + * check if the context has at least one 2233 + * event using PERF_SAMPLE_BRANCH_STACK 2234 + */ 2235 + if (cpuctx->ctx.nr_branch_stack > 0 2236 + && pmu->flush_branch_stack) { 2237 + 2238 + pmu = cpuctx->ctx.pmu; 2239 + 2240 + perf_ctx_lock(cpuctx, cpuctx->task_ctx); 2241 + 2242 + perf_pmu_disable(pmu); 2243 + 2244 + pmu->flush_branch_stack(); 2245 + 2246 + perf_pmu_enable(pmu); 2247 + 2248 + perf_ctx_unlock(cpuctx, cpuctx->task_ctx); 2249 + } 2250 + } 2251 + 2252 + rcu_read_unlock(); 2253 + 2254 + local_irq_restore(flags); 2255 + } 2256 + 2257 + /* 2212 2258 * Called from scheduler to add the events of the current task 2213 2259 * with interrupts disabled. 
2214 2260 * ··· 2299 2225 */ 2300 2226 if (atomic_read(&__get_cpu_var(perf_cgroup_events))) 2301 2227 perf_cgroup_sched_in(prev, task); 2228 + 2229 + /* check for system-wide branch_stack events */ 2230 + if (atomic_read(&__get_cpu_var(perf_branch_stack_events))) 2231 + perf_branch_stack_sched_in(prev, task); 2302 2232 } 2303 2233 2304 2234 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) ··· 2868 2790 if (is_cgroup_event(event)) { 2869 2791 atomic_dec(&per_cpu(perf_cgroup_events, event->cpu)); 2870 2792 static_key_slow_dec_deferred(&perf_sched_events); 2793 + } 2794 + 2795 + if (has_branch_stack(event)) { 2796 + static_key_slow_dec_deferred(&perf_sched_events); 2797 + /* is system-wide event */ 2798 + if (!(event->attach_state & PERF_ATTACH_TASK)) 2799 + atomic_dec(&per_cpu(perf_branch_stack_events, 2800 + event->cpu)); 2871 2801 } 2872 2802 } 2873 2803 ··· 3993 3907 } 3994 3908 } 3995 3909 } 3910 + 3911 + if (sample_type & PERF_SAMPLE_BRANCH_STACK) { 3912 + if (data->br_stack) { 3913 + size_t size; 3914 + 3915 + size = data->br_stack->nr 3916 + * sizeof(struct perf_branch_entry); 3917 + 3918 + perf_output_put(handle, data->br_stack->nr); 3919 + perf_output_copy(handle, data->br_stack->entries, size); 3920 + } else { 3921 + /* 3922 + * we always store at least the value of nr 3923 + */ 3924 + u64 nr = 0; 3925 + perf_output_put(handle, nr); 3926 + } 3927 + } 3996 3928 } 3997 3929 3998 3930 void perf_prepare_sample(struct perf_event_header *header, ··· 4051 3947 size += sizeof(u32); 4052 3948 4053 3949 WARN_ON_ONCE(size & (sizeof(u64)-1)); 3950 + header->size += size; 3951 + } 3952 + 3953 + if (sample_type & PERF_SAMPLE_BRANCH_STACK) { 3954 + int size = sizeof(u64); /* nr */ 3955 + if (data->br_stack) { 3956 + size += data->br_stack->nr 3957 + * sizeof(struct perf_branch_entry); 3958 + } 4054 3959 header->size += size; 4055 3960 } 4056 3961 } ··· 5123 5010 if (event->attr.type != PERF_TYPE_SOFTWARE) 5124 5011 return -ENOENT; 5125 5012 
5013 + /* 5014 + * no branch sampling for software events 5015 + */ 5016 + if (has_branch_stack(event)) 5017 + return -EOPNOTSUPP; 5018 + 5126 5019 switch (event_id) { 5127 5020 case PERF_COUNT_SW_CPU_CLOCK: 5128 5021 case PERF_COUNT_SW_TASK_CLOCK: ··· 5238 5119 5239 5120 if (event->attr.type != PERF_TYPE_TRACEPOINT) 5240 5121 return -ENOENT; 5122 + 5123 + /* 5124 + * no branch sampling for tracepoint events 5125 + */ 5126 + if (has_branch_stack(event)) 5127 + return -EOPNOTSUPP; 5241 5128 5242 5129 err = perf_trace_init(event); 5243 5130 if (err) ··· 5470 5345 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) 5471 5346 return -ENOENT; 5472 5347 5348 + /* 5349 + * no branch sampling for software events 5350 + */ 5351 + if (has_branch_stack(event)) 5352 + return -EOPNOTSUPP; 5353 + 5473 5354 perf_swevent_init_hrtimer(event); 5474 5355 5475 5356 return 0; ··· 5549 5418 5550 5419 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) 5551 5420 return -ENOENT; 5421 + 5422 + /* 5423 + * no branch sampling for software events 5424 + */ 5425 + if (has_branch_stack(event)) 5426 + return -EOPNOTSUPP; 5552 5427 5553 5428 perf_swevent_init_hrtimer(event); 5554 5429 ··· 6003 5866 return ERR_PTR(err); 6004 5867 } 6005 5868 } 5869 + if (has_branch_stack(event)) { 5870 + static_key_slow_inc(&perf_sched_events.key); 5871 + if (!(event->attach_state & PERF_ATTACH_TASK)) 5872 + atomic_inc(&per_cpu(perf_branch_stack_events, 5873 + event->cpu)); 5874 + } 6006 5875 } 6007 5876 6008 5877 return event; ··· 6078 5935 if (attr->read_format & ~(PERF_FORMAT_MAX-1)) 6079 5936 return -EINVAL; 6080 5937 5938 + if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) { 5939 + u64 mask = attr->branch_sample_type; 5940 + 5941 + /* only using defined bits */ 5942 + if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1)) 5943 + return -EINVAL; 5944 + 5945 + /* at least one branch bit must be set */ 5946 + if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL)) 5947 + return -EINVAL; 5948 + 5949 + /* kernel level capture: check 
permissions */ 5950 + if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM) 5951 + && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) 5952 + return -EACCES; 5953 + 5954 + /* propagate priv level, when not set for branch */ 5955 + if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) { 5956 + 5957 + /* exclude_kernel checked on syscall entry */ 5958 + if (!attr->exclude_kernel) 5959 + mask |= PERF_SAMPLE_BRANCH_KERNEL; 5960 + 5961 + if (!attr->exclude_user) 5962 + mask |= PERF_SAMPLE_BRANCH_USER; 5963 + 5964 + if (!attr->exclude_hv) 5965 + mask |= PERF_SAMPLE_BRANCH_HV; 5966 + /* 5967 + * adjust user setting (for HW filter setup) 5968 + */ 5969 + attr->branch_sample_type = mask; 5970 + } 5971 + } 6081 5972 out: 6082 5973 return ret; 6083 5974
+6
kernel/events/hw_breakpoint.c
··· 581 581 if (bp->attr.type != PERF_TYPE_BREAKPOINT) 582 582 return -ENOENT; 583 583 584 + /* 585 + * no branch sampling for breakpoint events 586 + */ 587 + if (has_branch_stack(bp)) 588 + return -EOPNOTSUPP; 589 + 584 590 err = register_perf_hw_breakpoint(bp); 585 591 if (err) 586 592 return err;
+30
tools/perf/Documentation/perf-record.txt
··· 152 152 corresponding events, i.e., they always refer to events defined earlier on the command 153 153 line. 154 154 155 + -b:: 156 + --branch-any:: 157 + Enable taken branch stack sampling. Any type of taken branch may be sampled. 158 + This is a shortcut for --branch-filter any. See --branch-filter for more info. 159 + 160 + -j:: 161 + --branch-filter:: 162 + Enable taken branch stack sampling. Each sample captures a series of consecutive 163 + taken branches. The number of branches captured with each sample depends on the 164 + underlying hardware, the type of branches of interest, and the executed code. 165 + It is possible to select the types of branches captured by enabling filters. The 166 + following filters are defined: 167 + 168 + - any: any type of branches 169 + - any_call: any function call or system call 170 + - any_ret: any function return or system call return 171 + - ind_call: any indirect call 172 + - u: only when the branch target is at the user level 173 + - k: only when the branch target is in the kernel 174 + - hv: only when the target is at the hypervisor level 175 + 176 + + 177 + The option requires at least one branch type among any, any_call, any_ret, ind_call. 178 + The privilege levels may be omitted, in which case the privilege levels of the associated 179 + event are applied to the branch filter. Both kernel (k) and hypervisor (hv) privilege 180 + levels are subject to permissions. When sampling on multiple events, branch stack sampling 181 + is enabled for all the sampling events. The sampled branch type is the same for all events. 182 + The various filters must be specified as a comma separated list: --branch-filter any_ret,u,k 183 + Note that this feature may not be available on all processors. 184 + 155 185 SEE ALSO 156 186 -------- 157 187 linkperf:perf-stat[1], linkperf:perf-list[1]
+10
tools/perf/Documentation/perf-report.txt
··· 153 153 information which may be very large and thus may clutter the display. 154 154 It currently includes: cpu and numa topology of the host system. 155 155 156 + -b:: 157 + --branch-stack:: 158 + Use the addresses of sampled taken branches instead of the instruction 159 + address to build the histograms. To generate meaningful output, the 160 + perf.data file must have been obtained using perf record -b or 161 + perf record --branch-filter xxx where xxx is a branch filter option. 162 + perf report is able to auto-detect whether a perf.data file contains 163 + branch stacks and it will automatically switch to the branch view mode, 164 + unless --no-branch-stack is used. 165 + 156 166 SEE ALSO 157 167 -------- 158 168 linkperf:perf-stat[1], linkperf:perf-annotate[1]
+95
tools/perf/builtin-record.c
··· 473 473 if (!have_tracepoints(&evsel_list->entries)) 474 474 perf_header__clear_feat(&session->header, HEADER_TRACE_INFO); 475 475 476 + if (!rec->opts.branch_stack) 477 + perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK); 478 + 476 479 if (!rec->file_new) { 477 480 err = perf_session__read_header(session, output); 478 481 if (err < 0) ··· 641 638 return err; 642 639 } 643 640 641 + #define BRANCH_OPT(n, m) \ 642 + { .name = n, .mode = (m) } 643 + 644 + #define BRANCH_END { .name = NULL } 645 + 646 + struct branch_mode { 647 + const char *name; 648 + int mode; 649 + }; 650 + 651 + static const struct branch_mode branch_modes[] = { 652 + BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER), 653 + BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL), 654 + BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV), 655 + BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY), 656 + BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL), 657 + BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN), 658 + BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL), 659 + BRANCH_END 660 + }; 661 + 662 + static int 663 + parse_branch_stack(const struct option *opt, const char *str, int unset) 664 + { 665 + #define ONLY_PLM \ 666 + (PERF_SAMPLE_BRANCH_USER |\ 667 + PERF_SAMPLE_BRANCH_KERNEL |\ 668 + PERF_SAMPLE_BRANCH_HV) 669 + 670 + uint64_t *mode = (uint64_t *)opt->value; 671 + const struct branch_mode *br; 672 + char *s, *os = NULL, *p; 673 + int ret = -1; 674 + 675 + if (unset) 676 + return 0; 677 + 678 + /* 679 + * cannot set it twice, -b + --branch-filter for instance 680 + */ 681 + if (*mode) 682 + return -1; 683 + 684 + /* str may be NULL in case no arg is passed to -b */ 685 + if (str) { 686 + /* because str is read-only */ 687 + s = os = strdup(str); 688 + if (!s) 689 + return -1; 690 + 691 + for (;;) { 692 + p = strchr(s, ','); 693 + if (p) 694 + *p = '\0'; 695 + 696 + for (br = branch_modes; br->name; br++) { 697 + if (!strcasecmp(s, br->name)) 698 + break; 699 + } 700 + if (!br->name) { 701 + 
ui__warning("unknown branch filter %s," 702 + " check man page\n", s); 703 + goto error; 704 + } 705 + 706 + *mode |= br->mode; 707 + 708 + if (!p) 709 + break; 710 + 711 + s = p + 1; 712 + } 713 + } 714 + ret = 0; 715 + 716 + /* default to any branch */ 717 + if ((*mode & ~ONLY_PLM) == 0) { 718 + *mode = PERF_SAMPLE_BRANCH_ANY; 719 + } 720 + error: 721 + free(os); 722 + return ret; 723 + } 724 + 644 725 static const char * const record_usage[] = { 645 726 "perf record [<options>] [<command>]", 646 727 "perf record [<options>] -- <command> [<options>]", ··· 814 727 "monitor event in cgroup name only", 815 728 parse_cgroups), 816 729 OPT_STRING('u', "uid", &record.uid_str, "user", "user to profile"), 730 + 731 + OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack, 732 + "branch any", "sample any taken branches", 733 + parse_branch_stack), 734 + 735 + OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack, 736 + "branch filter mask", "branch stack filter modes", 737 + parse_branch_stack), 817 738 OPT_END() 818 739 }; 819 740
+156 -22
tools/perf/builtin-report.c
··· 53 53 DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); 54 54 }; 55 55 56 + static int perf_report__add_branch_hist_entry(struct perf_tool *tool, 57 + struct addr_location *al, 58 + struct perf_sample *sample, 59 + struct perf_evsel *evsel, 60 + struct machine *machine) 61 + { 62 + struct perf_report *rep = container_of(tool, struct perf_report, tool); 63 + struct symbol *parent = NULL; 64 + int err = 0; 65 + unsigned i; 66 + struct hist_entry *he; 67 + struct branch_info *bi, *bx; 68 + 69 + if ((sort__has_parent || symbol_conf.use_callchain) 70 + && sample->callchain) { 71 + err = machine__resolve_callchain(machine, evsel, al->thread, 72 + sample->callchain, &parent); 73 + if (err) 74 + return err; 75 + } 76 + 77 + bi = machine__resolve_bstack(machine, al->thread, 78 + sample->branch_stack); 79 + if (!bi) 80 + return -ENOMEM; 81 + 82 + for (i = 0; i < sample->branch_stack->nr; i++) { 83 + if (rep->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym)) 84 + continue; 85 + /* 86 + * The report shows the percentage of total branches captured 87 + * and not events sampled. Thus we use a pseudo period of 1. 
88 + */ 89 + he = __hists__add_branch_entry(&evsel->hists, al, parent, 90 + &bi[i], 1); 91 + if (he) { 92 + struct annotation *notes; 93 + err = -ENOMEM; 94 + bx = he->branch_info; 95 + if (bx->from.sym && use_browser > 0) { 96 + notes = symbol__annotation(bx->from.sym); 97 + if (!notes->src 98 + && symbol__alloc_hist(bx->from.sym) < 0) 99 + goto out; 100 + 101 + err = symbol__inc_addr_samples(bx->from.sym, 102 + bx->from.map, 103 + evsel->idx, 104 + bx->from.al_addr); 105 + if (err) 106 + goto out; 107 + } 108 + 109 + if (bx->to.sym && use_browser > 0) { 110 + notes = symbol__annotation(bx->to.sym); 111 + if (!notes->src 112 + && symbol__alloc_hist(bx->to.sym) < 0) 113 + goto out; 114 + 115 + err = symbol__inc_addr_samples(bx->to.sym, 116 + bx->to.map, 117 + evsel->idx, 118 + bx->to.al_addr); 119 + if (err) 120 + goto out; 121 + } 122 + evsel->hists.stats.total_period += 1; 123 + hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE); 124 + err = 0; 125 + } else 126 + return -ENOMEM; 127 + } 128 + out: 129 + return err; 130 + } 131 + 56 132 static int perf_evsel__add_hist_entry(struct perf_evsel *evsel, 57 133 struct addr_location *al, 58 134 struct perf_sample *sample, ··· 202 126 if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap)) 203 127 return 0; 204 128 205 - if (al.map != NULL) 206 - al.map->dso->hit = 1; 129 + if (sort__branch_mode == 1) { 130 + if (perf_report__add_branch_hist_entry(tool, &al, sample, 131 + evsel, machine)) { 132 + pr_debug("problem adding lbr entry, skipping event\n"); 133 + return -1; 134 + } 135 + } else { 136 + if (al.map != NULL) 137 + al.map->dso->hit = 1; 207 138 208 - if (perf_evsel__add_hist_entry(evsel, &al, sample, machine)) { 209 - pr_debug("problem incrementing symbol period, skipping event\n"); 210 - return -1; 139 + if (perf_evsel__add_hist_entry(evsel, &al, sample, machine)) { 140 + pr_debug("problem incrementing symbol period, skipping event\n"); 141 + return -1; 142 + } 211 143 } 212 - 213 144 return 0; 214 145 
} 215 146 ··· 269 186 "params.\n"); 270 187 return -EINVAL; 271 188 } 189 + } 190 + 191 + if (sort__branch_mode == 1) { 192 + if (!(self->sample_type & PERF_SAMPLE_BRANCH_STACK)) { 193 + fprintf(stderr, "selected -b but no branch data." 194 + " Did you call perf record without" 195 + " -b?\n"); 196 + return -1; 197 + } 272 198 } 273 199 274 200 return 0; ··· 338 246 { 339 247 int ret = -EINVAL; 340 248 u64 nr_samples; 341 - struct perf_session *session; 249 + struct perf_session *session = rep->session; 342 250 struct perf_evsel *pos; 343 251 struct map *kernel_map; 344 252 struct kmap *kernel_kmap; 345 253 const char *help = "For a higher level overview, try: perf report --sort comm,dso"; 346 254 347 255 signal(SIGINT, sig_handler); 348 - 349 - session = perf_session__new(rep->input_name, O_RDONLY, 350 - rep->force, false, &rep->tool); 351 - if (session == NULL) 352 - return -ENOMEM; 353 - 354 - rep->session = session; 355 256 356 257 if (rep->cpu_list) { 357 258 ret = perf_session__cpu_bitmap(session, rep->cpu_list, ··· 512 427 return 0; 513 428 } 514 429 430 + static int 431 + parse_branch_mode(const struct option *opt __used, const char *str __used, int unset) 432 + { 433 + sort__branch_mode = !unset; 434 + return 0; 435 + } 436 + 515 437 int cmd_report(int argc, const char **argv, const char *prefix __used) 516 438 { 439 + struct perf_session *session; 517 440 struct stat st; 441 + bool has_br_stack = false; 442 + int ret = -1; 518 443 char callchain_default_opt[] = "fractal,0.5,callee"; 519 444 const char * const report_usage[] = { 520 445 "perf report [<options>]", ··· 572 477 OPT_BOOLEAN(0, "stdio", &report.use_stdio, 573 478 "Use the stdio interface"), 574 479 OPT_STRING('s', "sort", &sort_order, "key[,key2...]", 575 - "sort by key(s): pid, comm, dso, symbol, parent"), 480 + "sort by key(s): pid, comm, dso, symbol, parent, dso_to," 481 + " dso_from, symbol_to, symbol_from, mispredict"), 576 482 OPT_BOOLEAN(0, "showcpuutilization", 
&symbol_conf.show_cpu_utilization, 577 483 "Show sample percentage for different cpu modes"), 578 484 OPT_STRING('p', "parent", &parent_pattern, "regex", ··· 613 517 "Specify disassembler style (e.g. -M intel for intel syntax)"), 614 518 OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period, 615 519 "Show a column with the sum of periods"), 520 + OPT_CALLBACK_NOOPT('b', "branch-stack", &sort__branch_mode, "", 521 + "use branch records for histogram filling", parse_branch_mode), 616 522 OPT_END() 617 523 }; 618 524 ··· 634 536 else 635 537 report.input_name = "perf.data"; 636 538 } 539 + session = perf_session__new(report.input_name, O_RDONLY, 540 + report.force, false, &report.tool); 541 + if (session == NULL) 542 + return -ENOMEM; 637 543 638 - if (strcmp(report.input_name, "-") != 0) 544 + report.session = session; 545 + 546 + has_br_stack = perf_header__has_feat(&session->header, 547 + HEADER_BRANCH_STACK); 548 + 549 + if (sort__branch_mode == -1 && has_br_stack) 550 + sort__branch_mode = 1; 551 + 552 + /* sort__branch_mode could be 0 if --no-branch-stack */ 553 + if (sort__branch_mode == 1) { 554 + /* 555 + * if no sort_order is provided, then specify 556 + * branch-mode specific order 557 + */ 558 + if (sort_order == default_sort_order) 559 + sort_order = "comm,dso_from,symbol_from," 560 + "dso_to,symbol_to"; 561 + 562 + } 563 + 564 + if (strcmp(report.input_name, "-") != 0) { 639 565 setup_browser(true); 640 - else 566 + } else { 641 567 use_browser = 0; 568 + } 642 569 643 570 /* 644 571 * Only in the newt browser we are doing integrated annotation, ··· 691 568 } 692 569 693 570 if (symbol__init() < 0) 694 - return -1; 571 + goto error; 695 572 696 573 setup_sorting(report_usage, options); 697 574 698 575 if (parent_pattern != default_parent_pattern) { 699 576 if (sort_dimension__add("parent") < 0) 700 - return -1; 577 + goto error; 701 578 702 579 /* 703 580 * Only show the parent fields if we explicitly ··· 715 592 if (argc) 716 593 
usage_with_options(report_usage, options); 717 594 718 - sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, "dso", stdout); 719 595 sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list, "comm", stdout); 720 - sort_entry__setup_elide(&sort_sym, symbol_conf.sym_list, "symbol", stdout); 721 596 722 - return __cmd_report(&report); 597 + if (sort__branch_mode == 1) { 598 + sort_entry__setup_elide(&sort_dso_from, symbol_conf.dso_from_list, "dso_from", stdout); 599 + sort_entry__setup_elide(&sort_dso_to, symbol_conf.dso_to_list, "dso_to", stdout); 600 + sort_entry__setup_elide(&sort_sym_from, symbol_conf.sym_from_list, "sym_from", stdout); 601 + sort_entry__setup_elide(&sort_sym_to, symbol_conf.sym_to_list, "sym_to", stdout); 602 + } else { 603 + sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list, "dso", stdout); 604 + sort_entry__setup_elide(&sort_sym, symbol_conf.sym_list, "symbol", stdout); 605 + } 606 + 607 + ret = __cmd_report(&report); 608 + error: 609 + perf_session__delete(session); 610 + return ret; 723 611 }
+18
tools/perf/perf.h
··· 179 179 u64 ips[0]; 180 180 }; 181 181 182 + struct branch_flags { 183 + u64 mispred:1; 184 + u64 predicted:1; 185 + u64 reserved:62; 186 + }; 187 + 188 + struct branch_entry { 189 + u64 from; 190 + u64 to; 191 + struct branch_flags flags; 192 + }; 193 + 194 + struct branch_stack { 195 + u64 nr; 196 + struct branch_entry entries[0]; 197 + }; 198 + 182 199 extern bool perf_host, perf_guest; 183 200 extern const char perf_version_string[]; 184 201 ··· 222 205 unsigned int freq; 223 206 unsigned int mmap_pages; 224 207 unsigned int user_freq; 208 + int branch_stack; 225 209 u64 default_interval; 226 210 u64 user_interval; 227 211 const char *cpu_list;
+1
tools/perf/util/event.h
··· 81 81 u32 raw_size; 82 82 void *raw_data; 83 83 struct ip_callchain *callchain; 84 + struct branch_stack *branch_stack; 84 85 }; 85 86 86 87 #define BUILD_ID_SIZE 20
+14
tools/perf/util/evsel.c
··· 126 126 attr->watermark = 0; 127 127 attr->wakeup_events = 1; 128 128 } 129 + if (opts->branch_stack) { 130 + attr->sample_type |= PERF_SAMPLE_BRANCH_STACK; 131 + attr->branch_sample_type = opts->branch_stack; 132 + } 129 133 130 134 attr->mmap = track; 131 135 attr->comm = track; ··· 580 576 data->raw_data = (void *) pdata; 581 577 } 582 578 579 + if (type & PERF_SAMPLE_BRANCH_STACK) { 580 + u64 sz; 581 + 582 + data->branch_stack = (struct branch_stack *)array; 583 + array++; /* nr */ 584 + 585 + sz = data->branch_stack->nr * sizeof(struct branch_entry); 586 + sz /= sizeof(u64); 587 + array += sz; 588 + } 583 589 return 0; 584 590 } 585 591
+165 -44
tools/perf/util/header.c
··· 1023 1023 return do_write_string(fd, buffer); 1024 1024 } 1025 1025 1026 + static int write_branch_stack(int fd __used, struct perf_header *h __used, 1027 + struct perf_evlist *evlist __used) 1028 + { 1029 + return 0; 1030 + } 1031 + 1026 1032 static void print_hostname(struct perf_header *ph, int fd, FILE *fp) 1027 1033 { 1028 1034 char *str = do_read_string(fd, ph); ··· 1150 1144 uint64_t id; 1151 1145 void *buf = NULL; 1152 1146 char *str; 1153 - u32 nre, sz, nr, i, j, msz; 1154 - int ret; 1147 + u32 nre, sz, nr, i, j; 1148 + ssize_t ret; 1149 + size_t msz; 1155 1150 1156 1151 /* number of events */ 1157 1152 ret = read(fd, &nre, sizeof(nre)); ··· 1169 1162 if (ph->needs_swap) 1170 1163 sz = bswap_32(sz); 1171 1164 1172 - /* 1173 - * ensure it is at least to our ABI rev 1174 - */ 1175 - if (sz < (u32)sizeof(attr)) 1176 - goto error; 1177 - 1178 1165 memset(&attr, 0, sizeof(attr)); 1179 1166 1180 - /* read entire region to sync up to next field */ 1167 + /* buffer to hold on file attr struct */ 1181 1168 buf = malloc(sz); 1182 1169 if (!buf) 1183 1170 goto error; 1184 1171 1185 1172 msz = sizeof(attr); 1186 - if (sz < msz) 1173 + if (sz < (ssize_t)msz) 1187 1174 msz = sz; 1188 1175 1189 1176 for (i = 0 ; i < nre; i++) { 1190 1177 1178 + /* 1179 + * must read entire on-file attr struct to 1180 + * sync up with layout. 
1181 + */ 1191 1182 ret = read(fd, buf, sz); 1192 1183 if (ret != (ssize_t)sz) 1193 1184 goto error; ··· 1319 1314 char *str = do_read_string(fd, ph); 1320 1315 fprintf(fp, "# cpuid : %s\n", str); 1321 1316 free(str); 1317 + } 1318 + 1319 + static void print_branch_stack(struct perf_header *ph __used, int fd __used, 1320 + FILE *fp) 1321 + { 1322 + fprintf(fp, "# contains samples with branch stack\n"); 1322 1323 } 1323 1324 1324 1325 static int __event_process_build_id(struct build_id_event *bev, ··· 1531 1520 FEAT_OPA(HEADER_CMDLINE, cmdline), 1532 1521 FEAT_OPF(HEADER_CPU_TOPOLOGY, cpu_topology), 1533 1522 FEAT_OPF(HEADER_NUMA_TOPOLOGY, numa_topology), 1523 + FEAT_OPA(HEADER_BRANCH_STACK, branch_stack), 1534 1524 }; 1535 1525 1536 1526 struct header_print_data { ··· 1816 1804 return err; 1817 1805 } 1818 1806 1819 - static int check_magic_endian(u64 *magic, struct perf_file_header *header, 1820 - struct perf_header *ph) 1807 + static const int attr_file_abi_sizes[] = { 1808 + [0] = PERF_ATTR_SIZE_VER0, 1809 + [1] = PERF_ATTR_SIZE_VER1, 1810 + 0, 1811 + }; 1812 + 1813 + /* 1814 + * In the legacy file format, the magic number is not used to encode endianness. 1815 + * hdr_sz was used to encode endianness. But given that hdr_sz can vary based 1816 + * on ABI revisions, we need to try all combinations for all endianness to 1817 + * detect the endianness. 
1818 + */ 1819 + static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph) 1820 + { 1821 + uint64_t ref_size, attr_size; 1822 + int i; 1823 + 1824 + for (i = 0 ; attr_file_abi_sizes[i]; i++) { 1825 + ref_size = attr_file_abi_sizes[i] 1826 + + sizeof(struct perf_file_section); 1827 + if (hdr_sz != ref_size) { 1828 + attr_size = bswap_64(hdr_sz); 1829 + if (attr_size != ref_size) 1830 + continue; 1831 + 1832 + ph->needs_swap = true; 1833 + } 1834 + pr_debug("ABI%d perf.data file detected, need_swap=%d\n", 1835 + i, 1836 + ph->needs_swap); 1837 + return 0; 1838 + } 1839 + /* could not determine endianness */ 1840 + return -1; 1841 + } 1842 + 1843 + #define PERF_PIPE_HDR_VER0 16 1844 + 1845 + static const size_t attr_pipe_abi_sizes[] = { 1846 + [0] = PERF_PIPE_HDR_VER0, 1847 + 0, 1848 + }; 1849 + 1850 + /* 1851 + * In the legacy pipe format, there is an implicit assumption that endiannesss 1852 + * between host recording the samples, and host parsing the samples is the 1853 + * same. This is not always the case given that the pipe output may always be 1854 + * redirected into a file and analyzed on a different machine with possibly a 1855 + * different endianness and perf_event ABI revsions in the perf tool itself. 
1856 + */ 1857 + static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph) 1858 + { 1859 + u64 attr_size; 1860 + int i; 1861 + 1862 + for (i = 0 ; attr_pipe_abi_sizes[i]; i++) { 1863 + if (hdr_sz != attr_pipe_abi_sizes[i]) { 1864 + attr_size = bswap_64(hdr_sz); 1865 + if (attr_size != hdr_sz) 1866 + continue; 1867 + 1868 + ph->needs_swap = true; 1869 + } 1870 + pr_debug("Pipe ABI%d perf.data file detected\n", i); 1871 + return 0; 1872 + } 1873 + return -1; 1874 + } 1875 + 1876 + static int check_magic_endian(u64 magic, uint64_t hdr_sz, 1877 + bool is_pipe, struct perf_header *ph) 1821 1878 { 1822 1879 int ret; 1823 1880 1824 1881 /* check for legacy format */ 1825 - ret = memcmp(magic, __perf_magic1, sizeof(*magic)); 1882 + ret = memcmp(&magic, __perf_magic1, sizeof(magic)); 1826 1883 if (ret == 0) { 1827 1884 pr_debug("legacy perf.data format\n"); 1828 - if (!header) 1829 - return -1; 1885 + if (is_pipe) 1886 + return try_all_pipe_abis(hdr_sz, ph); 1830 1887 1831 - if (header->attr_size != sizeof(struct perf_file_attr)) { 1832 - u64 attr_size = bswap_64(header->attr_size); 1833 - 1834 - if (attr_size != sizeof(struct perf_file_attr)) 1835 - return -1; 1836 - 1837 - ph->needs_swap = true; 1838 - } 1839 - return 0; 1888 + return try_all_file_abis(hdr_sz, ph); 1840 1889 } 1890 + /* 1891 + * the new magic number serves two purposes: 1892 + * - unique number to identify actual perf.data files 1893 + * - encode endianness of file 1894 + */ 1841 1895 1842 - /* check magic number with same endianness */ 1843 - if (*magic == __perf_magic2) 1896 + /* check magic number with one endianness */ 1897 + if (magic == __perf_magic2) 1844 1898 return 0; 1845 1899 1846 - /* check magic number but opposite endianness */ 1847 - if (*magic != __perf_magic2_sw) 1900 + /* check magic number with opposite endianness */ 1901 + if (magic != __perf_magic2_sw) 1848 1902 return -1; 1849 1903 1850 1904 ph->needs_swap = true; ··· 1929 1851 if (ret <= 0) 1930 1852 return -1; 1931 1853 
1932 - if (check_magic_endian(&header->magic, header, ph) < 0) 1854 + if (check_magic_endian(header->magic, 1855 + header->attr_size, false, ph) < 0) { 1856 + pr_debug("magic/endian check failed\n"); 1933 1857 return -1; 1858 + } 1934 1859 1935 1860 if (ph->needs_swap) { 1936 1861 mem_bswap_64(header, offsetof(struct perf_file_header, ··· 2020 1939 if (ret <= 0) 2021 1940 return -1; 2022 1941 2023 - if (check_magic_endian(&header->magic, NULL, ph) < 0) 1942 + if (check_magic_endian(header->magic, header->size, true, ph) < 0) { 1943 + pr_debug("endian/magic failed\n"); 2024 1944 return -1; 1945 + } 1946 + 1947 + if (ph->needs_swap) 1948 + header->size = bswap_64(header->size); 2025 1949 2026 1950 if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0) 2027 1951 return -1; 2028 - 2029 - if (header->size != sizeof(*header)) { 2030 - u64 size = bswap_64(header->size); 2031 - 2032 - if (size != sizeof(*header)) 2033 - return -1; 2034 - 2035 - ph->needs_swap = true; 2036 - } 2037 1952 2038 1953 return 0; 2039 1954 } ··· 2050 1973 return 0; 2051 1974 } 2052 1975 1976 + static int read_attr(int fd, struct perf_header *ph, 1977 + struct perf_file_attr *f_attr) 1978 + { 1979 + struct perf_event_attr *attr = &f_attr->attr; 1980 + size_t sz, left; 1981 + size_t our_sz = sizeof(f_attr->attr); 1982 + int ret; 1983 + 1984 + memset(f_attr, 0, sizeof(*f_attr)); 1985 + 1986 + /* read minimal guaranteed structure */ 1987 + ret = readn(fd, attr, PERF_ATTR_SIZE_VER0); 1988 + if (ret <= 0) { 1989 + pr_debug("cannot read %d bytes of header attr\n", 1990 + PERF_ATTR_SIZE_VER0); 1991 + return -1; 1992 + } 1993 + 1994 + /* on file perf_event_attr size */ 1995 + sz = attr->size; 1996 + 1997 + if (ph->needs_swap) 1998 + sz = bswap_32(sz); 1999 + 2000 + if (sz == 0) { 2001 + /* assume ABI0 */ 2002 + sz = PERF_ATTR_SIZE_VER0; 2003 + } else if (sz > our_sz) { 2004 + pr_debug("file uses a more recent and unsupported ABI" 2005 + " (%zu bytes extra)\n", sz - our_sz); 2006 + return -1; 
2007 + } 2008 + /* what we have not yet read and that we know about */ 2009 + left = sz - PERF_ATTR_SIZE_VER0; 2010 + if (left) { 2011 + void *ptr = attr; 2012 + ptr += PERF_ATTR_SIZE_VER0; 2013 + 2014 + ret = readn(fd, ptr, left); 2015 + } 2016 + /* read perf_file_section, ids are read in caller */ 2017 + ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids)); 2018 + 2019 + return ret <= 0 ? -1 : 0; 2020 + } 2021 + 2053 2022 int perf_session__read_header(struct perf_session *session, int fd) 2054 2023 { 2055 2024 struct perf_header *header = &session->header; ··· 2111 1988 if (session->fd_pipe) 2112 1989 return perf_header__read_pipe(session, fd); 2113 1990 2114 - if (perf_file_header__read(&f_header, header, fd) < 0) { 2115 - pr_debug("incompatible file format\n"); 1991 + if (perf_file_header__read(&f_header, header, fd) < 0) 2116 1992 return -EINVAL; 2117 - } 2118 1993 2119 - nr_attrs = f_header.attrs.size / sizeof(f_attr); 1994 + nr_attrs = f_header.attrs.size / f_header.attr_size; 2120 1995 lseek(fd, f_header.attrs.offset, SEEK_SET); 2121 1996 2122 1997 for (i = 0; i < nr_attrs; i++) { 2123 1998 struct perf_evsel *evsel; 2124 1999 off_t tmp; 2125 2000 2126 - if (readn(fd, &f_attr, sizeof(f_attr)) <= 0) 2001 + if (read_attr(fd, header, &f_attr) < 0) 2127 2002 goto out_errno; 2128 2003 2129 2004 if (header->needs_swap)
+1 -1
tools/perf/util/header.h
··· 27 27 HEADER_EVENT_DESC, 28 28 HEADER_CPU_TOPOLOGY, 29 29 HEADER_NUMA_TOPOLOGY, 30 - 30 + HEADER_BRANCH_STACK, 31 31 HEADER_LAST_FEATURE, 32 32 HEADER_FEAT_BITS = 256, 33 33 };
+95 -27
tools/perf/util/hist.c
··· 50 50 hists__set_col_len(hists, col, 0); 51 51 } 52 52 53 + static void hists__set_unres_dso_col_len(struct hists *hists, int dso) 54 + { 55 + const unsigned int unresolved_col_width = BITS_PER_LONG / 4; 56 + 57 + if (hists__col_len(hists, dso) < unresolved_col_width && 58 + !symbol_conf.col_width_list_str && !symbol_conf.field_sep && 59 + !symbol_conf.dso_list) 60 + hists__set_col_len(hists, dso, unresolved_col_width); 61 + } 62 + 53 63 static void hists__calc_col_len(struct hists *hists, struct hist_entry *h) 54 64 { 65 + const unsigned int unresolved_col_width = BITS_PER_LONG / 4; 55 66 u16 len; 56 67 57 68 if (h->ms.sym) 58 - hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen); 59 - else { 60 - const unsigned int unresolved_col_width = BITS_PER_LONG / 4; 61 - 62 - if (hists__col_len(hists, HISTC_DSO) < unresolved_col_width && 63 - !symbol_conf.col_width_list_str && !symbol_conf.field_sep && 64 - !symbol_conf.dso_list) 65 - hists__set_col_len(hists, HISTC_DSO, 66 - unresolved_col_width); 67 - } 69 + hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4); 70 + else 71 + hists__set_unres_dso_col_len(hists, HISTC_DSO); 68 72 69 73 len = thread__comm_len(h->thread); 70 74 if (hists__new_col_len(hists, HISTC_COMM, len)) ··· 77 73 if (h->ms.map) { 78 74 len = dso__name_len(h->ms.map->dso); 79 75 hists__new_col_len(hists, HISTC_DSO, len); 76 + } 77 + 78 + if (h->branch_info) { 79 + int symlen; 80 + /* 81 + * +4 accounts for '[x] ' priv level info 82 + * +2 account of 0x prefix on raw addresses 83 + */ 84 + if (h->branch_info->from.sym) { 85 + symlen = (int)h->branch_info->from.sym->namelen + 4; 86 + hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen); 87 + 88 + symlen = dso__name_len(h->branch_info->from.map->dso); 89 + hists__new_col_len(hists, HISTC_DSO_FROM, symlen); 90 + } else { 91 + symlen = unresolved_col_width + 4 + 2; 92 + hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen); 93 + hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM); 94 + 
} 95 + 96 + if (h->branch_info->to.sym) { 97 + symlen = (int)h->branch_info->to.sym->namelen + 4; 98 + hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen); 99 + 100 + symlen = dso__name_len(h->branch_info->to.map->dso); 101 + hists__new_col_len(hists, HISTC_DSO_TO, symlen); 102 + } else { 103 + symlen = unresolved_col_width + 4 + 2; 104 + hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen); 105 + hists__set_unres_dso_col_len(hists, HISTC_DSO_TO); 106 + } 80 107 } 81 108 } 82 109 ··· 230 195 return 0; 231 196 } 232 197 233 - struct hist_entry *__hists__add_entry(struct hists *hists, 198 + static struct hist_entry *add_hist_entry(struct hists *hists, 199 + struct hist_entry *entry, 234 200 struct addr_location *al, 235 - struct symbol *sym_parent, u64 period) 201 + u64 period) 236 202 { 237 203 struct rb_node **p; 238 204 struct rb_node *parent = NULL; 239 205 struct hist_entry *he; 240 - struct hist_entry entry = { 241 - .thread = al->thread, 242 - .ms = { 243 - .map = al->map, 244 - .sym = al->sym, 245 - }, 246 - .cpu = al->cpu, 247 - .ip = al->addr, 248 - .level = al->level, 249 - .period = period, 250 - .parent = sym_parent, 251 - .filtered = symbol__parent_filter(sym_parent), 252 - }; 253 206 int cmp; 254 207 255 208 pthread_mutex_lock(&hists->lock); ··· 248 225 parent = *p; 249 226 he = rb_entry(parent, struct hist_entry, rb_node_in); 250 227 251 - cmp = hist_entry__cmp(&entry, he); 228 + cmp = hist_entry__cmp(entry, he); 252 229 253 230 if (!cmp) { 254 231 he->period += period; ··· 262 239 p = &(*p)->rb_right; 263 240 } 264 241 265 - he = hist_entry__new(&entry); 242 + he = hist_entry__new(entry); 266 243 if (!he) 267 244 goto out_unlock; 268 245 ··· 273 250 out_unlock: 274 251 pthread_mutex_unlock(&hists->lock); 275 252 return he; 253 + } 254 + 255 + struct hist_entry *__hists__add_branch_entry(struct hists *self, 256 + struct addr_location *al, 257 + struct symbol *sym_parent, 258 + struct branch_info *bi, 259 + u64 period) 260 + { 261 + struct hist_entry 
entry = { 262 + .thread = al->thread, 263 + .ms = { 264 + .map = bi->to.map, 265 + .sym = bi->to.sym, 266 + }, 267 + .cpu = al->cpu, 268 + .ip = bi->to.addr, 269 + .level = al->level, 270 + .period = period, 271 + .parent = sym_parent, 272 + .filtered = symbol__parent_filter(sym_parent), 273 + .branch_info = bi, 274 + }; 275 + 276 + return add_hist_entry(self, &entry, al, period); 277 + } 278 + 279 + struct hist_entry *__hists__add_entry(struct hists *self, 280 + struct addr_location *al, 281 + struct symbol *sym_parent, u64 period) 282 + { 283 + struct hist_entry entry = { 284 + .thread = al->thread, 285 + .ms = { 286 + .map = al->map, 287 + .sym = al->sym, 288 + }, 289 + .cpu = al->cpu, 290 + .ip = al->addr, 291 + .level = al->level, 292 + .period = period, 293 + .parent = sym_parent, 294 + .filtered = symbol__parent_filter(sym_parent), 295 + }; 296 + 297 + return add_hist_entry(self, &entry, al, period); 276 298 } 277 299 278 300 int64_t
+11
tools/perf/util/hist.h
··· 42 42 HISTC_COMM, 43 43 HISTC_PARENT, 44 44 HISTC_CPU, 45 + HISTC_MISPREDICT, 46 + HISTC_SYMBOL_FROM, 47 + HISTC_SYMBOL_TO, 48 + HISTC_DSO_FROM, 49 + HISTC_DSO_TO, 45 50 HISTC_NR_COLS, /* Last entry */ 46 51 }; 47 52 ··· 78 73 int hist_entry__snprintf(struct hist_entry *self, char *bf, size_t size, 79 74 struct hists *hists); 80 75 void hist_entry__free(struct hist_entry *); 76 + 77 + struct hist_entry *__hists__add_branch_entry(struct hists *self, 78 + struct addr_location *al, 79 + struct symbol *sym_parent, 80 + struct branch_info *bi, 81 + u64 period); 81 82 82 83 void hists__output_resort(struct hists *self); 83 84 void hists__output_resort_threaded(struct hists *hists);
+75 -2
tools/perf/util/session.c
··· 24 24 self->fd = STDIN_FILENO; 25 25 26 26 if (perf_session__read_header(self, self->fd) < 0) 27 - pr_err("incompatible file format"); 27 + pr_err("incompatible file format (rerun with -v to learn more)"); 28 28 29 29 return 0; 30 30 } ··· 56 56 } 57 57 58 58 if (perf_session__read_header(self, self->fd) < 0) { 59 - pr_err("incompatible file format"); 59 + pr_err("incompatible file format (rerun with -v to learn more)"); 60 60 goto out_close; 61 61 } 62 62 ··· 227 227 return 1; 228 228 229 229 return 0; 230 + } 231 + 232 + static const u8 cpumodes[] = { 233 + PERF_RECORD_MISC_USER, 234 + PERF_RECORD_MISC_KERNEL, 235 + PERF_RECORD_MISC_GUEST_USER, 236 + PERF_RECORD_MISC_GUEST_KERNEL 237 + }; 238 + #define NCPUMODES (sizeof(cpumodes)/sizeof(u8)) 239 + 240 + static void ip__resolve_ams(struct machine *self, struct thread *thread, 241 + struct addr_map_symbol *ams, 242 + u64 ip) 243 + { 244 + struct addr_location al; 245 + size_t i; 246 + u8 m; 247 + 248 + memset(&al, 0, sizeof(al)); 249 + 250 + for (i = 0; i < NCPUMODES; i++) { 251 + m = cpumodes[i]; 252 + /* 253 + * We cannot use the header.misc hint to determine whether a 254 + * branch stack address is user, kernel, guest, hypervisor. 255 + * Branches may straddle the kernel/user/hypervisor boundaries. 
256 + * Thus, we have to try consecutively until we find a match 257 + * or else, the symbol is unknown 258 + */ 259 + thread__find_addr_location(thread, self, m, MAP__FUNCTION, 260 + ip, &al, NULL); 261 + if (al.sym) 262 + goto found; 263 + } 264 + found: 265 + ams->addr = ip; 266 + ams->al_addr = al.addr; 267 + ams->sym = al.sym; 268 + ams->map = al.map; 269 + } 270 + 271 + struct branch_info *machine__resolve_bstack(struct machine *self, 272 + struct thread *thr, 273 + struct branch_stack *bs) 274 + { 275 + struct branch_info *bi; 276 + unsigned int i; 277 + 278 + bi = calloc(bs->nr, sizeof(struct branch_info)); 279 + if (!bi) 280 + return NULL; 281 + 282 + for (i = 0; i < bs->nr; i++) { 283 + ip__resolve_ams(self, thr, &bi[i].to, bs->entries[i].to); 284 + ip__resolve_ams(self, thr, &bi[i].from, bs->entries[i].from); 285 + bi[i].flags = bs->entries[i].flags; 286 + } 287 + return bi; 230 288 } 231 289 232 290 int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel, ··· 755 697 i, sample->callchain->ips[i]); 756 698 } 757 699 700 + static void branch_stack__printf(struct perf_sample *sample) 701 + { 702 + uint64_t i; 703 + 704 + printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr); 705 + 706 + for (i = 0; i < sample->branch_stack->nr; i++) 707 + printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n", 708 + i, sample->branch_stack->entries[i].from, 709 + sample->branch_stack->entries[i].to); 710 + } 711 + 758 712 static void perf_session__print_tstamp(struct perf_session *session, 759 713 union perf_event *event, 760 714 struct perf_sample *sample) ··· 814 744 815 745 if (session->sample_type & PERF_SAMPLE_CALLCHAIN) 816 746 callchain__printf(sample); 747 + 748 + if (session->sample_type & PERF_SAMPLE_BRANCH_STACK) 749 + branch_stack__printf(sample); 817 750 } 818 751 819 752 static struct machine *
+4
tools/perf/util/session.h
··· 73 73 struct ip_callchain *chain, 74 74 struct symbol **parent); 75 75 76 + struct branch_info *machine__resolve_bstack(struct machine *self, 77 + struct thread *thread, 78 + struct branch_stack *bs); 79 + 76 80 bool perf_session__has_traces(struct perf_session *self, const char *msg); 77 81 78 82 void mem_bswap_64(void *src, int byte_size);
+238 -53
tools/perf/util/sort.c
··· 8 8 const char *sort_order = default_sort_order; 9 9 int sort__need_collapse = 0; 10 10 int sort__has_parent = 0; 11 + int sort__branch_mode = -1; /* -1 = means not set */ 11 12 12 13 enum sort_type sort__first_dimension; 13 14 ··· 95 94 return repsep_snprintf(bf, size, "%*s", width, self->thread->comm); 96 95 } 97 96 98 - struct sort_entry sort_comm = { 99 - .se_header = "Command", 100 - .se_cmp = sort__comm_cmp, 101 - .se_collapse = sort__comm_collapse, 102 - .se_snprintf = hist_entry__comm_snprintf, 103 - .se_width_idx = HISTC_COMM, 104 - }; 105 - 106 - /* --sort dso */ 107 - 108 - static int64_t 109 - sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) 97 + static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r) 110 98 { 111 - struct dso *dso_l = left->ms.map ? left->ms.map->dso : NULL; 112 - struct dso *dso_r = right->ms.map ? right->ms.map->dso : NULL; 99 + struct dso *dso_l = map_l ? map_l->dso : NULL; 100 + struct dso *dso_r = map_r ? map_r->dso : NULL; 113 101 const char *dso_name_l, *dso_name_r; 114 102 115 103 if (!dso_l || !dso_r) ··· 115 125 return strcmp(dso_name_l, dso_name_r); 116 126 } 117 127 118 - static int hist_entry__dso_snprintf(struct hist_entry *self, char *bf, 119 - size_t size, unsigned int width) 128 + struct sort_entry sort_comm = { 129 + .se_header = "Command", 130 + .se_cmp = sort__comm_cmp, 131 + .se_collapse = sort__comm_collapse, 132 + .se_snprintf = hist_entry__comm_snprintf, 133 + .se_width_idx = HISTC_COMM, 134 + }; 135 + 136 + /* --sort dso */ 137 + 138 + static int64_t 139 + sort__dso_cmp(struct hist_entry *left, struct hist_entry *right) 120 140 { 121 - if (self->ms.map && self->ms.map->dso) { 122 - const char *dso_name = !verbose ? 
self->ms.map->dso->short_name : 123 - self->ms.map->dso->long_name; 141 + return _sort__dso_cmp(left->ms.map, right->ms.map); 142 + } 143 + 144 + 145 + static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r, 146 + u64 ip_l, u64 ip_r) 147 + { 148 + if (!sym_l || !sym_r) 149 + return cmp_null(sym_l, sym_r); 150 + 151 + if (sym_l == sym_r) 152 + return 0; 153 + 154 + if (sym_l) 155 + ip_l = sym_l->start; 156 + if (sym_r) 157 + ip_r = sym_r->start; 158 + 159 + return (int64_t)(ip_r - ip_l); 160 + } 161 + 162 + static int _hist_entry__dso_snprintf(struct map *map, char *bf, 163 + size_t size, unsigned int width) 164 + { 165 + if (map && map->dso) { 166 + const char *dso_name = !verbose ? map->dso->short_name : 167 + map->dso->long_name; 124 168 return repsep_snprintf(bf, size, "%-*s", width, dso_name); 125 169 } 126 170 127 171 return repsep_snprintf(bf, size, "%-*s", width, "[unknown]"); 128 172 } 173 + 174 + static int hist_entry__dso_snprintf(struct hist_entry *self, char *bf, 175 + size_t size, unsigned int width) 176 + { 177 + return _hist_entry__dso_snprintf(self->ms.map, bf, size, width); 178 + } 179 + 180 + static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym, 181 + u64 ip, char level, char *bf, size_t size, 182 + unsigned int width __used) 183 + { 184 + size_t ret = 0; 185 + 186 + if (verbose) { 187 + char o = map ? 
dso__symtab_origin(map->dso) : '!'; 188 + ret += repsep_snprintf(bf, size, "%-#*llx %c ", 189 + BITS_PER_LONG / 4, ip, o); 190 + } 191 + 192 + ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level); 193 + if (sym) 194 + ret += repsep_snprintf(bf + ret, size - ret, "%-*s", 195 + width - ret, 196 + sym->name); 197 + else { 198 + size_t len = BITS_PER_LONG / 4; 199 + ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", 200 + len, ip); 201 + ret += repsep_snprintf(bf + ret, size - ret, "%-*s", 202 + width - ret, ""); 203 + } 204 + 205 + return ret; 206 + } 207 + 129 208 130 209 struct sort_entry sort_dso = { 131 210 .se_header = "Shared Object", ··· 203 144 .se_width_idx = HISTC_DSO, 204 145 }; 205 146 206 - /* --sort symbol */ 147 + static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf, 148 + size_t size, unsigned int width __used) 149 + { 150 + return _hist_entry__sym_snprintf(self->ms.map, self->ms.sym, self->ip, 151 + self->level, bf, size, width); 152 + } 207 153 154 + /* --sort symbol */ 208 155 static int64_t 209 156 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) 210 157 { ··· 228 163 ip_l = left->ms.sym->start; 229 164 ip_r = right->ms.sym->start; 230 165 231 - return (int64_t)(ip_r - ip_l); 232 - } 233 - 234 - static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf, 235 - size_t size, unsigned int width __used) 236 - { 237 - size_t ret = 0; 238 - 239 - if (verbose) { 240 - char o = self->ms.map ? 
dso__symtab_origin(self->ms.map->dso) : '!'; 241 - ret += repsep_snprintf(bf, size, "%-#*llx %c ", 242 - BITS_PER_LONG / 4, self->ip, o); 243 - } 244 - 245 - if (!sort_dso.elide) 246 - ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", self->level); 247 - 248 - if (self->ms.sym) 249 - ret += repsep_snprintf(bf + ret, size - ret, "%s", 250 - self->ms.sym->name); 251 - else 252 - ret += repsep_snprintf(bf + ret, size - ret, "%-#*llx", 253 - BITS_PER_LONG / 4, self->ip); 254 - 255 - return ret; 166 + return _sort__sym_cmp(left->ms.sym, right->ms.sym, ip_l, ip_r); 256 167 } 257 168 258 169 struct sort_entry sort_sym = { ··· 287 246 .se_width_idx = HISTC_CPU, 288 247 }; 289 248 249 + static int64_t 250 + sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right) 251 + { 252 + return _sort__dso_cmp(left->branch_info->from.map, 253 + right->branch_info->from.map); 254 + } 255 + 256 + static int hist_entry__dso_from_snprintf(struct hist_entry *self, char *bf, 257 + size_t size, unsigned int width) 258 + { 259 + return _hist_entry__dso_snprintf(self->branch_info->from.map, 260 + bf, size, width); 261 + } 262 + 263 + struct sort_entry sort_dso_from = { 264 + .se_header = "Source Shared Object", 265 + .se_cmp = sort__dso_from_cmp, 266 + .se_snprintf = hist_entry__dso_from_snprintf, 267 + .se_width_idx = HISTC_DSO_FROM, 268 + }; 269 + 270 + static int64_t 271 + sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right) 272 + { 273 + return _sort__dso_cmp(left->branch_info->to.map, 274 + right->branch_info->to.map); 275 + } 276 + 277 + static int hist_entry__dso_to_snprintf(struct hist_entry *self, char *bf, 278 + size_t size, unsigned int width) 279 + { 280 + return _hist_entry__dso_snprintf(self->branch_info->to.map, 281 + bf, size, width); 282 + } 283 + 284 + static int64_t 285 + sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right) 286 + { 287 + struct addr_map_symbol *from_l = &left->branch_info->from; 288 + struct addr_map_symbol 
*from_r = &right->branch_info->from; 289 + 290 + if (!from_l->sym && !from_r->sym) 291 + return right->level - left->level; 292 + 293 + return _sort__sym_cmp(from_l->sym, from_r->sym, from_l->addr, 294 + from_r->addr); 295 + } 296 + 297 + static int64_t 298 + sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right) 299 + { 300 + struct addr_map_symbol *to_l = &left->branch_info->to; 301 + struct addr_map_symbol *to_r = &right->branch_info->to; 302 + 303 + if (!to_l->sym && !to_r->sym) 304 + return right->level - left->level; 305 + 306 + return _sort__sym_cmp(to_l->sym, to_r->sym, to_l->addr, to_r->addr); 307 + } 308 + 309 + static int hist_entry__sym_from_snprintf(struct hist_entry *self, char *bf, 310 + size_t size, unsigned int width __used) 311 + { 312 + struct addr_map_symbol *from = &self->branch_info->from; 313 + return _hist_entry__sym_snprintf(from->map, from->sym, from->addr, 314 + self->level, bf, size, width); 315 + 316 + } 317 + 318 + static int hist_entry__sym_to_snprintf(struct hist_entry *self, char *bf, 319 + size_t size, unsigned int width __used) 320 + { 321 + struct addr_map_symbol *to = &self->branch_info->to; 322 + return _hist_entry__sym_snprintf(to->map, to->sym, to->addr, 323 + self->level, bf, size, width); 324 + 325 + } 326 + 327 + struct sort_entry sort_dso_to = { 328 + .se_header = "Target Shared Object", 329 + .se_cmp = sort__dso_to_cmp, 330 + .se_snprintf = hist_entry__dso_to_snprintf, 331 + .se_width_idx = HISTC_DSO_TO, 332 + }; 333 + 334 + struct sort_entry sort_sym_from = { 335 + .se_header = "Source Symbol", 336 + .se_cmp = sort__sym_from_cmp, 337 + .se_snprintf = hist_entry__sym_from_snprintf, 338 + .se_width_idx = HISTC_SYMBOL_FROM, 339 + }; 340 + 341 + struct sort_entry sort_sym_to = { 342 + .se_header = "Target Symbol", 343 + .se_cmp = sort__sym_to_cmp, 344 + .se_snprintf = hist_entry__sym_to_snprintf, 345 + .se_width_idx = HISTC_SYMBOL_TO, 346 + }; 347 + 348 + static int64_t 349 + sort__mispredict_cmp(struct 
hist_entry *left, struct hist_entry *right) 350 + { 351 + const unsigned char mp = left->branch_info->flags.mispred != 352 + right->branch_info->flags.mispred; 353 + const unsigned char p = left->branch_info->flags.predicted != 354 + right->branch_info->flags.predicted; 355 + 356 + return mp || p; 357 + } 358 + 359 + static int hist_entry__mispredict_snprintf(struct hist_entry *self, char *bf, 360 + size_t size, unsigned int width){ 361 + static const char *out = "N/A"; 362 + 363 + if (self->branch_info->flags.predicted) 364 + out = "N"; 365 + else if (self->branch_info->flags.mispred) 366 + out = "Y"; 367 + 368 + return repsep_snprintf(bf, size, "%-*s", width, out); 369 + } 370 + 371 + struct sort_entry sort_mispredict = { 372 + .se_header = "Branch Mispredicted", 373 + .se_cmp = sort__mispredict_cmp, 374 + .se_snprintf = hist_entry__mispredict_snprintf, 375 + .se_width_idx = HISTC_MISPREDICT, 376 + }; 377 + 290 378 struct sort_dimension { 291 379 const char *name; 292 380 struct sort_entry *entry; 293 381 int taken; 294 382 }; 295 383 384 + #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) } 385 + 296 386 static struct sort_dimension sort_dimensions[] = { 297 - { .name = "pid", .entry = &sort_thread, }, 298 - { .name = "comm", .entry = &sort_comm, }, 299 - { .name = "dso", .entry = &sort_dso, }, 300 - { .name = "symbol", .entry = &sort_sym, }, 301 - { .name = "parent", .entry = &sort_parent, }, 302 - { .name = "cpu", .entry = &sort_cpu, }, 387 + DIM(SORT_PID, "pid", sort_thread), 388 + DIM(SORT_COMM, "comm", sort_comm), 389 + DIM(SORT_DSO, "dso", sort_dso), 390 + DIM(SORT_DSO_FROM, "dso_from", sort_dso_from), 391 + DIM(SORT_DSO_TO, "dso_to", sort_dso_to), 392 + DIM(SORT_SYM, "symbol", sort_sym), 393 + DIM(SORT_SYM_TO, "symbol_from", sort_sym_from), 394 + DIM(SORT_SYM_FROM, "symbol_to", sort_sym_to), 395 + DIM(SORT_PARENT, "parent", sort_parent), 396 + DIM(SORT_CPU, "cpu", sort_cpu), 397 + DIM(SORT_MISPREDICT, "mispredict", sort_mispredict), 303 398 }; 
304 399 305 400 int sort_dimension__add(const char *tok) ··· 447 270 448 271 if (strncasecmp(tok, sd->name, strlen(tok))) 449 272 continue; 450 - 451 273 if (sd->entry == &sort_parent) { 452 274 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED); 453 275 if (ret) { ··· 478 302 sort__first_dimension = SORT_PARENT; 479 303 else if (!strcmp(sd->name, "cpu")) 480 304 sort__first_dimension = SORT_CPU; 305 + else if (!strcmp(sd->name, "symbol_from")) 306 + sort__first_dimension = SORT_SYM_FROM; 307 + else if (!strcmp(sd->name, "symbol_to")) 308 + sort__first_dimension = SORT_SYM_TO; 309 + else if (!strcmp(sd->name, "dso_from")) 310 + sort__first_dimension = SORT_DSO_FROM; 311 + else if (!strcmp(sd->name, "dso_to")) 312 + sort__first_dimension = SORT_DSO_TO; 313 + else if (!strcmp(sd->name, "mispredict")) 314 + sort__first_dimension = SORT_MISPREDICT; 481 315 } 482 316 483 317 list_add_tail(&sd->entry->list, &hist_entry__sort_list); ··· 495 309 496 310 return 0; 497 311 } 498 - 499 312 return -ESRCH; 500 313 } 501 314
+11
tools/perf/util/sort.h
··· 31 31 extern const char default_sort_order[]; 32 32 extern int sort__need_collapse; 33 33 extern int sort__has_parent; 34 + extern int sort__branch_mode; 34 35 extern char *field_sep; 35 36 extern struct sort_entry sort_comm; 36 37 extern struct sort_entry sort_dso; 37 38 extern struct sort_entry sort_sym; 38 39 extern struct sort_entry sort_parent; 40 + extern struct sort_entry sort_dso_from; 41 + extern struct sort_entry sort_dso_to; 42 + extern struct sort_entry sort_sym_from; 43 + extern struct sort_entry sort_sym_to; 39 44 extern enum sort_type sort__first_dimension; 40 45 41 46 /** ··· 77 72 struct hist_entry *pair; 78 73 struct rb_root sorted_chain; 79 74 }; 75 + struct branch_info *branch_info; 80 76 struct callchain_root callchain[0]; 81 77 }; 82 78 ··· 88 82 SORT_SYM, 89 83 SORT_PARENT, 90 84 SORT_CPU, 85 + SORT_DSO_FROM, 86 + SORT_DSO_TO, 87 + SORT_SYM_FROM, 88 + SORT_SYM_TO, 89 + SORT_MISPREDICT, 91 90 }; 92 91 93 92 /*
+19 -1
tools/perf/util/symbol.h
··· 5 5 #include <stdbool.h> 6 6 #include <stdint.h> 7 7 #include "map.h" 8 + #include "../perf.h" 8 9 #include <linux/list.h> 9 10 #include <linux/rbtree.h> 10 11 #include <stdio.h> ··· 97 96 *col_width_list_str; 98 97 struct strlist *dso_list, 99 98 *comm_list, 100 - *sym_list; 99 + *sym_list, 100 + *dso_from_list, 101 + *dso_to_list, 102 + *sym_from_list, 103 + *sym_to_list; 101 104 const char *symfs; 102 105 }; 103 106 ··· 123 118 struct symbol *sym; 124 119 bool unfolded; 125 120 bool has_children; 121 + }; 122 + 123 + struct addr_map_symbol { 124 + struct map *map; 125 + struct symbol *sym; 126 + u64 addr; 127 + u64 al_addr; 128 + }; 129 + 130 + struct branch_info { 131 + struct addr_map_symbol from; 132 + struct addr_map_symbol to; 133 + struct branch_flags flags; 126 134 }; 127 135 128 136 struct addr_location {
+83 -19
tools/perf/util/ui/browsers/hists.c
··· 805 805 self->hists = hists; 806 806 self->b.refresh = hist_browser__refresh; 807 807 self->b.seek = ui_browser__hists_seek; 808 - self->b.use_navkeypressed = true, 809 - self->has_symbols = sort_sym.list.next != NULL; 808 + self->b.use_navkeypressed = true; 809 + if (sort__branch_mode == 1) 810 + self->has_symbols = sort_sym_from.list.next != NULL; 811 + else 812 + self->has_symbols = sort_sym.list.next != NULL; 810 813 } 811 814 812 815 return self; ··· 856 853 return printed; 857 854 } 858 855 856 + static inline void free_popup_options(char **options, int n) 857 + { 858 + int i; 859 + 860 + for (i = 0; i < n; ++i) { 861 + free(options[i]); 862 + options[i] = NULL; 863 + } 864 + } 865 + 859 866 static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, 860 867 const char *helpline, const char *ev_name, 861 868 bool left_exits, ··· 874 861 { 875 862 struct hists *self = &evsel->hists; 876 863 struct hist_browser *browser = hist_browser__new(self); 864 + struct branch_info *bi; 877 865 struct pstack *fstack; 866 + char *options[16]; 867 + int nr_options = 0; 878 868 int key = -1; 879 869 880 870 if (browser == NULL) ··· 889 873 890 874 ui_helpline__push(helpline); 891 875 876 + memset(options, 0, sizeof(options)); 877 + 892 878 while (1) { 893 879 const struct thread *thread = NULL; 894 880 const struct dso *dso = NULL; 895 - char *options[16]; 896 - int nr_options = 0, choice = 0, i, 881 + int choice = 0, 897 882 annotate = -2, zoom_dso = -2, zoom_thread = -2, 898 - browse_map = -2; 883 + annotate_f = -2, annotate_t = -2, browse_map = -2; 884 + 885 + nr_options = 0; 899 886 900 887 key = hist_browser__run(browser, ev_name, timer, arg, delay_secs); 901 888 ··· 906 887 thread = hist_browser__selected_thread(browser); 907 888 dso = browser->selection->map ? 
browser->selection->map->dso : NULL; 908 889 } 909 - 910 890 switch (key) { 911 891 case K_TAB: 912 892 case K_UNTAB: ··· 920 902 if (!browser->has_symbols) { 921 903 ui_browser__warning(&browser->b, delay_secs * 2, 922 904 "Annotation is only available for symbolic views, " 923 - "include \"sym\" in --sort to use it."); 905 + "include \"sym*\" in --sort to use it."); 924 906 continue; 925 907 } 926 908 ··· 990 972 if (!browser->has_symbols) 991 973 goto add_exit_option; 992 974 993 - if (browser->selection != NULL && 994 - browser->selection->sym != NULL && 995 - !browser->selection->map->dso->annotate_warned && 996 - asprintf(&options[nr_options], "Annotate %s", 997 - browser->selection->sym->name) > 0) 998 - annotate = nr_options++; 975 + if (sort__branch_mode == 1) { 976 + bi = browser->he_selection->branch_info; 977 + if (browser->selection != NULL && 978 + bi && 979 + bi->from.sym != NULL && 980 + !bi->from.map->dso->annotate_warned && 981 + asprintf(&options[nr_options], "Annotate %s", 982 + bi->from.sym->name) > 0) 983 + annotate_f = nr_options++; 984 + 985 + if (browser->selection != NULL && 986 + bi && 987 + bi->to.sym != NULL && 988 + !bi->to.map->dso->annotate_warned && 989 + (bi->to.sym != bi->from.sym || 990 + bi->to.map->dso != bi->from.map->dso) && 991 + asprintf(&options[nr_options], "Annotate %s", 992 + bi->to.sym->name) > 0) 993 + annotate_t = nr_options++; 994 + } else { 995 + 996 + if (browser->selection != NULL && 997 + browser->selection->sym != NULL && 998 + !browser->selection->map->dso->annotate_warned && 999 + asprintf(&options[nr_options], "Annotate %s", 1000 + browser->selection->sym->name) > 0) 1001 + annotate = nr_options++; 1002 + } 999 1003 1000 1004 if (thread != NULL && 1001 1005 asprintf(&options[nr_options], "Zoom %s %s(%d) thread", ··· 1038 998 browse_map = nr_options++; 1039 999 add_exit_option: 1040 1000 options[nr_options++] = (char *)"Exit"; 1041 - 1001 + retry_popup_menu: 1042 1002 choice = ui__popup_menu(nr_options, 
options); 1043 - 1044 - for (i = 0; i < nr_options - 1; ++i) 1045 - free(options[i]); 1046 1003 1047 1004 if (choice == nr_options - 1) 1048 1005 break; 1049 1006 1050 - if (choice == -1) 1007 + if (choice == -1) { 1008 + free_popup_options(options, nr_options - 1); 1051 1009 continue; 1010 + } 1052 1011 1053 - if (choice == annotate) { 1012 + if (choice == annotate || choice == annotate_t || choice == annotate_f) { 1054 1013 struct hist_entry *he; 1055 1014 int err; 1056 1015 do_annotate: 1057 1016 he = hist_browser__selected_entry(browser); 1058 1017 if (he == NULL) 1059 1018 continue; 1019 + 1020 + /* 1021 + * we stash the branch_info symbol + map into the 1022 + * the ms so we don't have to rewrite all the annotation 1023 + * code to use branch_info. 1024 + * in branch mode, the ms struct is not used 1025 + */ 1026 + if (choice == annotate_f) { 1027 + he->ms.sym = he->branch_info->from.sym; 1028 + he->ms.map = he->branch_info->from.map; 1029 + } else if (choice == annotate_t) { 1030 + he->ms.sym = he->branch_info->to.sym; 1031 + he->ms.map = he->branch_info->to.map; 1032 + } 1033 + 1060 1034 /* 1061 1035 * Don't let this be freed, say, by hists__decay_entry. 
1062 1036 */ ··· 1078 1024 err = hist_entry__tui_annotate(he, evsel->idx, 1079 1025 timer, arg, delay_secs); 1080 1026 he->used = false; 1027 + /* 1028 + * offer option to annotate the other branch source or target 1029 + * (if they exists) when returning from annotate 1030 + */ 1031 + if ((err == 'q' || err == CTRL('c')) 1032 + && annotate_t != -2 && annotate_f != -2) 1033 + goto retry_popup_menu; 1034 + 1081 1035 ui_browser__update_nr_entries(&browser->b, browser->hists->nr_entries); 1082 1036 if (err) 1083 1037 ui_browser__handle_resize(&browser->b); 1038 + 1084 1039 } else if (choice == browse_map) 1085 1040 map__browse(browser->selection->map); 1086 1041 else if (choice == zoom_dso) { ··· 1135 1072 pstack__delete(fstack); 1136 1073 out: 1137 1074 hist_browser__delete(browser); 1075 + free_popup_options(options, nr_options - 1); 1138 1076 return key; 1139 1077 } 1140 1078