Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf updates from Ingo Molnar:
"Mostly tooling and PMU driver fixes, but also a number of late updates
such as the reworking of the call-chain size limiting logic to make
call-graph recording more robust, plus tooling side changes for the
new 'backwards ring-buffer' extension to the perf ring-buffer"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (34 commits)
perf record: Read from backward ring buffer
perf record: Rename variable to make code clear
perf record: Prevent reading invalid data in record__mmap_read
perf evlist: Add API to pause/resume
perf trace: Use the ptr->name beautifier as default for "filename" args
perf trace: Use the fd->name beautifier as default for "fd" args
perf report: Add srcline_from/to branch sort keys
perf evsel: Record fd into perf_mmap
perf evsel: Add overwrite attribute and check write_backward
perf tools: Set buildid dir under symfs when --symfs is provided
perf trace: Only auto set call-graph to "dwarf" when syscalls are being traced
perf annotate: Sort list of recognised instructions
perf annotate: Fix identification of ARM blt and bls instructions
perf tools: Fix usage of max_stack sysctl
perf callchain: Stop validating callchains by the max_stack sysctl
perf trace: Fix exit_group() formatting
perf top: Use machine->kptr_restrict_warned
perf trace: Warn when trying to resolve kernel addresses with kptr_restrict=1
perf machine: Do not bail out if not managing to read ref reloc symbol
perf/x86/intel/p4: Trivial indentation fix, remove space
...

+619 -331
+14
Documentation/sysctl/kernel.txt
··· 61 61 - perf_cpu_time_max_percent 62 62 - perf_event_paranoid 63 63 - perf_event_max_stack 64 + - perf_event_max_contexts_per_stack 64 65 - pid_max 65 66 - powersave-nap [ PPC only ] 66 67 - printk ··· 666 665 enabled, otherwise writing to this file will return -EBUSY. 667 666 668 667 The default value is 127. 668 + 669 + ============================================================== 670 + 671 + perf_event_max_contexts_per_stack: 672 + 673 + Controls maximum number of stack frame context entries for 674 + (attr.sample_type & PERF_SAMPLE_CALLCHAIN) configured events, for 675 + instance, when using 'perf record -g' or 'perf trace --call-graph fp'. 676 + 677 + This can only be done when no events are in use that have callchains 678 + enabled, otherwise writing to this file will return -EBUSY. 679 + 680 + The default value is 8. 669 681 670 682 ============================================================== 671 683
+1
MAINTAINERS
··· 8881 8881 F: arch/*/kernel/*/*/perf_event*.c 8882 8882 F: arch/*/include/asm/perf_event.h 8883 8883 F: arch/*/kernel/perf_callchain.c 8884 + F: arch/*/events/* 8884 8885 F: tools/perf/ 8885 8886 8886 8887 PERSONALITY HANDLING
+3 -3
arch/arc/kernel/perf_event.c
··· 48 48 static int callchain_trace(unsigned int addr, void *data) 49 49 { 50 50 struct arc_callchain_trace *ctrl = data; 51 - struct perf_callchain_entry *entry = ctrl->perf_stuff; 51 + struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff; 52 52 perf_callchain_store(entry, addr); 53 53 54 54 if (ctrl->depth++ < 3) ··· 58 58 } 59 59 60 60 void 61 - perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) 61 + perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) 62 62 { 63 63 struct arc_callchain_trace ctrl = { 64 64 .depth = 0, ··· 69 69 } 70 70 71 71 void 72 - perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) 72 + perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) 73 73 { 74 74 /* 75 75 * User stack can't be unwound trivially with kernel dwarf unwinder
+5 -5
arch/arm/kernel/perf_callchain.c
··· 31 31 */ 32 32 static struct frame_tail __user * 33 33 user_backtrace(struct frame_tail __user *tail, 34 - struct perf_callchain_entry *entry) 34 + struct perf_callchain_entry_ctx *entry) 35 35 { 36 36 struct frame_tail buftail; 37 37 unsigned long err; ··· 59 59 } 60 60 61 61 void 62 - perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) 62 + perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) 63 63 { 64 64 struct frame_tail __user *tail; 65 65 ··· 75 75 76 76 tail = (struct frame_tail __user *)regs->ARM_fp - 1; 77 77 78 - while ((entry->nr < sysctl_perf_event_max_stack) && 78 + while ((entry->nr < entry->max_stack) && 79 79 tail && !((unsigned long)tail & 0x3)) 80 80 tail = user_backtrace(tail, entry); 81 81 } ··· 89 89 callchain_trace(struct stackframe *fr, 90 90 void *data) 91 91 { 92 - struct perf_callchain_entry *entry = data; 92 + struct perf_callchain_entry_ctx *entry = data; 93 93 perf_callchain_store(entry, fr->pc); 94 94 return 0; 95 95 } 96 96 97 97 void 98 - perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) 98 + perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) 99 99 { 100 100 struct stackframe fr; 101 101
+7 -7
arch/arm64/kernel/perf_callchain.c
··· 31 31 */ 32 32 static struct frame_tail __user * 33 33 user_backtrace(struct frame_tail __user *tail, 34 - struct perf_callchain_entry *entry) 34 + struct perf_callchain_entry_ctx *entry) 35 35 { 36 36 struct frame_tail buftail; 37 37 unsigned long err; ··· 76 76 77 77 static struct compat_frame_tail __user * 78 78 compat_user_backtrace(struct compat_frame_tail __user *tail, 79 - struct perf_callchain_entry *entry) 79 + struct perf_callchain_entry_ctx *entry) 80 80 { 81 81 struct compat_frame_tail buftail; 82 82 unsigned long err; ··· 106 106 } 107 107 #endif /* CONFIG_COMPAT */ 108 108 109 - void perf_callchain_user(struct perf_callchain_entry *entry, 109 + void perf_callchain_user(struct perf_callchain_entry_ctx *entry, 110 110 struct pt_regs *regs) 111 111 { 112 112 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { ··· 122 122 123 123 tail = (struct frame_tail __user *)regs->regs[29]; 124 124 125 - while (entry->nr < sysctl_perf_event_max_stack && 125 + while (entry->nr < entry->max_stack && 126 126 tail && !((unsigned long)tail & 0xf)) 127 127 tail = user_backtrace(tail, entry); 128 128 } else { ··· 132 132 133 133 tail = (struct compat_frame_tail __user *)regs->compat_fp - 1; 134 134 135 - while ((entry->nr < sysctl_perf_event_max_stack) && 135 + while ((entry->nr < entry->max_stack) && 136 136 tail && !((unsigned long)tail & 0x3)) 137 137 tail = compat_user_backtrace(tail, entry); 138 138 #endif ··· 146 146 */ 147 147 static int callchain_trace(struct stackframe *frame, void *data) 148 148 { 149 - struct perf_callchain_entry *entry = data; 149 + struct perf_callchain_entry_ctx *entry = data; 150 150 perf_callchain_store(entry, frame->pc); 151 151 return 0; 152 152 } 153 153 154 - void perf_callchain_kernel(struct perf_callchain_entry *entry, 154 + void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, 155 155 struct pt_regs *regs) 156 156 { 157 157 struct stackframe frame;
+5 -5
arch/metag/kernel/perf_callchain.c
··· 29 29 30 30 static struct metag_frame __user * 31 31 user_backtrace(struct metag_frame __user *user_frame, 32 - struct perf_callchain_entry *entry) 32 + struct perf_callchain_entry_ctx *entry) 33 33 { 34 34 struct metag_frame frame; 35 35 unsigned long calladdr; ··· 56 56 } 57 57 58 58 void 59 - perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) 59 + perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) 60 60 { 61 61 unsigned long sp = regs->ctx.AX[0].U0; 62 62 struct metag_frame __user *frame; ··· 65 65 66 66 --frame; 67 67 68 - while ((entry->nr < sysctl_perf_event_max_stack) && frame) 68 + while ((entry->nr < entry->max_stack) && frame) 69 69 frame = user_backtrace(frame, entry); 70 70 } 71 71 ··· 78 78 callchain_trace(struct stackframe *fr, 79 79 void *data) 80 80 { 81 - struct perf_callchain_entry *entry = data; 81 + struct perf_callchain_entry_ctx *entry = data; 82 82 perf_callchain_store(entry, fr->pc); 83 83 return 0; 84 84 } 85 85 86 86 void 87 - perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) 87 + perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) 88 88 { 89 89 struct stackframe fr; 90 90
+6 -6
arch/mips/kernel/perf_event.c
··· 25 25 * the user stack callchains, we will add it here. 26 26 */ 27 27 28 - static void save_raw_perf_callchain(struct perf_callchain_entry *entry, 29 - unsigned long reg29) 28 + static void save_raw_perf_callchain(struct perf_callchain_entry_ctx *entry, 29 + unsigned long reg29) 30 30 { 31 31 unsigned long *sp = (unsigned long *)reg29; 32 32 unsigned long addr; ··· 35 35 addr = *sp++; 36 36 if (__kernel_text_address(addr)) { 37 37 perf_callchain_store(entry, addr); 38 - if (entry->nr >= sysctl_perf_event_max_stack) 38 + if (entry->nr >= entry->max_stack) 39 39 break; 40 40 } 41 41 } 42 42 } 43 43 44 - void perf_callchain_kernel(struct perf_callchain_entry *entry, 45 - struct pt_regs *regs) 44 + void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, 45 + struct pt_regs *regs) 46 46 { 47 47 unsigned long sp = regs->regs[29]; 48 48 #ifdef CONFIG_KALLSYMS ··· 59 59 } 60 60 do { 61 61 perf_callchain_store(entry, pc); 62 - if (entry->nr >= sysctl_perf_event_max_stack) 62 + if (entry->nr >= entry->max_stack) 63 63 break; 64 64 pc = unwind_stack(current, &sp, pc, &ra); 65 65 } while (pc);
+10 -10
arch/powerpc/perf/callchain.c
··· 47 47 } 48 48 49 49 void 50 - perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) 50 + perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) 51 51 { 52 52 unsigned long sp, next_sp; 53 53 unsigned long next_ip; ··· 76 76 next_ip = regs->nip; 77 77 lr = regs->link; 78 78 level = 0; 79 - perf_callchain_store(entry, PERF_CONTEXT_KERNEL); 79 + perf_callchain_store_context(entry, PERF_CONTEXT_KERNEL); 80 80 81 81 } else { 82 82 if (level == 0) ··· 232 232 puc == (unsigned long) &sf->uc; 233 233 } 234 234 235 - static void perf_callchain_user_64(struct perf_callchain_entry *entry, 235 + static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry, 236 236 struct pt_regs *regs) 237 237 { 238 238 unsigned long sp, next_sp; ··· 247 247 sp = regs->gpr[1]; 248 248 perf_callchain_store(entry, next_ip); 249 249 250 - while (entry->nr < sysctl_perf_event_max_stack) { 250 + while (entry->nr < entry->max_stack) { 251 251 fp = (unsigned long __user *) sp; 252 252 if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp)) 253 253 return; ··· 274 274 read_user_stack_64(&uregs[PT_R1], &sp)) 275 275 return; 276 276 level = 0; 277 - perf_callchain_store(entry, PERF_CONTEXT_USER); 277 + perf_callchain_store_context(entry, PERF_CONTEXT_USER); 278 278 perf_callchain_store(entry, next_ip); 279 279 continue; 280 280 } ··· 319 319 return rc; 320 320 } 321 321 322 - static inline void perf_callchain_user_64(struct perf_callchain_entry *entry, 322 + static inline void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry, 323 323 struct pt_regs *regs) 324 324 { 325 325 } ··· 439 439 return mctx->mc_gregs; 440 440 } 441 441 442 - static void perf_callchain_user_32(struct perf_callchain_entry *entry, 442 + static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry, 443 443 struct pt_regs *regs) 444 444 { 445 445 unsigned int sp, next_sp; ··· 453 453 sp = regs->gpr[1]; 454 454 
perf_callchain_store(entry, next_ip); 455 455 456 - while (entry->nr < sysctl_perf_event_max_stack) { 456 + while (entry->nr < entry->max_stack) { 457 457 fp = (unsigned int __user *) (unsigned long) sp; 458 458 if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp)) 459 459 return; ··· 473 473 read_user_stack_32(&uregs[PT_R1], &sp)) 474 474 return; 475 475 level = 0; 476 - perf_callchain_store(entry, PERF_CONTEXT_USER); 476 + perf_callchain_store_context(entry, PERF_CONTEXT_USER); 477 477 perf_callchain_store(entry, next_ip); 478 478 continue; 479 479 } ··· 487 487 } 488 488 489 489 void 490 - perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) 490 + perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) 491 491 { 492 492 if (current_is_64bit()) 493 493 perf_callchain_user_64(entry, regs);
+2 -2
arch/s390/kernel/perf_event.c
··· 224 224 225 225 static int __perf_callchain_kernel(void *data, unsigned long address) 226 226 { 227 - struct perf_callchain_entry *entry = data; 227 + struct perf_callchain_entry_ctx *entry = data; 228 228 229 229 perf_callchain_store(entry, address); 230 230 return 0; 231 231 } 232 232 233 - void perf_callchain_kernel(struct perf_callchain_entry *entry, 233 + void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, 234 234 struct pt_regs *regs) 235 235 { 236 236 if (user_mode(regs))
+2 -2
arch/sh/kernel/perf_callchain.c
··· 21 21 22 22 static void callchain_address(void *data, unsigned long addr, int reliable) 23 23 { 24 - struct perf_callchain_entry *entry = data; 24 + struct perf_callchain_entry_ctx *entry = data; 25 25 26 26 if (reliable) 27 27 perf_callchain_store(entry, addr); ··· 33 33 }; 34 34 35 35 void 36 - perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) 36 + perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) 37 37 { 38 38 perf_callchain_store(entry, regs->pc); 39 39
+7 -7
arch/sparc/kernel/perf_event.c
··· 1711 1711 } 1712 1712 pure_initcall(init_hw_perf_events); 1713 1713 1714 - void perf_callchain_kernel(struct perf_callchain_entry *entry, 1714 + void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, 1715 1715 struct pt_regs *regs) 1716 1716 { 1717 1717 unsigned long ksp, fp; ··· 1756 1756 } 1757 1757 } 1758 1758 #endif 1759 - } while (entry->nr < sysctl_perf_event_max_stack); 1759 + } while (entry->nr < entry->max_stack); 1760 1760 } 1761 1761 1762 1762 static inline int ··· 1769 1769 return (__range_not_ok(fp, size, TASK_SIZE) == 0); 1770 1770 } 1771 1771 1772 - static void perf_callchain_user_64(struct perf_callchain_entry *entry, 1772 + static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry, 1773 1773 struct pt_regs *regs) 1774 1774 { 1775 1775 unsigned long ufp; ··· 1790 1790 pc = sf.callers_pc; 1791 1791 ufp = (unsigned long)sf.fp + STACK_BIAS; 1792 1792 perf_callchain_store(entry, pc); 1793 - } while (entry->nr < sysctl_perf_event_max_stack); 1793 + } while (entry->nr < entry->max_stack); 1794 1794 } 1795 1795 1796 - static void perf_callchain_user_32(struct perf_callchain_entry *entry, 1796 + static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry, 1797 1797 struct pt_regs *regs) 1798 1798 { 1799 1799 unsigned long ufp; ··· 1822 1822 ufp = (unsigned long)sf.fp; 1823 1823 } 1824 1824 perf_callchain_store(entry, pc); 1825 - } while (entry->nr < sysctl_perf_event_max_stack); 1825 + } while (entry->nr < entry->max_stack); 1826 1826 } 1827 1827 1828 1828 void 1829 - perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) 1829 + perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) 1830 1830 { 1831 1831 u64 saved_fault_address = current_thread_info()->fault_address; 1832 1832 u8 saved_fault_code = get_thread_fault_code();
+3 -3
arch/tile/kernel/perf_event.c
··· 941 941 /* 942 942 * Tile specific backtracing code for perf_events. 943 943 */ 944 - static inline void perf_callchain(struct perf_callchain_entry *entry, 944 + static inline void perf_callchain(struct perf_callchain_entry_ctx *entry, 945 945 struct pt_regs *regs) 946 946 { 947 947 struct KBacktraceIterator kbt; ··· 992 992 } 993 993 } 994 994 995 - void perf_callchain_user(struct perf_callchain_entry *entry, 995 + void perf_callchain_user(struct perf_callchain_entry_ctx *entry, 996 996 struct pt_regs *regs) 997 997 { 998 998 perf_callchain(entry, regs); 999 999 } 1000 1000 1001 - void perf_callchain_kernel(struct perf_callchain_entry *entry, 1001 + void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, 1002 1002 struct pt_regs *regs) 1003 1003 { 1004 1004 perf_callchain(entry, regs);
+7 -7
arch/x86/events/core.c
··· 2202 2202 2203 2203 static int backtrace_address(void *data, unsigned long addr, int reliable) 2204 2204 { 2205 - struct perf_callchain_entry *entry = data; 2205 + struct perf_callchain_entry_ctx *entry = data; 2206 2206 2207 2207 return perf_callchain_store(entry, addr); 2208 2208 } ··· 2214 2214 }; 2215 2215 2216 2216 void 2217 - perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) 2217 + perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) 2218 2218 { 2219 2219 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { 2220 2220 /* TODO: We don't support guest os callchain now */ ··· 2268 2268 #include <asm/compat.h> 2269 2269 2270 2270 static inline int 2271 - perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry) 2271 + perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry) 2272 2272 { 2273 2273 /* 32-bit process in 64-bit kernel. */ 2274 2274 unsigned long ss_base, cs_base; ··· 2283 2283 2284 2284 fp = compat_ptr(ss_base + regs->bp); 2285 2285 pagefault_disable(); 2286 - while (entry->nr < sysctl_perf_event_max_stack) { 2286 + while (entry->nr < entry->max_stack) { 2287 2287 unsigned long bytes; 2288 2288 frame.next_frame = 0; 2289 2289 frame.return_address = 0; ··· 2309 2309 } 2310 2310 #else 2311 2311 static inline int 2312 - perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry) 2312 + perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry) 2313 2313 { 2314 2314 return 0; 2315 2315 } 2316 2316 #endif 2317 2317 2318 2318 void 2319 - perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) 2319 + perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) 2320 2320 { 2321 2321 struct stack_frame frame; 2322 2322 const void __user *fp; ··· 2343 2343 return; 2344 2344 2345 2345 pagefault_disable(); 2346 - while (entry->nr < sysctl_perf_event_max_stack) { 
2346 + while (entry->nr < entry->max_stack) { 2347 2347 unsigned long bytes; 2348 2348 frame.next_frame = NULL; 2349 2349 frame.return_address = 0;
+1 -1
arch/x86/events/intel/p4.c
··· 826 826 * Clear bits we reserve to be managed by kernel itself 827 827 * and never allowed from a user space 828 828 */ 829 - event->attr.config &= P4_CONFIG_MASK; 829 + event->attr.config &= P4_CONFIG_MASK; 830 830 831 831 rc = p4_validate_raw_event(event); 832 832 if (rc)
+1 -1
arch/x86/events/intel/uncore.c
··· 891 891 return -ENODEV; 892 892 893 893 pkg = topology_phys_to_logical_pkg(phys_id); 894 - if (WARN_ON_ONCE(pkg < 0)) 894 + if (pkg < 0) 895 895 return -EINVAL; 896 896 897 897 if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
+5 -5
arch/xtensa/kernel/perf_event.c
··· 323 323 324 324 static int callchain_trace(struct stackframe *frame, void *data) 325 325 { 326 - struct perf_callchain_entry *entry = data; 326 + struct perf_callchain_entry_ctx *entry = data; 327 327 328 328 perf_callchain_store(entry, frame->pc); 329 329 return 0; 330 330 } 331 331 332 - void perf_callchain_kernel(struct perf_callchain_entry *entry, 332 + void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, 333 333 struct pt_regs *regs) 334 334 { 335 - xtensa_backtrace_kernel(regs, sysctl_perf_event_max_stack, 335 + xtensa_backtrace_kernel(regs, entry->max_stack, 336 336 callchain_trace, NULL, entry); 337 337 } 338 338 339 - void perf_callchain_user(struct perf_callchain_entry *entry, 339 + void perf_callchain_user(struct perf_callchain_entry_ctx *entry, 340 340 struct pt_regs *regs) 341 341 { 342 - xtensa_backtrace_user(regs, sysctl_perf_event_max_stack, 342 + xtensa_backtrace_user(regs, entry->max_stack, 343 343 callchain_trace, entry); 344 344 } 345 345
+29 -5
include/linux/perf_event.h
··· 61 61 __u64 ip[0]; /* /proc/sys/kernel/perf_event_max_stack */ 62 62 }; 63 63 64 + struct perf_callchain_entry_ctx { 65 + struct perf_callchain_entry *entry; 66 + u32 max_stack; 67 + u32 nr; 68 + short contexts; 69 + bool contexts_maxed; 70 + }; 71 + 64 72 struct perf_raw_record { 65 73 u32 size; 66 74 void *data; ··· 1069 1061 /* Callchains */ 1070 1062 DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry); 1071 1063 1072 - extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs); 1073 - extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs); 1064 + extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs); 1065 + extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs); 1074 1066 extern struct perf_callchain_entry * 1075 1067 get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, 1076 - bool crosstask, bool add_mark); 1068 + u32 max_stack, bool crosstask, bool add_mark); 1077 1069 extern int get_callchain_buffers(void); 1078 1070 extern void put_callchain_buffers(void); 1079 1071 1080 1072 extern int sysctl_perf_event_max_stack; 1073 + extern int sysctl_perf_event_max_contexts_per_stack; 1081 1074 1082 - static inline int perf_callchain_store(struct perf_callchain_entry *entry, u64 ip) 1075 + static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip) 1083 1076 { 1084 - if (entry->nr < sysctl_perf_event_max_stack) { 1077 + if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) { 1078 + struct perf_callchain_entry *entry = ctx->entry; 1085 1079 entry->ip[entry->nr++] = ip; 1080 + ++ctx->contexts; 1081 + return 0; 1082 + } else { 1083 + ctx->contexts_maxed = true; 1084 + return -1; /* no more room, stop walking the stack */ 1085 + } 1086 + } 1087 + 1088 + static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip) 1089 
+ { 1090 + if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) { 1091 + struct perf_callchain_entry *entry = ctx->entry; 1092 + entry->ip[entry->nr++] = ip; 1093 + ++ctx->nr; 1086 1094 return 0; 1087 1095 } else { 1088 1096 return -1; /* no more room, stop walking the stack */
+1
include/uapi/linux/perf_event.h
··· 862 862 }; 863 863 864 864 #define PERF_MAX_STACK_DEPTH 127 865 + #define PERF_MAX_CONTEXTS_PER_STACK 8 865 866 866 867 enum perf_callchain_context { 867 868 PERF_CONTEXT_HV = (__u64)-32,
+2 -1
kernel/bpf/stackmap.c
··· 136 136 BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID))) 137 137 return -EINVAL; 138 138 139 - trace = get_perf_callchain(regs, init_nr, kernel, user, false, false); 139 + trace = get_perf_callchain(regs, init_nr, kernel, user, 140 + sysctl_perf_event_max_stack, false, false); 140 141 141 142 if (unlikely(!trace)) 142 143 /* couldn't fetch the stack trace */
+24 -12
kernel/events/callchain.c
··· 19 19 }; 20 20 21 21 int sysctl_perf_event_max_stack __read_mostly = PERF_MAX_STACK_DEPTH; 22 + int sysctl_perf_event_max_contexts_per_stack __read_mostly = PERF_MAX_CONTEXTS_PER_STACK; 22 23 23 24 static inline size_t perf_callchain_entry__sizeof(void) 24 25 { 25 26 return (sizeof(struct perf_callchain_entry) + 26 - sizeof(__u64) * sysctl_perf_event_max_stack); 27 + sizeof(__u64) * (sysctl_perf_event_max_stack + 28 + sysctl_perf_event_max_contexts_per_stack)); 27 29 } 28 30 29 31 static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]); ··· 34 32 static struct callchain_cpus_entries *callchain_cpus_entries; 35 33 36 34 37 - __weak void perf_callchain_kernel(struct perf_callchain_entry *entry, 35 + __weak void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, 38 36 struct pt_regs *regs) 39 37 { 40 38 } 41 39 42 - __weak void perf_callchain_user(struct perf_callchain_entry *entry, 40 + __weak void perf_callchain_user(struct perf_callchain_entry_ctx *entry, 43 41 struct pt_regs *regs) 44 42 { 45 43 } ··· 178 176 if (!kernel && !user) 179 177 return NULL; 180 178 181 - return get_perf_callchain(regs, 0, kernel, user, crosstask, true); 179 + return get_perf_callchain(regs, 0, kernel, user, sysctl_perf_event_max_stack, crosstask, true); 182 180 } 183 181 184 182 struct perf_callchain_entry * 185 183 get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, 186 - bool crosstask, bool add_mark) 184 + u32 max_stack, bool crosstask, bool add_mark) 187 185 { 188 186 struct perf_callchain_entry *entry; 187 + struct perf_callchain_entry_ctx ctx; 189 188 int rctx; 190 189 191 190 entry = get_callchain_entry(&rctx); ··· 196 193 if (!entry) 197 194 goto exit_put; 198 195 199 - entry->nr = init_nr; 196 + ctx.entry = entry; 197 + ctx.max_stack = max_stack; 198 + ctx.nr = entry->nr = init_nr; 199 + ctx.contexts = 0; 200 + ctx.contexts_maxed = false; 200 201 201 202 if (kernel && !user_mode(regs)) { 202 203 if (add_mark) 203 - 
perf_callchain_store(entry, PERF_CONTEXT_KERNEL); 204 - perf_callchain_kernel(entry, regs); 204 + perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL); 205 + perf_callchain_kernel(&ctx, regs); 205 206 } 206 207 207 208 if (user) { ··· 221 214 goto exit_put; 222 215 223 216 if (add_mark) 224 - perf_callchain_store(entry, PERF_CONTEXT_USER); 225 - perf_callchain_user(entry, regs); 217 + perf_callchain_store_context(&ctx, PERF_CONTEXT_USER); 218 + perf_callchain_user(&ctx, regs); 226 219 } 227 220 } 228 221 ··· 232 225 return entry; 233 226 } 234 227 228 + /* 229 + * Used for sysctl_perf_event_max_stack and 230 + * sysctl_perf_event_max_contexts_per_stack. 231 + */ 235 232 int perf_event_max_stack_handler(struct ctl_table *table, int write, 236 233 void __user *buffer, size_t *lenp, loff_t *ppos) 237 234 { 238 - int new_value = sysctl_perf_event_max_stack, ret; 235 + int *value = table->data; 236 + int new_value = *value, ret; 239 237 struct ctl_table new_table = *table; 240 238 241 239 new_table.data = &new_value; ··· 252 240 if (atomic_read(&nr_callchain_events)) 253 241 ret = -EBUSY; 254 242 else 255 - sysctl_perf_event_max_stack = new_value; 243 + *value = new_value; 256 244 257 245 mutex_unlock(&callchain_mutex); 258 246
+10 -1
kernel/sysctl.c
··· 1149 1149 }, 1150 1150 { 1151 1151 .procname = "perf_event_max_stack", 1152 - .data = NULL, /* filled in by handler */ 1152 + .data = &sysctl_perf_event_max_stack, 1153 1153 .maxlen = sizeof(sysctl_perf_event_max_stack), 1154 1154 .mode = 0644, 1155 1155 .proc_handler = perf_event_max_stack_handler, 1156 1156 .extra1 = &zero, 1157 1157 .extra2 = &six_hundred_forty_kb, 1158 + }, 1159 + { 1160 + .procname = "perf_event_max_contexts_per_stack", 1161 + .data = &sysctl_perf_event_max_contexts_per_stack, 1162 + .maxlen = sizeof(sysctl_perf_event_max_contexts_per_stack), 1163 + .mode = 0644, 1164 + .proc_handler = perf_event_max_stack_handler, 1165 + .extra1 = &zero, 1166 + .extra2 = &one_thousand, 1158 1167 }, 1159 1168 #endif 1160 1169 #ifdef CONFIG_KMEMCHECK
+3 -2
tools/perf/Documentation/perf-report.txt
··· 103 103 104 104 If --branch-stack option is used, following sort keys are also 105 105 available: 106 - dso_from, dso_to, symbol_from, symbol_to, mispredict. 107 106 108 107 - dso_from: name of library or module branched from 109 108 - dso_to: name of library or module branched to 110 109 - symbol_from: name of function branched from 111 110 - symbol_to: name of function branched to 111 + - srcline_from: source file and line branched from 112 + - srcline_to: source file and line branched to 112 113 - mispredict: "N" for predicted branch, "Y" for mispredicted branch 113 114 - in_tx: branch in TSX transaction 114 115 - abort: TSX transaction abort. ··· 249 248 Note that when using the --itrace option the synthesized callchain size 250 249 will override this value if the synthesized callchain size is bigger. 251 250 252 - Default: /proc/sys/kernel/perf_event_max_stack when present, 127 otherwise. 251 + Default: 127 253 252 254 253 -G:: 255 254 --inverted::
+1 -1
tools/perf/Documentation/perf-script.txt
··· 267 267 Note that when using the --itrace option the synthesized callchain size 268 268 will override this value if the synthesized callchain size is bigger. 269 269 270 - Default: /proc/sys/kernel/perf_event_max_stack when present, 127 otherwise. 270 + Default: 127 271 271 272 272 --ns:: 273 273 Use 9 decimal places when displaying time (i.e. show the nanoseconds)
+2 -1
tools/perf/Documentation/perf-trace.txt
··· 143 143 Implies '--call-graph dwarf' when --call-graph not present on the 144 144 command line, on systems where DWARF unwinding was built in. 145 145 146 - Default: /proc/sys/kernel/perf_event_max_stack when present, 127 otherwise. 146 + Default: /proc/sys/kernel/perf_event_max_stack when present for 147 + live sessions (without --input/-i), 127 otherwise. 147 148 148 149 --min-stack:: 149 150 Set the stack depth limit when parsing the callchain, anything
+3 -2
tools/perf/builtin-annotate.c
··· 324 324 OPT_BOOLEAN(0, "skip-missing", &annotate.skip_missing, 325 325 "Skip symbols that cannot be annotated"), 326 326 OPT_STRING('C', "cpu", &annotate.cpu_list, "cpu", "list of cpus to profile"), 327 - OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", 328 - "Look for files with symbols relative to this directory"), 327 + OPT_CALLBACK(0, "symfs", NULL, "directory", 328 + "Look for files with symbols relative to this directory", 329 + symbol__config_symfs), 329 330 OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src, 330 331 "Interleave source code with assembly code (default)"), 331 332 OPT_BOOLEAN(0, "asm-raw", &symbol_conf.annotate_asm_raw,
+4 -4
tools/perf/builtin-buildid-cache.c
··· 119 119 if (build_id_cache__kcore_buildid(from_dir, sbuildid) < 0) 120 120 return -1; 121 121 122 - scnprintf(to_dir, sizeof(to_dir), "%s/[kernel.kcore]/%s", 123 - buildid_dir, sbuildid); 122 + scnprintf(to_dir, sizeof(to_dir), "%s/%s/%s", 123 + buildid_dir, DSO__NAME_KCORE, sbuildid); 124 124 125 125 if (!force && 126 126 !build_id_cache__kcore_existing(from_dir, to_dir, sizeof(to_dir))) { ··· 131 131 if (build_id_cache__kcore_dir(dir, sizeof(dir))) 132 132 return -1; 133 133 134 - scnprintf(to_dir, sizeof(to_dir), "%s/[kernel.kcore]/%s/%s", 135 - buildid_dir, sbuildid, dir); 134 + scnprintf(to_dir, sizeof(to_dir), "%s/%s/%s/%s", 135 + buildid_dir, DSO__NAME_KCORE, sbuildid, dir); 136 136 137 137 if (mkdir_p(to_dir, 0755)) 138 138 return -1;
+3 -2
tools/perf/builtin-diff.c
··· 812 812 OPT_STRING_NOEMPTY('t', "field-separator", &symbol_conf.field_sep, "separator", 813 813 "separator for columns, no spaces will be added between " 814 814 "columns '.' is reserved."), 815 - OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", 816 - "Look for files with symbols relative to this directory"), 815 + OPT_CALLBACK(0, "symfs", NULL, "directory", 816 + "Look for files with symbols relative to this directory", 817 + symbol__config_symfs), 817 818 OPT_UINTEGER('o', "order", &sort_compute, "Specify compute sorting."), 818 819 OPT_CALLBACK(0, "percentage", NULL, "relative|absolute", 819 820 "How to display percentage of filtered entries", parse_filter_percentage),
+71 -10
tools/perf/builtin-record.c
··· 40 40 #include <unistd.h> 41 41 #include <sched.h> 42 42 #include <sys/mman.h> 43 + #include <asm/bug.h> 43 44 44 45 45 46 struct record { ··· 83 82 return record__write(rec, event, event->header.size); 84 83 } 85 84 85 + static int 86 + backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end) 87 + { 88 + struct perf_event_header *pheader; 89 + u64 evt_head = head; 90 + int size = mask + 1; 91 + 92 + pr_debug2("backward_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head); 93 + pheader = (struct perf_event_header *)(buf + (head & mask)); 94 + *start = head; 95 + while (true) { 96 + if (evt_head - head >= (unsigned int)size) { 97 + pr_debug("Finshed reading backward ring buffer: rewind\n"); 98 + if (evt_head - head > (unsigned int)size) 99 + evt_head -= pheader->size; 100 + *end = evt_head; 101 + return 0; 102 + } 103 + 104 + pheader = (struct perf_event_header *)(buf + (evt_head & mask)); 105 + 106 + if (pheader->size == 0) { 107 + pr_debug("Finshed reading backward ring buffer: get start\n"); 108 + *end = evt_head; 109 + return 0; 110 + } 111 + 112 + evt_head += pheader->size; 113 + pr_debug3("move evt_head: %"PRIx64"\n", evt_head); 114 + } 115 + WARN_ONCE(1, "Shouldn't get here\n"); 116 + return -1; 117 + } 118 + 119 + static int 120 + rb_find_range(struct perf_evlist *evlist, 121 + void *data, int mask, u64 head, u64 old, 122 + u64 *start, u64 *end) 123 + { 124 + if (!evlist->backward) { 125 + *start = old; 126 + *end = head; 127 + return 0; 128 + } 129 + 130 + return backward_rb_find_range(data, mask, head, start, end); 131 + } 132 + 86 133 static int record__mmap_read(struct record *rec, int idx) 87 134 { 88 135 struct perf_mmap *md = &rec->evlist->mmap[idx]; 89 136 u64 head = perf_mmap__read_head(md); 90 137 u64 old = md->prev; 138 + u64 end = head, start = old; 91 139 unsigned char *data = md->base + page_size; 92 140 unsigned long size; 93 141 void *buf; 94 142 int rc = 0; 95 143 96 - if (old == head) 144 + if 
(rb_find_range(rec->evlist, data, md->mask, head, 145 + old, &start, &end)) 146 + return -1; 147 + 148 + if (start == end) 97 149 return 0; 98 150 99 151 rec->samples++; 100 152 101 - size = head - old; 153 + size = end - start; 154 + if (size > (unsigned long)(md->mask) + 1) { 155 + WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n"); 102 156 103 - if ((old & md->mask) + size != (head & md->mask)) { 104 - buf = &data[old & md->mask]; 105 - size = md->mask + 1 - (old & md->mask); 106 - old += size; 157 + md->prev = head; 158 + perf_evlist__mmap_consume(rec->evlist, idx); 159 + return 0; 160 + } 161 + 162 + if ((start & md->mask) + size != (end & md->mask)) { 163 + buf = &data[start & md->mask]; 164 + size = md->mask + 1 - (start & md->mask); 165 + start += size; 107 166 108 167 if (record__write(rec, buf, size) < 0) { 109 168 rc = -1; ··· 171 110 } 172 111 } 173 112 174 - buf = &data[old & md->mask]; 175 - size = head - old; 176 - old += size; 113 + buf = &data[start & md->mask]; 114 + size = end - start; 115 + start += size; 177 116 178 117 if (record__write(rec, buf, size) < 0) { 179 118 rc = -1; 180 119 goto out; 181 120 } 182 121 183 - md->prev = old; 122 + md->prev = head; 184 123 perf_evlist__mmap_consume(rec->evlist, idx); 185 124 out: 186 125 return rc;
+4 -3
tools/perf/builtin-report.c
··· 691 691 .ordered_events = true, 692 692 .ordering_requires_timestamps = true, 693 693 }, 694 - .max_stack = sysctl_perf_event_max_stack, 694 + .max_stack = PERF_MAX_STACK_DEPTH, 695 695 .pretty_printing_style = "normal", 696 696 .socket_filter = -1, 697 697 }; ··· 770 770 "columns '.' is reserved."), 771 771 OPT_BOOLEAN('U', "hide-unresolved", &symbol_conf.hide_unresolved, 772 772 "Only display entries resolved to a symbol"), 773 - OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", 774 - "Look for files with symbols relative to this directory"), 773 + OPT_CALLBACK(0, "symfs", NULL, "directory", 774 + "Look for files with symbols relative to this directory", 775 + symbol__config_symfs), 775 776 OPT_STRING('C', "cpu", &report.cpu_list, "cpu", 776 777 "list of cpus to profile"), 777 778 OPT_BOOLEAN('I', "show-info", &report.show_full_info,
+3 -4
tools/perf/builtin-script.c
··· 2010 2010 "file", "kallsyms pathname"), 2011 2011 OPT_BOOLEAN('G', "hide-call-graph", &no_callchain, 2012 2012 "When printing symbols do not display call chain"), 2013 - OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", 2014 - "Look for files with symbols relative to this directory"), 2013 + OPT_CALLBACK(0, "symfs", NULL, "directory", 2014 + "Look for files with symbols relative to this directory", 2015 + symbol__config_symfs), 2015 2016 OPT_CALLBACK('F', "fields", NULL, "str", 2016 2017 "comma separated output fields prepend with 'type:'. " 2017 2018 "Valid types: hw,sw,trace,raw. " ··· 2067 2066 "perf script [<options>] <top-script> [script-args]", 2068 2067 NULL 2069 2068 }; 2070 - 2071 - scripting_max_stack = sysctl_perf_event_max_stack; 2072 2069 2073 2070 setup_scripting(); 2074 2071
+13 -9
tools/perf/builtin-stat.c
··· 66 66 #include <stdlib.h> 67 67 #include <sys/prctl.h> 68 68 #include <locale.h> 69 + #include <math.h> 69 70 70 71 #define DEFAULT_SEPARATOR " " 71 72 #define CNTR_NOT_SUPPORTED "<not supported>" ··· 992 991 const char *fmt; 993 992 994 993 if (csv_output) { 995 - fmt = sc != 1.0 ? "%.2f%s" : "%.0f%s"; 994 + fmt = floor(sc) != sc ? "%.2f%s" : "%.0f%s"; 996 995 } else { 997 996 if (big_num) 998 - fmt = sc != 1.0 ? "%'18.2f%s" : "%'18.0f%s"; 997 + fmt = floor(sc) != sc ? "%'18.2f%s" : "%'18.0f%s"; 999 998 else 1000 - fmt = sc != 1.0 ? "%18.2f%s" : "%18.0f%s"; 999 + fmt = floor(sc) != sc ? "%18.2f%s" : "%18.0f%s"; 1001 1000 } 1002 1001 1003 1002 aggr_printout(evsel, id, nr); ··· 1910 1909 } 1911 1910 1912 1911 if (!evsel_list->nr_entries) { 1912 + if (target__has_cpu(&target)) 1913 + default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK; 1914 + 1913 1915 if (perf_evlist__add_default_attrs(evsel_list, default_attrs0) < 0) 1914 1916 return -1; 1915 1917 if (pmu_have_event("cpu", "stalled-cycles-frontend")) { ··· 2004 2000 union perf_event *event, 2005 2001 struct perf_session *session) 2006 2002 { 2007 - struct stat_round_event *round = &event->stat_round; 2003 + struct stat_round_event *stat_round = &event->stat_round; 2008 2004 struct perf_evsel *counter; 2009 2005 struct timespec tsh, *ts = NULL; 2010 2006 const char **argv = session->header.env.cmdline_argv; ··· 2013 2009 evlist__for_each(evsel_list, counter) 2014 2010 perf_stat_process_counter(&stat_config, counter); 2015 2011 2016 - if (round->type == PERF_STAT_ROUND_TYPE__FINAL) 2017 - update_stats(&walltime_nsecs_stats, round->time); 2012 + if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL) 2013 + update_stats(&walltime_nsecs_stats, stat_round->time); 2018 2014 2019 - if (stat_config.interval && round->time) { 2020 - tsh.tv_sec = round->time / NSECS_PER_SEC; 2021 - tsh.tv_nsec = round->time % NSECS_PER_SEC; 2015 + if (stat_config.interval && stat_round->time) { 2016 + tsh.tv_sec = stat_round->time / 
NSECS_PER_SEC; 2017 + tsh.tv_nsec = stat_round->time % NSECS_PER_SEC; 2022 2018 ts = &tsh; 2023 2019 } 2024 2020
+3 -2
tools/perf/builtin-timechart.c
··· 1945 1945 OPT_CALLBACK('p', "process", NULL, "process", 1946 1946 "process selector. Pass a pid or process name.", 1947 1947 parse_process), 1948 - OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", 1949 - "Look for files with symbols relative to this directory"), 1948 + OPT_CALLBACK(0, "symfs", NULL, "directory", 1949 + "Look for files with symbols relative to this directory", 1950 + symbol__config_symfs), 1950 1951 OPT_INTEGER('n', "proc-num", &tchart.proc_num, 1951 1952 "min. number of tasks to print"), 1952 1953 OPT_BOOLEAN('t', "topology", &tchart.topology,
+3 -3
tools/perf/builtin-top.c
··· 732 732 if (machine__resolve(machine, &al, sample) < 0) 733 733 return; 734 734 735 - if (!top->kptr_restrict_warned && 735 + if (!machine->kptr_restrict_warned && 736 736 symbol_conf.kptr_restrict && 737 737 al.cpumode == PERF_RECORD_MISC_KERNEL) { 738 738 ui__warning( ··· 743 743 " modules" : ""); 744 744 if (use_browser <= 0) 745 745 sleep(5); 746 - top->kptr_restrict_warned = true; 746 + machine->kptr_restrict_warned = true; 747 747 } 748 748 749 749 if (al.sym == NULL) { ··· 759 759 * --hide-kernel-symbols, even if the user specifies an 760 760 * invalid --vmlinux ;-) 761 761 */ 762 - if (!top->kptr_restrict_warned && !top->vmlinux_warned && 762 + if (!machine->kptr_restrict_warned && !top->vmlinux_warned && 763 763 al.map == machine->vmlinux_maps[MAP__FUNCTION] && 764 764 RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) { 765 765 if (symbol_conf.vmlinux_name) {
+117 -157
tools/perf/builtin-trace.c
··· 576 576 bool hexret; 577 577 } syscall_fmts[] = { 578 578 { .name = "access", .errmsg = true, 579 - .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ 580 - [1] = SCA_ACCMODE, /* mode */ }, }, 579 + .arg_scnprintf = { [1] = SCA_ACCMODE, /* mode */ }, }, 581 580 { .name = "arch_prctl", .errmsg = true, .alias = "prctl", }, 582 581 { .name = "bpf", .errmsg = true, STRARRAY(0, cmd, bpf_cmd), }, 583 582 { .name = "brk", .hexret = true, 584 583 .arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, }, 585 - { .name = "chdir", .errmsg = true, 586 - .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, }, 587 - { .name = "chmod", .errmsg = true, 588 - .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, }, 589 - { .name = "chroot", .errmsg = true, 590 - .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, }, 584 + { .name = "chdir", .errmsg = true, }, 585 + { .name = "chmod", .errmsg = true, }, 586 + { .name = "chroot", .errmsg = true, }, 591 587 { .name = "clock_gettime", .errmsg = true, STRARRAY(0, clk_id, clockid), }, 592 588 { .name = "clone", .errpid = true, }, 593 589 { .name = "close", .errmsg = true, 594 590 .arg_scnprintf = { [0] = SCA_CLOSE_FD, /* fd */ }, }, 595 591 { .name = "connect", .errmsg = true, }, 596 - { .name = "creat", .errmsg = true, 597 - .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, }, 598 - { .name = "dup", .errmsg = true, 599 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 600 - { .name = "dup2", .errmsg = true, 601 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 602 - { .name = "dup3", .errmsg = true, 603 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 592 + { .name = "creat", .errmsg = true, }, 593 + { .name = "dup", .errmsg = true, }, 594 + { .name = "dup2", .errmsg = true, }, 595 + { .name = "dup3", .errmsg = true, }, 604 596 { .name = "epoll_ctl", .errmsg = true, STRARRAY(1, op, epoll_ctl_ops), }, 605 597 { .name = "eventfd2", .errmsg = true, 606 598 .arg_scnprintf = { [1] = SCA_EFD_FLAGS, /* flags */ }, }, 607 - { 
.name = "faccessat", .errmsg = true, 608 - .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ 609 - [1] = SCA_FILENAME, /* filename */ }, }, 610 - { .name = "fadvise64", .errmsg = true, 611 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 612 - { .name = "fallocate", .errmsg = true, 613 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 614 - { .name = "fchdir", .errmsg = true, 615 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 616 - { .name = "fchmod", .errmsg = true, 617 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 599 + { .name = "faccessat", .errmsg = true, }, 600 + { .name = "fadvise64", .errmsg = true, }, 601 + { .name = "fallocate", .errmsg = true, }, 602 + { .name = "fchdir", .errmsg = true, }, 603 + { .name = "fchmod", .errmsg = true, }, 618 604 { .name = "fchmodat", .errmsg = true, 619 - .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ 620 - [1] = SCA_FILENAME, /* filename */ }, }, 621 - { .name = "fchown", .errmsg = true, 622 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 605 + .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, 606 + { .name = "fchown", .errmsg = true, }, 623 607 { .name = "fchownat", .errmsg = true, 624 - .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ 625 - [1] = SCA_FILENAME, /* filename */ }, }, 608 + .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, 626 609 { .name = "fcntl", .errmsg = true, 627 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ 628 - [1] = SCA_STRARRAY, /* cmd */ }, 610 + .arg_scnprintf = { [1] = SCA_STRARRAY, /* cmd */ }, 629 611 .arg_parm = { [1] = &strarray__fcntl_cmds, /* cmd */ }, }, 630 - { .name = "fdatasync", .errmsg = true, 631 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 612 + { .name = "fdatasync", .errmsg = true, }, 632 613 { .name = "flock", .errmsg = true, 633 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ 634 - [1] = SCA_FLOCK, /* cmd */ }, }, 635 - { .name = "fsetxattr", .errmsg = true, 636 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 637 - { .name = "fstat", .errmsg = true, .alias = "newfstat", 638 - 
.arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 639 - { .name = "fstatat", .errmsg = true, .alias = "newfstatat", 640 - .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ 641 - [1] = SCA_FILENAME, /* filename */ }, }, 642 - { .name = "fstatfs", .errmsg = true, 643 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 644 - { .name = "fsync", .errmsg = true, 645 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 646 - { .name = "ftruncate", .errmsg = true, 647 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 614 + .arg_scnprintf = { [1] = SCA_FLOCK, /* cmd */ }, }, 615 + { .name = "fsetxattr", .errmsg = true, }, 616 + { .name = "fstat", .errmsg = true, .alias = "newfstat", }, 617 + { .name = "fstatat", .errmsg = true, .alias = "newfstatat", }, 618 + { .name = "fstatfs", .errmsg = true, }, 619 + { .name = "fsync", .errmsg = true, }, 620 + { .name = "ftruncate", .errmsg = true, }, 648 621 { .name = "futex", .errmsg = true, 649 622 .arg_scnprintf = { [1] = SCA_FUTEX_OP, /* op */ }, }, 650 623 { .name = "futimesat", .errmsg = true, 651 - .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ 652 - [1] = SCA_FILENAME, /* filename */ }, }, 653 - { .name = "getdents", .errmsg = true, 654 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 655 - { .name = "getdents64", .errmsg = true, 656 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 624 + .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, 625 + { .name = "getdents", .errmsg = true, }, 626 + { .name = "getdents64", .errmsg = true, }, 657 627 { .name = "getitimer", .errmsg = true, STRARRAY(0, which, itimers), }, 658 628 { .name = "getpid", .errpid = true, }, 659 629 { .name = "getpgid", .errpid = true, }, ··· 631 661 { .name = "getrandom", .errmsg = true, 632 662 .arg_scnprintf = { [2] = SCA_GETRANDOM_FLAGS, /* flags */ }, }, 633 663 { .name = "getrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), }, 634 - { .name = "getxattr", .errmsg = true, 635 - .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, }, 636 - { .name = 
"inotify_add_watch", .errmsg = true, 637 - .arg_scnprintf = { [1] = SCA_FILENAME, /* pathname */ }, }, 664 + { .name = "getxattr", .errmsg = true, }, 665 + { .name = "inotify_add_watch", .errmsg = true, }, 638 666 { .name = "ioctl", .errmsg = true, 639 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ 667 + .arg_scnprintf = { 640 668 #if defined(__i386__) || defined(__x86_64__) 641 669 /* 642 670 * FIXME: Make this available to all arches. ··· 648 680 { .name = "keyctl", .errmsg = true, STRARRAY(0, option, keyctl_options), }, 649 681 { .name = "kill", .errmsg = true, 650 682 .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, }, 651 - { .name = "lchown", .errmsg = true, 652 - .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, }, 653 - { .name = "lgetxattr", .errmsg = true, 654 - .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, }, 683 + { .name = "lchown", .errmsg = true, }, 684 + { .name = "lgetxattr", .errmsg = true, }, 655 685 { .name = "linkat", .errmsg = true, 656 686 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, 657 - { .name = "listxattr", .errmsg = true, 658 - .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, }, 659 - { .name = "llistxattr", .errmsg = true, 660 - .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, }, 661 - { .name = "lremovexattr", .errmsg = true, 662 - .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, }, 687 + { .name = "listxattr", .errmsg = true, }, 688 + { .name = "llistxattr", .errmsg = true, }, 689 + { .name = "lremovexattr", .errmsg = true, }, 663 690 { .name = "lseek", .errmsg = true, 664 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ 665 - [2] = SCA_STRARRAY, /* whence */ }, 691 + .arg_scnprintf = { [2] = SCA_STRARRAY, /* whence */ }, 666 692 .arg_parm = { [2] = &strarray__whences, /* whence */ }, }, 667 - { .name = "lsetxattr", .errmsg = true, 668 - .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, }, 669 - { .name = "lstat", .errmsg = true, .alias = "newlstat", 670 - .arg_scnprintf = { [0] = 
SCA_FILENAME, /* filename */ }, }, 671 - { .name = "lsxattr", .errmsg = true, 672 - .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, }, 693 + { .name = "lsetxattr", .errmsg = true, }, 694 + { .name = "lstat", .errmsg = true, .alias = "newlstat", }, 695 + { .name = "lsxattr", .errmsg = true, }, 673 696 { .name = "madvise", .errmsg = true, 674 697 .arg_scnprintf = { [0] = SCA_HEX, /* start */ 675 698 [2] = SCA_MADV_BHV, /* behavior */ }, }, 676 - { .name = "mkdir", .errmsg = true, 677 - .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, }, 699 + { .name = "mkdir", .errmsg = true, }, 678 700 { .name = "mkdirat", .errmsg = true, 679 - .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ 680 - [1] = SCA_FILENAME, /* pathname */ }, }, 681 - { .name = "mknod", .errmsg = true, 682 - .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, }, 701 + .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, 702 + { .name = "mknod", .errmsg = true, }, 683 703 { .name = "mknodat", .errmsg = true, 684 - .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ 685 - [1] = SCA_FILENAME, /* filename */ }, }, 704 + .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, 686 705 { .name = "mlock", .errmsg = true, 687 706 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, }, 688 707 { .name = "mlockall", .errmsg = true, ··· 677 722 { .name = "mmap", .hexret = true, 678 723 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ 679 724 [2] = SCA_MMAP_PROT, /* prot */ 680 - [3] = SCA_MMAP_FLAGS, /* flags */ 681 - [4] = SCA_FD, /* fd */ }, }, 725 + [3] = SCA_MMAP_FLAGS, /* flags */ }, }, 682 726 { .name = "mprotect", .errmsg = true, 683 727 .arg_scnprintf = { [0] = SCA_HEX, /* start */ 684 728 [2] = SCA_MMAP_PROT, /* prot */ }, }, ··· 694 740 { .name = "name_to_handle_at", .errmsg = true, 695 741 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, 696 742 { .name = "newfstatat", .errmsg = true, 697 - .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ 698 - [1] = SCA_FILENAME, /* filename */ }, }, 743 + .arg_scnprintf = { [0] = 
SCA_FDAT, /* dfd */ }, }, 699 744 { .name = "open", .errmsg = true, 700 - .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ 701 - [1] = SCA_OPEN_FLAGS, /* flags */ }, }, 745 + .arg_scnprintf = { [1] = SCA_OPEN_FLAGS, /* flags */ }, }, 702 746 { .name = "open_by_handle_at", .errmsg = true, 703 747 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ 704 748 [2] = SCA_OPEN_FLAGS, /* flags */ }, }, 705 749 { .name = "openat", .errmsg = true, 706 750 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ 707 - [1] = SCA_FILENAME, /* filename */ 708 751 [2] = SCA_OPEN_FLAGS, /* flags */ }, }, 709 752 { .name = "perf_event_open", .errmsg = true, 710 753 .arg_scnprintf = { [2] = SCA_INT, /* cpu */ ··· 711 760 .arg_scnprintf = { [1] = SCA_PIPE_FLAGS, /* flags */ }, }, 712 761 { .name = "poll", .errmsg = true, .timeout = true, }, 713 762 { .name = "ppoll", .errmsg = true, .timeout = true, }, 714 - { .name = "pread", .errmsg = true, .alias = "pread64", 715 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 716 - { .name = "preadv", .errmsg = true, .alias = "pread", 717 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 763 + { .name = "pread", .errmsg = true, .alias = "pread64", }, 764 + { .name = "preadv", .errmsg = true, .alias = "pread", }, 718 765 { .name = "prlimit64", .errmsg = true, STRARRAY(1, resource, rlimit_resources), }, 719 - { .name = "pwrite", .errmsg = true, .alias = "pwrite64", 720 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 721 - { .name = "pwritev", .errmsg = true, 722 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 723 - { .name = "read", .errmsg = true, 724 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 725 - { .name = "readlink", .errmsg = true, 726 - .arg_scnprintf = { [0] = SCA_FILENAME, /* path */ }, }, 766 + { .name = "pwrite", .errmsg = true, .alias = "pwrite64", }, 767 + { .name = "pwritev", .errmsg = true, }, 768 + { .name = "read", .errmsg = true, }, 769 + { .name = "readlink", .errmsg = true, }, 727 770 { .name = "readlinkat", .errmsg = true, 728 - 
.arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ 729 - [1] = SCA_FILENAME, /* pathname */ }, }, 730 - { .name = "readv", .errmsg = true, 731 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 771 + .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, 772 + { .name = "readv", .errmsg = true, }, 732 773 { .name = "recvfrom", .errmsg = true, 733 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ 734 - [3] = SCA_MSG_FLAGS, /* flags */ }, }, 774 + .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, }, 735 775 { .name = "recvmmsg", .errmsg = true, 736 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ 737 - [3] = SCA_MSG_FLAGS, /* flags */ }, }, 776 + .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, }, 738 777 { .name = "recvmsg", .errmsg = true, 739 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ 740 - [2] = SCA_MSG_FLAGS, /* flags */ }, }, 741 - { .name = "removexattr", .errmsg = true, 742 - .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, }, 778 + .arg_scnprintf = { [2] = SCA_MSG_FLAGS, /* flags */ }, }, 779 + { .name = "removexattr", .errmsg = true, }, 743 780 { .name = "renameat", .errmsg = true, 744 781 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, 745 - { .name = "rmdir", .errmsg = true, 746 - .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, }, 782 + { .name = "rmdir", .errmsg = true, }, 747 783 { .name = "rt_sigaction", .errmsg = true, 748 784 .arg_scnprintf = { [0] = SCA_SIGNUM, /* sig */ }, }, 749 785 { .name = "rt_sigprocmask", .errmsg = true, STRARRAY(0, how, sighow), }, ··· 745 807 [1] = SCA_SECCOMP_FLAGS, /* flags */ }, }, 746 808 { .name = "select", .errmsg = true, .timeout = true, }, 747 809 { .name = "sendmmsg", .errmsg = true, 748 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ 749 - [3] = SCA_MSG_FLAGS, /* flags */ }, }, 810 + .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, }, 750 811 { .name = "sendmsg", .errmsg = true, 751 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ 752 - [2] = SCA_MSG_FLAGS, /* flags */ }, }, 812 + .arg_scnprintf = { [2] = 
SCA_MSG_FLAGS, /* flags */ }, }, 753 813 { .name = "sendto", .errmsg = true, 754 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ 755 - [3] = SCA_MSG_FLAGS, /* flags */ }, }, 814 + .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, }, 756 815 { .name = "set_tid_address", .errpid = true, }, 757 816 { .name = "setitimer", .errmsg = true, STRARRAY(0, which, itimers), }, 758 817 { .name = "setpgid", .errmsg = true, }, 759 818 { .name = "setrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), }, 760 - { .name = "setxattr", .errmsg = true, 761 - .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, }, 762 - { .name = "shutdown", .errmsg = true, 763 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 819 + { .name = "setxattr", .errmsg = true, }, 820 + { .name = "shutdown", .errmsg = true, }, 764 821 { .name = "socket", .errmsg = true, 765 822 .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */ 766 823 [1] = SCA_SK_TYPE, /* type */ }, ··· 764 831 .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */ 765 832 [1] = SCA_SK_TYPE, /* type */ }, 766 833 .arg_parm = { [0] = &strarray__socket_families, /* family */ }, }, 767 - { .name = "stat", .errmsg = true, .alias = "newstat", 768 - .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, }, 769 - { .name = "statfs", .errmsg = true, 770 - .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, }, 834 + { .name = "stat", .errmsg = true, .alias = "newstat", }, 835 + { .name = "statfs", .errmsg = true, }, 771 836 { .name = "swapoff", .errmsg = true, 772 837 .arg_scnprintf = { [0] = SCA_FILENAME, /* specialfile */ }, }, 773 838 { .name = "swapon", .errmsg = true, ··· 776 845 .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, }, 777 846 { .name = "tkill", .errmsg = true, 778 847 .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, }, 779 - { .name = "truncate", .errmsg = true, 780 - .arg_scnprintf = { [0] = SCA_FILENAME, /* path */ }, }, 848 + { .name = "truncate", .errmsg = true, }, 781 849 { .name = "uname", .errmsg = 
true, .alias = "newuname", }, 782 850 { .name = "unlinkat", .errmsg = true, 783 - .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ 784 - [1] = SCA_FILENAME, /* pathname */ }, }, 785 - { .name = "utime", .errmsg = true, 786 - .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, }, 851 + .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, 852 + { .name = "utime", .errmsg = true, }, 787 853 { .name = "utimensat", .errmsg = true, 788 - .arg_scnprintf = { [0] = SCA_FDAT, /* dirfd */ 789 - [1] = SCA_FILENAME, /* filename */ }, }, 790 - { .name = "utimes", .errmsg = true, 791 - .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, }, 792 - { .name = "vmsplice", .errmsg = true, 793 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 854 + .arg_scnprintf = { [0] = SCA_FDAT, /* dirfd */ }, }, 855 + { .name = "utimes", .errmsg = true, }, 856 + { .name = "vmsplice", .errmsg = true, }, 794 857 { .name = "wait4", .errpid = true, 795 858 .arg_scnprintf = { [2] = SCA_WAITID_OPTIONS, /* options */ }, }, 796 859 { .name = "waitid", .errpid = true, 797 860 .arg_scnprintf = { [3] = SCA_WAITID_OPTIONS, /* options */ }, }, 798 - { .name = "write", .errmsg = true, 799 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 800 - { .name = "writev", .errmsg = true, 801 - .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 861 + { .name = "write", .errmsg = true, }, 862 + { .name = "writev", .errmsg = true, }, 802 863 }; 803 864 804 865 static int syscall_fmt__cmp(const void *name, const void *fmtp) ··· 1083 1160 return trace__process_event(trace, machine, event, sample); 1084 1161 } 1085 1162 1163 + static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp) 1164 + { 1165 + struct machine *machine = vmachine; 1166 + 1167 + if (machine->kptr_restrict_warned) 1168 + return NULL; 1169 + 1170 + if (symbol_conf.kptr_restrict) { 1171 + pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n" 1172 + "Check 
/proc/sys/kernel/kptr_restrict.\n\n" 1173 + "Kernel samples will not be resolved.\n"); 1174 + machine->kptr_restrict_warned = true; 1175 + return NULL; 1176 + } 1177 + 1178 + return machine__resolve_kernel_addr(vmachine, addrp, modp); 1179 + } 1180 + 1086 1181 static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist) 1087 1182 { 1088 1183 int err = symbol__init(NULL); ··· 1112 1171 if (trace->host == NULL) 1113 1172 return -ENOMEM; 1114 1173 1115 - if (trace_event__register_resolver(trace->host, machine__resolve_kernel_addr) < 0) 1174 + if (trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr) < 0) 1116 1175 return -errno; 1117 1176 1118 1177 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target, ··· 1127 1186 static int syscall__set_arg_fmts(struct syscall *sc) 1128 1187 { 1129 1188 struct format_field *field; 1130 - int idx = 0; 1189 + int idx = 0, len; 1131 1190 1132 1191 sc->arg_scnprintf = calloc(sc->nr_args, sizeof(void *)); 1133 1192 if (sc->arg_scnprintf == NULL) ··· 1139 1198 for (field = sc->args; field; field = field->next) { 1140 1199 if (sc->fmt && sc->fmt->arg_scnprintf[idx]) 1141 1200 sc->arg_scnprintf[idx] = sc->fmt->arg_scnprintf[idx]; 1201 + else if (strcmp(field->type, "const char *") == 0 && 1202 + (strcmp(field->name, "filename") == 0 || 1203 + strcmp(field->name, "path") == 0 || 1204 + strcmp(field->name, "pathname") == 0)) 1205 + sc->arg_scnprintf[idx] = SCA_FILENAME; 1142 1206 else if (field->flags & FIELD_IS_POINTER) 1143 1207 sc->arg_scnprintf[idx] = syscall_arg__scnprintf_hex; 1144 1208 else if (strcmp(field->type, "pid_t") == 0) 1145 1209 sc->arg_scnprintf[idx] = SCA_PID; 1146 1210 else if (strcmp(field->type, "umode_t") == 0) 1147 1211 sc->arg_scnprintf[idx] = SCA_MODE_T; 1212 + else if ((strcmp(field->type, "int") == 0 || 1213 + strcmp(field->type, "unsigned int") == 0 || 1214 + strcmp(field->type, "long") == 0) && 1215 + (len = strlen(field->name)) >= 2 && 
1216 + strcmp(field->name + len - 2, "fd") == 0) { 1217 + /* 1218 + * /sys/kernel/tracing/events/syscalls/sys_enter* 1219 + * egrep 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c 1220 + * 65 int 1221 + * 23 unsigned int 1222 + * 7 unsigned long 1223 + */ 1224 + sc->arg_scnprintf[idx] = SCA_FD; 1225 + } 1148 1226 ++idx; 1149 1227 } 1150 1228 ··· 1494 1534 if (sc->is_exit) { 1495 1535 if (!(trace->duration_filter || trace->summary_only || trace->min_stack)) { 1496 1536 trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output); 1497 - fprintf(trace->output, "%-70s\n", ttrace->entry_str); 1537 + fprintf(trace->output, "%-70s)\n", ttrace->entry_str); 1498 1538 } 1499 1539 } else { 1500 1540 ttrace->entry_pending = true; ··· 2847 2887 mmap_pages_user_set = false; 2848 2888 2849 2889 if (trace.max_stack == UINT_MAX) { 2850 - trace.max_stack = sysctl_perf_event_max_stack; 2890 + trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl_perf_event_max_stack; 2851 2891 max_stack_user_set = false; 2852 2892 } 2853 2893 2854 2894 #ifdef HAVE_DWARF_UNWIND_SUPPORT 2855 - if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) 2895 + if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled && trace.trace_syscalls) 2856 2896 record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false); 2857 2897 #endif 2858 2898
+3
tools/perf/perf.c
··· 549 549 if (sysctl__read_int("kernel/perf_event_max_stack", &value) == 0) 550 550 sysctl_perf_event_max_stack = value; 551 551 552 + if (sysctl__read_int("kernel/perf_event_max_contexts_per_stack", &value) == 0) 553 + sysctl_perf_event_max_contexts_per_stack = value; 554 + 552 555 cmd = extract_argv0_path(argv[0]); 553 556 if (!cmd) 554 557 cmd = "perf-help";
+26 -8
tools/perf/util/annotate.c
··· 354 354 .scnprintf = nop__scnprintf, 355 355 }; 356 356 357 - /* 358 - * Must be sorted by name! 359 - */ 360 357 static struct ins instructions[] = { 361 358 { .name = "add", .ops = &mov_ops, }, 362 359 { .name = "addl", .ops = &mov_ops, }, ··· 369 372 { .name = "bgt", .ops = &jump_ops, }, 370 373 { .name = "bhi", .ops = &jump_ops, }, 371 374 { .name = "bl", .ops = &call_ops, }, 372 - { .name = "blt", .ops = &jump_ops, }, 373 375 { .name = "bls", .ops = &jump_ops, }, 376 + { .name = "blt", .ops = &jump_ops, }, 374 377 { .name = "blx", .ops = &call_ops, }, 375 378 { .name = "bne", .ops = &jump_ops, }, 376 379 #endif ··· 446 449 { .name = "xbeginq", .ops = &jump_ops, }, 447 450 }; 448 451 449 - static int ins__cmp(const void *name, const void *insp) 452 + static int ins__key_cmp(const void *name, const void *insp) 450 453 { 451 454 const struct ins *ins = insp; 452 455 453 456 return strcmp(name, ins->name); 454 457 } 455 458 456 - static struct ins *ins__find(const char *name) 459 + static int ins__cmp(const void *a, const void *b) 460 + { 461 + const struct ins *ia = a; 462 + const struct ins *ib = b; 463 + 464 + return strcmp(ia->name, ib->name); 465 + } 466 + 467 + static void ins__sort(void) 457 468 { 458 469 const int nmemb = ARRAY_SIZE(instructions); 459 470 460 - return bsearch(name, instructions, nmemb, sizeof(struct ins), ins__cmp); 471 + qsort(instructions, nmemb, sizeof(struct ins), ins__cmp); 472 + } 473 + 474 + static struct ins *ins__find(const char *name) 475 + { 476 + const int nmemb = ARRAY_SIZE(instructions); 477 + static bool sorted; 478 + 479 + if (!sorted) { 480 + ins__sort(); 481 + sorted = true; 482 + } 483 + 484 + return bsearch(name, instructions, nmemb, sizeof(struct ins), ins__key_cmp); 461 485 } 462 486 463 487 int symbol__annotate_init(struct map *map __maybe_unused, struct symbol *sym) ··· 1140 1122 } else if (dso__is_kcore(dso)) { 1141 1123 goto fallback; 1142 1124 } else if (readlink(symfs_filename, command, sizeof(command)) < 0 
|| 1143 - strstr(command, "[kernel.kallsyms]") || 1125 + strstr(command, DSO__NAME_KALLSYMS) || 1144 1126 access(symfs_filename, R_OK)) { 1145 1127 free(filename); 1146 1128 fallback:
+1 -1
tools/perf/util/build-id.c
··· 256 256 size_t name_len; 257 257 bool in_kernel = false; 258 258 259 - if (!pos->hit) 259 + if (!pos->hit && !dso__is_vdso(pos)) 260 260 continue; 261 261 262 262 if (dso__is_vdso(pos)) {
+1 -2
tools/perf/util/db-export.c
··· 298 298 */ 299 299 callchain_param.order = ORDER_CALLER; 300 300 err = thread__resolve_callchain(thread, &callchain_cursor, evsel, 301 - sample, NULL, NULL, 302 - sysctl_perf_event_max_stack); 301 + sample, NULL, NULL, PERF_MAX_STACK_DEPTH); 303 302 if (err) { 304 303 callchain_param.order = saved_order; 305 304 return NULL;
+3 -4
tools/perf/util/dso.c
··· 7 7 #include "auxtrace.h" 8 8 #include "util.h" 9 9 #include "debug.h" 10 + #include "vdso.h" 10 11 11 12 char dso__symtab_origin(const struct dso *dso) 12 13 { ··· 63 62 } 64 63 break; 65 64 case DSO_BINARY_TYPE__BUILD_ID_CACHE: 66 - /* skip the locally configured cache if a symfs is given */ 67 - if (symbol_conf.symfs[0] || 68 - (dso__build_id_filename(dso, filename, size) == NULL)) 65 + if (dso__build_id_filename(dso, filename, size) == NULL) 69 66 ret = -1; 70 67 break; 71 68 ··· 1168 1169 struct dso *pos; 1169 1170 1170 1171 list_for_each_entry(pos, head, node) { 1171 - if (with_hits && !pos->hit) 1172 + if (with_hits && !pos->hit && !dso__is_vdso(pos)) 1172 1173 continue; 1173 1174 if (pos->has_build_id) { 1174 1175 have_build_id = true;
+34
tools/perf/util/evlist.c
··· 44 44 perf_evlist__set_maps(evlist, cpus, threads); 45 45 fdarray__init(&evlist->pollfd, 64); 46 46 evlist->workload.pid = -1; 47 + evlist->backward = false; 47 48 } 48 49 49 50 struct perf_evlist *perf_evlist__new(void) ··· 680 679 return NULL; 681 680 } 682 681 682 + static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value) 683 + { 684 + int i; 685 + 686 + for (i = 0; i < evlist->nr_mmaps; i++) { 687 + int fd = evlist->mmap[i].fd; 688 + int err; 689 + 690 + if (fd < 0) 691 + continue; 692 + err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0); 693 + if (err) 694 + return err; 695 + } 696 + return 0; 697 + } 698 + 699 + int perf_evlist__pause(struct perf_evlist *evlist) 700 + { 701 + return perf_evlist__set_paused(evlist, true); 702 + } 703 + 704 + int perf_evlist__resume(struct perf_evlist *evlist) 705 + { 706 + return perf_evlist__set_paused(evlist, false); 707 + } 708 + 683 709 /* When check_messup is true, 'end' must points to a good entry */ 684 710 static union perf_event * 685 711 perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start, ··· 909 881 if (evlist->mmap[idx].base != NULL) { 910 882 munmap(evlist->mmap[idx].base, evlist->mmap_len); 911 883 evlist->mmap[idx].base = NULL; 884 + evlist->mmap[idx].fd = -1; 912 885 atomic_set(&evlist->mmap[idx].refcnt, 0); 913 886 } 914 887 auxtrace_mmap__munmap(&evlist->mmap[idx].auxtrace_mmap); ··· 930 901 931 902 static int perf_evlist__alloc_mmap(struct perf_evlist *evlist) 932 903 { 904 + int i; 905 + 933 906 evlist->nr_mmaps = cpu_map__nr(evlist->cpus); 934 907 if (cpu_map__empty(evlist->cpus)) 935 908 evlist->nr_mmaps = thread_map__nr(evlist->threads); 936 909 evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap)); 910 + for (i = 0; i < evlist->nr_mmaps; i++) 911 + evlist->mmap[i].fd = -1; 937 912 return evlist->mmap != NULL ? 
0 : -ENOMEM; 938 913 } 939 914 ··· 974 941 evlist->mmap[idx].base = NULL; 975 942 return -1; 976 943 } 944 + evlist->mmap[idx].fd = fd; 977 945 978 946 if (auxtrace_mmap__mmap(&evlist->mmap[idx].auxtrace_mmap, 979 947 &mp->auxtrace_mp, evlist->mmap[idx].base, fd))
+4
tools/perf/util/evlist.h
··· 28 28 struct perf_mmap { 29 29 void *base; 30 30 int mask; 31 + int fd; 31 32 atomic_t refcnt; 32 33 u64 prev; 33 34 struct auxtrace_mmap auxtrace_mmap; ··· 44 43 bool overwrite; 45 44 bool enabled; 46 45 bool has_user_cpus; 46 + bool backward; 47 47 size_t mmap_len; 48 48 int id_pos; 49 49 int is_pos; ··· 137 135 138 136 void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx); 139 137 138 + int perf_evlist__pause(struct perf_evlist *evlist); 139 + int perf_evlist__resume(struct perf_evlist *evlist); 140 140 int perf_evlist__open(struct perf_evlist *evlist); 141 141 void perf_evlist__close(struct perf_evlist *evlist); 142 142
+13
tools/perf/util/evsel.c
··· 37 37 bool clockid; 38 38 bool clockid_wrong; 39 39 bool lbr_flags; 40 + bool write_backward; 40 41 } perf_missing_features; 41 42 42 43 static clockid_t clockid; ··· 1377 1376 if (perf_missing_features.lbr_flags) 1378 1377 evsel->attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS | 1379 1378 PERF_SAMPLE_BRANCH_NO_CYCLES); 1379 + if (perf_missing_features.write_backward) 1380 + evsel->attr.write_backward = false; 1380 1381 retry_sample_id: 1381 1382 if (perf_missing_features.sample_id_all) 1382 1383 evsel->attr.sample_id_all = 0; ··· 1438 1435 */ 1439 1436 if (perf_missing_features.clockid || 1440 1437 perf_missing_features.clockid_wrong) { 1438 + err = -EINVAL; 1439 + goto out_close; 1440 + } 1441 + 1442 + if (evsel->overwrite && 1443 + perf_missing_features.write_backward) { 1441 1444 err = -EINVAL; 1442 1445 goto out_close; 1443 1446 } ··· 1508 1499 (PERF_SAMPLE_BRANCH_NO_CYCLES | 1509 1500 PERF_SAMPLE_BRANCH_NO_FLAGS))) { 1510 1501 perf_missing_features.lbr_flags = true; 1502 + goto fallback_missing_features; 1503 + } else if (!perf_missing_features.write_backward && 1504 + evsel->attr.write_backward) { 1505 + perf_missing_features.write_backward = true; 1511 1506 goto fallback_missing_features; 1512 1507 } 1513 1508
+1
tools/perf/util/evsel.h
··· 112 112 bool tracking; 113 113 bool per_pkg; 114 114 bool precise_max; 115 + bool overwrite; 115 116 /* parse modifier helper */ 116 117 int exclude_GH; 117 118 int nr_members;
+9
tools/perf/util/hist.c
··· 117 117 hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen); 118 118 hists__set_unres_dso_col_len(hists, HISTC_DSO_TO); 119 119 } 120 + 121 + if (h->branch_info->srcline_from) 122 + hists__new_col_len(hists, HISTC_SRCLINE_FROM, 123 + strlen(h->branch_info->srcline_from)); 124 + if (h->branch_info->srcline_to) 125 + hists__new_col_len(hists, HISTC_SRCLINE_TO, 126 + strlen(h->branch_info->srcline_to)); 120 127 } 121 128 122 129 if (h->mem_info) { ··· 1049 1042 if (he->branch_info) { 1050 1043 map__zput(he->branch_info->from.map); 1051 1044 map__zput(he->branch_info->to.map); 1045 + free_srcline(he->branch_info->srcline_from); 1046 + free_srcline(he->branch_info->srcline_to); 1052 1047 zfree(&he->branch_info); 1053 1048 } 1054 1049
+2
tools/perf/util/hist.h
··· 52 52 HISTC_MEM_IADDR_SYMBOL, 53 53 HISTC_TRANSACTION, 54 54 HISTC_CYCLES, 55 + HISTC_SRCLINE_FROM, 56 + HISTC_SRCLINE_TO, 55 57 HISTC_TRACE, 56 58 HISTC_NR_COLS, /* Last entry */ 57 59 };
+15 -20
tools/perf/util/machine.c
··· 43 43 44 44 machine->symbol_filter = NULL; 45 45 machine->id_hdr_size = 0; 46 + machine->kptr_restrict_warned = false; 46 47 machine->comm_exec = false; 47 48 machine->kernel_start = 0; 48 49 ··· 710 709 if (machine__is_host(machine)) { 711 710 vmlinux_name = symbol_conf.vmlinux_name; 712 711 if (!vmlinux_name) 713 - vmlinux_name = "[kernel.kallsyms]"; 712 + vmlinux_name = DSO__NAME_KALLSYMS; 714 713 715 714 kernel = machine__findnew_kernel(machine, vmlinux_name, 716 715 "[kernel]", DSO_TYPE_KERNEL); ··· 1136 1135 { 1137 1136 struct dso *kernel = machine__get_kernel(machine); 1138 1137 const char *name; 1139 - u64 addr = machine__get_running_kernel_start(machine, &name); 1138 + u64 addr; 1140 1139 int ret; 1141 1140 1142 - if (!addr || kernel == NULL) 1141 + if (kernel == NULL) 1143 1142 return -1; 1144 1143 1145 1144 ret = __machine__create_kernel_maps(machine, kernel); ··· 1161 1160 */ 1162 1161 map_groups__fixup_end(&machine->kmaps); 1163 1162 1164 - if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, 1165 - addr)) { 1163 + addr = machine__get_running_kernel_start(machine, &name); 1164 + if (!addr) { 1165 + } else if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, addr)) { 1166 1166 machine__destroy_kernel_maps(machine); 1167 1167 return -1; 1168 1168 } ··· 1771 1769 */ 1772 1770 int mix_chain_nr = i + 1 + lbr_nr + 1; 1773 1771 1774 - if (mix_chain_nr > (int)sysctl_perf_event_max_stack + PERF_MAX_BRANCH_DEPTH) { 1775 - pr_warning("corrupted callchain. 
skipping...\n"); 1776 - return 0; 1777 - } 1778 - 1779 1772 for (j = 0; j < mix_chain_nr; j++) { 1780 1773 if (callchain_param.order == ORDER_CALLEE) { 1781 1774 if (j < i + 1) ··· 1808 1811 { 1809 1812 struct branch_stack *branch = sample->branch_stack; 1810 1813 struct ip_callchain *chain = sample->callchain; 1811 - int chain_nr = min(max_stack, (int)chain->nr); 1814 + int chain_nr = chain->nr; 1812 1815 u8 cpumode = PERF_RECORD_MISC_USER; 1813 - int i, j, err; 1816 + int i, j, err, nr_entries; 1814 1817 int skip_idx = -1; 1815 1818 int first_call = 0; 1816 1819 ··· 1825 1828 * Based on DWARF debug information, some architectures skip 1826 1829 * a callchain entry saved by the kernel. 1827 1830 */ 1828 - if (chain->nr < sysctl_perf_event_max_stack) 1829 - skip_idx = arch_skip_callchain_idx(thread, chain); 1831 + skip_idx = arch_skip_callchain_idx(thread, chain); 1830 1832 1831 1833 /* 1832 1834 * Add branches to call stack for easier browsing. This gives ··· 1885 1889 } 1886 1890 1887 1891 check_calls: 1888 - if (chain->nr > sysctl_perf_event_max_stack && (int)chain->nr > max_stack) { 1889 - pr_warning("corrupted callchain. skipping...\n"); 1890 - return 0; 1891 - } 1892 - 1893 - for (i = first_call; i < chain_nr; i++) { 1892 + for (i = first_call, nr_entries = 0; 1893 + i < chain_nr && nr_entries < max_stack; i++) { 1894 1894 u64 ip; 1895 1895 1896 1896 if (callchain_param.order == ORDER_CALLEE) ··· 1899 1907 continue; 1900 1908 #endif 1901 1909 ip = chain->ips[j]; 1910 + 1911 + if (ip < PERF_CONTEXT_MAX) 1912 + ++nr_entries; 1902 1913 1903 1914 err = add_callchain_ip(thread, cursor, parent, root_al, &cpumode, ip); 1904 1915
+1
tools/perf/util/machine.h
··· 28 28 pid_t pid; 29 29 u16 id_hdr_size; 30 30 bool comm_exec; 31 + bool kptr_restrict_warned; 31 32 char *root_dir; 32 33 struct rb_root threads; 33 34 pthread_rwlock_t threads_lock;
+1 -2
tools/perf/util/scripting-engines/trace-event-perl.c
··· 264 264 goto exit; 265 265 266 266 if (thread__resolve_callchain(al->thread, &callchain_cursor, evsel, 267 - sample, NULL, NULL, 268 - sysctl_perf_event_max_stack) != 0) { 267 + sample, NULL, NULL, scripting_max_stack) != 0) { 269 268 pr_err("Failed to resolve callchain. Skipping\n"); 270 269 goto exit; 271 270 }
+84
tools/perf/util/sort.c
··· 353 353 .se_width_idx = HISTC_SRCLINE, 354 354 }; 355 355 356 + /* --sort srcline_from */ 357 + 358 + static int64_t 359 + sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right) 360 + { 361 + if (!left->branch_info->srcline_from) { 362 + struct map *map = left->branch_info->from.map; 363 + if (!map) 364 + left->branch_info->srcline_from = SRCLINE_UNKNOWN; 365 + else 366 + left->branch_info->srcline_from = get_srcline(map->dso, 367 + map__rip_2objdump(map, 368 + left->branch_info->from.al_addr), 369 + left->branch_info->from.sym, true); 370 + } 371 + if (!right->branch_info->srcline_from) { 372 + struct map *map = right->branch_info->from.map; 373 + if (!map) 374 + right->branch_info->srcline_from = SRCLINE_UNKNOWN; 375 + else 376 + right->branch_info->srcline_from = get_srcline(map->dso, 377 + map__rip_2objdump(map, 378 + right->branch_info->from.al_addr), 379 + right->branch_info->from.sym, true); 380 + } 381 + return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from); 382 + } 383 + 384 + static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf, 385 + size_t size, unsigned int width) 386 + { 387 + return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from); 388 + } 389 + 390 + struct sort_entry sort_srcline_from = { 391 + .se_header = "From Source:Line", 392 + .se_cmp = sort__srcline_from_cmp, 393 + .se_snprintf = hist_entry__srcline_from_snprintf, 394 + .se_width_idx = HISTC_SRCLINE_FROM, 395 + }; 396 + 397 + /* --sort srcline_to */ 398 + 399 + static int64_t 400 + sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right) 401 + { 402 + if (!left->branch_info->srcline_to) { 403 + struct map *map = left->branch_info->to.map; 404 + if (!map) 405 + left->branch_info->srcline_to = SRCLINE_UNKNOWN; 406 + else 407 + left->branch_info->srcline_to = get_srcline(map->dso, 408 + map__rip_2objdump(map, 409 + left->branch_info->to.al_addr), 410 + 
left->branch_info->to.sym, true); 411 + } 412 + if (!right->branch_info->srcline_to) { 413 + struct map *map = right->branch_info->to.map; 414 + if (!map) 415 + right->branch_info->srcline_to = SRCLINE_UNKNOWN; 416 + else 417 + right->branch_info->srcline_to = get_srcline(map->dso, 418 + map__rip_2objdump(map, 419 + right->branch_info->to.al_addr), 420 + right->branch_info->to.sym, true); 421 + } 422 + return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to); 423 + } 424 + 425 + static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf, 426 + size_t size, unsigned int width) 427 + { 428 + return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to); 429 + } 430 + 431 + struct sort_entry sort_srcline_to = { 432 + .se_header = "To Source:Line", 433 + .se_cmp = sort__srcline_to_cmp, 434 + .se_snprintf = hist_entry__srcline_to_snprintf, 435 + .se_width_idx = HISTC_SRCLINE_TO, 436 + }; 437 + 356 438 /* --sort srcfile */ 357 439 358 440 static char no_srcfile[1]; ··· 1429 1347 DIM(SORT_IN_TX, "in_tx", sort_in_tx), 1430 1348 DIM(SORT_ABORT, "abort", sort_abort), 1431 1349 DIM(SORT_CYCLES, "cycles", sort_cycles), 1350 + DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from), 1351 + DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to), 1432 1352 }; 1433 1353 1434 1354 #undef DIM
+2
tools/perf/util/sort.h
··· 215 215 SORT_ABORT, 216 216 SORT_IN_TX, 217 217 SORT_CYCLES, 218 + SORT_SRCLINE_FROM, 219 + SORT_SRCLINE_TO, 218 220 219 221 /* memory mode specific sort keys */ 220 222 __SORT_MEMORY_MODE,
+5 -3
tools/perf/util/stat-shadow.c
··· 94 94 { 95 95 int ctx = evsel_context(counter); 96 96 97 - if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK)) 97 + if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK) || 98 + perf_evsel__match(counter, SOFTWARE, SW_CPU_CLOCK)) 98 99 update_stats(&runtime_nsecs_stats[cpu], count[0]); 99 100 else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES)) 100 101 update_stats(&runtime_cycles_stats[ctx][cpu], count[0]); ··· 189 188 190 189 color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio); 191 190 192 - out->print_metric(out->ctx, color, "%6.2f%%", "backend cycles idle", ratio); 191 + out->print_metric(out->ctx, color, "%7.2f%%", "backend cycles idle", ratio); 193 192 } 194 193 195 194 static void print_branch_misses(int cpu, ··· 445 444 ratio = total / avg; 446 445 447 446 print_metric(ctxp, NULL, "%8.0f", "cycles / elision", ratio); 448 - } else if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK)) { 447 + } else if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK) || 448 + perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK)) { 449 449 if ((ratio = avg_stats(&walltime_nsecs_stats)) != 0) 450 450 print_metric(ctxp, NULL, "%8.3f", "CPUs utilized", 451 451 avg / ratio);
+28 -5
tools/perf/util/symbol.c
··· 1662 1662 1663 1663 build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id); 1664 1664 1665 - scnprintf(path, sizeof(path), "%s/[kernel.kcore]/%s", buildid_dir, 1666 - sbuild_id); 1665 + scnprintf(path, sizeof(path), "%s/%s/%s", buildid_dir, 1666 + DSO__NAME_KCORE, sbuild_id); 1667 1667 1668 1668 /* Use /proc/kallsyms if possible */ 1669 1669 if (is_host) { ··· 1699 1699 if (!find_matching_kcore(map, path, sizeof(path))) 1700 1700 return strdup(path); 1701 1701 1702 - scnprintf(path, sizeof(path), "%s/[kernel.kallsyms]/%s", 1703 - buildid_dir, sbuild_id); 1702 + scnprintf(path, sizeof(path), "%s/%s/%s", 1703 + buildid_dir, DSO__NAME_KALLSYMS, sbuild_id); 1704 1704 1705 1705 if (access(path, F_OK)) { 1706 1706 pr_err("No kallsyms or vmlinux with build-id %s was found\n", ··· 1769 1769 1770 1770 if (err > 0 && !dso__is_kcore(dso)) { 1771 1771 dso->binary_type = DSO_BINARY_TYPE__KALLSYMS; 1772 - dso__set_long_name(dso, "[kernel.kallsyms]", false); 1772 + dso__set_long_name(dso, DSO__NAME_KALLSYMS, false); 1773 1773 map__fixup_start(map); 1774 1774 map__fixup_end(map); 1775 1775 } ··· 2032 2032 vmlinux_path__exit(); 2033 2033 symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL; 2034 2034 symbol_conf.initialized = false; 2035 + } 2036 + 2037 + int symbol__config_symfs(const struct option *opt __maybe_unused, 2038 + const char *dir, int unset __maybe_unused) 2039 + { 2040 + char *bf = NULL; 2041 + int ret; 2042 + 2043 + symbol_conf.symfs = strdup(dir); 2044 + if (symbol_conf.symfs == NULL) 2045 + return -ENOMEM; 2046 + 2047 + /* skip the locally configured cache if a symfs is given, and 2048 + * config buildid dir to symfs/.debug 2049 + */ 2050 + ret = asprintf(&bf, "%s/%s", dir, ".debug"); 2051 + if (ret < 0) 2052 + return -ENOMEM; 2053 + 2054 + set_buildid_dir(bf); 2055 + 2056 + free(bf); 2057 + return 0; 2035 2058 }
+7
tools/perf/util/symbol.h
··· 44 44 #define DMGL_ANSI (1 << 1) /* Include const, volatile, etc */ 45 45 #endif 46 46 47 + #define DSO__NAME_KALLSYMS "[kernel.kallsyms]" 48 + #define DSO__NAME_KCORE "[kernel.kcore]" 49 + 47 50 /** struct symbol - symtab entry 48 51 * 49 52 * @ignore - resolvable but tools ignore it (e.g. idle routines) ··· 186 183 struct addr_map_symbol from; 187 184 struct addr_map_symbol to; 188 185 struct branch_flags flags; 186 + char *srcline_from; 187 + char *srcline_to; 189 188 }; 190 189 191 190 struct mem_info { ··· 292 287 bool symbol__restricted_filename(const char *filename, 293 288 const char *restricted_filename); 294 289 bool symbol__is_idle(struct symbol *sym); 290 + int symbol__config_symfs(const struct option *opt __maybe_unused, 291 + const char *dir, int unset __maybe_unused); 295 292 296 293 int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, 297 294 struct symsrc *runtime_ss, symbol_filter_t filter,
-1
tools/perf/util/top.h
··· 27 27 int max_stack; 28 28 bool hide_kernel_symbols, hide_user_symbols, zero; 29 29 bool use_tui, use_stdio; 30 - bool kptr_restrict_warned; 31 30 bool vmlinux_warned; 32 31 bool dump_symtab; 33 32 struct hist_entry *sym_filter_entry;
+2 -1
tools/perf/util/util.c
··· 33 33 unsigned int page_size; 34 34 int cacheline_size; 35 35 36 - unsigned int sysctl_perf_event_max_stack = PERF_MAX_STACK_DEPTH; 36 + int sysctl_perf_event_max_stack = PERF_MAX_STACK_DEPTH; 37 + int sysctl_perf_event_max_contexts_per_stack = PERF_MAX_CONTEXTS_PER_STACK; 37 38 38 39 bool test_attr__enabled; 39 40
+2 -1
tools/perf/util/util.h
··· 261 261 262 262 extern unsigned int page_size; 263 263 extern int cacheline_size; 264 - extern unsigned int sysctl_perf_event_max_stack; 264 + extern int sysctl_perf_event_max_stack; 265 + extern int sysctl_perf_event_max_contexts_per_stack; 265 266 266 267 struct parse_tag { 267 268 char tag;