Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Ingo Molnar:
"Two kprobes fixes and a handful of tooling fixes"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
perf tools: Make sparc64 arch point to sparc
perf symbols: Define EM_AARCH64 for older OSes
perf top: Fix SIGBUS on sparc64
perf tools: Fix probing for PERF_FLAG_FD_CLOEXEC flag
perf tools: Fix pthread_attr_setaffinity_np build error
perf tools: Define _GNU_SOURCE on pthread_attr_setaffinity_np feature check
perf bench: Fix order of arguments to memcpy_alloc_mem
kprobes/x86: Check for invalid ftrace location in __recover_probed_insn()
kprobes/x86: Use 5-byte NOP when the code might be modified by ftrace

+72 -22
+40 -14
arch/x86/kernel/kprobes/core.c
··· 223 223 __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr) 224 224 { 225 225 struct kprobe *kp; 226 + unsigned long faddr; 226 227 227 228 kp = get_kprobe((void *)addr); 228 - /* There is no probe, return original address */ 229 - if (!kp) 229 + faddr = ftrace_location(addr); 230 + /* 231 + * Addresses inside the ftrace location are refused by 232 + * arch_check_ftrace_location(). Something went terribly wrong 233 + * if such an address is checked here. 234 + */ 235 + if (WARN_ON(faddr && faddr != addr)) 236 + return 0UL; 237 + /* 238 + * Use the current code if it is not modified by Kprobe 239 + * and it cannot be modified by ftrace. 240 + */ 241 + if (!kp && !faddr) 230 242 return addr; 231 243 232 244 /* 233 - * Basically, kp->ainsn.insn has an original instruction. 234 - * However, RIP-relative instruction can not do single-stepping 235 - * at different place, __copy_instruction() tweaks the displacement of 236 - * that instruction. In that case, we can't recover the instruction 237 - * from the kp->ainsn.insn. 245 + * Basically, kp->ainsn.insn has an original instruction. 246 + * However, RIP-relative instruction can not do single-stepping 247 + * at different place, __copy_instruction() tweaks the displacement of 248 + * that instruction. In that case, we can't recover the instruction 249 + * from the kp->ainsn.insn. 238 250 * 239 - * On the other hand, kp->opcode has a copy of the first byte of 240 - * the probed instruction, which is overwritten by int3. And 241 - * the instruction at kp->addr is not modified by kprobes except 242 - * for the first byte, we can recover the original instruction 243 - * from it and kp->opcode. 251 + * On the other hand, in case on normal Kprobe, kp->opcode has a copy 252 + * of the first byte of the probed instruction, which is overwritten 253 + * by int3. And the instruction at kp->addr is not modified by kprobes 254 + * except for the first byte, we can recover the original instruction 255 + * from it and kp->opcode. 256 + * 257 + * In case of Kprobes using ftrace, we do not have a copy of 258 + * the original instruction. In fact, the ftrace location might 259 + * be modified at anytime and even could be in an inconsistent state. 260 + * Fortunately, we know that the original code is the ideal 5-byte 261 + * long NOP. 244 262 */ 245 - memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); 246 - buf[0] = kp->opcode; 263 + memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); 264 + if (faddr) 265 + memcpy(buf, ideal_nops[NOP_ATOMIC5], 5); 266 + else 267 + buf[0] = kp->opcode; 247 268 return (unsigned long)buf; 248 269 } 249 270 ··· 272 251 * Recover the probed instruction at addr for further analysis. 273 252 * Caller must lock kprobes by kprobe_mutex, or disable preemption 274 253 * for preventing to release referencing kprobes. 254 + * Returns zero if the instruction can not get recovered. 275 255 */ 276 256 unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr) 277 257 { ··· 307 285 * normally used, we just go through if there is no kprobe. 308 286 */ 309 287 __addr = recover_probed_instruction(buf, addr); 288 + if (!__addr) 289 + return 0; 310 290 kernel_insn_init(&insn, (void *)__addr, MAX_INSN_SIZE); 311 291 insn_get_length(&insn); 312 292 ··· 357 333 unsigned long recovered_insn = 358 334 recover_probed_instruction(buf, (unsigned long)src); 359 335 336 + if (!recovered_insn) 337 + return 0; 360 338 kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE); 361 339 insn_get_length(&insn); 362 340 /* Another subsystem puts a breakpoint, failed to recover */
+2
arch/x86/kernel/kprobes/opt.c
··· 259 259 */ 260 260 return 0; 261 261 recovered_insn = recover_probed_instruction(buf, addr); 262 + if (!recovered_insn) 263 + return 0; 262 264 kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE); 263 265 insn_get_length(&insn); 264 266 /* Another subsystem puts a breakpoint */
+2 -2
tools/perf/bench/mem-memcpy.c
··· 289 289 memcpy_t fn = r->fn.memcpy; 290 290 int i; 291 291 292 - memcpy_alloc_mem(&src, &dst, len); 292 + memcpy_alloc_mem(&dst, &src, len); 293 293 294 294 if (prefault) 295 295 fn(dst, src, len); ··· 312 312 void *src = NULL, *dst = NULL; 313 313 int i; 314 314 315 - memcpy_alloc_mem(&src, &dst, len); 315 + memcpy_alloc_mem(&dst, &src, len); 316 316 317 317 if (prefault) 318 318 fn(dst, src, len);
+4
tools/perf/config/Makefile.arch
··· 21 21 endif 22 22 endif 23 23 24 + ifeq ($(RAW_ARCH),sparc64) 25 + ARCH ?= sparc 26 + endif 27 + 24 28 ARCH ?= $(RAW_ARCH) 25 29 26 30 LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1)
+1 -1
tools/perf/config/feature-checks/Makefile
··· 49 49 $(BUILD) 50 50 51 51 test-pthread-attr-setaffinity-np.bin: 52 - $(BUILD) -Werror -lpthread 52 + $(BUILD) -D_GNU_SOURCE -Werror -lpthread 53 53 54 54 test-stackprotector-all.bin: 55 55 $(BUILD) -Werror -fstack-protector-all
+2 -1
tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c
··· 5 5 { 6 6 int ret = 0; 7 7 pthread_attr_t thread_attr; 8 + cpu_set_t cs; 8 9 9 10 pthread_attr_init(&thread_attr); 10 11 /* don't care abt exact args, just the API itself in libpthread */ 11 - ret = pthread_attr_setaffinity_np(&thread_attr, 0, NULL); 12 + ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cs), &cs); 12 13 13 14 return ret; 14 15 }
+15 -3
tools/perf/util/cloexec.c
··· 25 25 if (cpu < 0) 26 26 cpu = 0; 27 27 28 + /* 29 + * Using -1 for the pid is a workaround to avoid gratuitous jump label 30 + * changes. 31 + */ 28 32 while (1) { 29 33 /* check cloexec flag */ 30 34 fd = sys_perf_event_open(&attr, pid, cpu, -1, ··· 51 47 err, strerror_r(err, sbuf, sizeof(sbuf))); 52 48 53 49 /* not supported, confirm error related to PERF_FLAG_FD_CLOEXEC */ 54 - fd = sys_perf_event_open(&attr, pid, cpu, -1, 0); 50 + while (1) { 51 + fd = sys_perf_event_open(&attr, pid, cpu, -1, 0); 52 + if (fd < 0 && pid == -1 && errno == EACCES) { 53 + pid = 0; 54 + continue; 55 + } 56 + break; 57 + } 55 58 err = errno; 59 + 60 + if (fd >= 0) 61 + close(fd); 56 62 57 63 if (WARN_ONCE(fd < 0 && err != EBUSY, 58 64 "perf_event_open(..., 0) failed unexpectedly with error %d (%s)\n", 59 65 err, strerror_r(err, sbuf, sizeof(sbuf)))) 60 66 return -1; 61 - 62 - close(fd); 63 67 64 68 return 0; 65 69 }
+1 -1
tools/perf/util/evlist.h
··· 28 28 int mask; 29 29 int refcnt; 30 30 unsigned int prev; 31 - char event_copy[PERF_SAMPLE_MAX_SIZE]; 31 + char event_copy[PERF_SAMPLE_MAX_SIZE] __attribute__((aligned(8))); 32 32 }; 33 33 34 34 struct perf_evlist {
+5
tools/perf/util/symbol-elf.c
··· 11 11 #include <symbol/kallsyms.h> 12 12 #include "debug.h" 13 13 14 + #ifndef EM_AARCH64 15 + #define EM_AARCH64 183 /* ARM 64 bit */ 16 + #endif 17 + 18 + 14 19 #ifdef HAVE_CPLUS_DEMANGLE_SUPPORT 15 20 extern char *cplus_demangle(const char *, int); 16 21