Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
// SPDX-License-Identifier: GPL-2.0
/*
 * Augment the raw_syscalls tracepoints with the contents of the pointer arguments.
 *
 * Test it with:
 *
 * perf trace -e tools/perf/examples/bpf/augmented_raw_syscalls.c cat /etc/passwd > /dev/null
 *
 * This exactly matches what is marshalled into the raw_syscall:sys_enter
 * payload expected by the 'perf trace' beautifiers.
 *
 * For now it just uses the existing tracepoint augmentation code in 'perf
 * trace', in the next csets we'll hook up these with the sys_enter/sys_exit
 * code that will combine entry/exit in a strace like way.
 */
16
17#include <unistd.h>
18#include <pid_filter.h>
19
20/* bpf-output associated map */
21bpf_map(__augmented_syscalls__, PERF_EVENT_ARRAY, int, u32, __NR_CPUS__);
22
/*
 * Per-syscall-number state: 'perf trace' sets 'enabled' for the syscalls it
 * wants augmented; sys_enter()/sys_exit() bail out early when it is clear.
 */
struct syscall {
	bool enabled;
};
27bpf_map(syscalls, ARRAY, int, struct syscall, 512);
28
/*
 * Mirror of the raw_syscalls:sys_enter tracepoint payload: the common
 * tracepoint fields (collapsed into one u64 here), the syscall number and
 * its six raw arguments. Must match what the kernel hands the program.
 */
struct syscall_enter_args {
	unsigned long long common_tp_fields;
	long syscall_nr;
	unsigned long args[6];
};
/*
 * Mirror of the raw_syscalls:sys_exit tracepoint payload: common tracepoint
 * fields, the syscall number and its return value.
 */
struct syscall_exit_args {
	unsigned long long common_tp_fields;
	long syscall_nr;
	long ret;
};
/*
 * Variable payload appended after the syscall args: 'size' is the byte count
 * that probe_read_str() reported copying into value[] (presumably including
 * the terminating NUL when the string fits — see bpf_probe_read_str docs).
 */
struct augmented_filename {
	unsigned int size;
	int reserved;
	char value[256];
};
/*
 * Syscall numbers for the filename-carrying syscalls augmented below.
 * NOTE(review): these match the x86_64 syscall table — confirm this file is
 * only built/used for that arch or generate them per-arch.
 */
#define SYS_OPEN 2
#define SYS_ACCESS 21
#define SYS_OPENAT 257

/* PIDs 'perf trace' wants excluded from tracing (e.g. its own). */
pid_filter(pids_filtered);
53SEC("raw_syscalls:sys_enter")
54int sys_enter(struct syscall_enter_args *args)
55{
56 struct {
57 struct syscall_enter_args args;
58 struct augmented_filename filename;
59 } augmented_args;
60 struct syscall *syscall;
61 unsigned int len = sizeof(augmented_args);
62 const void *filename_arg = NULL;
63
64 if (pid_filter__has(&pids_filtered, getpid()))
65 return 0;
66
67 probe_read(&augmented_args.args, sizeof(augmented_args.args), args);
68
69 syscall = bpf_map_lookup_elem(&syscalls, &augmented_args.args.syscall_nr);
70 if (syscall == NULL || !syscall->enabled)
71 return 0;
72 /*
73 * Yonghong and Edward Cree sayz:
74 *
75 * https://www.spinics.net/lists/netdev/msg531645.html
76 *
77 * >> R0=inv(id=0) R1=inv2 R6=ctx(id=0,off=0,imm=0) R7=inv64 R10=fp0,call_-1
78 * >> 10: (bf) r1 = r6
79 * >> 11: (07) r1 += 16
80 * >> 12: (05) goto pc+2
81 * >> 15: (79) r3 = *(u64 *)(r1 +0)
82 * >> dereference of modified ctx ptr R1 off=16 disallowed
83 * > Aha, we at least got a different error message this time.
84 * > And indeed llvm has done that optimisation, rather than the more obvious
85 * > 11: r3 = *(u64 *)(r1 +16)
86 * > because it wants to have lots of reads share a single insn. You may be able
87 * > to defeat that optimisation by adding compiler barriers, idk. Maybe someone
88 * > with llvm knowledge can figure out how to stop it (ideally, llvm would know
89 * > when it's generating for bpf backend and not do that). -O0? ¯\_(ツ)_/¯
90 *
91 * The optimization mostly likes below:
92 *
93 * br1:
94 * ...
95 * r1 += 16
96 * goto merge
97 * br2:
98 * ...
99 * r1 += 20
100 * goto merge
101 * merge:
102 * *(u64 *)(r1 + 0)
103 *
104 * The compiler tries to merge common loads. There is no easy way to
105 * stop this compiler optimization without turning off a lot of other
106 * optimizations. The easiest way is to add barriers:
107 *
108 * __asm__ __volatile__("": : :"memory")
109 *
110 * after the ctx memory access to prevent their down stream merging.
111 */
112 switch (augmented_args.args.syscall_nr) {
113 case SYS_ACCESS:
114 case SYS_OPEN: filename_arg = (const void *)args->args[0];
115 __asm__ __volatile__("": : :"memory");
116 break;
117 case SYS_OPENAT: filename_arg = (const void *)args->args[1];
118 break;
119 }
120
121 if (filename_arg != NULL) {
122 augmented_args.filename.reserved = 0;
123 augmented_args.filename.size = probe_read_str(&augmented_args.filename.value,
124 sizeof(augmented_args.filename.value),
125 filename_arg);
126 if (augmented_args.filename.size < sizeof(augmented_args.filename.value)) {
127 len -= sizeof(augmented_args.filename.value) - augmented_args.filename.size;
128 len &= sizeof(augmented_args.filename.value) - 1;
129 }
130 } else {
131 len = sizeof(augmented_args.args);
132 }
133
134 /* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */
135 return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, &augmented_args, len);
136}
137
138SEC("raw_syscalls:sys_exit")
139int sys_exit(struct syscall_exit_args *args)
140{
141 struct syscall_exit_args exit_args;
142 struct syscall *syscall;
143
144 if (pid_filter__has(&pids_filtered, getpid()))
145 return 0;
146
147 probe_read(&exit_args, sizeof(exit_args), args);
148
149 syscall = bpf_map_lookup_elem(&syscalls, &exit_args.syscall_nr);
150 if (syscall == NULL || !syscall->enabled)
151 return 0;
152
153 return 1;
154}
155
/* Mandatory license declaration: GPL-only BPF helpers are used above. */
license(GPL);