
tracing/probes: Add fprobe events for tracing function entry and exit.

Add fprobe events for tracing function entry and exit instead of kprobe
events. With this change, function entry/exit can still be traced even if
CONFIG_KPROBES_ON_FTRACE is not available. Since CONFIG_KPROBES_ON_FTRACE
requires CONFIG_DYNAMIC_FTRACE_WITH_REGS, it is not available on
architectures that only support CONFIG_DYNAMIC_FTRACE_WITH_ARGS, which
means kprobe events cannot probe function entry/exit effectively on such
architectures. This is solved by adding fprobe events to the dynamic
events interface.

The fprobe event is a new dynamic event that traces only function
(symbol) entry and exit. It accepts non-register fetch arguments, so the
user can trace function arguments and return values.

The fprobe event syntax is:

f[:[GRP/][EVENT]] FUNCTION [FETCHARGS]
f[MAXACTIVE][:[GRP/][EVENT]] FUNCTION%return [FETCHARGS]

E.g.

# echo 'f vfs_read $arg1' >> dynamic_events
# echo 'f vfs_read%return $retval' >> dynamic_events
# cat dynamic_events
f:fprobes/vfs_read__entry vfs_read arg1=$arg1
f:fprobes/vfs_read__exit vfs_read%return arg1=$retval
# echo 1 > events/fprobes/enable
# head -n 20 trace | tail
# TASK-PID CPU# ||||| TIMESTAMP FUNCTION
# | | | ||||| | |
sh-142 [005] ...1. 448.386420: vfs_read__entry: (vfs_read+0x4/0x340) arg1=0xffff888007f7c540
sh-142 [005] ..... 448.386436: vfs_read__exit: (ksys_read+0x75/0x100 <- vfs_read) arg1=0x1
sh-142 [005] ...1. 448.386451: vfs_read__entry: (vfs_read+0x4/0x340) arg1=0xffff888007f7c540
sh-142 [005] ..... 448.386458: vfs_read__exit: (ksys_read+0x75/0x100 <- vfs_read) arg1=0x1
sh-142 [005] ...1. 448.386469: vfs_read__entry: (vfs_read+0x4/0x340) arg1=0xffff888007f7c540
sh-142 [005] ..... 448.386476: vfs_read__exit: (ksys_read+0x75/0x100 <- vfs_read) arg1=0x1
sh-142 [005] ...1. 448.602073: vfs_read__entry: (vfs_read+0x4/0x340) arg1=0xffff888007f7c540
sh-142 [005] ..... 448.602089: vfs_read__exit: (ksys_read+0x75/0x100 <- vfs_read) arg1=0x1
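
As an additional illustration (a hedged sketch, not part of the session
captured above; the maxactive value and the event name are made up for
the example), the MAXACTIVE form of the exit probe and the usual '-'
removal syntax for dynamic events would look roughly like this:

 # echo 'f32:myread_exit vfs_read%return $retval' >> dynamic_events
 # cat dynamic_events
 f32:fprobes/myread_exit vfs_read%return arg1=$retval
 # echo '-:fprobes/myread_exit' >> dynamic_events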

Link: https://lore.kernel.org/all/168507469754.913472.6112857614708350210.stgit@mhiramat.roam.corp.google.com/

Reported-by: kernel test robot <lkp@intel.com>
Link: https://lore.kernel.org/all/202302011530.7vm4O8Ro-lkp@intel.com/
Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>

+1109 -8
+5
include/linux/fprobe.h
···
 int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num);
 int register_fprobe_syms(struct fprobe *fp, const char **syms, int num);
 int unregister_fprobe(struct fprobe *fp);
+bool fprobe_is_registered(struct fprobe *fp);
 #else
 static inline int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter)
 {
···
 static inline int unregister_fprobe(struct fprobe *fp)
 {
 	return -EOPNOTSUPP;
+}
+static inline bool fprobe_is_registered(struct fprobe *fp)
+{
+	return false;
 }
 #endif
+3
include/linux/trace_events.h
···
 	TRACE_EVENT_FL_KPROBE_BIT,
 	TRACE_EVENT_FL_UPROBE_BIT,
 	TRACE_EVENT_FL_EPROBE_BIT,
+	TRACE_EVENT_FL_FPROBE_BIT,
 	TRACE_EVENT_FL_CUSTOM_BIT,
 };
···
  *  KPROBE    - Event is a kprobe
  *  UPROBE    - Event is a uprobe
  *  EPROBE    - Event is an event probe
+ *  FPROBE    - Event is an function probe
  *  CUSTOM    - Event is a custom event (to be attached to an exsiting tracepoint)
  *              This is set when the custom event has not been attached
  *              to a tracepoint yet, then it is cleared when it is.
···
 	TRACE_EVENT_FL_KPROBE	= (1 << TRACE_EVENT_FL_KPROBE_BIT),
 	TRACE_EVENT_FL_UPROBE	= (1 << TRACE_EVENT_FL_UPROBE_BIT),
 	TRACE_EVENT_FL_EPROBE	= (1 << TRACE_EVENT_FL_EPROBE_BIT),
+	TRACE_EVENT_FL_FPROBE	= (1 << TRACE_EVENT_FL_FPROBE_BIT),
 	TRACE_EVENT_FL_CUSTOM	= (1 << TRACE_EVENT_FL_CUSTOM_BIT),
 };
+14
kernel/trace/Kconfig
···
 
 	  If unsure, say N.
 
+config FPROBE_EVENTS
+	depends on FPROBE
+	depends on HAVE_REGS_AND_STACK_ACCESS_API
+	bool "Enable fprobe-based dynamic events"
+	select TRACING
+	select PROBE_EVENTS
+	select DYNAMIC_EVENTS
+	default y
+	help
+	  This allows user to add tracing events on the function entry and
+	  exit via ftrace interface. The syntax is same as the kprobe events
+	  and the kprobe events on function entry and exit will be
+	  transparently converted to this fprobe events.
+
 config KPROBE_EVENTS
 	depends on KPROBES
 	depends on HAVE_REGS_AND_STACK_ACCESS_API
+1
kernel/trace/Makefile
···
 obj-$(CONFIG_FTRACE_RECORD_RECURSION) += trace_recursion_record.o
 obj-$(CONFIG_FPROBE) += fprobe.o
 obj-$(CONFIG_RETHOOK) += rethook.o
+obj-$(CONFIG_FPROBE_EVENTS) += trace_fprobe.o
 
 obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o
 obj-$(CONFIG_RV) += rv/
+9 -2
kernel/trace/fprobe.c
···
 }
 EXPORT_SYMBOL_GPL(register_fprobe_syms);
 
+bool fprobe_is_registered(struct fprobe *fp)
+{
+	if (!fp || (fp->ops.saved_func != fprobe_handler &&
+		    fp->ops.saved_func != fprobe_kprobe_handler))
+		return false;
+	return true;
+}
+
 /**
  * unregister_fprobe() - Unregister fprobe from ftrace
  * @fp: A fprobe data structure to be unregistered.
···
 {
 	int ret;
 
-	if (!fp || (fp->ops.saved_func != fprobe_handler &&
-		    fp->ops.saved_func != fprobe_kprobe_handler))
+	if (!fprobe_is_registered(fp))
 		return -EINVAL;
 
 	/*
+7 -1
kernel/trace/trace.c
···
 	"  uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
 	"\t\t\t Write into this file to define/undefine new trace events.\n"
 #endif
-#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
+#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) || \
+    defined(CONFIG_FPROBE_EVENTS)
 	"\t accepts: event-definitions (one definition per line)\n"
+#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
 	"\t Format: p[:[<group>/][<event>]] <place> [<args>]\n"
 	"\t         r[maxactive][:[<group>/][<event>]] <place> [<args>]\n"
+#endif
+#ifdef CONFIG_FPROBE_EVENTS
 	"\t         f[:[<group>/][<event>]] <func-name>[%return] [<args>]\n"
+#endif
 #ifdef CONFIG_HIST_TRIGGERS
 	"\t         s:[synthetic/]<event> <field> [<field>]\n"
 #endif
+11
kernel/trace/trace.h
···
 	unsigned long		ret_ip;
 };
 
+struct fentry_trace_entry_head {
+	struct trace_entry	ent;
+	unsigned long		ip;
+};
+
+struct fexit_trace_entry_head {
+	struct trace_entry	ent;
+	unsigned long		func;
+	unsigned long		ret_ip;
+};
+
 #define TRACE_BUF_SIZE		1024
 
 struct trace_array;
+1053
kernel/trace/trace_fprobe.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Fprobe-based tracing events 4 + * Copyright (C) 2022 Google LLC. 5 + */ 6 + #define pr_fmt(fmt) "trace_fprobe: " fmt 7 + 8 + #include <linux/fprobe.h> 9 + #include <linux/module.h> 10 + #include <linux/rculist.h> 11 + #include <linux/security.h> 12 + #include <linux/uaccess.h> 13 + 14 + #include "trace_dynevent.h" 15 + #include "trace_probe.h" 16 + #include "trace_probe_kernel.h" 17 + #include "trace_probe_tmpl.h" 18 + 19 + #define FPROBE_EVENT_SYSTEM "fprobes" 20 + #define RETHOOK_MAXACTIVE_MAX 4096 21 + 22 + static int trace_fprobe_create(const char *raw_command); 23 + static int trace_fprobe_show(struct seq_file *m, struct dyn_event *ev); 24 + static int trace_fprobe_release(struct dyn_event *ev); 25 + static bool trace_fprobe_is_busy(struct dyn_event *ev); 26 + static bool trace_fprobe_match(const char *system, const char *event, 27 + int argc, const char **argv, struct dyn_event *ev); 28 + 29 + static struct dyn_event_operations trace_fprobe_ops = { 30 + .create = trace_fprobe_create, 31 + .show = trace_fprobe_show, 32 + .is_busy = trace_fprobe_is_busy, 33 + .free = trace_fprobe_release, 34 + .match = trace_fprobe_match, 35 + }; 36 + 37 + /* 38 + * Fprobe event core functions 39 + */ 40 + struct trace_fprobe { 41 + struct dyn_event devent; 42 + struct fprobe fp; 43 + const char *symbol; 44 + struct trace_probe tp; 45 + }; 46 + 47 + static bool is_trace_fprobe(struct dyn_event *ev) 48 + { 49 + return ev->ops == &trace_fprobe_ops; 50 + } 51 + 52 + static struct trace_fprobe *to_trace_fprobe(struct dyn_event *ev) 53 + { 54 + return container_of(ev, struct trace_fprobe, devent); 55 + } 56 + 57 + /** 58 + * for_each_trace_fprobe - iterate over the trace_fprobe list 59 + * @pos: the struct trace_fprobe * for each entry 60 + * @dpos: the struct dyn_event * to use as a loop cursor 61 + */ 62 + #define for_each_trace_fprobe(pos, dpos) \ 63 + for_each_dyn_event(dpos) \ 64 + if (is_trace_fprobe(dpos) && (pos = to_trace_fprobe(dpos))) 65 + 66 + static bool trace_fprobe_is_return(struct trace_fprobe *tf) 67 + { 68 + return tf->fp.exit_handler != NULL; 69 + } 70 + 71 + static const char *trace_fprobe_symbol(struct trace_fprobe *tf) 72 + { 73 + return tf->symbol ? 
tf->symbol : "unknown"; 74 + } 75 + 76 + static bool trace_fprobe_is_busy(struct dyn_event *ev) 77 + { 78 + struct trace_fprobe *tf = to_trace_fprobe(ev); 79 + 80 + return trace_probe_is_enabled(&tf->tp); 81 + } 82 + 83 + static bool trace_fprobe_match_command_head(struct trace_fprobe *tf, 84 + int argc, const char **argv) 85 + { 86 + char buf[MAX_ARGSTR_LEN + 1]; 87 + 88 + if (!argc) 89 + return true; 90 + 91 + snprintf(buf, sizeof(buf), "%s", trace_fprobe_symbol(tf)); 92 + if (strcmp(buf, argv[0])) 93 + return false; 94 + argc--; argv++; 95 + 96 + return trace_probe_match_command_args(&tf->tp, argc, argv); 97 + } 98 + 99 + static bool trace_fprobe_match(const char *system, const char *event, 100 + int argc, const char **argv, struct dyn_event *ev) 101 + { 102 + struct trace_fprobe *tf = to_trace_fprobe(ev); 103 + 104 + if (event[0] != '\0' && strcmp(trace_probe_name(&tf->tp), event)) 105 + return false; 106 + 107 + if (system && strcmp(trace_probe_group_name(&tf->tp), system)) 108 + return false; 109 + 110 + return trace_fprobe_match_command_head(tf, argc, argv); 111 + } 112 + 113 + static bool trace_fprobe_is_registered(struct trace_fprobe *tf) 114 + { 115 + return fprobe_is_registered(&tf->fp); 116 + } 117 + 118 + /* 119 + * Note that we don't verify the fetch_insn code, since it does not come 120 + * from user space. 121 + */ 122 + static int 123 + process_fetch_insn(struct fetch_insn *code, void *rec, void *dest, 124 + void *base) 125 + { 126 + struct pt_regs *regs = rec; 127 + unsigned long val; 128 + int ret; 129 + 130 + retry: 131 + /* 1st stage: get value from context */ 132 + switch (code->op) { 133 + case FETCH_OP_STACK: 134 + val = regs_get_kernel_stack_nth(regs, code->param); 135 + break; 136 + case FETCH_OP_STACKP: 137 + val = kernel_stack_pointer(regs); 138 + break; 139 + case FETCH_OP_RETVAL: 140 + val = regs_return_value(regs); 141 + break; 142 + #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API 143 + case FETCH_OP_ARG: 144 + val = regs_get_kernel_argument(regs, code->param); 145 + break; 146 + #endif 147 + case FETCH_NOP_SYMBOL: /* Ignore a place holder */ 148 + code++; 149 + goto retry; 150 + default: 151 + ret = process_common_fetch_insn(code, &val); 152 + if (ret < 0) 153 + return ret; 154 + } 155 + code++; 156 + 157 + return process_fetch_insn_bottom(code, val, dest, base); 158 + } 159 + NOKPROBE_SYMBOL(process_fetch_insn) 160 + 161 + /* function entry handler */ 162 + static nokprobe_inline void 163 + __fentry_trace_func(struct trace_fprobe *tf, unsigned long entry_ip, 164 + struct pt_regs *regs, 165 + struct trace_event_file *trace_file) 166 + { 167 + struct fentry_trace_entry_head *entry; 168 + struct trace_event_call *call = trace_probe_event_call(&tf->tp); 169 + struct trace_event_buffer fbuffer; 170 + int dsize; 171 + 172 + if (WARN_ON_ONCE(call != trace_file->event_call)) 173 + return; 174 + 175 + if (trace_trigger_soft_disabled(trace_file)) 176 + return; 177 + 178 + dsize = __get_data_size(&tf->tp, regs); 179 + 180 + entry = trace_event_buffer_reserve(&fbuffer, trace_file, 181 + sizeof(*entry) + tf->tp.size + dsize); 182 + if (!entry) 183 + return; 184 + 185 + fbuffer.regs = regs; 186 + entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event); 187 + entry->ip = entry_ip; 188 + store_trace_args(&entry[1], &tf->tp, regs, sizeof(*entry), dsize); 189 + 190 + trace_event_buffer_commit(&fbuffer); 191 + } 192 + 193 + static void 194 + fentry_trace_func(struct trace_fprobe *tf, unsigned long entry_ip, 195 + struct pt_regs *regs) 196 + { 197 + struct event_file_link 
*link; 198 + 199 + trace_probe_for_each_link_rcu(link, &tf->tp) 200 + __fentry_trace_func(tf, entry_ip, regs, link->file); 201 + } 202 + NOKPROBE_SYMBOL(fentry_trace_func); 203 + 204 + /* Kretprobe handler */ 205 + static nokprobe_inline void 206 + __fexit_trace_func(struct trace_fprobe *tf, unsigned long entry_ip, 207 + unsigned long ret_ip, struct pt_regs *regs, 208 + struct trace_event_file *trace_file) 209 + { 210 + struct fexit_trace_entry_head *entry; 211 + struct trace_event_buffer fbuffer; 212 + struct trace_event_call *call = trace_probe_event_call(&tf->tp); 213 + int dsize; 214 + 215 + if (WARN_ON_ONCE(call != trace_file->event_call)) 216 + return; 217 + 218 + if (trace_trigger_soft_disabled(trace_file)) 219 + return; 220 + 221 + dsize = __get_data_size(&tf->tp, regs); 222 + 223 + entry = trace_event_buffer_reserve(&fbuffer, trace_file, 224 + sizeof(*entry) + tf->tp.size + dsize); 225 + if (!entry) 226 + return; 227 + 228 + fbuffer.regs = regs; 229 + entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event); 230 + entry->func = entry_ip; 231 + entry->ret_ip = ret_ip; 232 + store_trace_args(&entry[1], &tf->tp, regs, sizeof(*entry), dsize); 233 + 234 + trace_event_buffer_commit(&fbuffer); 235 + } 236 + 237 + static void 238 + fexit_trace_func(struct trace_fprobe *tf, unsigned long entry_ip, 239 + unsigned long ret_ip, struct pt_regs *regs) 240 + { 241 + struct event_file_link *link; 242 + 243 + trace_probe_for_each_link_rcu(link, &tf->tp) 244 + __fexit_trace_func(tf, entry_ip, ret_ip, regs, link->file); 245 + } 246 + NOKPROBE_SYMBOL(fexit_trace_func); 247 + 248 + #ifdef CONFIG_PERF_EVENTS 249 + 250 + static int fentry_perf_func(struct trace_fprobe *tf, unsigned long entry_ip, 251 + struct pt_regs *regs) 252 + { 253 + struct trace_event_call *call = trace_probe_event_call(&tf->tp); 254 + struct fentry_trace_entry_head *entry; 255 + struct hlist_head *head; 256 + int size, __size, dsize; 257 + int rctx; 258 + 259 + head = this_cpu_ptr(call->perf_events); 260 + if (hlist_empty(head)) 261 + return 0; 262 + 263 + dsize = __get_data_size(&tf->tp, regs); 264 + __size = sizeof(*entry) + tf->tp.size + dsize; 265 + size = ALIGN(__size + sizeof(u32), sizeof(u64)); 266 + size -= sizeof(u32); 267 + 268 + entry = perf_trace_buf_alloc(size, NULL, &rctx); 269 + if (!entry) 270 + return 0; 271 + 272 + entry->ip = entry_ip; 273 + memset(&entry[1], 0, dsize); 274 + store_trace_args(&entry[1], &tf->tp, regs, sizeof(*entry), dsize); 275 + perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs, 276 + head, NULL); 277 + return 0; 278 + } 279 + NOKPROBE_SYMBOL(fentry_perf_func); 280 + 281 + static void 282 + fexit_perf_func(struct trace_fprobe *tf, unsigned long entry_ip, 283 + unsigned long ret_ip, struct pt_regs *regs) 284 + { 285 + struct trace_event_call *call = trace_probe_event_call(&tf->tp); 286 + struct fexit_trace_entry_head *entry; 287 + struct hlist_head *head; 288 + int size, __size, dsize; 289 + int rctx; 290 + 291 + head = this_cpu_ptr(call->perf_events); 292 + if (hlist_empty(head)) 293 + return; 294 + 295 + dsize = __get_data_size(&tf->tp, regs); 296 + __size = sizeof(*entry) + tf->tp.size + dsize; 297 + size = ALIGN(__size + sizeof(u32), sizeof(u64)); 298 + size -= sizeof(u32); 299 + 300 + entry = perf_trace_buf_alloc(size, NULL, &rctx); 301 + if (!entry) 302 + return; 303 + 304 + entry->func = entry_ip; 305 + entry->ret_ip = ret_ip; 306 + store_trace_args(&entry[1], &tf->tp, regs, sizeof(*entry), dsize); 307 + perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, 
regs, 308 + head, NULL); 309 + } 310 + NOKPROBE_SYMBOL(fexit_perf_func); 311 + #endif /* CONFIG_PERF_EVENTS */ 312 + 313 + static int fentry_dispatcher(struct fprobe *fp, unsigned long entry_ip, 314 + unsigned long ret_ip, struct pt_regs *regs, 315 + void *entry_data) 316 + { 317 + struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp); 318 + int ret = 0; 319 + 320 + if (trace_probe_test_flag(&tf->tp, TP_FLAG_TRACE)) 321 + fentry_trace_func(tf, entry_ip, regs); 322 + #ifdef CONFIG_PERF_EVENTS 323 + if (trace_probe_test_flag(&tf->tp, TP_FLAG_PROFILE)) 324 + ret = fentry_perf_func(tf, entry_ip, regs); 325 + #endif 326 + return ret; 327 + } 328 + NOKPROBE_SYMBOL(fentry_dispatcher); 329 + 330 + static void fexit_dispatcher(struct fprobe *fp, unsigned long entry_ip, 331 + unsigned long ret_ip, struct pt_regs *regs, 332 + void *entry_data) 333 + { 334 + struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp); 335 + 336 + if (trace_probe_test_flag(&tf->tp, TP_FLAG_TRACE)) 337 + fexit_trace_func(tf, entry_ip, ret_ip, regs); 338 + #ifdef CONFIG_PERF_EVENTS 339 + if (trace_probe_test_flag(&tf->tp, TP_FLAG_PROFILE)) 340 + fexit_perf_func(tf, entry_ip, ret_ip, regs); 341 + #endif 342 + } 343 + NOKPROBE_SYMBOL(fexit_dispatcher); 344 + 345 + static void free_trace_fprobe(struct trace_fprobe *tf) 346 + { 347 + if (tf) { 348 + trace_probe_cleanup(&tf->tp); 349 + kfree(tf->symbol); 350 + kfree(tf); 351 + } 352 + } 353 + 354 + /* 355 + * Allocate new trace_probe and initialize it (including fprobe). 356 + */ 357 + static struct trace_fprobe *alloc_trace_fprobe(const char *group, 358 + const char *event, 359 + const char *symbol, 360 + int maxactive, 361 + int nargs, bool is_return) 362 + { 363 + struct trace_fprobe *tf; 364 + int ret = -ENOMEM; 365 + 366 + tf = kzalloc(struct_size(tf, tp.args, nargs), GFP_KERNEL); 367 + if (!tf) 368 + return ERR_PTR(ret); 369 + 370 + tf->symbol = kstrdup(symbol, GFP_KERNEL); 371 + if (!tf->symbol) 372 + goto error; 373 + 374 + if (is_return) 375 + tf->fp.exit_handler = fexit_dispatcher; 376 + else 377 + tf->fp.entry_handler = fentry_dispatcher; 378 + 379 + tf->fp.nr_maxactive = maxactive; 380 + 381 + ret = trace_probe_init(&tf->tp, event, group, false); 382 + if (ret < 0) 383 + goto error; 384 + 385 + dyn_event_init(&tf->devent, &trace_fprobe_ops); 386 + return tf; 387 + error: 388 + free_trace_fprobe(tf); 389 + return ERR_PTR(ret); 390 + } 391 + 392 + static struct trace_fprobe *find_trace_fprobe(const char *event, 393 + const char *group) 394 + { 395 + struct dyn_event *pos; 396 + struct trace_fprobe *tf; 397 + 398 + for_each_trace_fprobe(tf, pos) 399 + if (strcmp(trace_probe_name(&tf->tp), event) == 0 && 400 + strcmp(trace_probe_group_name(&tf->tp), group) == 0) 401 + return tf; 402 + return NULL; 403 + } 404 + 405 + static inline int __enable_trace_fprobe(struct trace_fprobe *tf) 406 + { 407 + if (trace_fprobe_is_registered(tf)) 408 + enable_fprobe(&tf->fp); 409 + 410 + return 0; 411 + } 412 + 413 + static void __disable_trace_fprobe(struct trace_probe *tp) 414 + { 415 + struct trace_fprobe *tf; 416 + 417 + list_for_each_entry(tf, trace_probe_probe_list(tp), tp.list) { 418 + if (!trace_fprobe_is_registered(tf)) 419 + continue; 420 + disable_fprobe(&tf->fp); 421 + } 422 + } 423 + 424 + /* 425 + * Enable trace_probe 426 + * if the file is NULL, enable "perf" handler, or enable "trace" handler. 
427 + */ 428 + static int enable_trace_fprobe(struct trace_event_call *call, 429 + struct trace_event_file *file) 430 + { 431 + struct trace_probe *tp; 432 + struct trace_fprobe *tf; 433 + bool enabled; 434 + int ret = 0; 435 + 436 + tp = trace_probe_primary_from_call(call); 437 + if (WARN_ON_ONCE(!tp)) 438 + return -ENODEV; 439 + enabled = trace_probe_is_enabled(tp); 440 + 441 + /* This also changes "enabled" state */ 442 + if (file) { 443 + ret = trace_probe_add_file(tp, file); 444 + if (ret) 445 + return ret; 446 + } else 447 + trace_probe_set_flag(tp, TP_FLAG_PROFILE); 448 + 449 + if (!enabled) { 450 + list_for_each_entry(tf, trace_probe_probe_list(tp), tp.list) { 451 + /* TODO: check the fprobe is gone */ 452 + __enable_trace_fprobe(tf); 453 + } 454 + } 455 + 456 + return 0; 457 + } 458 + 459 + /* 460 + * Disable trace_probe 461 + * if the file is NULL, disable "perf" handler, or disable "trace" handler. 462 + */ 463 + static int disable_trace_fprobe(struct trace_event_call *call, 464 + struct trace_event_file *file) 465 + { 466 + struct trace_probe *tp; 467 + 468 + tp = trace_probe_primary_from_call(call); 469 + if (WARN_ON_ONCE(!tp)) 470 + return -ENODEV; 471 + 472 + if (file) { 473 + if (!trace_probe_get_file_link(tp, file)) 474 + return -ENOENT; 475 + if (!trace_probe_has_single_file(tp)) 476 + goto out; 477 + trace_probe_clear_flag(tp, TP_FLAG_TRACE); 478 + } else 479 + trace_probe_clear_flag(tp, TP_FLAG_PROFILE); 480 + 481 + if (!trace_probe_is_enabled(tp)) 482 + __disable_trace_fprobe(tp); 483 + 484 + out: 485 + if (file) 486 + /* 487 + * Synchronization is done in below function. For perf event, 488 + * file == NULL and perf_trace_event_unreg() calls 489 + * tracepoint_synchronize_unregister() to ensure synchronize 490 + * event. We don't need to care about it. 
491 + */ 492 + trace_probe_remove_file(tp, file); 493 + 494 + return 0; 495 + } 496 + 497 + /* Event entry printers */ 498 + static enum print_line_t 499 + print_fentry_event(struct trace_iterator *iter, int flags, 500 + struct trace_event *event) 501 + { 502 + struct fentry_trace_entry_head *field; 503 + struct trace_seq *s = &iter->seq; 504 + struct trace_probe *tp; 505 + 506 + field = (struct fentry_trace_entry_head *)iter->ent; 507 + tp = trace_probe_primary_from_call( 508 + container_of(event, struct trace_event_call, event)); 509 + if (WARN_ON_ONCE(!tp)) 510 + goto out; 511 + 512 + trace_seq_printf(s, "%s: (", trace_probe_name(tp)); 513 + 514 + if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET)) 515 + goto out; 516 + 517 + trace_seq_putc(s, ')'); 518 + 519 + if (trace_probe_print_args(s, tp->args, tp->nr_args, 520 + (u8 *)&field[1], field) < 0) 521 + goto out; 522 + 523 + trace_seq_putc(s, '\n'); 524 + out: 525 + return trace_handle_return(s); 526 + } 527 + 528 + static enum print_line_t 529 + print_fexit_event(struct trace_iterator *iter, int flags, 530 + struct trace_event *event) 531 + { 532 + struct fexit_trace_entry_head *field; 533 + struct trace_seq *s = &iter->seq; 534 + struct trace_probe *tp; 535 + 536 + field = (struct fexit_trace_entry_head *)iter->ent; 537 + tp = trace_probe_primary_from_call( 538 + container_of(event, struct trace_event_call, event)); 539 + if (WARN_ON_ONCE(!tp)) 540 + goto out; 541 + 542 + trace_seq_printf(s, "%s: (", trace_probe_name(tp)); 543 + 544 + if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET)) 545 + goto out; 546 + 547 + trace_seq_puts(s, " <- "); 548 + 549 + if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET)) 550 + goto out; 551 + 552 + trace_seq_putc(s, ')'); 553 + 554 + if (trace_probe_print_args(s, tp->args, tp->nr_args, 555 + (u8 *)&field[1], field) < 0) 556 + goto out; 557 + 558 + trace_seq_putc(s, '\n'); 559 + 560 + out: 561 + return trace_handle_return(s); 562 + } 563 + 564 + static int fentry_event_define_fields(struct trace_event_call *event_call) 565 + { 566 + int ret; 567 + struct fentry_trace_entry_head field; 568 + struct trace_probe *tp; 569 + 570 + tp = trace_probe_primary_from_call(event_call); 571 + if (WARN_ON_ONCE(!tp)) 572 + return -ENOENT; 573 + 574 + DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0); 575 + 576 + return traceprobe_define_arg_fields(event_call, sizeof(field), tp); 577 + } 578 + 579 + static int fexit_event_define_fields(struct trace_event_call *event_call) 580 + { 581 + int ret; 582 + struct fexit_trace_entry_head field; 583 + struct trace_probe *tp; 584 + 585 + tp = trace_probe_primary_from_call(event_call); 586 + if (WARN_ON_ONCE(!tp)) 587 + return -ENOENT; 588 + 589 + DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0); 590 + DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0); 591 + 592 + return traceprobe_define_arg_fields(event_call, sizeof(field), tp); 593 + } 594 + 595 + static struct trace_event_functions fentry_funcs = { 596 + .trace = print_fentry_event 597 + }; 598 + 599 + static struct trace_event_functions fexit_funcs = { 600 + .trace = print_fexit_event 601 + }; 602 + 603 + static struct trace_event_fields fentry_fields_array[] = { 604 + { .type = TRACE_FUNCTION_TYPE, 605 + .define_fields = fentry_event_define_fields }, 606 + {} 607 + }; 608 + 609 + static struct trace_event_fields fexit_fields_array[] = { 610 + { .type = TRACE_FUNCTION_TYPE, 611 + .define_fields = fexit_event_define_fields }, 612 + {} 613 + }; 614 + 615 + 
static int fprobe_register(struct trace_event_call *event, 616 + enum trace_reg type, void *data); 617 + 618 + static inline void init_trace_event_call(struct trace_fprobe *tf) 619 + { 620 + struct trace_event_call *call = trace_probe_event_call(&tf->tp); 621 + 622 + if (trace_fprobe_is_return(tf)) { 623 + call->event.funcs = &fexit_funcs; 624 + call->class->fields_array = fexit_fields_array; 625 + } else { 626 + call->event.funcs = &fentry_funcs; 627 + call->class->fields_array = fentry_fields_array; 628 + } 629 + 630 + call->flags = TRACE_EVENT_FL_FPROBE; 631 + call->class->reg = fprobe_register; 632 + } 633 + 634 + static int register_fprobe_event(struct trace_fprobe *tf) 635 + { 636 + init_trace_event_call(tf); 637 + 638 + return trace_probe_register_event_call(&tf->tp); 639 + } 640 + 641 + static int unregister_fprobe_event(struct trace_fprobe *tf) 642 + { 643 + return trace_probe_unregister_event_call(&tf->tp); 644 + } 645 + 646 + /* Internal register function - just handle fprobe and flags */ 647 + static int __register_trace_fprobe(struct trace_fprobe *tf) 648 + { 649 + int i, ret; 650 + 651 + /* Should we need new LOCKDOWN flag for fprobe? */ 652 + ret = security_locked_down(LOCKDOWN_KPROBES); 653 + if (ret) 654 + return ret; 655 + 656 + if (trace_fprobe_is_registered(tf)) 657 + return -EINVAL; 658 + 659 + for (i = 0; i < tf->tp.nr_args; i++) { 660 + ret = traceprobe_update_arg(&tf->tp.args[i]); 661 + if (ret) 662 + return ret; 663 + } 664 + 665 + /* Set/clear disabled flag according to tp->flag */ 666 + if (trace_probe_is_enabled(&tf->tp)) 667 + tf->fp.flags &= ~FPROBE_FL_DISABLED; 668 + else 669 + tf->fp.flags |= FPROBE_FL_DISABLED; 670 + 671 + /* TODO: handle filter, nofilter or symbol list */ 672 + return register_fprobe(&tf->fp, tf->symbol, NULL); 673 + } 674 + 675 + /* Internal unregister function - just handle fprobe and flags */ 676 + static void __unregister_trace_fprobe(struct trace_fprobe *tf) 677 + { 678 + if (trace_fprobe_is_registered(tf)) { 679 + unregister_fprobe(&tf->fp); 680 + memset(&tf->fp, 0, sizeof(tf->fp)); 681 + } 682 + } 683 + 684 + /* TODO: make this trace_*probe common function */ 685 + /* Unregister a trace_probe and probe_event */ 686 + static int unregister_trace_fprobe(struct trace_fprobe *tf) 687 + { 688 + /* If other probes are on the event, just unregister fprobe */ 689 + if (trace_probe_has_sibling(&tf->tp)) 690 + goto unreg; 691 + 692 + /* Enabled event can not be unregistered */ 693 + if (trace_probe_is_enabled(&tf->tp)) 694 + return -EBUSY; 695 + 696 + /* If there's a reference to the dynamic event */ 697 + if (trace_event_dyn_busy(trace_probe_event_call(&tf->tp))) 698 + return -EBUSY; 699 + 700 + /* Will fail if probe is being used by ftrace or perf */ 701 + if (unregister_fprobe_event(tf)) 702 + return -EBUSY; 703 + 704 + unreg: 705 + __unregister_trace_fprobe(tf); 706 + dyn_event_remove(&tf->devent); 707 + trace_probe_unlink(&tf->tp); 708 + 709 + return 0; 710 + } 711 + 712 + static bool trace_fprobe_has_same_fprobe(struct trace_fprobe *orig, 713 + struct trace_fprobe *comp) 714 + { 715 + struct trace_probe_event *tpe = orig->tp.event; 716 + int i; 717 + 718 + list_for_each_entry(orig, &tpe->probes, tp.list) { 719 + if (strcmp(trace_fprobe_symbol(orig), 720 + trace_fprobe_symbol(comp))) 721 + continue; 722 + 723 + /* 724 + * trace_probe_compare_arg_type() ensured that nr_args and 725 + * each argument name and type are same. Let's compare comm. 
726 + */ 727 + for (i = 0; i < orig->tp.nr_args; i++) { 728 + if (strcmp(orig->tp.args[i].comm, 729 + comp->tp.args[i].comm)) 730 + break; 731 + } 732 + 733 + if (i == orig->tp.nr_args) 734 + return true; 735 + } 736 + 737 + return false; 738 + } 739 + 740 + static int append_trace_fprobe(struct trace_fprobe *tf, struct trace_fprobe *to) 741 + { 742 + int ret; 743 + 744 + if (trace_fprobe_is_return(tf) != trace_fprobe_is_return(to)) { 745 + trace_probe_log_set_index(0); 746 + trace_probe_log_err(0, DIFF_PROBE_TYPE); 747 + return -EEXIST; 748 + } 749 + ret = trace_probe_compare_arg_type(&tf->tp, &to->tp); 750 + if (ret) { 751 + /* Note that argument starts index = 2 */ 752 + trace_probe_log_set_index(ret + 1); 753 + trace_probe_log_err(0, DIFF_ARG_TYPE); 754 + return -EEXIST; 755 + } 756 + if (trace_fprobe_has_same_fprobe(to, tf)) { 757 + trace_probe_log_set_index(0); 758 + trace_probe_log_err(0, SAME_PROBE); 759 + return -EEXIST; 760 + } 761 + 762 + /* Append to existing event */ 763 + ret = trace_probe_append(&tf->tp, &to->tp); 764 + if (ret) 765 + return ret; 766 + 767 + ret = __register_trace_fprobe(tf); 768 + if (ret) 769 + trace_probe_unlink(&tf->tp); 770 + else 771 + dyn_event_add(&tf->devent, trace_probe_event_call(&tf->tp)); 772 + 773 + return ret; 774 + } 775 + 776 + /* Register a trace_probe and probe_event */ 777 + static int register_trace_fprobe(struct trace_fprobe *tf) 778 + { 779 + struct trace_fprobe *old_tf; 780 + int ret; 781 + 782 + mutex_lock(&event_mutex); 783 + 784 + old_tf = find_trace_fprobe(trace_probe_name(&tf->tp), 785 + trace_probe_group_name(&tf->tp)); 786 + if (old_tf) { 787 + ret = append_trace_fprobe(tf, old_tf); 788 + goto end; 789 + } 790 + 791 + /* Register new event */ 792 + ret = register_fprobe_event(tf); 793 + if (ret) { 794 + if (ret == -EEXIST) { 795 + trace_probe_log_set_index(0); 796 + trace_probe_log_err(0, EVENT_EXIST); 797 + } else 798 + pr_warn("Failed to register probe event(%d)\n", ret); 799 + goto end; 800 + } 801 + 802 + /* Register fprobe */ 803 + ret = __register_trace_fprobe(tf); 804 + if (ret < 0) 805 + unregister_fprobe_event(tf); 806 + else 807 + dyn_event_add(&tf->devent, trace_probe_event_call(&tf->tp)); 808 + 809 + end: 810 + mutex_unlock(&event_mutex); 811 + return ret; 812 + } 813 + 814 + static int __trace_fprobe_create(int argc, const char *argv[]) 815 + { 816 + /* 817 + * Argument syntax: 818 + * - Add fentry probe: 819 + * f[:[GRP/][EVENT]] [MOD:]KSYM [FETCHARGS] 820 + * - Add fexit probe: 821 + * f[N][:[GRP/][EVENT]] [MOD:]KSYM%return [FETCHARGS] 822 + * 823 + * Fetch args: 824 + * $retval : fetch return value 825 + * $stack : fetch stack address 826 + * $stackN : fetch Nth entry of stack (N:0-) 827 + * $argN : fetch Nth argument (N:1-) 828 + * $comm : fetch current task comm 829 + * @ADDR : fetch memory at ADDR (ADDR should be in kernel) 830 + * @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol) 831 + * Dereferencing memory fetch: 832 + * +|-offs(ARG) : fetch memory at ARG +|- offs address. 833 + * Alias name of args: 834 + * NAME=FETCHARG : set NAME as alias of FETCHARG. 835 + * Type of args: 836 + * FETCHARG:TYPE : use TYPE instead of unsigned long. 
837 + */ 838 + struct trace_fprobe *tf = NULL; 839 + int i, len, ret = 0; 840 + bool is_return = false; 841 + char *symbol = NULL, *tmp = NULL; 842 + const char *event = NULL, *group = FPROBE_EVENT_SYSTEM; 843 + int maxactive = 0; 844 + char buf[MAX_EVENT_NAME_LEN]; 845 + char gbuf[MAX_EVENT_NAME_LEN]; 846 + unsigned int flags = TPARG_FL_KERNEL | TPARG_FL_FPROBE; 847 + 848 + if (argv[0][0] != 'f' || argc < 2) 849 + return -ECANCELED; 850 + 851 + trace_probe_log_init("trace_fprobe", argc, argv); 852 + 853 + event = strchr(&argv[0][1], ':'); 854 + if (event) 855 + event++; 856 + 857 + if (isdigit(argv[0][1])) { 858 + if (event) 859 + len = event - &argv[0][1] - 1; 860 + else 861 + len = strlen(&argv[0][1]); 862 + if (len > MAX_EVENT_NAME_LEN - 1) { 863 + trace_probe_log_err(1, BAD_MAXACT); 864 + goto parse_error; 865 + } 866 + memcpy(buf, &argv[0][1], len); 867 + buf[len] = '\0'; 868 + ret = kstrtouint(buf, 0, &maxactive); 869 + if (ret || !maxactive) { 870 + trace_probe_log_err(1, BAD_MAXACT); 871 + goto parse_error; 872 + } 873 + /* fprobe rethook instances are iterated over via a list. The 874 + * maximum should stay reasonable. 875 + */ 876 + if (maxactive > RETHOOK_MAXACTIVE_MAX) { 877 + trace_probe_log_err(1, MAXACT_TOO_BIG); 878 + goto parse_error; 879 + } 880 + } 881 + 882 + trace_probe_log_set_index(1); 883 + 884 + /* a symbol specified */ 885 + symbol = kstrdup(argv[1], GFP_KERNEL); 886 + if (!symbol) 887 + return -ENOMEM; 888 + 889 + tmp = strchr(symbol, '%'); 890 + if (tmp) { 891 + if (!strcmp(tmp, "%return")) { 892 + *tmp = '\0'; 893 + is_return = true; 894 + } else { 895 + trace_probe_log_err(tmp - symbol, BAD_ADDR_SUFFIX); 896 + goto parse_error; 897 + } 898 + } 899 + if (!is_return && maxactive) { 900 + trace_probe_log_set_index(0); 901 + trace_probe_log_err(1, BAD_MAXACT_TYPE); 902 + goto parse_error; 903 + } 904 + 905 + if (is_return) 906 + flags |= TPARG_FL_RETURN; 907 + else 908 + flags |= TPARG_FL_FENTRY; 909 + 910 + trace_probe_log_set_index(0); 911 + if (event) { 912 + ret = traceprobe_parse_event_name(&event, &group, gbuf, 913 + event - argv[0]); 914 + if (ret) 915 + goto parse_error; 916 + } 917 + 918 + if (!event) { 919 + /* Make a new event name */ 920 + snprintf(buf, MAX_EVENT_NAME_LEN, "%s__%s", symbol, 921 + is_return ? "exit" : "entry"); 922 + sanitize_event_name(buf); 923 + event = buf; 924 + } 925 + 926 + /* setup a probe */ 927 + tf = alloc_trace_fprobe(group, event, symbol, maxactive, 928 + argc - 2, is_return); 929 + if (IS_ERR(tf)) { 930 + ret = PTR_ERR(tf); 931 + /* This must return -ENOMEM, else there is a bug */ 932 + WARN_ON_ONCE(ret != -ENOMEM); 933 + goto out; /* We know tf is not allocated */ 934 + } 935 + argc -= 2; argv += 2; 936 + 937 + /* parse arguments */ 938 + for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) { 939 + trace_probe_log_set_index(i + 2); 940 + ret = traceprobe_parse_probe_arg(&tf->tp, i, argv[i], flags); 941 + if (ret) 942 + goto error; /* This can be -ENOMEM */ 943 + } 944 + 945 + ret = traceprobe_set_print_fmt(&tf->tp, 946 + is_return ? 
PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL); 947 + if (ret < 0) 948 + goto error; 949 + 950 + ret = register_trace_fprobe(tf); 951 + if (ret) { 952 + trace_probe_log_set_index(1); 953 + if (ret == -EILSEQ) 954 + trace_probe_log_err(0, BAD_INSN_BNDRY); 955 + else if (ret == -ENOENT) 956 + trace_probe_log_err(0, BAD_PROBE_ADDR); 957 + else if (ret != -ENOMEM && ret != -EEXIST) 958 + trace_probe_log_err(0, FAIL_REG_PROBE); 959 + goto error; 960 + } 961 + 962 + out: 963 + trace_probe_log_clear(); 964 + kfree(symbol); 965 + return ret; 966 + 967 + parse_error: 968 + ret = -EINVAL; 969 + error: 970 + free_trace_fprobe(tf); 971 + goto out; 972 + } 973 + 974 + static int trace_fprobe_create(const char *raw_command) 975 + { 976 + return trace_probe_create(raw_command, __trace_fprobe_create); 977 + } 978 + 979 + static int trace_fprobe_release(struct dyn_event *ev) 980 + { 981 + struct trace_fprobe *tf = to_trace_fprobe(ev); 982 + int ret = unregister_trace_fprobe(tf); 983 + 984 + if (!ret) 985 + free_trace_fprobe(tf); 986 + return ret; 987 + } 988 + 989 + static int trace_fprobe_show(struct seq_file *m, struct dyn_event *ev) 990 + { 991 + struct trace_fprobe *tf = to_trace_fprobe(ev); 992 + int i; 993 + 994 + seq_putc(m, 'f'); 995 + if (trace_fprobe_is_return(tf) && tf->fp.nr_maxactive) 996 + seq_printf(m, "%d", tf->fp.nr_maxactive); 997 + seq_printf(m, ":%s/%s", trace_probe_group_name(&tf->tp), 998 + trace_probe_name(&tf->tp)); 999 + 1000 + seq_printf(m, " %s%s", trace_fprobe_symbol(tf), 1001 + trace_fprobe_is_return(tf) ? "%return" : ""); 1002 + 1003 + for (i = 0; i < tf->tp.nr_args; i++) 1004 + seq_printf(m, " %s=%s", tf->tp.args[i].name, tf->tp.args[i].comm); 1005 + seq_putc(m, '\n'); 1006 + 1007 + return 0; 1008 + } 1009 + 1010 + /* 1011 + * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex. 1012 + */ 1013 + static int fprobe_register(struct trace_event_call *event, 1014 + enum trace_reg type, void *data) 1015 + { 1016 + struct trace_event_file *file = data; 1017 + 1018 + switch (type) { 1019 + case TRACE_REG_REGISTER: 1020 + return enable_trace_fprobe(event, file); 1021 + case TRACE_REG_UNREGISTER: 1022 + return disable_trace_fprobe(event, file); 1023 + 1024 + #ifdef CONFIG_PERF_EVENTS 1025 + case TRACE_REG_PERF_REGISTER: 1026 + return enable_trace_fprobe(event, NULL); 1027 + case TRACE_REG_PERF_UNREGISTER: 1028 + return disable_trace_fprobe(event, NULL); 1029 + case TRACE_REG_PERF_OPEN: 1030 + case TRACE_REG_PERF_CLOSE: 1031 + case TRACE_REG_PERF_ADD: 1032 + case TRACE_REG_PERF_DEL: 1033 + return 0; 1034 + #endif 1035 + } 1036 + return 0; 1037 + } 1038 + 1039 + /* 1040 + * Register dynevent at core_initcall. This allows kernel to setup fprobe 1041 + * events in postcore_initcall without tracefs. 1042 + */ 1043 + static __init int init_fprobe_trace_early(void) 1044 + { 1045 + int ret; 1046 + 1047 + ret = dyn_event_register(&trace_fprobe_ops); 1048 + if (ret) 1049 + return ret; 1050 + 1051 + return 0; 1052 + } 1053 + core_initcall(init_fprobe_trace_early);
+1 -1
kernel/trace/trace_kprobe.c
···
 
 	if (isdigit(argv[0][1])) {
 		if (!is_return) {
-			trace_probe_log_err(1, MAXACT_NO_KPROBE);
+			trace_probe_log_err(1, BAD_MAXACT_TYPE);
 			goto parse_error;
 		}
 		if (event)
+2 -2
kernel/trace/trace_probe.c
··· 393 393 break; 394 394 395 395 case '%': /* named register */ 396 - if (flags & TPARG_FL_TPOINT) { 397 - /* eprobes do not handle registers */ 396 + if (flags & (TPARG_FL_TPOINT | TPARG_FL_FPROBE)) { 397 + /* eprobe and fprobe do not handle registers */ 398 398 trace_probe_log_err(offs, BAD_VAR); 399 399 break; 400 400 }
+2 -1
kernel/trace/trace_probe.h
···
 #define TPARG_FL_FENTRY	BIT(2)
 #define TPARG_FL_TPOINT	BIT(3)
 #define TPARG_FL_USER	BIT(4)
+#define TPARG_FL_FPROBE	BIT(5)
 #define TPARG_FL_MASK	GENMASK(4, 0)
 
 extern int traceprobe_parse_probe_arg(struct trace_probe *tp, int i,
···
 	C(REFCNT_OPEN_BRACE,	"Reference counter brace is not closed"), \
 	C(BAD_REFCNT_SUFFIX,	"Reference counter has wrong suffix"), \
 	C(BAD_UPROBE_OFFS,	"Invalid uprobe offset"), \
-	C(MAXACT_NO_KPROBE,	"Maxactive is not for kprobe"), \
+	C(BAD_MAXACT_TYPE,	"Maxactive is only for function exit"), \
 	C(BAD_MAXACT,		"Invalid maxactive number"), \
 	C(MAXACT_TOO_BIG,	"Maxactive is too big"), \
 	C(BAD_PROBE_ADDR,	"Invalid probed address or symbol"), \
+1 -1
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc
···
 }
 
 if grep -q 'r\[maxactive\]' README; then
-check_error 'p^100 vfs_read'		# MAXACT_NO_KPROBE
+check_error 'p^100 vfs_read'		# BAD_MAXACT_TYPE
 check_error 'r^1a111 vfs_read'		# BAD_MAXACT
 check_error 'r^100000 vfs_read'		# MAXACT_TOO_BIG
 fi
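
For reference, the renamed BAD_MAXACT_TYPE error also applies to the new
fprobe events: a MAXACTIVE number is only accepted together with %return.
A hedged sketch of how this is expected to surface via tracing/error_log
(the timestamp and exact log formatting below are illustrative, not taken
from this patch):

 # echo 'f100 vfs_read' >> dynamic_events
 sh: write error: Invalid argument
 # cat error_log
 [ 123.456789] trace_fprobe: error: Maxactive is only for function exit
   Command: f100 vfs_read
               ^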