Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'trace-v6.15' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace

Pull tracing updates from Steven Rostedt:

- Add option traceoff_after_boot

In order to debug the kernel boot process, it is sometimes helpful to
enable tracing via the kernel command line. Unfortunately, by the time
the login prompt appears, the trace is overwritten by the init process
and other user-space startup applications.

Adding "traceoff_after_boot" to the command line will disable tracing
when the kernel passes control to init, which allows developers to see
the traces that occurred during boot.

- Clean up the mmflags macros that display the GFP flags in trace
events

The macros to print the GFP flags for trace events had a bit of
duplication. The code was restructured to remove it, and in the
process some flags that were previously missed were also added.

- Remove some dead code and scripts/draw_functrace.py

draw_functrace.py hasn't worked in years and as nobody complained
about it, remove it.

- Constify struct event_trigger_ops

The event_trigger_ops is just a structure that has function pointers
that are assigned when the variables are created. These variables
should all be constants.

- Other minor clean ups and fixes

* tag 'trace-v6.15' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
tracing: Replace strncpy with memcpy for fixed-length substring copy
tracing: Fix synth event printk format for str fields
tracing: Do not use PERF enums when perf is not defined
tracing: Ensure module defining synth event cannot be unloaded while tracing
tracing: fix return value in __ftrace_event_enable_disable for TRACE_REG_UNREGISTER
tracing/osnoise: Fix possible recursive locking for cpus_read_lock()
tracing: Align synth event print fmt
tracing: gfp: vsprintf: Do not print "none" when using %pGg printf format
tracepoint: Print the function symbol when tracepoint_debug is set
tracing: Constify struct event_trigger_ops
scripts/tracing: Remove scripts/tracing/draw_functrace.py
tracing: Update MAINTAINERS file to include tracepoint.c
tracing/user_events: Slightly simplify user_seq_show()
tracing/user_events: Don't use %pK through printk
tracing: gfp: Remove duplication of recording GFP flags
tracing: Remove orphaned event_trace_printk
ring-buffer: Fix typo in comment about header page pointer
tracing: Add traceoff_after_boot option

+108 -225
+9
Documentation/admin-guide/kernel-parameters.txt
··· 7289 7289 See also "Event triggers" in Documentation/trace/events.rst 7290 7290 7291 7291 7292 + traceoff_after_boot 7293 + [FTRACE] Sometimes tracing is used to debug issues 7294 + during the boot process. Since the trace buffer has a 7295 + limited amount of storage, it may be prudent to 7296 + disable tracing after the boot is finished, otherwise 7297 + the critical information may be overwritten. With this 7298 + option, the main tracing buffer will be turned off at 7299 + the end of the boot process. 7300 + 7292 7301 traceoff_on_warning 7293 7302 [FTRACE] enable this option to disable tracing when a 7294 7303 warning is hit. This turns off "tracing_on". Tracing can
+1
MAINTAINERS
··· 24233 24233 F: include/linux/trace*.h 24234 24234 F: include/trace/ 24235 24235 F: kernel/trace/ 24236 + F: kernel/tracepoint.c 24236 24237 F: scripts/tracing/ 24237 24238 F: tools/testing/selftests/ftrace/ 24238 24239
-18
include/linux/trace_events.h
··· 859 859 int trace_set_clr_event(const char *system, const char *event, int set); 860 860 int trace_array_set_clr_event(struct trace_array *tr, const char *system, 861 861 const char *event, bool enable); 862 - /* 863 - * The double __builtin_constant_p is because gcc will give us an error 864 - * if we try to allocate the static variable to fmt if it is not a 865 - * constant. Even with the outer if statement optimizing out. 866 - */ 867 - #define event_trace_printk(ip, fmt, args...) \ 868 - do { \ 869 - __trace_printk_check_format(fmt, ##args); \ 870 - tracing_record_cmdline(current); \ 871 - if (__builtin_constant_p(fmt)) { \ 872 - static const char *trace_printk_fmt \ 873 - __section("__trace_printk_fmt") = \ 874 - __builtin_constant_p(fmt) ? fmt : NULL; \ 875 - \ 876 - __trace_bprintk(ip, trace_printk_fmt, ##args); \ 877 - } else \ 878 - __trace_printk(ip, fmt, ##args); \ 879 - } while (0) 880 862 881 863 #ifdef CONFIG_PERF_EVENTS 882 864 struct perf_event;
+10 -31
include/trace/events/mmflags.h
··· 78 78 79 79 #define gfpflag_string(flag) {(__force unsigned long)flag, #flag} 80 80 81 + /* 82 + * For the values that match the bits, use the TRACE_GFP_FLAGS 83 + * which will allow any updates to be included automatically. 84 + */ 85 + #undef TRACE_GFP_EM 86 + #define TRACE_GFP_EM(a) gfpflag_string(__GFP_##a), 87 + 81 88 #define __def_gfpflag_names \ 82 89 gfpflag_string(GFP_TRANSHUGE), \ 83 90 gfpflag_string(GFP_TRANSHUGE_LIGHT), \ ··· 98 91 gfpflag_string(GFP_NOIO), \ 99 92 gfpflag_string(GFP_NOWAIT), \ 100 93 gfpflag_string(GFP_DMA), \ 101 - gfpflag_string(__GFP_HIGHMEM), \ 102 94 gfpflag_string(GFP_DMA32), \ 103 - gfpflag_string(__GFP_HIGH), \ 104 - gfpflag_string(__GFP_IO), \ 105 - gfpflag_string(__GFP_FS), \ 106 - gfpflag_string(__GFP_NOWARN), \ 107 - gfpflag_string(__GFP_RETRY_MAYFAIL), \ 108 - gfpflag_string(__GFP_NOFAIL), \ 109 - gfpflag_string(__GFP_NORETRY), \ 110 - gfpflag_string(__GFP_COMP), \ 111 - gfpflag_string(__GFP_ZERO), \ 112 - gfpflag_string(__GFP_NOMEMALLOC), \ 113 - gfpflag_string(__GFP_MEMALLOC), \ 114 - gfpflag_string(__GFP_HARDWALL), \ 115 - gfpflag_string(__GFP_THISNODE), \ 116 - gfpflag_string(__GFP_RECLAIMABLE), \ 117 - gfpflag_string(__GFP_MOVABLE), \ 118 - gfpflag_string(__GFP_ACCOUNT), \ 119 - gfpflag_string(__GFP_WRITE), \ 120 95 gfpflag_string(__GFP_RECLAIM), \ 121 - gfpflag_string(__GFP_DIRECT_RECLAIM), \ 122 - gfpflag_string(__GFP_KSWAPD_RECLAIM), \ 123 - gfpflag_string(__GFP_ZEROTAGS) 124 - 125 - #ifdef CONFIG_KASAN_HW_TAGS 126 - #define __def_gfpflag_names_kasan , \ 127 - gfpflag_string(__GFP_SKIP_ZERO), \ 128 - gfpflag_string(__GFP_SKIP_KASAN) 129 - #else 130 - #define __def_gfpflag_names_kasan 131 - #endif 96 + TRACE_GFP_FLAGS \ 97 + { 0, NULL } 132 98 133 99 #define show_gfp_flags(flags) \ 134 - (flags) ? __print_flags(flags, "|", \ 135 - __def_gfpflag_names __def_gfpflag_names_kasan \ 100 + (flags) ? __print_flags(flags, "|", __def_gfpflag_names \ 136 101 ) : "none" 137 102 138 103 #ifdef CONFIG_MMU
+1 -1
kernel/trace/ring_buffer.c
··· 5318 5318 * moving it. The page before the header page has the 5319 5319 * flag bit '1' set if it is pointing to the page we want. 5320 5320 * but if the writer is in the process of moving it 5321 - * than it will be '2' or already moved '0'. 5321 + * then it will be '2' or already moved '0'. 5322 5322 */ 5323 5323 5324 5324 ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
+11
kernel/trace/trace.c
··· 87 87 static struct trace_iterator *tracepoint_print_iter; 88 88 int tracepoint_printk; 89 89 static bool tracepoint_printk_stop_on_boot __initdata; 90 + static bool traceoff_after_boot __initdata; 90 91 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key); 91 92 92 93 /* For tracers that don't implement custom flags */ ··· 330 329 return 1; 331 330 } 332 331 __setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop); 332 + 333 + static int __init set_traceoff_after_boot(char *str) 334 + { 335 + traceoff_after_boot = true; 336 + return 1; 337 + } 338 + __setup("traceoff_after_boot", set_traceoff_after_boot); 333 339 334 340 unsigned long long ns2usecs(u64 nsec) 335 341 { ··· 10716 10708 static_key_disable(&tracepoint_printk_key.key); 10717 10709 tracepoint_printk = 0; 10718 10710 } 10711 + 10712 + if (traceoff_after_boot) 10713 + tracing_off(); 10719 10714 10720 10715 tracing_set_default_clock(); 10721 10716 clear_boot_tracer();
+2 -2
kernel/trace/trace.h
··· 1717 1717 unsigned long count; 1718 1718 int ref; 1719 1719 int flags; 1720 - struct event_trigger_ops *ops; 1720 + const struct event_trigger_ops *ops; 1721 1721 struct event_command *cmd_ops; 1722 1722 struct event_filter __rcu *filter; 1723 1723 char *filter_str; ··· 1962 1962 int (*set_filter)(char *filter_str, 1963 1963 struct event_trigger_data *data, 1964 1964 struct trace_event_file *file); 1965 - struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param); 1965 + const struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param); 1966 1966 }; 1967 1967 1968 1968 /**
+3 -3
kernel/trace/trace_eprobe.c
··· 478 478 __eprobe_trace_func(edata, rec); 479 479 } 480 480 481 - static struct event_trigger_ops eprobe_trigger_ops = { 481 + static const struct event_trigger_ops eprobe_trigger_ops = { 482 482 .trigger = eprobe_trigger_func, 483 483 .print = eprobe_trigger_print, 484 484 .init = eprobe_trigger_init, ··· 507 507 508 508 } 509 509 510 - static struct event_trigger_ops *eprobe_trigger_get_ops(char *cmd, 511 - char *param) 510 + static const struct event_trigger_ops *eprobe_trigger_get_ops(char *cmd, 511 + char *param) 512 512 { 513 513 return &eprobe_trigger_ops; 514 514 }
+3 -1
kernel/trace/trace_events.c
··· 790 790 clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags); 791 791 } 792 792 793 - call->class->reg(call, TRACE_REG_UNREGISTER, file); 793 + ret = call->class->reg(call, TRACE_REG_UNREGISTER, file); 794 + 795 + WARN_ON_ONCE(ret); 794 796 } 795 797 /* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */ 796 798 if (file->flags & EVENT_FILE_FL_SOFT_MODE)
+10 -10
kernel/trace/trace_events_hist.c
··· 6203 6203 } 6204 6204 } 6205 6205 6206 - static struct event_trigger_ops event_hist_trigger_ops = { 6206 + static const struct event_trigger_ops event_hist_trigger_ops = { 6207 6207 .trigger = event_hist_trigger, 6208 6208 .print = event_hist_trigger_print, 6209 6209 .init = event_hist_trigger_init, ··· 6235 6235 } 6236 6236 } 6237 6237 6238 - static struct event_trigger_ops event_hist_trigger_named_ops = { 6238 + static const struct event_trigger_ops event_hist_trigger_named_ops = { 6239 6239 .trigger = event_hist_trigger, 6240 6240 .print = event_hist_trigger_print, 6241 6241 .init = event_hist_trigger_named_init, 6242 6242 .free = event_hist_trigger_named_free, 6243 6243 }; 6244 6244 6245 - static struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd, 6246 - char *param) 6245 + static const struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd, 6246 + char *param) 6247 6247 { 6248 6248 return &event_hist_trigger_ops; 6249 6249 } ··· 6838 6838 hist_enable_trigger(data, buffer, rec, event); 6839 6839 } 6840 6840 6841 - static struct event_trigger_ops hist_enable_trigger_ops = { 6841 + static const struct event_trigger_ops hist_enable_trigger_ops = { 6842 6842 .trigger = hist_enable_trigger, 6843 6843 .print = event_enable_trigger_print, 6844 6844 .init = event_trigger_init, 6845 6845 .free = event_enable_trigger_free, 6846 6846 }; 6847 6847 6848 - static struct event_trigger_ops hist_enable_count_trigger_ops = { 6848 + static const struct event_trigger_ops hist_enable_count_trigger_ops = { 6849 6849 .trigger = hist_enable_count_trigger, 6850 6850 .print = event_enable_trigger_print, 6851 6851 .init = event_trigger_init, 6852 6852 .free = event_enable_trigger_free, 6853 6853 }; 6854 6854 6855 - static struct event_trigger_ops hist_disable_trigger_ops = { 6855 + static const struct event_trigger_ops hist_disable_trigger_ops = { 6856 6856 .trigger = hist_enable_trigger, 6857 6857 .print = event_enable_trigger_print, 6858 6858 .init = 
event_trigger_init, 6859 6859 .free = event_enable_trigger_free, 6860 6860 }; 6861 6861 6862 - static struct event_trigger_ops hist_disable_count_trigger_ops = { 6862 + static const struct event_trigger_ops hist_disable_count_trigger_ops = { 6863 6863 .trigger = hist_enable_count_trigger, 6864 6864 .print = event_enable_trigger_print, 6865 6865 .init = event_trigger_init, 6866 6866 .free = event_enable_trigger_free, 6867 6867 }; 6868 6868 6869 - static struct event_trigger_ops * 6869 + static const struct event_trigger_ops * 6870 6870 hist_enable_get_trigger_ops(char *cmd, char *param) 6871 6871 { 6872 - struct event_trigger_ops *ops; 6872 + const struct event_trigger_ops *ops; 6873 6873 bool enable; 6874 6874 6875 6875 enable = (strcmp(cmd, ENABLE_HIST_STR) == 0);
+36 -4
kernel/trace/trace_events_synth.c
··· 207 207 if (len == 0) 208 208 return 0; /* variable-length string */ 209 209 210 - strncpy(buf, start, len); 210 + memcpy(buf, start, len); 211 211 buf[len] = '\0'; 212 212 213 213 err = kstrtouint(buf, 0, &size); ··· 305 305 else if (strcmp(type, "gfp_t") == 0) 306 306 fmt = "%x"; 307 307 else if (synth_field_is_string(type)) 308 - fmt = "%.*s"; 308 + fmt = "%s"; 309 309 else if (synth_field_is_stack(type)) 310 310 fmt = "%s"; 311 311 ··· 612 612 fmt = synth_field_fmt(event->fields[i]->type); 613 613 pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s", 614 614 event->fields[i]->name, fmt, 615 - i == event->n_fields - 1 ? "" : ", "); 615 + i == event->n_fields - 1 ? "" : " "); 616 616 } 617 617 pos += snprintf(buf + pos, LEN_OR_ZERO, "\""); 618 618 ··· 852 852 {} 853 853 }; 854 854 855 + static int synth_event_reg(struct trace_event_call *call, 856 + enum trace_reg type, void *data) 857 + { 858 + struct synth_event *event = container_of(call, struct synth_event, call); 859 + 860 + switch (type) { 861 + #ifdef CONFIG_PERF_EVENTS 862 + case TRACE_REG_PERF_REGISTER: 863 + #endif 864 + case TRACE_REG_REGISTER: 865 + if (!try_module_get(event->mod)) 866 + return -EBUSY; 867 + break; 868 + default: 869 + break; 870 + } 871 + 872 + int ret = trace_event_reg(call, type, data); 873 + 874 + switch (type) { 875 + #ifdef CONFIG_PERF_EVENTS 876 + case TRACE_REG_PERF_UNREGISTER: 877 + #endif 878 + case TRACE_REG_UNREGISTER: 879 + module_put(event->mod); 880 + break; 881 + default: 882 + break; 883 + } 884 + return ret; 885 + } 886 + 855 887 static int register_synth_event(struct synth_event *event) 856 888 { 857 889 struct trace_event_call *call = &event->call; ··· 913 881 goto out; 914 882 } 915 883 call->flags = TRACE_EVENT_FL_TRACEPOINT; 916 - call->class->reg = trace_event_reg; 884 + call->class->reg = synth_event_reg; 917 885 call->class->probe = trace_event_raw_event_synth; 918 886 call->data = event; 919 887 call->tp = event->tp;
+19 -19
kernel/trace/trace_events_trigger.c
··· 825 825 void *private_data) 826 826 { 827 827 struct event_trigger_data *trigger_data; 828 - struct event_trigger_ops *trigger_ops; 828 + const struct event_trigger_ops *trigger_ops; 829 829 830 830 trigger_ops = cmd_ops->get_trigger_ops(cmd, param); 831 831 ··· 1367 1367 data->filter_str); 1368 1368 } 1369 1369 1370 - static struct event_trigger_ops traceon_trigger_ops = { 1370 + static const struct event_trigger_ops traceon_trigger_ops = { 1371 1371 .trigger = traceon_trigger, 1372 1372 .print = traceon_trigger_print, 1373 1373 .init = event_trigger_init, 1374 1374 .free = event_trigger_free, 1375 1375 }; 1376 1376 1377 - static struct event_trigger_ops traceon_count_trigger_ops = { 1377 + static const struct event_trigger_ops traceon_count_trigger_ops = { 1378 1378 .trigger = traceon_count_trigger, 1379 1379 .print = traceon_trigger_print, 1380 1380 .init = event_trigger_init, 1381 1381 .free = event_trigger_free, 1382 1382 }; 1383 1383 1384 - static struct event_trigger_ops traceoff_trigger_ops = { 1384 + static const struct event_trigger_ops traceoff_trigger_ops = { 1385 1385 .trigger = traceoff_trigger, 1386 1386 .print = traceoff_trigger_print, 1387 1387 .init = event_trigger_init, 1388 1388 .free = event_trigger_free, 1389 1389 }; 1390 1390 1391 - static struct event_trigger_ops traceoff_count_trigger_ops = { 1391 + static const struct event_trigger_ops traceoff_count_trigger_ops = { 1392 1392 .trigger = traceoff_count_trigger, 1393 1393 .print = traceoff_trigger_print, 1394 1394 .init = event_trigger_init, 1395 1395 .free = event_trigger_free, 1396 1396 }; 1397 1397 1398 - static struct event_trigger_ops * 1398 + static const struct event_trigger_ops * 1399 1399 onoff_get_trigger_ops(char *cmd, char *param) 1400 1400 { 1401 - struct event_trigger_ops *ops; 1401 + const struct event_trigger_ops *ops; 1402 1402 1403 1403 /* we register both traceon and traceoff to this callback */ 1404 1404 if (strcmp(cmd, "traceon") == 0) ··· 1491 1491 
data->filter_str); 1492 1492 } 1493 1493 1494 - static struct event_trigger_ops snapshot_trigger_ops = { 1494 + static const struct event_trigger_ops snapshot_trigger_ops = { 1495 1495 .trigger = snapshot_trigger, 1496 1496 .print = snapshot_trigger_print, 1497 1497 .init = event_trigger_init, 1498 1498 .free = event_trigger_free, 1499 1499 }; 1500 1500 1501 - static struct event_trigger_ops snapshot_count_trigger_ops = { 1501 + static const struct event_trigger_ops snapshot_count_trigger_ops = { 1502 1502 .trigger = snapshot_count_trigger, 1503 1503 .print = snapshot_trigger_print, 1504 1504 .init = event_trigger_init, 1505 1505 .free = event_trigger_free, 1506 1506 }; 1507 1507 1508 - static struct event_trigger_ops * 1508 + static const struct event_trigger_ops * 1509 1509 snapshot_get_trigger_ops(char *cmd, char *param) 1510 1510 { 1511 1511 return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops; ··· 1586 1586 data->filter_str); 1587 1587 } 1588 1588 1589 - static struct event_trigger_ops stacktrace_trigger_ops = { 1589 + static const struct event_trigger_ops stacktrace_trigger_ops = { 1590 1590 .trigger = stacktrace_trigger, 1591 1591 .print = stacktrace_trigger_print, 1592 1592 .init = event_trigger_init, 1593 1593 .free = event_trigger_free, 1594 1594 }; 1595 1595 1596 - static struct event_trigger_ops stacktrace_count_trigger_ops = { 1596 + static const struct event_trigger_ops stacktrace_count_trigger_ops = { 1597 1597 .trigger = stacktrace_count_trigger, 1598 1598 .print = stacktrace_trigger_print, 1599 1599 .init = event_trigger_init, 1600 1600 .free = event_trigger_free, 1601 1601 }; 1602 1602 1603 - static struct event_trigger_ops * 1603 + static const struct event_trigger_ops * 1604 1604 stacktrace_get_trigger_ops(char *cmd, char *param) 1605 1605 { 1606 1606 return param ? 
&stacktrace_count_trigger_ops : &stacktrace_trigger_ops; ··· 1711 1711 } 1712 1712 } 1713 1713 1714 - static struct event_trigger_ops event_enable_trigger_ops = { 1714 + static const struct event_trigger_ops event_enable_trigger_ops = { 1715 1715 .trigger = event_enable_trigger, 1716 1716 .print = event_enable_trigger_print, 1717 1717 .init = event_trigger_init, 1718 1718 .free = event_enable_trigger_free, 1719 1719 }; 1720 1720 1721 - static struct event_trigger_ops event_enable_count_trigger_ops = { 1721 + static const struct event_trigger_ops event_enable_count_trigger_ops = { 1722 1722 .trigger = event_enable_count_trigger, 1723 1723 .print = event_enable_trigger_print, 1724 1724 .init = event_trigger_init, 1725 1725 .free = event_enable_trigger_free, 1726 1726 }; 1727 1727 1728 - static struct event_trigger_ops event_disable_trigger_ops = { 1728 + static const struct event_trigger_ops event_disable_trigger_ops = { 1729 1729 .trigger = event_enable_trigger, 1730 1730 .print = event_enable_trigger_print, 1731 1731 .init = event_trigger_init, 1732 1732 .free = event_enable_trigger_free, 1733 1733 }; 1734 1734 1735 - static struct event_trigger_ops event_disable_count_trigger_ops = { 1735 + static const struct event_trigger_ops event_disable_count_trigger_ops = { 1736 1736 .trigger = event_enable_count_trigger, 1737 1737 .print = event_enable_trigger_print, 1738 1738 .init = event_trigger_init, ··· 1916 1916 data->ops->free(data); 1917 1917 } 1918 1918 1919 - static struct event_trigger_ops * 1919 + static const struct event_trigger_ops * 1920 1920 event_enable_get_trigger_ops(char *cmd, char *param) 1921 1921 { 1922 - struct event_trigger_ops *ops; 1922 + const struct event_trigger_ops *ops; 1923 1923 bool enable; 1924 1924 1925 1925 #ifdef CONFIG_HIST_TRIGGERS
+2 -5
kernel/trace/trace_events_user.c
··· 455 455 if (ret && ret != -ENOENT) { 456 456 struct user_event *user = enabler->event; 457 457 458 - pr_warn("user_events: Fault for mm: 0x%pK @ 0x%llx event: %s\n", 458 + pr_warn("user_events: Fault for mm: 0x%p @ 0x%llx event: %s\n", 459 459 mm->mm, (unsigned long long)uaddr, EVENT_NAME(user)); 460 460 } 461 461 ··· 2793 2793 2794 2794 seq_printf(m, "%s", EVENT_TP_NAME(user)); 2795 2795 2796 - if (status != 0) 2797 - seq_puts(m, " #"); 2798 - 2799 2796 if (status != 0) { 2800 - seq_puts(m, " Used by"); 2797 + seq_puts(m, " # Used by"); 2801 2798 if (status & EVENT_STATUS_FTRACE) 2802 2799 seq_puts(m, " ftrace"); 2803 2800 if (status & EVENT_STATUS_PERF)
-1
kernel/trace/trace_osnoise.c
··· 2006 2006 2007 2007 if (IS_ERR(kthread)) { 2008 2008 pr_err(BANNER "could not start sampling thread\n"); 2009 - stop_per_cpu_kthreads(); 2010 2009 return -ENOMEM; 2011 2010 } 2012 2011
+1 -1
kernel/tracepoint.c
··· 127 127 return; 128 128 129 129 for (i = 0; funcs[i].func; i++) 130 - printk(KERN_DEBUG "Probe %d : %p\n", i, funcs[i].func); 130 + printk(KERN_DEBUG "Probe %d : %pSb\n", i, funcs[i].func); 131 131 } 132 132 133 133 static struct tracepoint_func *
-129
scripts/tracing/draw_functrace.py
··· 1 - #!/usr/bin/env python 2 - # SPDX-License-Identifier: GPL-2.0-only 3 - 4 - """ 5 - Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com> 6 - 7 - This script parses a trace provided by the function tracer in 8 - kernel/trace/trace_functions.c 9 - The resulted trace is processed into a tree to produce a more human 10 - view of the call stack by drawing textual but hierarchical tree of 11 - calls. Only the functions's names and the call time are provided. 12 - 13 - Usage: 14 - Be sure that you have CONFIG_FUNCTION_TRACER 15 - # mount -t tracefs nodev /sys/kernel/tracing 16 - # echo function > /sys/kernel/tracing/current_tracer 17 - $ cat /sys/kernel/tracing/trace_pipe > ~/raw_trace_func 18 - Wait some times but not too much, the script is a bit slow. 19 - Break the pipe (Ctrl + Z) 20 - $ scripts/tracing/draw_functrace.py < ~/raw_trace_func > draw_functrace 21 - Then you have your drawn trace in draw_functrace 22 - """ 23 - 24 - 25 - import sys, re 26 - 27 - class CallTree: 28 - """ This class provides a tree representation of the functions 29 - call stack. If a function has no parent in the kernel (interrupt, 30 - syscall, kernel thread...) then it is attached to a virtual parent 31 - called ROOT. 32 - """ 33 - ROOT = None 34 - 35 - def __init__(self, func, time = None, parent = None): 36 - self._func = func 37 - self._time = time 38 - if parent is None: 39 - self._parent = CallTree.ROOT 40 - else: 41 - self._parent = parent 42 - self._children = [] 43 - 44 - def calls(self, func, calltime): 45 - """ If a function calls another one, call this method to insert it 46 - into the tree at the appropriate place. 47 - @return: A reference to the newly created child node. 48 - """ 49 - child = CallTree(func, calltime, self) 50 - self._children.append(child) 51 - return child 52 - 53 - def getParent(self, func): 54 - """ Retrieve the last parent of the current node that 55 - has the name given by func. 
If this function is not 56 - on a parent, then create it as new child of root 57 - @return: A reference to the parent. 58 - """ 59 - tree = self 60 - while tree != CallTree.ROOT and tree._func != func: 61 - tree = tree._parent 62 - if tree == CallTree.ROOT: 63 - child = CallTree.ROOT.calls(func, None) 64 - return child 65 - return tree 66 - 67 - def __repr__(self): 68 - return self.__toString("", True) 69 - 70 - def __toString(self, branch, lastChild): 71 - if self._time is not None: 72 - s = "%s----%s (%s)\n" % (branch, self._func, self._time) 73 - else: 74 - s = "%s----%s\n" % (branch, self._func) 75 - 76 - i = 0 77 - if lastChild: 78 - branch = branch[:-1] + " " 79 - while i < len(self._children): 80 - if i != len(self._children) - 1: 81 - s += "%s" % self._children[i].__toString(branch +\ 82 - " |", False) 83 - else: 84 - s += "%s" % self._children[i].__toString(branch +\ 85 - " |", True) 86 - i += 1 87 - return s 88 - 89 - class BrokenLineException(Exception): 90 - """If the last line is not complete because of the pipe breakage, 91 - we want to stop the processing and ignore this line. 92 - """ 93 - pass 94 - 95 - class CommentLineException(Exception): 96 - """ If the line is a comment (as in the beginning of the trace file), 97 - just ignore it. 
98 - """ 99 - pass 100 - 101 - 102 - def parseLine(line): 103 - line = line.strip() 104 - if line.startswith("#"): 105 - raise CommentLineException 106 - m = re.match("[^]]+?\\] +([a-z.]+) +([0-9.]+): (\\w+) <-(\\w+)", line) 107 - if m is None: 108 - raise BrokenLineException 109 - return (m.group(2), m.group(3), m.group(4)) 110 - 111 - 112 - def main(): 113 - CallTree.ROOT = CallTree("Root (Nowhere)", None, None) 114 - tree = CallTree.ROOT 115 - 116 - for line in sys.stdin: 117 - try: 118 - calltime, callee, caller = parseLine(line) 119 - except BrokenLineException: 120 - break 121 - except CommentLineException: 122 - continue 123 - tree = tree.getParent(caller) 124 - tree = tree.calls(callee, calltime) 125 - 126 - print(CallTree.ROOT) 127 - 128 - if __name__ == "__main__": 129 - main()