Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

pstore/ftrace: Convert to its own enable/disable debugfs knob

With this patch we no longer reuse function tracer infrastructure, now
we register our own tracer back-end via a debugfs knob.

It's a bit more code, but that is the only downside. On the bright side we
have:

- Ability to make persistent_ram module removable (when needed, we can
move ftrace_ops struct into a module). Note that persistent_ram is still
not removable for other reasons, but with this patch it's just one
thing less to worry about;

- Pstore part is more isolated from the generic function tracer. We tried
it already by registering our own tracer in available_tracers, but that
way we're losing the ability to see the traces while we record them to
pstore. This solution is somewhere in the middle: we only register
"internal ftracer" back-end, but not the "front-end";

- When there is only pstore tracing enabled, the kernel will only write
to the pstore buffer, omitting function tracer buffer (which, of course,
still can be enabled via 'echo function > current_tracer').

Suggested-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Anton Vorontsov <anton.vorontsov@linaro.org>

+105 -26
+1 -3
Documentation/ramoops.txt
··· 102 102 file. Here is an example of usage: 103 103 104 104 # mount -t debugfs debugfs /sys/kernel/debug/ 105 - # cd /sys/kernel/debug/tracing 106 - # echo function > current_tracer 107 - # echo 1 > options/func_pstore 105 + # echo 1 > /sys/kernel/debug/pstore/record_ftrace 108 106 # reboot -f 109 107 [...] 110 108 # mount -t pstore pstore /mnt/
+1
fs/pstore/Kconfig
··· 23 23 bool "Persistent function tracer" 24 24 depends on PSTORE 25 25 depends on FUNCTION_TRACER 26 + depends on DEBUG_FS 26 27 help 27 28 With this option kernel traces function calls into a persistent 28 29 ram buffer that can be decoded and dumped after reboot through
+95 -1
fs/pstore/ftrace.c
··· 17 17 #include <linux/percpu.h> 18 18 #include <linux/smp.h> 19 19 #include <linux/atomic.h> 20 + #include <linux/types.h> 21 + #include <linux/mutex.h> 22 + #include <linux/ftrace.h> 23 + #include <linux/fs.h> 24 + #include <linux/debugfs.h> 25 + #include <linux/err.h> 26 + #include <linux/cache.h> 20 27 #include <asm/barrier.h> 21 28 #include "internal.h" 22 29 23 - void notrace pstore_ftrace_call(unsigned long ip, unsigned long parent_ip) 30 + static void notrace pstore_ftrace_call(unsigned long ip, 31 + unsigned long parent_ip) 24 32 { 33 + unsigned long flags; 25 34 struct pstore_ftrace_record rec = {}; 26 35 27 36 if (unlikely(oops_in_progress)) 28 37 return; 38 + 39 + local_irq_save(flags); 29 40 30 41 rec.ip = ip; 31 42 rec.parent_ip = parent_ip; 32 43 pstore_ftrace_encode_cpu(&rec, raw_smp_processor_id()); 33 44 psinfo->write_buf(PSTORE_TYPE_FTRACE, 0, NULL, 0, (void *)&rec, 34 45 sizeof(rec), psinfo); 46 + 47 + local_irq_restore(flags); 48 + } 49 + 50 + static struct ftrace_ops pstore_ftrace_ops __read_mostly = { 51 + .func = pstore_ftrace_call, 52 + }; 53 + 54 + static DEFINE_MUTEX(pstore_ftrace_lock); 55 + static bool pstore_ftrace_enabled; 56 + 57 + static ssize_t pstore_ftrace_knob_write(struct file *f, const char __user *buf, 58 + size_t count, loff_t *ppos) 59 + { 60 + u8 on; 61 + ssize_t ret; 62 + 63 + ret = kstrtou8_from_user(buf, count, 2, &on); 64 + if (ret) 65 + return ret; 66 + 67 + mutex_lock(&pstore_ftrace_lock); 68 + 69 + if (!on ^ pstore_ftrace_enabled) 70 + goto out; 71 + 72 + if (on) 73 + ret = register_ftrace_function(&pstore_ftrace_ops); 74 + else 75 + ret = unregister_ftrace_function(&pstore_ftrace_ops); 76 + if (ret) { 77 + pr_err("%s: unable to %sregister ftrace ops: %zd\n", 78 + __func__, on ? 
"" : "un", ret); 79 + goto err; 80 + } 81 + 82 + pstore_ftrace_enabled = on; 83 + out: 84 + ret = count; 85 + err: 86 + mutex_unlock(&pstore_ftrace_lock); 87 + 88 + return ret; 89 + } 90 + 91 + static ssize_t pstore_ftrace_knob_read(struct file *f, char __user *buf, 92 + size_t count, loff_t *ppos) 93 + { 94 + char val[] = { '0' + pstore_ftrace_enabled, '\n' }; 95 + 96 + return simple_read_from_buffer(buf, count, ppos, val, sizeof(val)); 97 + } 98 + 99 + static const struct file_operations pstore_knob_fops = { 100 + .open = simple_open, 101 + .read = pstore_ftrace_knob_read, 102 + .write = pstore_ftrace_knob_write, 103 + }; 104 + 105 + void pstore_register_ftrace(void) 106 + { 107 + struct dentry *dir; 108 + struct dentry *file; 109 + 110 + if (!psinfo->write_buf) 111 + return; 112 + 113 + dir = debugfs_create_dir("pstore", NULL); 114 + if (!dir) { 115 + pr_err("%s: unable to create pstore directory\n", __func__); 116 + return; 117 + } 118 + 119 + file = debugfs_create_file("record_ftrace", 0600, dir, NULL, 120 + &pstore_knob_fops); 121 + if (!file) { 122 + pr_err("%s: unable to create record_ftrace file\n", __func__); 123 + goto err_file; 124 + } 125 + 126 + return; 127 + err_file: 128 + debugfs_remove(dir); 35 129 }
+6
fs/pstore/internal.h
··· 39 39 #endif 40 40 } 41 41 42 + #ifdef CONFIG_PSTORE_FTRACE 43 + extern void pstore_register_ftrace(void); 44 + #else 45 + static inline void pstore_register_ftrace(void) {} 46 + #endif 47 + 42 48 extern struct pstore_info *psinfo; 43 49 44 50 extern void pstore_set_kmsg_bytes(int);
+1
fs/pstore/platform.c
··· 236 236 237 237 kmsg_dump_register(&pstore_dumper); 238 238 pstore_register_console(); 239 + pstore_register_ftrace(); 239 240 240 241 if (pstore_update_ms >= 0) { 241 242 pstore_timer.expires = jiffies +
-8
include/linux/pstore.h
··· 64 64 void *data; 65 65 }; 66 66 67 - 68 - #ifdef CONFIG_PSTORE_FTRACE 69 - extern void pstore_ftrace_call(unsigned long ip, unsigned long parent_ip); 70 - #else 71 - static inline void pstore_ftrace_call(unsigned long ip, unsigned long parent_ip) 72 - { } 73 - #endif 74 - 75 67 #ifdef CONFIG_PSTORE 76 68 extern int pstore_register(struct pstore_info *); 77 69 #else
+1 -14
kernel/trace/trace_functions.c
··· 13 13 #include <linux/debugfs.h> 14 14 #include <linux/uaccess.h> 15 15 #include <linux/ftrace.h> 16 - #include <linux/pstore.h> 17 16 #include <linux/fs.h> 18 17 19 18 #include "trace.h" ··· 74 75 preempt_enable_notrace(); 75 76 } 76 77 77 - /* Our two options */ 78 + /* Our option */ 78 79 enum { 79 80 TRACE_FUNC_OPT_STACK = 0x1, 80 - TRACE_FUNC_OPT_PSTORE = 0x2, 81 81 }; 82 82 83 83 static struct tracer_flags func_flags; ··· 104 106 disabled = atomic_inc_return(&data->disabled); 105 107 106 108 if (likely(disabled == 1)) { 107 - /* 108 - * So far tracing doesn't support multiple buffers, so 109 - * we make an explicit call for now. 110 - */ 111 - if (unlikely(func_flags.val & TRACE_FUNC_OPT_PSTORE)) 112 - pstore_ftrace_call(ip, parent_ip); 113 109 pc = preempt_count(); 114 110 trace_function(tr, ip, parent_ip, flags, pc); 115 111 } ··· 169 177 #ifdef CONFIG_STACKTRACE 170 178 { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) }, 171 179 #endif 172 - #ifdef CONFIG_PSTORE_FTRACE 173 - { TRACER_OPT(func_pstore, TRACE_FUNC_OPT_PSTORE) }, 174 - #endif 175 180 { } /* Always set a last empty entry */ 176 181 }; 177 182 ··· 220 231 register_ftrace_function(&trace_ops); 221 232 } 222 233 223 - break; 224 - case TRACE_FUNC_OPT_PSTORE: 225 234 break; 226 235 default: 227 236 return -EINVAL;