Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'tip/perf/core-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into perf/core

Pull tracing updates from Steven Rostedt.

Signed-off-by: Ingo Molnar <mingo@kernel.org>

+365 -343
Documentation/kernel-parameters.txt  +16

···
 	to facilitate early boot debugging.
 	See also Documentation/trace/events.txt

+	trace_options=[option-list]
+			[FTRACE] Enable or disable tracer options at boot.
+			The option-list is a comma delimited list of options
+			that can be enabled or disabled just as if you were
+			to echo the option name into
+
+			     /sys/kernel/debug/tracing/trace_options
+
+			For example, to enable stacktrace option (to dump the
+			stack trace of each event), add to the command line:
+
+			      trace_options=stacktrace
+
+			See also Documentation/trace/ftrace.txt "trace options"
+			section.
+
 	transparent_hugepage=
 			[KNL]
 			Format: [always|madvise|never]
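At boot this option list is split on commas with strsep() and each token is fed to trace_set_options(); see the tracer_alloc_buffers() hunk in kernel/trace/trace.c below. A minimal userspace sketch of the same parsing pattern, with made-up option names; this is an illustration, not code from the commit:

/*
 * Split a trace_options=-style list the way the kernel does.
 * strsep() cuts one token per call and NUL-terminates it in place.
 */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "stacktrace,nobranch";	/* example option list */
	char *list = buf;
	char *option;

	while ((option = strsep(&list, ",")) != NULL)
		printf("apply option: %s\n", option);

	return 0;
}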
include/linux/ftrace_event.h  +7 -7

···
 void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc);
-void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
-				       struct ring_buffer_event *event,
-				       unsigned long flags, int pc);
-void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
-					    struct ring_buffer_event *event,
-					    unsigned long flags, int pc,
-					    struct pt_regs *regs);
+void trace_buffer_unlock_commit(struct ring_buffer *buffer,
+				struct ring_buffer_event *event,
+				unsigned long flags, int pc);
+void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+				     struct ring_buffer_event *event,
+				     unsigned long flags, int pc,
+				     struct pt_regs *regs);
 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event);
include/linux/kernel.h  +2 -5

···
 
 extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
 #else
-static inline __printf(1, 2)
-int trace_printk(const char *fmt, ...);
-
 static inline void tracing_start(void) { }
 static inline void tracing_stop(void) { }
 static inline void ftrace_off_permanent(void) { }
···
 static inline void tracing_off(void) { }
 static inline int tracing_is_on(void) { return 0; }
 
-static inline int
-trace_printk(const char *fmt, ...)
+static inline __printf(1, 2)
+int trace_printk(const char *fmt, ...)
 {
	return 0;
 }
include/linux/ring_buffer.h  +2 -1

···
 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
 
-unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu);
+u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu);
 unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu);
 unsigned long ring_buffer_entries(struct ring_buffer *buffer);
 unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
 unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu);
 
 u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
 void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
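The new ring_buffer_dropped_events_cpu() export is read per CPU; the trace.c changes below print it in the per-cpu stats file. A hedged sketch of summing it across CPUs, kernel context assumed; total_dropped_events() is a hypothetical helper, only ring_buffer_dropped_events_cpu() comes from this commit:

/*
 * Sum the new per-cpu dropped-events counters. The counter only
 * advances when RB_FL_OVERWRITE is off (producer stops instead of
 * overwriting old events).
 */
#include <linux/cpumask.h>
#include <linux/ring_buffer.h>

static unsigned long total_dropped_events(struct ring_buffer *buffer)
{
	unsigned long total = 0;
	int cpu;

	for_each_online_cpu(cpu)
		total += ring_buffer_dropped_events_cpu(buffer, cpu);

	return total;
}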
include/trace/ftrace.h  +1 -2

···
	{ assign; }							\
									\
	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
-		trace_nowake_buffer_unlock_commit(buffer,		\
-						  event, irq_flags, pc); \
+		trace_buffer_unlock_commit(buffer, event, irq_flags, pc); \
 }
 /*
  * The ftrace_test_probe is compiled out, it is only here as a build time check
include/trace/syscall.h  -23

···
	struct ftrace_event_call *exit_event;
 };
 
-#ifdef CONFIG_FTRACE_SYSCALLS
-extern unsigned long arch_syscall_addr(int nr);
-extern int init_syscall_trace(struct ftrace_event_call *call);
-
-extern int reg_event_syscall_enter(struct ftrace_event_call *call);
-extern void unreg_event_syscall_enter(struct ftrace_event_call *call);
-extern int reg_event_syscall_exit(struct ftrace_event_call *call);
-extern void unreg_event_syscall_exit(struct ftrace_event_call *call);
-extern int
-ftrace_format_syscall(struct ftrace_event_call *call, struct trace_seq *s);
-enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags,
-				      struct trace_event *event);
-enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags,
-				     struct trace_event *event);
-#endif
-
-#ifdef CONFIG_PERF_EVENTS
-int perf_sysenter_enable(struct ftrace_event_call *call);
-void perf_sysenter_disable(struct ftrace_event_call *call);
-int perf_sysexit_enable(struct ftrace_event_call *call);
-void perf_sysexit_disable(struct ftrace_event_call *call);
-#endif
-
 #endif /* _TRACE_SYSCALL_H */
kernel/trace/Kconfig  +1

···
	select BINARY_PRINTF
	select EVENT_TRACING
	select TRACE_CLOCK
+	select IRQ_WORK
 
 config GENERIC_TRACER
	bool
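TRACING now selects IRQ_WORK because the reader wakeup in trace.c (below) moves from a delayed workqueue to irq_work, which may be queued from contexts where scheduling work is unsafe. A minimal sketch of the irq_work pattern as this commit uses it; the names here are illustrative, see trace.c for the real wiring:

#include <linux/irq_work.h>

/* deferred side: runs a little later in interrupt context */
static void example_wakeup(struct irq_work *work)
{
	/* a safe place to do the real wakeup */
}

static struct irq_work example_work;

static void example_init(void)
{
	init_irq_work(&example_work, example_wakeup);
}

/* producer side: just queue and return, safe even from NMI-like context */
static void example_hot_path(void)
{
	irq_work_queue(&example_work);
}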
kernel/trace/ftrace.c  +3 -3

···
 {
	return register_ftrace_command(&ftrace_mod_cmd);
 }
-device_initcall(ftrace_mod_cmd_init);
+core_initcall(ftrace_mod_cmd_init);
 
 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
				      struct ftrace_ops *op, struct pt_regs *pt_regs)
···
	ftrace_enabled = 1;
	return 0;
 }
-device_initcall(ftrace_nodyn_init);
+core_initcall(ftrace_nodyn_init);
 
 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
 static inline void ftrace_startup_enable(int command) { }
···
	if (strlen(tmp) == 0)
		return 1;
 
-	ret = strict_strtol(tmp, 10, &val);
+	ret = kstrtol(tmp, 10, &val);
	if (ret < 0)
		return ret;
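strict_strtol() and friends were deprecated wrappers around the kstrto*() family; the same conversion recurs below in trace_events_filter.c, trace_functions.c, trace_kprobe.c, trace_probe.c and trace_uprobe.c. A small sketch of the kstrto* calling convention (0 on success, negative errno on failure); parse_count() is a hypothetical helper, not part of the commit:

#include <linux/kernel.h>

static int parse_count(const char *str, unsigned long *count)
{
	/* base 0 auto-detects decimal, 0x hex and 0 octal prefixes */
	int ret = kstrtoul(str, 0, count);

	if (ret < 0)
		return ret;	/* propagate -EINVAL/-ERANGE, as ftrace.c now does */

	return 0;
}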
kernel/trace/ring_buffer.c  +40 -11

···
	unsigned long		lost_events;
	unsigned long		last_overrun;
	local_t			entries_bytes;
-	local_t			commit_overrun;
-	local_t			overrun;
	local_t			entries;
+	local_t			overrun;
+	local_t			commit_overrun;
+	local_t			dropped_events;
	local_t			committing;
	local_t			commits;
	unsigned long		read;
···
 }
 
 /**
- * ring_buffer_update_event - update event type and data
+ * rb_update_event - update event type and data
  * @event: the even to update
  * @type: the type of event
  * @length: the size of the event field in the ring buffer
···
		 * If we are not in overwrite mode,
		 * this is easy, just stop here.
		 */
-		if (!(buffer->flags & RB_FL_OVERWRITE))
+		if (!(buffer->flags & RB_FL_OVERWRITE)) {
+			local_inc(&cpu_buffer->dropped_events);
			goto out_reset;
+		}
 
		ret = rb_handle_head_page(cpu_buffer,
					  tail_page,
···
  * and not the length of the event which would hold the header.
  */
 int ring_buffer_write(struct ring_buffer *buffer,
-			unsigned long length,
-			void *data)
+		      unsigned long length,
+		      void *data)
 {
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
···
  * @buffer: The ring buffer
  * @cpu: The per CPU buffer to read from.
  */
-unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
+u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
 {
	unsigned long flags;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
-	unsigned long ret;
+	u64 ret;
 
	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;
···
 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
 
 /**
- * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
+ * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
+ * buffer wrapping around (only if RB_FL_OVERWRITE is on).
  * @buffer: The ring buffer
  * @cpu: The per CPU buffer to get the number of overruns from
  */
···
 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
 
 /**
- * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
+ * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
+ * commits failing due to the buffer wrapping around while there are uncommitted
+ * events, such as during an interrupt storm.
  * @buffer: The ring buffer
  * @cpu: The per CPU buffer to get the number of overruns from
  */
···
	return ret;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
+
+/**
+ * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
+ * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
+ * @buffer: The ring buffer
+ * @cpu: The per CPU buffer to get the number of overruns from
+ */
+unsigned long
+ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	unsigned long ret;
+
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
+		return 0;
+
+	cpu_buffer = buffer->buffers[cpu];
+	ret = local_read(&cpu_buffer->dropped_events);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
 
 /**
  * ring_buffer_entries - get the number of entries in a buffer
···
	local_set(&cpu_buffer->reader_page->page->commit, 0);
	cpu_buffer->reader_page->read = 0;
 
-	local_set(&cpu_buffer->commit_overrun, 0);
	local_set(&cpu_buffer->entries_bytes, 0);
	local_set(&cpu_buffer->overrun, 0);
+	local_set(&cpu_buffer->commit_overrun, 0);
+	local_set(&cpu_buffer->dropped_events, 0);
	local_set(&cpu_buffer->entries, 0);
	local_set(&cpu_buffer->committing, 0);
	local_set(&cpu_buffer->commits, 0);
kernel/trace/trace.c  +191 -181

···
 #include <linux/seq_file.h>
 #include <linux/notifier.h>
 #include <linux/irqflags.h>
+#include <linux/irq_work.h>
 #include <linux/debugfs.h>
 #include <linux/pagemap.h>
 #include <linux/hardirq.h>
···
 {
	return 0;
 }
+
+/*
+ * To prevent the comm cache from being overwritten when no
+ * tracing is active, only save the comm when a trace event
+ * occurred.
+ */
+static DEFINE_PER_CPU(bool, trace_cmdline_save);
+
+/*
+ * When a reader is waiting for data, then this variable is
+ * set to true.
+ */
+static bool trace_wakeup_needed;
+
+static struct irq_work trace_work_wakeup;
 
 /*
  * Kill all tracing for good (never come back).
···
 }
 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
 
+
+static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
+static char *trace_boot_options __initdata;
+
+static int __init set_trace_boot_options(char *str)
+{
+	strncpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
+	trace_boot_options = trace_boot_options_buf;
+	return 0;
+}
+__setup("trace_options=", set_trace_boot_options);
+
 unsigned long long ns2usecs(cycle_t nsec)
 {
	nsec += 500;
···
 static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
 
-/* tracer_enabled is used to toggle activation of a tracer */
-static int tracer_enabled = 1;
-
-/**
- * tracing_is_enabled - return tracer_enabled status
- *
- * This function is used by other tracers to know the status
- * of the tracer_enabled flag.  Tracers may use this function
- * to know if it should enable their features when starting
- * up. See irqsoff tracer for an example (start_irqsoff_tracer).
- */
 int tracing_is_enabled(void)
 {
-	return tracer_enabled;
+	return tracing_is_on();
 }
 
 /*
···
 static int trace_stop_count;
 static DEFINE_RAW_SPINLOCK(tracing_start_lock);
 
-static void wakeup_work_handler(struct work_struct *work)
+/**
+ * trace_wake_up - wake up tasks waiting for trace input
+ *
+ * Schedules a delayed work to wake up any task that is blocked on the
+ * trace_wait queue. These is used with trace_poll for tasks polling the
+ * trace.
+ */
+static void trace_wake_up(struct irq_work *work)
 {
-	wake_up(&trace_wait);
-}
+	wake_up_all(&trace_wait);
 
-static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
+}
 
 /**
  * tracing_on - enable tracing buffers
···
 }
 EXPORT_SYMBOL_GPL(tracing_is_on);
 
-/**
- * trace_wake_up - wake up tasks waiting for trace input
- *
- * Schedules a delayed work to wake up any task that is blocked on the
- * trace_wait queue. These is used with trace_poll for tasks polling the
- * trace.
- */
-void trace_wake_up(void)
-{
-	const unsigned long delay = msecs_to_jiffies(2);
-
-	if (trace_flags & TRACE_ITER_BLOCK)
-		return;
-	schedule_delayed_work(&wakeup_work, delay);
-}
-
 static int __init set_buf_size(char *str)
 {
	unsigned long buf_size;
···
	if (!str)
		return 0;
-	ret = strict_strtoul(str, 0, &threshold);
+	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
···
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
+static void default_wait_pipe(struct trace_iterator *iter)
+{
+	DEFINE_WAIT(wait);
+
+	prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
+
+	/*
+	 * The events can happen in critical sections where
+	 * checking a work queue can cause deadlocks.
+	 * After adding a task to the queue, this flag is set
+	 * only to notify events to try to wake up the queue
+	 * using irq_work.
+	 *
+	 * We don't clear it even if the buffer is no longer
+	 * empty. The flag only causes the next event to run
+	 * irq_work to do the work queue wake up. The worse
+	 * that can happen if we race with !trace_empty() is that
+	 * an event will cause an irq_work to try to wake up
+	 * an empty queue.
+	 *
+	 * There's no reason to protect this flag either, as
+	 * the work queue and irq_work logic will do the necessary
+	 * synchronization for the wake ups. The only thing
+	 * that is necessary is that the wake up happens after
+	 * a task has been queued. It's OK for spurious wake ups.
+	 */
+	trace_wakeup_needed = true;
+
+	if (trace_empty(iter))
+		schedule();
+
+	finish_wait(&trace_wait, &wait);
+}
+
 /**
  * register_tracer - register a tracer with the ftrace system.
  * @type - the plugin for the tracer
···
  out_unlock:
	return ret;
-}
-
-void unregister_tracer(struct tracer *type)
-{
-	struct tracer **t;
-
-	mutex_lock(&trace_types_lock);
-	for (t = &trace_types; *t; t = &(*t)->next) {
-		if (*t == type)
-			goto found;
-	}
-	pr_info("Tracer %s not registered\n", type->name);
-	goto out;
-
- found:
-	*t = (*t)->next;
-
-	if (type == current_trace && tracer_enabled) {
-		tracer_enabled = 0;
-		tracing_stop();
-		if (current_trace->stop)
-			current_trace->stop(&global_trace);
-		current_trace = &nop_trace;
-	}
- out:
-	mutex_unlock(&trace_types_lock);
 }
 
 void tracing_reset(struct trace_array *tr, int cpu)
···
 
 void tracing_record_cmdline(struct task_struct *tsk)
 {
-	if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled ||
-	    !tracing_is_on())
+	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
		return;
+
+	if (!__this_cpu_read(trace_cmdline_save))
+		return;
+
+	__this_cpu_write(trace_cmdline_save, false);
 
	trace_save_cmdline(tsk);
 }
···
	return event;
 }
 
+void
+__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
+{
+	__this_cpu_write(trace_cmdline_save, true);
+	if (trace_wakeup_needed) {
+		trace_wakeup_needed = false;
+		/* irq_work_queue() supplies it's own memory barriers */
+		irq_work_queue(&trace_work_wakeup);
+	}
+	ring_buffer_unlock_commit(buffer, event);
+}
+
 static inline void
 __trace_buffer_unlock_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
-			     unsigned long flags, int pc,
-			     int wake)
+			     unsigned long flags, int pc)
 {
-	ring_buffer_unlock_commit(buffer, event);
+	__buffer_unlock_commit(buffer, event);
 
	ftrace_trace_stack(buffer, flags, 6, pc);
	ftrace_trace_userstack(buffer, flags, pc);
-
-	if (wake)
-		trace_wake_up();
 }
 
 void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
 {
-	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
+	__trace_buffer_unlock_commit(buffer, event, flags, pc);
 }
+EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
 
 struct ring_buffer_event *
 trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
···
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
 {
-	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
+	__trace_buffer_unlock_commit(buffer, event, flags, pc);
 }
 EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
 
-void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
-				       struct ring_buffer_event *event,
-				       unsigned long flags, int pc)
+void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+				     struct ring_buffer_event *event,
+				     unsigned long flags, int pc,
+				     struct pt_regs *regs)
 {
-	__trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
-}
-EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
-
-void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
-					    struct ring_buffer_event *event,
-					    unsigned long flags, int pc,
-					    struct pt_regs *regs)
-{
-	ring_buffer_unlock_commit(buffer, event);
+	__buffer_unlock_commit(buffer, event);
 
	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
 }
-EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit_regs);
+EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
 
 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
···
	entry->parent_ip = parent_ip;
 
	if (!filter_check_discard(call, entry, buffer, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 }
 
 void
···
	entry->size = trace.nr_entries;
 
	if (!filter_check_discard(call, entry, buffer, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 
  out:
	/* Again, don't let gcc optimize things here */
···
 
	save_stack_trace_user(&trace);
	if (!filter_check_discard(call, entry, buffer, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 
  out_drop_count:
	__this_cpu_dec(user_stack_count);
···
		return -ENOMEM;
 }
 
+static int buffers_allocated;
+
 void trace_printk_init_buffers(void)
 {
-	static int buffers_allocated;
-
	if (buffers_allocated)
		return;
···
 
	pr_info("ftrace: Allocated trace_printk buffers\n");
 
+	/* Expand the buffers to set size */
+	tracing_update_buffers();
+
	buffers_allocated = 1;
+
+	/*
+	 * trace_printk_init_buffers() can be called by modules.
+	 * If that happens, then we need to start cmdline recording
+	 * directly here. If the global_trace.buffer is already
+	 * allocated here, then this was called by module code.
+	 */
+	if (global_trace.buffer)
+		tracing_start_cmdline_record();
+}
+
+void trace_printk_start_comm(void)
+{
+	/* Start tracing comms if trace printk is set */
+	if (!buffers_allocated)
+		return;
+	tracing_start_cmdline_record();
+}
+
+static void trace_printk_start_stop_comm(int enabled)
+{
+	if (!buffers_allocated)
+		return;
+
+	if (enabled)
+		tracing_start_cmdline_record();
+	else
+		tracing_stop_cmdline_record();
 }
 
 /**
···
 
	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!filter_check_discard(call, entry, buffer, event)) {
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);
	}
 
···
	memcpy(&entry->buf, tbuffer, len);
	entry->buf[len] = '\0';
	if (!filter_check_discard(call, entry, buffer, event)) {
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);
	}
  out:
···
 
	if (mask == TRACE_ITER_OVERWRITE)
		ring_buffer_change_overwrite(global_trace.buffer, enabled);
+
+	if (mask == TRACE_ITER_PRINTK)
+		trace_printk_start_stop_comm(enabled);
 }
 
-static ssize_t
-tracing_trace_options_write(struct file *filp, const char __user *ubuf,
-			size_t cnt, loff_t *ppos)
+static int trace_set_options(char *option)
 {
-	char buf[64];
	char *cmp;
	int neg = 0;
-	int ret;
+	int ret = 0;
	int i;
 
-	if (cnt >= sizeof(buf))
-		return -EINVAL;
-
-	if (copy_from_user(&buf, ubuf, cnt))
-		return -EFAULT;
-
-	buf[cnt] = 0;
-	cmp = strstrip(buf);
+	cmp = strstrip(option);
 
	if (strncmp(cmp, "no", 2) == 0) {
		neg = 1;
···
		mutex_lock(&trace_types_lock);
		ret = set_tracer_option(current_trace, cmp, neg);
		mutex_unlock(&trace_types_lock);
-		if (ret)
-			return ret;
	}
+
+	return ret;
+}
+
+static ssize_t
+tracing_trace_options_write(struct file *filp, const char __user *ubuf,
+			size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+
+	if (cnt >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	trace_set_options(buf);
 
	*ppos += cnt;
 
···
 };
 
 static ssize_t
-tracing_ctrl_read(struct file *filp, char __user *ubuf,
-		  size_t cnt, loff_t *ppos)
-{
-	char buf[64];
-	int r;
-
-	r = sprintf(buf, "%u\n", tracer_enabled);
-	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-}
-
-static ssize_t
-tracing_ctrl_write(struct file *filp, const char __user *ubuf,
-		   size_t cnt, loff_t *ppos)
-{
-	struct trace_array *tr = filp->private_data;
-	unsigned long val;
-	int ret;
-
-	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
-	if (ret)
-		return ret;
-
-	val = !!val;
-
-	mutex_lock(&trace_types_lock);
-	if (tracer_enabled ^ val) {
-
-		/* Only need to warn if this is used to change the state */
-		WARN_ONCE(1, "tracing_enabled is deprecated. Use tracing_on");
-
-		if (val) {
-			tracer_enabled = 1;
-			if (current_trace->start)
-				current_trace->start(tr);
-			tracing_start();
-		} else {
-			tracer_enabled = 0;
-			tracing_stop();
-			if (current_trace->stop)
-				current_trace->stop(tr);
-		}
-	}
-	mutex_unlock(&trace_types_lock);
-
-	*ppos += cnt;
-
-	return cnt;
-}
-
-static ssize_t
 tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
 {
···
	 * expanding it later.
	 */
	ring_buffer_expanded = 1;
+
+	/* May be called before buffers are initialized */
+	if (!global_trace.buffer)
+		return 0;
 
	ret = ring_buffer_resize(global_trace.buffer, size, cpu);
	if (ret < 0)
···
	}
 }
 
-
-void default_wait_pipe(struct trace_iterator *iter)
-{
-	DEFINE_WAIT(wait);
-
-	prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
-
-	if (trace_empty(iter))
-		schedule();
-
-	finish_wait(&trace_wait, &wait);
-}
-
 /*
  * This is a make-shift waitqueue.
  * A tracer might use this callback on some rare cases:
···
			return -EINTR;
 
		/*
-		 * We block until we read something and tracing is disabled.
+		 * We block until we read something and tracing is enabled.
		 * We still block if tracing is disabled, but we have never
		 * read anything. This allows a user to cat this file, and
		 * then enable tracing. But after we have read something,
···
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
-		if (!tracer_enabled && iter->pos)
+		if (tracing_is_enabled() && iter->pos)
			break;
	}
 
···
	} else
		entry->buf[cnt] = '\0';
 
-	ring_buffer_unlock_commit(buffer, event);
+	__buffer_unlock_commit(buffer, event);
 
	written = cnt;
 
···
	if (max_tr.buffer)
		ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func);
 
+	/*
+	 * New clock may not be consistent with the previous clock.
+	 * Reset the buffer so that it doesn't have incomparable timestamps.
+	 */
+	tracing_reset_online_cpus(&global_trace);
+	if (max_tr.buffer)
+		tracing_reset_online_cpus(&max_tr);
+
	mutex_unlock(&trace_types_lock);
 
	*fpos += cnt;
···
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
-	.llseek		= generic_file_llseek,
-};
-
-static const struct file_operations tracing_ctrl_fops = {
-	.open		= tracing_open_generic,
-	.read		= tracing_ctrl_read,
-	.write		= tracing_ctrl_write,
	.llseek		= generic_file_llseek,
 };
 
···
	t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu));
	usec_rem = do_div(t, USEC_PER_SEC);
	trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
+
+	cnt = ring_buffer_dropped_events_cpu(tr->buffer, cpu);
+	trace_seq_printf(s, "dropped events: %ld\n", cnt);
 
	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
 
···
 
	d_tracer = tracing_init_dentry();
 
-	trace_create_file("tracing_enabled", 0644, d_tracer,
-			  &global_trace, &tracing_ctrl_fops);
-
	trace_create_file("trace_options", 0644, d_tracer,
			  NULL, &tracing_iter_fops);
 
···
 
	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
+		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();
 
	/* To save memory, keep the ring buffer size to its minimum */
···
 #endif
 
	trace_init_cmdlines();
+	init_irq_work(&trace_work_wakeup, trace_wake_up);
 
	register_tracer(&nop_trace);
	current_trace = &nop_trace;
···
				       &trace_panic_notifier);
 
	register_die_notifier(&trace_die_notifier);
+
+	while (trace_boot_options) {
+		char *option;
+
+		option = strsep(&trace_boot_options, ",");
+		trace_set_options(option);
+	}
 
	return 0;
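Taken together, these trace.c hunks replace the old timer-based trace_wake_up() with a flag-plus-irq_work handshake: the reader sets trace_wakeup_needed before sleeping, and a writer that sees the flag queues an irq_work whose callback does the actual wake_up_all(). A condensed sketch of the two halves, with names mirroring the diff; this is not verbatim kernel code:

#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
static bool trace_wakeup_needed;
static struct irq_work trace_work_wakeup;

/* irq_work callback: the only place the wait queue is woken */
static void trace_wake_up(struct irq_work *work)
{
	wake_up_all(&trace_wait);
}

/* boot-time wiring, as in tracer_alloc_buffers() */
static void wakeup_init(void)
{
	init_irq_work(&trace_work_wakeup, trace_wake_up);
}

/* reader side (default_wait_pipe): announce interest, then sleep */
static void reader_wait(bool empty)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
	trace_wakeup_needed = true;	/* the next commit will poke us */
	if (empty)
		schedule();
	finish_wait(&trace_wait, &wait);
}

/* writer side (__buffer_unlock_commit): may run in NMI context */
static void writer_commit(void)
{
	if (trace_wakeup_needed) {
		trace_wakeup_needed = false;
		irq_work_queue(&trace_work_wakeup);	/* deferred, irq-safe */
	}
}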
kernel/trace/trace.h  +6 -8

···
	int			(*set_flag)(u32 old_flags, u32 bit, int set);
	struct tracer		*next;
	struct tracer_flags	*flags;
-	int			print_max;
-	int			use_max_tr;
+	bool			print_max;
+	bool			use_max_tr;
 };
 
···
 
 int tracer_init(struct tracer *t, struct trace_array *tr);
 int tracing_is_enabled(void);
-void trace_wake_up(void);
 void tracing_reset(struct trace_array *tr, int cpu);
 void tracing_reset_online_cpus(struct trace_array *tr);
 void tracing_reset_current(int cpu);
···
					  unsigned long len,
					  unsigned long flags,
					  int pc);
-void trace_buffer_unlock_commit(struct ring_buffer *buffer,
-				struct ring_buffer_event *event,
-				unsigned long flags, int pc);
 
 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);
 
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);
+
+void __buffer_unlock_commit(struct ring_buffer *buffer,
+			    struct ring_buffer_event *event);
 
 int trace_empty(struct trace_iterator *iter);
 
···
 
 void tracing_iter_reset(struct trace_iterator *iter, int cpu);
 
-void default_wait_pipe(struct trace_iterator *iter);
 void poll_wait_pipe(struct trace_iterator *iter);
 
 void ftrace(struct trace_array *tr,
···
 void tracing_stop_sched_switch_record(void);
 void tracing_start_sched_switch_record(void);
 int register_tracer(struct tracer *type);
-void unregister_tracer(struct tracer *type);
 int is_tracing_stopped(void);
 enum trace_file_type {
	TRACE_FILE_LAT_FMT	= 1,
···
 extern const char *__stop___trace_bprintk_fmt[];
 
 void trace_printk_init_buffers(void);
+void trace_printk_start_comm(void);
 
 #undef FTRACE_ENTRY
 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
kernel/trace/trace_branch.c  +2 -2

···
	entry->correct = val == expect;
 
	if (!filter_check_discard(call, entry, buffer, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 
  out:
	atomic_dec(&tr->data[cpu]->disabled);
···
	}
	return register_tracer(&branch_trace);
 }
-device_initcall(init_branch_tracer);
+core_initcall(init_branch_tracer);
 
 #else
 static inline
kernel/trace/trace_events.c  +31 -20

···
	mutex_unlock(&event_mutex);
 }
 
-static int
-ftrace_event_seq_open(struct inode *inode, struct file *file)
-{
-	const struct seq_operations *seq_ops;
-
-	if ((file->f_mode & FMODE_WRITE) &&
-	    (file->f_flags & O_TRUNC))
-		ftrace_clear_events();
-
-	seq_ops = inode->i_private;
-	return seq_open(file, seq_ops);
-}
-
 static ssize_t
 event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
···
	return r;
 }
 
+static int ftrace_event_avail_open(struct inode *inode, struct file *file);
+static int ftrace_event_set_open(struct inode *inode, struct file *file);
+
 static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
···
 };
 
 static const struct file_operations ftrace_avail_fops = {
-	.open = ftrace_event_seq_open,
+	.open = ftrace_event_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
 };
 
 static const struct file_operations ftrace_set_event_fops = {
-	.open = ftrace_event_seq_open,
+	.open = ftrace_event_set_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
···
	       "'events' directory\n");
 
	return d_events;
+}
+
+static int
+ftrace_event_avail_open(struct inode *inode, struct file *file)
+{
+	const struct seq_operations *seq_ops = &show_event_seq_ops;
+
+	return seq_open(file, seq_ops);
+}
+
+static int
+ftrace_event_set_open(struct inode *inode, struct file *file)
+{
+	const struct seq_operations *seq_ops = &show_set_event_seq_ops;
+
+	if ((file->f_mode & FMODE_WRITE) &&
+	    (file->f_flags & O_TRUNC))
+		ftrace_clear_events();
+
+	return seq_open(file, seq_ops);
 }
 
 static struct dentry *
···
		if (ret)
			pr_warn("Failed to enable trace event: %s\n", token);
	}
+
+	trace_printk_start_comm();
+
	return 0;
 }
···
		return 0;
 
	entry = debugfs_create_file("available_events", 0444, d_tracer,
-				    (void *)&show_event_seq_ops,
-				    &ftrace_avail_fops);
+				    NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");
 
	entry = debugfs_create_file("set_event", 0644, d_tracer,
-				    (void *)&show_set_event_seq_ops,
-				    &ftrace_set_event_fops);
+				    NULL, &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");
···
	entry->ip = ip;
	entry->parent_ip = parent_ip;
 
-	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
+	trace_buffer_unlock_commit(buffer, event, flags, pc);
 
  out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
kernel/trace/trace_events_filter.c  +2 -2

···
		}
	} else {
		if (field->is_signed)
-			ret = strict_strtoll(pred->regex.pattern, 0, &val);
+			ret = kstrtoll(pred->regex.pattern, 0, &val);
		else
-			ret = strict_strtoull(pred->regex.pattern, 0, &val);
+			ret = kstrtoull(pred->regex.pattern, 0, &val);
		if (ret) {
			parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
			return -EINVAL;
kernel/trace/trace_functions.c  +2 -3

···
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
-	ret = strict_strtoul(number, 0, (unsigned long *)&count);
+	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;
 
···
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
 }
-device_initcall(init_function_trace);
-
+core_initcall(init_function_trace);
kernel/trace/trace_functions_graph.c  +3 -3

···
	entry	= ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	if (!filter_current_check_discard(buffer, call, entry, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 
	return 1;
 }
···
	entry	= ring_buffer_event_data(event);
	entry->ret = *trace;
	if (!filter_current_check_discard(buffer, call, entry, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 }
 
 void trace_graph_return(struct ftrace_graph_ret *trace)
···
	return register_tracer(&graph_trace);
 }
 
-device_initcall(init_graph_trace);
+core_initcall(init_graph_trace);
kernel/trace/trace_irqsoff.c  +7 -7

···
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
-	.print_max	= 1,
+	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flags		= &tracer_flags,
···
 #endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
-	.use_max_tr	= 1,
+	.use_max_tr	= true,
 };
 # define register_irqsoff(trace) register_tracer(&trace)
 #else
···
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
-	.print_max	= 1,
+	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flags		= &tracer_flags,
···
 #endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
-	.use_max_tr	= 1,
+	.use_max_tr	= true,
 };
 # define register_preemptoff(trace) register_tracer(&trace)
 #else
···
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
-	.print_max	= 1,
+	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flags		= &tracer_flags,
···
 #endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
-	.use_max_tr	= 1,
+	.use_max_tr	= true,
 };
 
 # define register_preemptirqsoff(trace) register_tracer(&trace)
···
 
	return 0;
 }
-device_initcall(init_irqsoff_tracer);
+core_initcall(init_irqsoff_tracer);
kernel/trace/trace_kprobe.c  +5 -5

···
		return -EINVAL;
	}
	/* an address specified */
-	ret = strict_strtoul(&argv[1][0], 0, (unsigned long *)&addr);
+	ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
	if (ret) {
		pr_info("Failed to parse address.\n");
		return ret;
···
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
 
	if (!filter_current_check_discard(buffer, call, entry, event))
-		trace_nowake_buffer_unlock_commit_regs(buffer, event,
-						       irq_flags, pc, regs);
+		trace_buffer_unlock_commit_regs(buffer, event,
+						irq_flags, pc, regs);
 }
 
 /* Kretprobe handler */
···
	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
 
	if (!filter_current_check_discard(buffer, call, entry, event))
-		trace_nowake_buffer_unlock_commit_regs(buffer, event,
-						       irq_flags, pc, regs);
+		trace_buffer_unlock_commit_regs(buffer, event,
+						irq_flags, pc, regs);
 }
 
 /* Event entry printers */
kernel/trace/trace_probe.c  +7 -7

···
			goto fail;
 
		type++;
-		if (strict_strtoul(type, 0, &bs))
+		if (kstrtoul(type, 0, &bs))
			goto fail;
 
		switch (bs) {
···
 
	tmp = strchr(symbol, '+');
	if (tmp) {
-		/* skip sign because strict_strtol doesn't accept '+' */
-		ret = strict_strtoul(tmp + 1, 0, offset);
+		/* skip sign because kstrtoul doesn't accept '+' */
+		ret = kstrtoul(tmp + 1, 0, offset);
		if (ret)
			return ret;
 
···
		else
			ret = -EINVAL;
	} else if (isdigit(arg[5])) {
-		ret = strict_strtoul(arg + 5, 10, &param);
+		ret = kstrtoul(arg + 5, 10, &param);
		if (ret || param > PARAM_MAX_STACK)
			ret = -EINVAL;
		else {
···
 
	case '@':	/* memory or symbol */
		if (isdigit(arg[1])) {
-			ret = strict_strtoul(arg + 1, 0, &param);
+			ret = kstrtoul(arg + 1, 0, &param);
			if (ret)
				break;
 
···
		break;
 
	case '+':	/* deref memory */
-		arg++;	/* Skip '+', because strict_strtol() rejects it. */
+		arg++;	/* Skip '+', because kstrtol() rejects it. */
	case '-':
		tmp = strchr(arg, '(');
		if (!tmp)
			break;
 
		*tmp = '\0';
-		ret = strict_strtol(arg, 0, &offset);
+		ret = kstrtol(arg, 0, &offset);
 
		if (ret)
			break;
kernel/trace/trace_sched_switch.c  +1 -3

···
	entry->next_cpu	= task_cpu(wakee);
 
	if (!filter_check_discard(call, entry, buffer, event))
-		ring_buffer_unlock_commit(buffer, event);
-	ftrace_trace_stack(tr->buffer, flags, 6, pc);
-	ftrace_trace_userstack(tr->buffer, flags, pc);
+		trace_buffer_unlock_commit(buffer, event, flags, pc);
 }
 
 static void
kernel/trace/trace_sched_wakeup.c  +5 -5

···
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
-	.print_max	= 1,
+	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flags		= &tracer_flags,
···
 #endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
-	.use_max_tr	= 1,
+	.use_max_tr	= true,
 };
 
 static struct tracer wakeup_rt_tracer __read_mostly =
···
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.wait_pipe	= poll_wait_pipe,
-	.print_max	= 1,
+	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flags		= &tracer_flags,
···
 #endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
-	.use_max_tr	= 1,
+	.use_max_tr	= true,
 };
 
 __init static int init_wakeup_tracer(void)
···
 
	return 0;
 }
-device_initcall(init_wakeup_tracer);
+core_initcall(init_wakeup_tracer);
kernel/trace/trace_selftest.c  +1 -12

···
				     int (*func)(void))
 {
	int save_ftrace_enabled = ftrace_enabled;
-	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;
···
 
	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
-	tracer_enabled = 1;
 
	/* passed in by parameter to fool gcc from optimizing */
	func();
···
 
  out:
	ftrace_enabled = save_ftrace_enabled;
-	tracer_enabled = save_tracer_enabled;
 
	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);
···
 trace_selftest_function_recursion(void)
 {
	int save_ftrace_enabled = ftrace_enabled;
-	int save_tracer_enabled = tracer_enabled;
	char *func_name;
	int len;
	int ret;
···
 
	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
-	tracer_enabled = 1;
 
	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
···
	ret = 0;
 out:
	ftrace_enabled = save_ftrace_enabled;
-	tracer_enabled = save_tracer_enabled;
 
	return ret;
 }
···
 trace_selftest_function_regs(void)
 {
	int save_ftrace_enabled = ftrace_enabled;
-	int save_tracer_enabled = tracer_enabled;
	char *func_name;
	int len;
	int ret;
···
 
	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
-	tracer_enabled = 1;
 
	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
···
	ret = 0;
 out:
	ftrace_enabled = save_ftrace_enabled;
-	tracer_enabled = save_tracer_enabled;
 
	return ret;
 }
···
 trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 {
	int save_ftrace_enabled = ftrace_enabled;
-	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;
···
 
	/* start the tracing */
	ftrace_enabled = 1;
-	tracer_enabled = 1;
 
	ret = tracer_init(trace, tr);
	if (ret) {
···
	ret = trace_selftest_function_regs();
 out:
	ftrace_enabled = save_ftrace_enabled;
-	tracer_enabled = save_tracer_enabled;
 
	/* kill ftrace totally if we failed */
	if (ret)
···
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
+	printk("ret = %d\n", ret);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
kernel/trace/trace_syscalls.c  +29 -32

···
 static int syscall_exit_register(struct ftrace_event_call *event,
				 enum trace_reg type, void *data);
 
-static int syscall_enter_define_fields(struct ftrace_event_call *call);
-static int syscall_exit_define_fields(struct ftrace_event_call *call);
-
 static struct list_head *
 syscall_get_enter_fields(struct ftrace_event_call *call)
 {
···
	return &entry->enter_fields;
 }
-
-struct trace_event_functions enter_syscall_print_funcs = {
-	.trace		= print_syscall_enter,
-};
-
-struct trace_event_functions exit_syscall_print_funcs = {
-	.trace		= print_syscall_exit,
-};
-
-struct ftrace_event_class event_class_syscall_enter = {
-	.system		= "syscalls",
-	.reg		= syscall_enter_register,
-	.define_fields	= syscall_enter_define_fields,
-	.get_fields	= syscall_get_enter_fields,
-	.raw_init	= init_syscall_trace,
-};
-
-struct ftrace_event_class event_class_syscall_exit = {
-	.system		= "syscalls",
-	.reg		= syscall_exit_register,
-	.define_fields	= syscall_exit_define_fields,
-	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
-	.raw_init	= init_syscall_trace,
-};
 
 extern struct syscall_metadata *__start_syscalls_metadata[];
 extern struct syscall_metadata *__stop_syscalls_metadata[];
···
	mutex_unlock(&syscall_trace_lock);
 }
 
-int init_syscall_trace(struct ftrace_event_call *call)
+static int init_syscall_trace(struct ftrace_event_call *call)
 {
	int id;
	int num;
···
 
	return id;
 }
+
+struct trace_event_functions enter_syscall_print_funcs = {
+	.trace		= print_syscall_enter,
+};
+
+struct trace_event_functions exit_syscall_print_funcs = {
+	.trace		= print_syscall_exit,
+};
+
+struct ftrace_event_class event_class_syscall_enter = {
+	.system		= "syscalls",
+	.reg		= syscall_enter_register,
+	.define_fields	= syscall_enter_define_fields,
+	.get_fields	= syscall_get_enter_fields,
+	.raw_init	= init_syscall_trace,
+};
+
+struct ftrace_event_class event_class_syscall_exit = {
+	.system		= "syscalls",
+	.reg		= syscall_exit_register,
+	.define_fields	= syscall_exit_define_fields,
+	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
+	.raw_init	= init_syscall_trace,
+};
 
 unsigned long __init __weak arch_syscall_addr(int nr)
 {
···
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
 }
 
-int perf_sysenter_enable(struct ftrace_event_call *call)
+static int perf_sysenter_enable(struct ftrace_event_call *call)
 {
	int ret = 0;
	int num;
···
	return ret;
 }
 
-void perf_sysenter_disable(struct ftrace_event_call *call)
+static void perf_sysenter_disable(struct ftrace_event_call *call)
 {
	int num;
 
···
	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
 }
 
-int perf_sysexit_enable(struct ftrace_event_call *call)
+static int perf_sysexit_enable(struct ftrace_event_call *call)
 {
	int ret = 0;
	int num;
···
	return ret;
 }
 
-void perf_sysexit_disable(struct ftrace_event_call *call)
+static void perf_sysexit_disable(struct ftrace_event_call *call)
 {
	int num;
kernel/trace/trace_uprobe.c  +1 -1

···
	if (ret)
		goto fail_address_parse;
 
-	ret = strict_strtoul(arg, 0, &offset);
+	ret = kstrtoul(arg, 0, &offset);
	if (ret)
		goto fail_address_parse;