Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
tracing: Fix return of trace_dump_stack()
ksym_tracer: Fix bad cast
tracing/power: Remove two exports
tracing: Change event->profile_count to be int type
tracing: Simplify trace_option_write()
tracing: Remove useless trace option
tracing: Use seq file for trace_clock
tracing: Use seq file for trace_options
function-graph: Allow writing the same val to set_graph_function
ftrace: Call trace_parser_clear() properly
ftrace: Return EINVAL when writing invalid val to set_ftrace_filter
tracing: Move a printk out of ftrace_raw_reg_event_foo()
tracing: Pull up calls to trace_define_common_fields()
tracing: Extract duplicate ftrace_raw_init_event_foo()
ftrace.h: Use common pr_info fmt string
tracing: Add stack trace to irqsoff tracer
tracing: Add trace_dump_stack()
ring-buffer: Move resize integrity check under reader lock
ring-buffer: Use sync sched protection on ring buffer resizing
tracing: Fix wrong usage of strstrip in trace_ksyms

+203 -282
+2 -2
include/linux/ftrace_event.h
···
131 131 void *mod;
132 132 void *data;
133 133
134 - atomic_t profile_count;
134 + int profile_count;
135 135 int (*profile_enable)(struct ftrace_event_call *);
136 136 void (*profile_disable)(struct ftrace_event_call *);
137 137 };
···
158 158 FILTER_PTR_STRING,
159 159 };
160 160
161 - extern int trace_define_common_fields(struct ftrace_event_call *call);
161 + extern int trace_event_raw_init(struct ftrace_event_call *call);
162 162 extern int trace_define_field(struct ftrace_event_call *call, const char *type,
163 163 const char *name, int offset, int size,
164 164 int is_signed, int filter_type);
+3
include/linux/kernel.h
···
535 535 __trace_printk(unsigned long ip, const char *fmt, ...)
536 536 __attribute__ ((format (printf, 2, 3)));
537 537
538 + extern void trace_dump_stack(void);
539 +
538 540 /*
539 541 * The double __builtin_constant_p is because gcc will give us an error
540 542 * if we try to allocate the static variable to fmt if it is not a
···
570 568 static inline void tracing_start(void) { }
571 569 static inline void tracing_stop(void) { }
572 570 static inline void ftrace_off_permanent(void) { }
571 + static inline void trace_dump_stack(void) { }
573 572 static inline int
574 573 trace_printk(const char *fmt, ...)
575 574 {
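The new trace_dump_stack() hook is meant to be dropped into code the same way trace_printk() is while debugging. A minimal usage sketch (the surrounding helper is made up for illustration; only the two tracing calls come from this series):

#include <linux/kernel.h>

/* Hypothetical debug helper, not part of this merge. */
static void report_unexpected_state(int value)
{
	/* Leave a note in the trace buffer... */
	trace_printk("unexpected value: %d\n", value);
	/* ...followed by a stack back trace of how we got here. */
	trace_dump_stack();
}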
+2 -4
include/linux/syscalls.h
···
102 102 #ifdef CONFIG_EVENT_PROFILE
103 103
104 104 #define TRACE_SYS_ENTER_PROFILE_INIT(sname) \
105 - .profile_count = ATOMIC_INIT(-1), \
106 105 .profile_enable = prof_sysenter_enable, \
107 106 .profile_disable = prof_sysenter_disable,
108 107
109 108 #define TRACE_SYS_EXIT_PROFILE_INIT(sname) \
110 - .profile_count = ATOMIC_INIT(-1), \
111 109 .profile_enable = prof_sysexit_enable, \
112 110 .profile_disable = prof_sysexit_disable,
113 111 #else
···
143 145 .name = "sys_enter"#sname, \
144 146 .system = "syscalls", \
145 147 .event = &enter_syscall_print_##sname, \
146 - .raw_init = init_syscall_trace, \
148 + .raw_init = trace_event_raw_init, \
147 149 .show_format = syscall_enter_format, \
148 150 .define_fields = syscall_enter_define_fields, \
149 151 .regfunc = reg_event_syscall_enter, \
···
165 167 .name = "sys_exit"#sname, \
166 168 .system = "syscalls", \
167 169 .event = &exit_syscall_print_##sname, \
168 - .raw_init = init_syscall_trace, \
170 + .raw_init = trace_event_raw_init, \
169 171 .show_format = syscall_exit_format, \
170 172 .define_fields = syscall_exit_define_fields, \
171 173 .regfunc = reg_event_syscall_exit, \
+6 -50
include/trace/ftrace.h
···
436 436 struct ftrace_raw_##call field; \
437 437 int ret; \
438 438 \
439 - ret = trace_define_common_fields(event_call); \
440 - if (ret) \
441 - return ret; \
442 - \
443 439 tstruct; \
444 440 \
445 441 return ret; \
···
555 559 *
556 560 * static int ftrace_reg_event_<call>(struct ftrace_event_call *unused)
557 561 * {
558 - * int ret;
559 - *
560 - * ret = register_trace_<call>(ftrace_event_<call>);
561 - * if (!ret)
562 - * pr_info("event trace: Could not activate trace point "
563 - * "probe to <call>");
564 - * return ret;
562 + * return register_trace_<call>(ftrace_event_<call>);
565 563 * }
566 564 *
567 565 * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
···
613 623 * .trace = ftrace_raw_output_<call>, <-- stage 2
614 624 * };
615 625 *
616 - * static int ftrace_raw_init_event_<call>(struct ftrace_event_call *unused)
617 - * {
618 - * int id;
619 - *
620 - * id = register_ftrace_event(&ftrace_event_type_<call>);
621 - * if (!id)
622 - * return -ENODEV;
623 - * event_<call>.id = id;
624 - * return 0;
625 - * }
626 - *
627 626 * static struct ftrace_event_call __used
628 627 * __attribute__((__aligned__(4)))
629 628 * __attribute__((section("_ftrace_events"))) event_<call> = {
630 629 * .name = "<call>",
631 630 * .system = "<system>",
632 - * .raw_init = ftrace_raw_init_event_<call>,
631 + * .raw_init = trace_event_raw_init,
633 632 * .regfunc = ftrace_reg_event_<call>,
634 633 * .unregfunc = ftrace_unreg_event_<call>,
635 634 * .show_format = ftrace_format_<call>,
···
626 647 *
627 648 */
628 649
629 - #undef TP_FMT
630 - #define TP_FMT(fmt, args...) fmt "\n", ##args
631 -
632 650 #ifdef CONFIG_EVENT_PROFILE
633 651
634 652 #define _TRACE_PROFILE_INIT(call) \
635 - .profile_count = ATOMIC_INIT(-1), \
636 653 .profile_enable = ftrace_profile_enable_##call, \
637 654 .profile_disable = ftrace_profile_disable_##call,
···
703 728 \
704 729 static int ftrace_raw_reg_event_##call(struct ftrace_event_call *unused)\
705 730 { \
706 - int ret; \
707 - \
708 - ret = register_trace_##call(ftrace_raw_event_##call); \
709 - if (ret) \
710 - pr_info("event trace: Could not activate trace point " \
711 - "probe to " #call "\n"); \
712 - return ret; \
731 + return register_trace_##call(ftrace_raw_event_##call); \
713 732 } \
714 733 \
715 734 static void ftrace_raw_unreg_event_##call(struct ftrace_event_call *unused)\
···
713 744 \
714 745 static struct trace_event ftrace_event_type_##call = { \
715 746 .trace = ftrace_raw_output_##call, \
716 - }; \
717 - \
718 - static int ftrace_raw_init_event_##call(struct ftrace_event_call *unused)\
719 - { \
720 - int id; \
721 - \
722 - id = register_ftrace_event(&ftrace_event_type_##call); \
723 - if (!id) \
724 - return -ENODEV; \
725 - event_##call.id = id; \
726 - INIT_LIST_HEAD(&event_##call.fields); \
727 - return 0; \
728 - }
747 + };
729 748
730 749 #undef DEFINE_EVENT_PRINT
731 750 #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
···
733 776 .name = #call, \
734 777 .system = __stringify(TRACE_SYSTEM), \
735 778 .event = &ftrace_event_type_##call, \
736 - .raw_init = ftrace_raw_init_event_##call, \
779 + .raw_init = trace_event_raw_init, \
737 780 .regfunc = ftrace_raw_reg_event_##call, \
738 781 .unregfunc = ftrace_raw_unreg_event_##call, \
739 782 .show_format = ftrace_format_##template, \
···
750 793 .name = #call, \
751 794 .system = __stringify(TRACE_SYSTEM), \
752 795 .event = &ftrace_event_type_##call, \
753 - .raw_init = ftrace_raw_init_event_##call, \
796 + .raw_init = trace_event_raw_init, \
754 797 .regfunc = ftrace_raw_reg_event_##call, \
755 798 .unregfunc = ftrace_raw_unreg_event_##call, \
756 799 .show_format = ftrace_format_##call, \
···
910 953 perf_swevent_put_recursion_context(rctx); \
911 954 end_recursion: \
912 955 local_irq_restore(irq_flags); \
913 - \
914 956 }
915 957
916 958 #undef DEFINE_EVENT
+19 -11
kernel/trace/ftrace.c
···
1724 1724 return ftrace_match(str, regex, len, type);
1725 1725 }
1726 1726
1727 - static void ftrace_match_records(char *buff, int len, int enable)
1727 + static int ftrace_match_records(char *buff, int len, int enable)
1728 1728 {
1729 1729 unsigned int search_len;
1730 1730 struct ftrace_page *pg;
···
1733 1733 char *search;
1734 1734 int type;
1735 1735 int not;
1736 + int found = 0;
1736 1737
1737 1738 flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1738 1739 type = filter_parse_regex(buff, len, &search, &not);
···
1751 1750 rec->flags &= ~flag;
1752 1751 else
1753 1752 rec->flags |= flag;
1753 + found = 1;
1754 1754 }
1755 1755 /*
1756 1756 * Only enable filtering if we have a function that
···
1761 1759 ftrace_filtered = 1;
1762 1760 } while_for_each_ftrace_rec();
1763 1761 mutex_unlock(&ftrace_lock);
1762 +
1763 + return found;
1764 1764 }
···
1784 1780 return 1;
1785 1781 }
1786 1782
1787 - static void ftrace_match_module_records(char *buff, char *mod, int enable)
1783 + static int ftrace_match_module_records(char *buff, char *mod, int enable)
1788 1784 {
1789 1785 unsigned search_len = 0;
1790 1786 struct ftrace_page *pg;
···
1793 1789 char *search = buff;
1794 1790 unsigned long flag;
1795 1791 int not = 0;
1792 + int found = 0;
1796 1793
1797 1794 flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1798 1795
···
1824 1819 rec->flags &= ~flag;
1825 1820 else
1826 1821 rec->flags |= flag;
1822 + found = 1;
1827 1823 }
1828 1824 if (enable && (rec->flags & FTRACE_FL_FILTER))
1829 1825 ftrace_filtered = 1;
1830 1826
1831 1827 } while_for_each_ftrace_rec();
1832 1828 mutex_unlock(&ftrace_lock);
1829 +
1830 + return found;
1833 1831 }
1834 1832
1835 1833 /*
···
1861 1853 if (!strlen(mod))
1862 1854 return -EINVAL;
1863 1855
1864 - ftrace_match_module_records(func, mod, enable);
1865 - return 0;
1856 + if (ftrace_match_module_records(func, mod, enable))
1857 + return 0;
1858 + return -EINVAL;
1866 1859 }
1867 1860
1868 1861 static struct ftrace_func_command ftrace_mod_cmd = {
···
2160 2151 func = strsep(&next, ":");
2161 2152
2162 2153 if (!next) {
2163 - ftrace_match_records(func, len, enable);
2164 - return 0;
2154 + if (ftrace_match_records(func, len, enable))
2155 + return 0;
2156 + return ret;
2165 2157 }
2166 2158
2167 2159 /* command found */
···
2208 2198 !trace_parser_cont(parser)) {
2209 2199 ret = ftrace_process_regex(parser->buffer,
2210 2200 parser->idx, enable);
2201 + trace_parser_clear(parser);
2211 2202 if (ret)
2212 2203 goto out_unlock;
2213 -
2214 - trace_parser_clear(parser);
2215 2204 }
2216 2205
2217 2206 ret = read;
···
2552 2543 exists = true;
2553 2544 break;
2554 2545 }
2555 - if (!exists) {
2546 + if (!exists)
2556 2547 array[(*idx)++] = rec->ip;
2557 - found = 1;
2558 - }
2548 + found = 1;
2559 2549 }
2560 2550 } while_for_each_ftrace_rec();
2561 2551
-2
kernel/trace/power-traces.c
···
14 14 #define CREATE_TRACE_POINTS
15 15 #include <trace/events/power.h>
16 16
17 - EXPORT_TRACEPOINT_SYMBOL_GPL(power_start);
18 - EXPORT_TRACEPOINT_SYMBOL_GPL(power_end);
19 17 EXPORT_TRACEPOINT_SYMBOL_GPL(power_frequency);
20 18
+11 -18
kernel/trace/ring_buffer.c
···
1193 1193 struct list_head *p;
1194 1194 unsigned i;
1195 1195
1196 - atomic_inc(&cpu_buffer->record_disabled);
1197 - synchronize_sched();
1198 -
1199 1196 spin_lock_irq(&cpu_buffer->reader_lock);
1200 1197 rb_head_page_deactivate(cpu_buffer);
···
1208 1211 return;
1209 1212
1210 1213 rb_reset_cpu(cpu_buffer);
1211 - spin_unlock_irq(&cpu_buffer->reader_lock);
1212 -
1213 1214 rb_check_pages(cpu_buffer);
1214 1215
1215 - atomic_dec(&cpu_buffer->record_disabled);
1216 -
1216 + spin_unlock_irq(&cpu_buffer->reader_lock);
1217 1217 }
1218 1218
1219 1219 static void
···
1220 1226 struct buffer_page *bpage;
1221 1227 struct list_head *p;
1222 1228 unsigned i;
1223 -
1224 - atomic_inc(&cpu_buffer->record_disabled);
1225 - synchronize_sched();
1226 1229
1227 1230 spin_lock_irq(&cpu_buffer->reader_lock);
1228 1231 rb_head_page_deactivate(cpu_buffer);
···
1233 1242 list_add_tail(&bpage->list, cpu_buffer->pages);
1234 1243 }
1235 1244 rb_reset_cpu(cpu_buffer);
1236 - spin_unlock_irq(&cpu_buffer->reader_lock);
1237 -
1238 1245 rb_check_pages(cpu_buffer);
1239 1246
1240 - atomic_dec(&cpu_buffer->record_disabled);
1247 + spin_unlock_irq(&cpu_buffer->reader_lock);
1241 1248 }
1242 1249
1243 1250 /**
1244 1251 * ring_buffer_resize - resize the ring buffer
1245 1252 * @buffer: the buffer to resize.
1246 1253 * @size: the new size.
1247 - *
1248 - * The tracer is responsible for making sure that the buffer is
1249 - * not being used while changing the size.
1250 - * Note: We may be able to change the above requirement by using
1251 - * RCU synchronizations.
1252 1254 *
1253 1255 * Minimum size is 2 * BUF_PAGE_SIZE.
1254 1256 *
···
1273 1289
1274 1290 if (size == buffer_size)
1275 1291 return size;
1292 +
1293 + atomic_inc(&buffer->record_disabled);
1294 +
1295 + /* Make sure all writers are done with this buffer. */
1296 + synchronize_sched();
1276 1297
1277 1298 mutex_lock(&buffer->mutex);
1278 1299 get_online_cpus();
···
1341 1352 put_online_cpus();
1342 1353 mutex_unlock(&buffer->mutex);
1343 1354
1355 + atomic_dec(&buffer->record_disabled);
1356 +
1344 1357 return size;
1345 1358
1346 1359 free_pages:
···
1352 1361 }
1353 1362 put_online_cpus();
1354 1363 mutex_unlock(&buffer->mutex);
1364 + atomic_dec(&buffer->record_disabled);
1355 1365 return -ENOMEM;
1356 1366
1357 1367 /*
···
1362 1370 out_fail:
1363 1371 put_online_cpus();
1364 1372 mutex_unlock(&buffer->mutex);
1373 + atomic_dec(&buffer->record_disabled);
1365 1374 return -1;
1366 1375 }
1367 1376 EXPORT_SYMBOL_GPL(ring_buffer_resize);
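The resize path now quiesces writers itself rather than relying on the caller. A simplified outline of the ordering that ring_buffer_resize() follows after this change (error paths, which also re-enable recording, are omitted, and the function name here is just a placeholder):

static int ring_buffer_resize_outline(struct ring_buffer *buffer, unsigned long size)
{
	atomic_inc(&buffer->record_disabled);	/* block new writers */
	synchronize_sched();			/* wait for writers already inside */

	mutex_lock(&buffer->mutex);
	get_online_cpus();
	/* grow or shrink the per-cpu page lists, entirely under reader_lock */
	put_online_cpus();
	mutex_unlock(&buffer->mutex);

	atomic_dec(&buffer->record_disabled);	/* writers may run again */
	return size;
}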
+84 -112
kernel/trace/trace.c
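The diff below converts the trace_options and trace_clock files from hand-rolled read() implementations to the single_open()/seq_file pattern, which drops the fixed-size buffers and the manual simple_read_from_buffer() bookkeeping. The generic shape of that pattern looks like this (the names are illustrative, not taken from the patch):

#include <linux/fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
	/* seq_printf() grows the output as needed; no preallocated buffer. */
	seq_printf(m, "state: %d\n", 42);
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, NULL);
}

static const struct file_operations example_fops = {
	.open		= example_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};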
···
313 313 "bin",
314 314 "block",
315 315 "stacktrace",
316 - "sched-tree",
317 316 "trace_printk",
318 317 "ftrace_preempt",
319 318 "branch",
···
1148 1149 int pc)
1149 1150 {
1150 1151 __ftrace_trace_stack(tr->buffer, flags, skip, pc);
1152 + }
1153 +
1154 + /**
1155 + * trace_dump_stack - record a stack back trace in the trace buffer
1156 + */
1157 + void trace_dump_stack(void)
1158 + {
1159 + unsigned long flags;
1160 +
1161 + if (tracing_disabled || tracing_selftest_running)
1162 + return;
1163 +
1164 + local_save_flags(flags);
1165 +
1166 + /* skipping 3 traces, seems to get us at the caller of this function */
1167 + __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
1151 1168 }
1152 1169
1153 1170 void
···
2331 2316 .write = tracing_cpumask_write,
2332 2317 };
2333 2318
2334 - static ssize_t
2335 - tracing_trace_options_read(struct file *filp, char __user *ubuf,
2336 - size_t cnt, loff_t *ppos)
2319 + static int tracing_trace_options_show(struct seq_file *m, void *v)
2337 2320 {
2338 2321 struct tracer_opt *trace_opts;
2339 2322 u32 tracer_flags;
2340 - int len = 0;
2341 - char *buf;
2342 - int r = 0;
2343 2323 int i;
2344 -
2345 -
2346 - /* calculate max size */
2347 - for (i = 0; trace_options[i]; i++) {
2348 - len += strlen(trace_options[i]);
2349 - len += 3; /* "no" and newline */
2350 - }
2351 2324
2352 2325 mutex_lock(&trace_types_lock);
2353 2326 tracer_flags = current_trace->flags->val;
2354 2327 trace_opts = current_trace->flags->opts;
2355 2328
2356 - /*
2357 - * Increase the size with names of options specific
2358 - * of the current tracer.
2359 - */
2360 - for (i = 0; trace_opts[i].name; i++) {
2361 - len += strlen(trace_opts[i].name);
2362 - len += 3; /* "no" and newline */
2363 - }
2364 -
2365 - /* +1 for \0 */
2366 - buf = kmalloc(len + 1, GFP_KERNEL);
2367 - if (!buf) {
2368 - mutex_unlock(&trace_types_lock);
2369 - return -ENOMEM;
2370 - }
2371 -
2372 2329 for (i = 0; trace_options[i]; i++) {
2373 2330 if (trace_flags & (1 << i))
2374 - r += sprintf(buf + r, "%s\n", trace_options[i]);
2331 + seq_printf(m, "%s\n", trace_options[i]);
2375 2332 else
2376 - r += sprintf(buf + r, "no%s\n", trace_options[i]);
2333 + seq_printf(m, "no%s\n", trace_options[i]);
2377 2334 }
2378 2335
2379 2336 for (i = 0; trace_opts[i].name; i++) {
2380 2337 if (tracer_flags & trace_opts[i].bit)
2381 - r += sprintf(buf + r, "%s\n",
2382 - trace_opts[i].name);
2338 + seq_printf(m, "%s\n", trace_opts[i].name);
2383 2339 else
2384 - r += sprintf(buf + r, "no%s\n",
2385 - trace_opts[i].name);
2340 + seq_printf(m, "no%s\n", trace_opts[i].name);
2386 2341 }
2387 2342 mutex_unlock(&trace_types_lock);
2388 2343
2389 - WARN_ON(r >= len + 1);
2390 -
2391 - r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2392 -
2393 - kfree(buf);
2394 - return r;
2344 + return 0;
2395 2345 }
2396 2346
2397 - /* Try to assign a tracer specific option */
2398 - static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
2347 + static int __set_tracer_option(struct tracer *trace,
2348 + struct tracer_flags *tracer_flags,
2349 + struct tracer_opt *opts, int neg)
2399 2350 {
2400 - struct tracer_flags *tracer_flags = trace->flags;
2401 - struct tracer_opt *opts = NULL;
2402 - int ret = 0, i = 0;
2403 - int len;
2351 + int ret;
2404 2352
2405 - for (i = 0; tracer_flags->opts[i].name; i++) {
2406 - opts = &tracer_flags->opts[i];
2407 - len = strlen(opts->name);
2408 -
2409 - if (strncmp(cmp, opts->name, len) == 0) {
2410 - ret = trace->set_flag(tracer_flags->val,
2411 - opts->bit, !neg);
2412 - break;
2413 - }
2414 - }
2415 - /* Not found */
2416 - if (!tracer_flags->opts[i].name)
2417 - return -EINVAL;
2418 -
2419 - /* Refused to handle */
2353 + ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
2420 2354 if (ret)
2421 2355 return ret;
2422 2356
···
2373 2409 tracer_flags->val &= ~opts->bit;
2374 2410 else
2375 2411 tracer_flags->val |= opts->bit;
2376 -
2377 2412 return 0;
2413 + }
2414 +
2415 + /* Try to assign a tracer specific option */
2416 + static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
2417 + {
2418 + struct tracer_flags *tracer_flags = trace->flags;
2419 + struct tracer_opt *opts = NULL;
2420 + int i;
2421 +
2422 + for (i = 0; tracer_flags->opts[i].name; i++) {
2423 + opts = &tracer_flags->opts[i];
2424 +
2425 + if (strcmp(cmp, opts->name) == 0)
2426 + return __set_tracer_option(trace, trace->flags,
2427 + opts, neg);
2428 + }
2429 +
2430 + return -EINVAL;
2378 2431 }
2379 2432
2380 2433 static void set_tracer_flags(unsigned int mask, int enabled)
···
2411 2430 size_t cnt, loff_t *ppos)
2412 2431 {
2413 2432 char buf[64];
2414 - char *cmp = buf;
2433 + char *cmp;
2415 2434 int neg = 0;
2416 2435 int ret;
2417 2436 int i;
···
2423 2442 return -EFAULT;
2424 2443
2425 2444 buf[cnt] = 0;
2445 + cmp = strstrip(buf);
2426 2446
2427 - if (strncmp(buf, "no", 2) == 0) {
2447 + if (strncmp(cmp, "no", 2) == 0) {
2428 2448 neg = 1;
2429 2449 cmp += 2;
2430 2450 }
2431 2451
2432 2452 for (i = 0; trace_options[i]; i++) {
2433 - int len = strlen(trace_options[i]);
2434 -
2435 - if (strncmp(cmp, trace_options[i], len) == 0) {
2453 + if (strcmp(cmp, trace_options[i]) == 0) {
2436 2454 set_tracer_flags(1 << i, !neg);
2437 2455 break;
2438 2456 }
···
2451 2471 return cnt;
2452 2472 }
2453 2473
2474 + static int tracing_trace_options_open(struct inode *inode, struct file *file)
2475 + {
2476 + if (tracing_disabled)
2477 + return -ENODEV;
2478 + return single_open(file, tracing_trace_options_show, NULL);
2479 + }
2480 +
2454 2481 static const struct file_operations tracing_iter_fops = {
2455 - .open = tracing_open_generic,
2456 - .read = tracing_trace_options_read,
2482 + .open = tracing_trace_options_open,
2483 + .read = seq_read,
2484 + .llseek = seq_lseek,
2485 + .release = single_release,
2457 2486 .write = tracing_trace_options_write,
2458 2487 };
2459 2488
···
3381 3392 return cnt;
3382 3393 }
3383 3394
3384 - static ssize_t tracing_clock_read(struct file *filp, char __user *ubuf,
3385 - size_t cnt, loff_t *ppos)
3395 + static int tracing_clock_show(struct seq_file *m, void *v)
3386 3396 {
3387 - char buf[64];
3388 - int bufiter = 0;
3389 3397 int i;
3390 3398
3391 3399 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
3392 - bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter,
3400 + seq_printf(m,
3393 3401 "%s%s%s%s", i ? " " : "",
3394 3402 i == trace_clock_id ? "[" : "", trace_clocks[i].name,
3395 3403 i == trace_clock_id ? "]" : "");
3396 - bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter, "\n");
3404 + seq_putc(m, '\n');
3397 3405
3398 - return simple_read_from_buffer(ubuf, cnt, ppos, buf, bufiter);
3406 + return 0;
3399 3407 }
3400 3408
3401 3409 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
···
3434 3448 return cnt;
3435 3449 }
3436 3450
3451 + static int tracing_clock_open(struct inode *inode, struct file *file)
3452 + {
3453 + if (tracing_disabled)
3454 + return -ENODEV;
3455 + return single_open(file, tracing_clock_show, NULL);
3456 + }
3457 +
3437 3458 static const struct file_operations tracing_max_lat_fops = {
3438 3459 .open = tracing_open_generic,
3439 3460 .read = tracing_max_lat_read,
···
3479 3486 };
3480 3487
3481 3488 static const struct file_operations trace_clock_fops = {
3482 - .open = tracing_open_generic,
3483 - .read = tracing_clock_read,
3489 + .open = tracing_clock_open,
3490 + .read = seq_read,
3491 + .llseek = seq_lseek,
3492 + .release = single_release,
3484 3493 .write = tracing_clock_write,
3485 3494 };
3486 3495
···
3943 3948 if (ret < 0)
3944 3949 return ret;
3945 3950
3946 - ret = 0;
3947 - switch (val) {
3948 - case 0:
3949 - /* do nothing if already cleared */
3950 - if (!(topt->flags->val & topt->opt->bit))
3951 - break;
3952 -
3953 - mutex_lock(&trace_types_lock);
3954 - if (current_trace->set_flag)
3955 - ret = current_trace->set_flag(topt->flags->val,
3956 - topt->opt->bit, 0);
3957 - mutex_unlock(&trace_types_lock);
3958 - if (ret)
3959 - return ret;
3960 - topt->flags->val &= ~topt->opt->bit;
3961 - break;
3962 - case 1:
3963 - /* do nothing if already set */
3964 - if (topt->flags->val & topt->opt->bit)
3965 - break;
3966 -
3967 - mutex_lock(&trace_types_lock);
3968 - if (current_trace->set_flag)
3969 - ret = current_trace->set_flag(topt->flags->val,
3970 - topt->opt->bit, 1);
3971 - mutex_unlock(&trace_types_lock);
3972 - if (ret)
3973 - return ret;
3974 - topt->flags->val |= topt->opt->bit;
3975 - break;
3976 -
3977 - default:
3951 + if (val != 0 && val != 1)
3978 3952 return -EINVAL;
3953 +
3954 + if (!!(topt->flags->val & topt->opt->bit) != val) {
3955 + mutex_lock(&trace_types_lock);
3956 + ret = __set_tracer_option(current_trace, topt->flags,
3957 + topt->opt, val);
3958 + mutex_unlock(&trace_types_lock);
3959 + if (ret)
3960 + return ret;
3979 3961 }
3980 3962
3981 3963 *ppos += cnt;
+11 -12
kernel/trace/trace.h
···
597 597 TRACE_ITER_BIN = 0x40,
598 598 TRACE_ITER_BLOCK = 0x80,
599 599 TRACE_ITER_STACKTRACE = 0x100,
600 - TRACE_ITER_SCHED_TREE = 0x200,
601 - TRACE_ITER_PRINTK = 0x400,
602 - TRACE_ITER_PREEMPTONLY = 0x800,
603 - TRACE_ITER_BRANCH = 0x1000,
604 - TRACE_ITER_ANNOTATE = 0x2000,
605 - TRACE_ITER_USERSTACKTRACE = 0x4000,
606 - TRACE_ITER_SYM_USEROBJ = 0x8000,
607 - TRACE_ITER_PRINTK_MSGONLY = 0x10000,
608 - TRACE_ITER_CONTEXT_INFO = 0x20000, /* Print pid/cpu/time */
609 - TRACE_ITER_LATENCY_FMT = 0x40000,
610 - TRACE_ITER_SLEEP_TIME = 0x80000,
611 - TRACE_ITER_GRAPH_TIME = 0x100000,
600 + TRACE_ITER_PRINTK = 0x200,
601 + TRACE_ITER_PREEMPTONLY = 0x400,
602 + TRACE_ITER_BRANCH = 0x800,
603 + TRACE_ITER_ANNOTATE = 0x1000,
604 + TRACE_ITER_USERSTACKTRACE = 0x2000,
605 + TRACE_ITER_SYM_USEROBJ = 0x4000,
606 + TRACE_ITER_PRINTK_MSGONLY = 0x8000,
607 + TRACE_ITER_CONTEXT_INFO = 0x10000, /* Print pid/cpu/time */
608 + TRACE_ITER_LATENCY_FMT = 0x20000,
609 + TRACE_ITER_SLEEP_TIME = 0x40000,
610 + TRACE_ITER_GRAPH_TIME = 0x80000,
612 611 };
613 612
614 613 /*
+3 -3
kernel/trace/trace_event_profile.c
···
25 25 char *buf;
26 26 int ret = -ENOMEM;
27 27
28 - if (atomic_inc_return(&event->profile_count))
28 + if (event->profile_count++ > 0)
29 29 return 0;
30 30
31 31 if (!total_profile_count) {
···
56 56 perf_trace_buf = NULL;
57 57 }
58 58 fail_buf:
59 - atomic_dec(&event->profile_count);
59 + event->profile_count--;
60 60
61 61 return ret;
62 62 }
···
83 83 {
84 84 char *buf, *nmi_buf;
85 85
86 - if (!atomic_add_negative(-1, &event->profile_count))
86 + if (--event->profile_count > 0)
87 87 return;
88 88
89 89 event->profile_disable(event);
+33 -8
kernel/trace/trace_events.c
···
78 78 if (ret) \
79 79 return ret;
80 80
81 - int trace_define_common_fields(struct ftrace_event_call *call)
81 + static int trace_define_common_fields(struct ftrace_event_call *call)
82 82 {
83 83 int ret;
84 84 struct trace_entry ent;
···
91 91
92 92 return ret;
93 93 }
94 - EXPORT_SYMBOL_GPL(trace_define_common_fields);
95 94
96 95 void trace_destroy_fields(struct ftrace_event_call *call)
97 96 {
···
104 105 }
105 106 }
106 107
107 - static void ftrace_event_enable_disable(struct ftrace_event_call *call,
108 + int trace_event_raw_init(struct ftrace_event_call *call)
109 + {
110 + int id;
111 +
112 + id = register_ftrace_event(call->event);
113 + if (!id)
114 + return -ENODEV;
115 + call->id = id;
116 + INIT_LIST_HEAD(&call->fields);
117 +
118 + return 0;
119 + }
120 + EXPORT_SYMBOL_GPL(trace_event_raw_init);
121 +
122 + static int ftrace_event_enable_disable(struct ftrace_event_call *call,
108 123 int enable)
109 124 {
125 + int ret = 0;
126 +
110 127 switch (enable) {
111 128 case 0:
112 129 if (call->enabled) {
···
133 118 break;
134 119 case 1:
135 120 if (!call->enabled) {
136 - call->enabled = 1;
137 121 tracing_start_cmdline_record();
138 - call->regfunc(call);
122 + ret = call->regfunc(call);
123 + if (ret) {
124 + tracing_stop_cmdline_record();
125 + pr_info("event trace: Could not enable event "
126 + "%s\n", call->name);
127 + break;
128 + }
129 + call->enabled = 1;
139 130 }
140 131 break;
141 132 }
133 +
134 + return ret;
142 135 }
143 136
144 137 static void ftrace_clear_events(void)
···
425 402 case 0:
426 403 case 1:
427 404 mutex_lock(&event_mutex);
428 - ftrace_event_enable_disable(call, val);
405 + ret = ftrace_event_enable_disable(call, val);
429 406 mutex_unlock(&event_mutex);
430 407 break;
···
435 412
436 413 *ppos += cnt;
437 414
438 - return cnt;
415 + return ret ? ret : cnt;
439 416 }
440 417
441 418 static ssize_t
···
936 913 id);
937 914
938 915 if (call->define_fields) {
939 - ret = call->define_fields(call);
916 + ret = trace_define_common_fields(call);
917 + if (!ret)
918 + ret = call->define_fields(call);
940 919 if (ret < 0) {
941 920 pr_warning("Could not initialize trace point"
942 921 " events/%s\n", call->name);
-4
kernel/trace/trace_export.c
···
184 184 struct struct_name field; \
185 185 int ret; \
186 186 \
187 - ret = trace_define_common_fields(event_call); \
188 - if (ret) \
189 - return ret; \
190 - \
191 187 tstruct; \
192 188 \
193 189 return ret; \
+2
kernel/trace/trace_irqsoff.c
···
151 151 goto out_unlock;
152 152
153 153 trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
154 + /* Skip 5 functions to get to the irq/preempt enable function */
155 + __trace_stack(tr, flags, 5, pc);
154 156
155 157 if (data->critical_sequence != max_sequence)
156 158 goto out_unlock;
-9
kernel/trace/trace_kprobe.c
···
1132 1132 struct kprobe_trace_entry field;
1133 1133 struct trace_probe *tp = (struct trace_probe *)event_call->data;
1134 1134
1135 - ret = trace_define_common_fields(event_call);
1136 - if (ret)
1137 - return ret;
1138 -
1139 1135 DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1140 1136 DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1);
1141 1137 /* Set argument names as fields */
···
1145 1149 int ret, i;
1146 1150 struct kretprobe_trace_entry field;
1147 1151 struct trace_probe *tp = (struct trace_probe *)event_call->data;
1148 -
1149 - ret = trace_define_common_fields(event_call);
1150 - if (ret)
1151 - return ret;
1152 1152
1153 1153 DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1154 1154 DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
···
1445 1453 call->unregfunc = probe_event_disable;
1446 1454
1447 1455 #ifdef CONFIG_EVENT_PROFILE
1448 - atomic_set(&call->profile_count, -1);
1449 1456 call->profile_enable = probe_profile_enable;
1450 1457 call->profile_disable = probe_profile_disable;
1451 1458 #endif
+25 -31
kernel/trace/trace_ksym.c
···
236 236 mutex_lock(&ksym_tracer_mutex);
237 237
238 238 hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) {
239 - ret = trace_seq_printf(s, "%pS:", (void *)entry->attr.bp_addr);
239 + ret = trace_seq_printf(s, "%pS:",
240 + (void *)(unsigned long)entry->attr.bp_addr);
240 241 if (entry->attr.bp_type == HW_BREAKPOINT_R)
241 242 ret = trace_seq_puts(s, "r--\n");
242 243 else if (entry->attr.bp_type == HW_BREAKPOINT_W)
···
279 278 {
280 279 struct trace_ksym *entry;
281 280 struct hlist_node *node;
282 - char *input_string, *ksymname = NULL;
281 + char *buf, *input_string, *ksymname = NULL;
283 282 unsigned long ksym_addr = 0;
284 283 int ret, op, changed = 0;
285 284
286 - input_string = kzalloc(count + 1, GFP_KERNEL);
287 - if (!input_string)
285 + buf = kzalloc(count + 1, GFP_KERNEL);
286 + if (!buf)
288 287 return -ENOMEM;
289 288
290 - if (copy_from_user(input_string, buffer, count)) {
291 - kfree(input_string);
292 - return -EFAULT;
293 - }
294 - input_string[count] = '\0';
289 + ret = -EFAULT;
290 + if (copy_from_user(buf, buffer, count))
291 + goto out;
295 292
296 - strstrip(input_string);
293 + buf[count] = '\0';
294 + input_string = strstrip(buf);
297 295
298 296 /*
299 297 * Clear all breakpoints if:
···
300 300 * 2: echo 0 > ksym_trace_filter
301 301 * 3: echo "*:---" > ksym_trace_filter
302 302 */
303 - if (!input_string[0] || !strcmp(input_string, "0") ||
304 - !strcmp(input_string, "*:---")) {
303 + if (!buf[0] || !strcmp(buf, "0") ||
304 + !strcmp(buf, "*:---")) {
305 305 __ksym_trace_reset();
306 - kfree(input_string);
307 - return count;
306 + ret = 0;
307 + goto out;
308 308 }
309 309
310 310 ret = op = parse_ksym_trace_str(input_string, &ksymname, &ksym_addr);
311 - if (ret < 0) {
312 - kfree(input_string);
313 - return ret;
314 - }
311 + if (ret < 0)
312 + goto out;
315 313
316 314 mutex_lock(&ksym_tracer_mutex);
317 315
···
320 322 if (entry->attr.bp_type != op)
321 323 changed = 1;
322 324 else
323 - goto out;
325 + goto out_unlock;
324 326 break;
325 327 }
326 328 }
···
335 337 if (IS_ERR(entry->ksym_hbp))
336 338 ret = PTR_ERR(entry->ksym_hbp);
337 339 else
338 - goto out;
340 + goto out_unlock;
339 341 }
340 342 /* Error or "symbol:---" case: drop it */
341 343 ksym_filter_entry_count--;
342 344 hlist_del_rcu(&(entry->ksym_hlist));
343 345 synchronize_rcu();
344 346 kfree(entry);
345 - goto out;
347 + goto out_unlock;
346 348 } else {
347 349 /* Check for malformed request: (4) */
348 - if (op == 0)
349 - goto out;
350 - ret = process_new_ksym_entry(ksymname, op, ksym_addr);
350 + if (op)
351 + ret = process_new_ksym_entry(ksymname, op, ksym_addr);
351 352 }
352 - out:
353 + out_unlock:
353 354 mutex_unlock(&ksym_tracer_mutex);
354 -
355 - kfree(input_string);
356 -
357 - if (!ret)
358 - ret = count;
359 - return ret;
355 + out:
356 + kfree(buf);
357 + return !ret ? count : ret;
360 358 }
361 359
362 360 static const struct file_operations ksym_tracing_fops = {
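The trace_ksyms fix above boils down to the classic strstrip() pitfall: the helper strips trailing whitespace in place but skips leading whitespace by returning an offset pointer, so discarding the return value leaves leading spaces in front of the string being parsed. A tiny illustration (not from the patch; the function is hypothetical):

#include <linux/string.h>

static void strstrip_example(void)
{
	char buf[] = "  symbol_name:rw-  ";
	char *input;

	strstrip(buf);		/* buggy: buf still points at the leading spaces */
	input = strstrip(buf);	/* fixed: parse via the returned pointer instead */
	(void)input;
}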
+2 -16
kernel/trace/trace_syscalls.c
···
217 217 int i;
218 218 int offset = offsetof(typeof(trace), args);
219 219
220 - ret = trace_define_common_fields(call);
221 - if (ret)
222 - return ret;
223 -
224 220 ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
225 221 if (ret)
226 222 return ret;
···
236 240 {
237 241 struct syscall_trace_exit trace;
238 242 int ret;
239 -
240 - ret = trace_define_common_fields(call);
241 - if (ret)
242 - return ret;
243 243
244 244 ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
245 245 if (ret)
···
325 333 mutex_lock(&syscall_trace_lock);
326 334 if (!sys_refcount_enter)
327 335 ret = register_trace_sys_enter(ftrace_syscall_enter);
328 - if (ret) {
329 - pr_info("event trace: Could not activate"
330 - "syscall entry trace point");
331 - } else {
336 + if (!ret) {
332 337 set_bit(num, enabled_enter_syscalls);
333 338 sys_refcount_enter++;
334 339 }
···
359 370 mutex_lock(&syscall_trace_lock);
360 371 if (!sys_refcount_exit)
361 372 ret = register_trace_sys_exit(ftrace_syscall_exit);
362 - if (ret) {
363 - pr_info("event trace: Could not activate"
364 - "syscall exit trace point");
365 - } else {
373 + if (!ret) {
366 374 set_bit(num, enabled_exit_syscalls);
367 375 sys_refcount_exit++;
368 376 }