Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into perf/core

Pull tracing updates from Steve Rostedt.

Signed-off-by: Ingo Molnar <mingo@kernel.org>

+288 -243
+83
Documentation/trace/ftrace.txt
··· 1842 1842 # cat buffer_size_kb 1843 1843 85 1844 1844 1845 + Snapshot 1846 + -------- 1847 + CONFIG_TRACER_SNAPSHOT makes a generic snapshot feature 1848 + available to all non latency tracers. (Latency tracers which 1849 + record max latency, such as "irqsoff" or "wakeup", can't use 1850 + this feature, since those are already using the snapshot 1851 + mechanism internally.) 1852 + 1853 + Snapshot preserves a current trace buffer at a particular point 1854 + in time without stopping tracing. Ftrace swaps the current 1855 + buffer with a spare buffer, and tracing continues in the new 1856 + current (=previous spare) buffer. 1857 + 1858 + The following debugfs files in "tracing" are related to this 1859 + feature: 1860 + 1861 + snapshot: 1862 + 1863 + This is used to take a snapshot and to read the output 1864 + of the snapshot. Echo 1 into this file to allocate a 1865 + spare buffer and to take a snapshot (swap), then read 1866 + the snapshot from this file in the same format as 1867 + "trace" (described above in the section "The File 1868 + System"). Both reads snapshot and tracing are executable 1869 + in parallel. When the spare buffer is allocated, echoing 1870 + 0 frees it, and echoing else (positive) values clear the 1871 + snapshot contents. 1872 + More details are shown in the table below. 1873 + 1874 + status\input | 0 | 1 | else | 1875 + --------------+------------+------------+------------+ 1876 + not allocated |(do nothing)| alloc+swap | EINVAL | 1877 + --------------+------------+------------+------------+ 1878 + allocated | free | swap | clear | 1879 + --------------+------------+------------+------------+ 1880 + 1881 + Here is an example of using the snapshot feature. 
1882 + 1883 + # echo 1 > events/sched/enable 1884 + # echo 1 > snapshot 1885 + # cat snapshot 1886 + # tracer: nop 1887 + # 1888 + # entries-in-buffer/entries-written: 71/71 #P:8 1889 + # 1890 + # _-----=> irqs-off 1891 + # / _----=> need-resched 1892 + # | / _---=> hardirq/softirq 1893 + # || / _--=> preempt-depth 1894 + # ||| / delay 1895 + # TASK-PID CPU# |||| TIMESTAMP FUNCTION 1896 + # | | | |||| | | 1897 + <idle>-0 [005] d... 2440.603828: sched_switch: prev_comm=swapper/5 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=snapshot-test-2 next_pid=2242 next_prio=120 1898 + sleep-2242 [005] d... 2440.603846: sched_switch: prev_comm=snapshot-test-2 prev_pid=2242 prev_prio=120 prev_state=R ==> next_comm=kworker/5:1 next_pid=60 next_prio=120 1899 + [...] 1900 + <idle>-0 [002] d... 2440.707230: sched_switch: prev_comm=swapper/2 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=snapshot-test-2 next_pid=2229 next_prio=120 1901 + 1902 + # cat trace 1903 + # tracer: nop 1904 + # 1905 + # entries-in-buffer/entries-written: 77/77 #P:8 1906 + # 1907 + # _-----=> irqs-off 1908 + # / _----=> need-resched 1909 + # | / _---=> hardirq/softirq 1910 + # || / _--=> preempt-depth 1911 + # ||| / delay 1912 + # TASK-PID CPU# |||| TIMESTAMP FUNCTION 1913 + # | | | |||| | | 1914 + <idle>-0 [007] d... 2440.707395: sched_switch: prev_comm=swapper/7 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=snapshot-test-2 next_pid=2243 next_prio=120 1915 + snapshot-test-2-2229 [002] d... 2440.707438: sched_switch: prev_comm=snapshot-test-2 prev_pid=2229 prev_prio=120 prev_state=S ==> next_comm=swapper/2 next_pid=0 next_prio=120 1916 + [...] 1917 + 1918 + 1919 + If you try to use this snapshot feature when current tracer is 1920 + one of the latency tracers, you will get the following results. 
1921 + 1922 + # echo wakeup > current_tracer 1923 + # echo 1 > snapshot 1924 + bash: echo: write error: Device or resource busy 1925 + # cat snapshot 1926 + cat: snapshot: Device or resource busy 1927 + 1845 1928 ----------- 1846 1929 1847 1930 More details can be found in the source code, in the
+3
include/linux/ftrace_event.h
··· 83 83 long idx; 84 84 85 85 cpumask_var_t started; 86 + 87 + /* it's true when current open file is snapshot */ 88 + bool snapshot; 86 89 }; 87 90 88 91 enum trace_iter_flags {
+1
include/linux/ring_buffer.h
··· 167 167 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu); 168 168 unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu); 169 169 unsigned long ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu); 170 + unsigned long ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu); 170 171 171 172 u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu); 172 173 void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
+10
kernel/trace/Kconfig
··· 253 253 help 254 254 Basic tracer to catch the syscall entry and exit events. 255 255 256 + config TRACER_SNAPSHOT 257 + bool "Create a snapshot trace buffer" 258 + select TRACER_MAX_TRACE 259 + help 260 + Allow tracing users to take snapshot of the current buffer using the 261 + ftrace interface, e.g.: 262 + 263 + echo 1 > /sys/kernel/debug/tracing/snapshot 264 + cat snapshot 265 + 256 266 config TRACE_BRANCH_PROFILING 257 267 bool 258 268 select GENERIC_TRACER
+18
kernel/trace/ring_buffer.c
··· 3103 3103 EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu); 3104 3104 3105 3105 /** 3106 + * ring_buffer_read_events_cpu - get the number of events successfully read 3107 + * @buffer: The ring buffer 3108 + * @cpu: The per CPU buffer to get the number of events read 3109 + */ 3110 + unsigned long 3111 + ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu) 3112 + { 3113 + struct ring_buffer_per_cpu *cpu_buffer; 3114 + 3115 + if (!cpumask_test_cpu(cpu, buffer->cpumask)) 3116 + return 0; 3117 + 3118 + cpu_buffer = buffer->buffers[cpu]; 3119 + return cpu_buffer->read; 3120 + } 3121 + EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu); 3122 + 3123 + /** 3106 3124 * ring_buffer_entries - get the number of entries in a buffer 3107 3125 * @buffer: The ring buffer 3108 3126 *
+163 -59
kernel/trace/trace.c
··· 249 249 static struct tracer *trace_types __read_mostly; 250 250 251 251 /* current_trace points to the tracer that is currently active */ 252 - static struct tracer *current_trace __read_mostly; 252 + static struct tracer *current_trace __read_mostly = &nop_trace; 253 253 254 254 /* 255 255 * trace_types_lock is used to protect the trace_types list. ··· 710 710 711 711 WARN_ON_ONCE(!irqs_disabled()); 712 712 713 - /* If we disabled the tracer, stop now */ 714 - if (current_trace == &nop_trace) 713 + if (!current_trace->allocated_snapshot) { 714 + /* Only the nop tracer should hit this when disabling */ 715 + WARN_ON_ONCE(current_trace != &nop_trace); 715 716 return; 716 - 717 - if (WARN_ON_ONCE(!current_trace->use_max_tr)) 718 - return; 717 + } 719 718 720 719 arch_spin_lock(&ftrace_max_lock); 721 720 ··· 742 743 return; 743 744 744 745 WARN_ON_ONCE(!irqs_disabled()); 745 - if (!current_trace->use_max_tr) { 746 - WARN_ON_ONCE(1); 746 + if (WARN_ON_ONCE(!current_trace->allocated_snapshot)) 747 747 return; 748 - } 749 748 750 749 arch_spin_lock(&ftrace_max_lock); 751 750 ··· 863 866 864 867 current_trace = type; 865 868 866 - /* If we expanded the buffers, make sure the max is expanded too */ 867 - if (ring_buffer_expanded && type->use_max_tr) 868 - ring_buffer_resize(max_tr.buffer, trace_buf_size, 869 - RING_BUFFER_ALL_CPUS); 869 + if (type->use_max_tr) { 870 + /* If we expanded the buffers, make sure the max is expanded too */ 871 + if (ring_buffer_expanded) 872 + ring_buffer_resize(max_tr.buffer, trace_buf_size, 873 + RING_BUFFER_ALL_CPUS); 874 + type->allocated_snapshot = true; 875 + } 870 876 871 877 /* the test is responsible for initializing and enabling */ 872 878 pr_info("Testing tracer %s: ", type->name); ··· 885 885 /* Only reset on passing, to avoid touching corrupted buffers */ 886 886 tracing_reset_online_cpus(tr); 887 887 888 - /* Shrink the max buffer again */ 889 - if (ring_buffer_expanded && type->use_max_tr) 890 - 
ring_buffer_resize(max_tr.buffer, 1, 891 - RING_BUFFER_ALL_CPUS); 888 + if (type->use_max_tr) { 889 + type->allocated_snapshot = false; 890 + 891 + /* Shrink the max buffer again */ 892 + if (ring_buffer_expanded) 893 + ring_buffer_resize(max_tr.buffer, 1, 894 + RING_BUFFER_ALL_CPUS); 895 + } 892 896 893 897 printk(KERN_CONT "PASSED\n"); 894 898 } ··· 1348 1344 */ 1349 1345 preempt_disable_notrace(); 1350 1346 1351 - use_stack = ++__get_cpu_var(ftrace_stack_reserve); 1347 + use_stack = __this_cpu_inc_return(ftrace_stack_reserve); 1352 1348 /* 1353 1349 * We don't need any atomic variables, just a barrier. 1354 1350 * If an interrupt comes in, we don't care, because it would ··· 1402 1398 out: 1403 1399 /* Again, don't let gcc optimize things here */ 1404 1400 barrier(); 1405 - __get_cpu_var(ftrace_stack_reserve)--; 1401 + __this_cpu_dec(ftrace_stack_reserve); 1406 1402 preempt_enable_notrace(); 1407 1403 1408 1404 } ··· 1952 1948 static void *s_start(struct seq_file *m, loff_t *pos) 1953 1949 { 1954 1950 struct trace_iterator *iter = m->private; 1955 - static struct tracer *old_tracer; 1956 1951 int cpu_file = iter->cpu_file; 1957 1952 void *p = NULL; 1958 1953 loff_t l = 0; 1959 1954 int cpu; 1960 1955 1961 - /* copy the tracer to avoid using a global lock all around */ 1956 + /* 1957 + * copy the tracer to avoid using a global lock all around. 1958 + * iter->trace is a copy of current_trace, the pointer to the 1959 + * name may be used instead of a strcmp(), as iter->trace->name 1960 + * will point to the same string as current_trace->name. 
1961 + */ 1962 1962 mutex_lock(&trace_types_lock); 1963 - if (unlikely(old_tracer != current_trace && current_trace)) { 1964 - old_tracer = current_trace; 1963 + if (unlikely(current_trace && iter->trace->name != current_trace->name)) 1965 1964 *iter->trace = *current_trace; 1966 - } 1967 1965 mutex_unlock(&trace_types_lock); 1968 1966 1969 - atomic_inc(&trace_record_cmdline_disabled); 1967 + if (iter->snapshot && iter->trace->use_max_tr) 1968 + return ERR_PTR(-EBUSY); 1969 + 1970 + if (!iter->snapshot) 1971 + atomic_inc(&trace_record_cmdline_disabled); 1970 1972 1971 1973 if (*pos != iter->pos) { 1972 1974 iter->ent = NULL; ··· 2011 2001 { 2012 2002 struct trace_iterator *iter = m->private; 2013 2003 2014 - atomic_dec(&trace_record_cmdline_disabled); 2004 + if (iter->snapshot && iter->trace->use_max_tr) 2005 + return; 2006 + 2007 + if (!iter->snapshot) 2008 + atomic_dec(&trace_record_cmdline_disabled); 2015 2009 trace_access_unlock(iter->cpu_file); 2016 2010 trace_event_read_unlock(); 2017 2011 } ··· 2100 2086 unsigned long total; 2101 2087 const char *name = "preemption"; 2102 2088 2103 - if (type) 2104 - name = type->name; 2089 + name = type->name; 2105 2090 2106 2091 get_total_entries(tr, &total, &entries); 2107 2092 ··· 2449 2436 }; 2450 2437 2451 2438 static struct trace_iterator * 2452 - __tracing_open(struct inode *inode, struct file *file) 2439 + __tracing_open(struct inode *inode, struct file *file, bool snapshot) 2453 2440 { 2454 2441 long cpu_file = (long) inode->i_private; 2455 2442 struct trace_iterator *iter; ··· 2476 2463 if (!iter->trace) 2477 2464 goto fail; 2478 2465 2479 - if (current_trace) 2480 - *iter->trace = *current_trace; 2466 + *iter->trace = *current_trace; 2481 2467 2482 2468 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) 2483 2469 goto fail; 2484 2470 2485 - if (current_trace && current_trace->print_max) 2471 + if (current_trace->print_max || snapshot) 2486 2472 iter->tr = &max_tr; 2487 2473 else 2488 2474 iter->tr = 
&global_trace; 2475 + iter->snapshot = snapshot; 2489 2476 iter->pos = -1; 2490 2477 mutex_init(&iter->mutex); 2491 2478 iter->cpu_file = cpu_file; ··· 2502 2489 if (trace_clocks[trace_clock_id].in_ns) 2503 2490 iter->iter_flags |= TRACE_FILE_TIME_IN_NS; 2504 2491 2505 - /* stop the trace while dumping */ 2506 - tracing_stop(); 2492 + /* stop the trace while dumping if we are not opening "snapshot" */ 2493 + if (!iter->snapshot) 2494 + tracing_stop(); 2507 2495 2508 2496 if (iter->cpu_file == TRACE_PIPE_ALL_CPU) { 2509 2497 for_each_tracing_cpu(cpu) { ··· 2567 2553 if (iter->trace && iter->trace->close) 2568 2554 iter->trace->close(iter); 2569 2555 2570 - /* reenable tracing if it was previously enabled */ 2571 - tracing_start(); 2556 + if (!iter->snapshot) 2557 + /* reenable tracing if it was previously enabled */ 2558 + tracing_start(); 2572 2559 mutex_unlock(&trace_types_lock); 2573 2560 2574 2561 mutex_destroy(&iter->mutex); ··· 2597 2582 } 2598 2583 2599 2584 if (file->f_mode & FMODE_READ) { 2600 - iter = __tracing_open(inode, file); 2585 + iter = __tracing_open(inode, file, false); 2601 2586 if (IS_ERR(iter)) 2602 2587 ret = PTR_ERR(iter); 2603 2588 else if (trace_flags & TRACE_ITER_LATENCY_FMT) ··· 3035 3020 int r; 3036 3021 3037 3022 mutex_lock(&trace_types_lock); 3038 - if (current_trace) 3039 - r = sprintf(buf, "%s\n", current_trace->name); 3040 - else 3041 - r = sprintf(buf, "\n"); 3023 + r = sprintf(buf, "%s\n", current_trace->name); 3042 3024 mutex_unlock(&trace_types_lock); 3043 3025 3044 3026 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); ··· 3226 3214 goto out; 3227 3215 3228 3216 trace_branch_disable(); 3229 - if (current_trace && current_trace->reset) 3217 + if (current_trace->reset) 3230 3218 current_trace->reset(tr); 3231 3219 3232 - had_max_tr = current_trace && current_trace->use_max_tr; 3220 + had_max_tr = current_trace->allocated_snapshot; 3233 3221 current_trace = &nop_trace; 3234 3222 3235 3223 if (had_max_tr && !t->use_max_tr) { 
··· 3248 3236 */ 3249 3237 ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS); 3250 3238 set_buffer_entries(&max_tr, 1); 3239 + tracing_reset_online_cpus(&max_tr); 3240 + current_trace->allocated_snapshot = false; 3251 3241 } 3252 3242 destroy_trace_option_files(topts); 3253 3243 ··· 3260 3246 RING_BUFFER_ALL_CPUS); 3261 3247 if (ret < 0) 3262 3248 goto out; 3249 + t->allocated_snapshot = true; 3263 3250 } 3264 3251 3265 3252 if (t->init) { ··· 3368 3353 ret = -ENOMEM; 3369 3354 goto fail; 3370 3355 } 3371 - if (current_trace) 3372 - *iter->trace = *current_trace; 3356 + *iter->trace = *current_trace; 3373 3357 3374 3358 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { 3375 3359 ret = -ENOMEM; ··· 3508 3494 size_t cnt, loff_t *ppos) 3509 3495 { 3510 3496 struct trace_iterator *iter = filp->private_data; 3511 - static struct tracer *old_tracer; 3512 3497 ssize_t sret; 3513 3498 3514 3499 /* return any leftover data */ ··· 3519 3506 3520 3507 /* copy the tracer to avoid using a global lock all around */ 3521 3508 mutex_lock(&trace_types_lock); 3522 - if (unlikely(old_tracer != current_trace && current_trace)) { 3523 - old_tracer = current_trace; 3509 + if (unlikely(iter->trace->name != current_trace->name)) 3524 3510 *iter->trace = *current_trace; 3525 - } 3526 3511 mutex_unlock(&trace_types_lock); 3527 3512 3528 3513 /* ··· 3676 3665 .ops = &tracing_pipe_buf_ops, 3677 3666 .spd_release = tracing_spd_release_pipe, 3678 3667 }; 3679 - static struct tracer *old_tracer; 3680 3668 ssize_t ret; 3681 3669 size_t rem; 3682 3670 unsigned int i; ··· 3685 3675 3686 3676 /* copy the tracer to avoid using a global lock all around */ 3687 3677 mutex_lock(&trace_types_lock); 3688 - if (unlikely(old_tracer != current_trace && current_trace)) { 3689 - old_tracer = current_trace; 3678 + if (unlikely(iter->trace->name != current_trace->name)) 3690 3679 *iter->trace = *current_trace; 3691 - } 3692 3680 mutex_unlock(&trace_types_lock); 3693 3681 3694 3682 
mutex_lock(&iter->mutex); ··· 4078 4070 return single_open(file, tracing_clock_show, NULL); 4079 4071 } 4080 4072 4073 + #ifdef CONFIG_TRACER_SNAPSHOT 4074 + static int tracing_snapshot_open(struct inode *inode, struct file *file) 4075 + { 4076 + struct trace_iterator *iter; 4077 + int ret = 0; 4078 + 4079 + if (file->f_mode & FMODE_READ) { 4080 + iter = __tracing_open(inode, file, true); 4081 + if (IS_ERR(iter)) 4082 + ret = PTR_ERR(iter); 4083 + } 4084 + return ret; 4085 + } 4086 + 4087 + static ssize_t 4088 + tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, 4089 + loff_t *ppos) 4090 + { 4091 + unsigned long val; 4092 + int ret; 4093 + 4094 + ret = tracing_update_buffers(); 4095 + if (ret < 0) 4096 + return ret; 4097 + 4098 + ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 4099 + if (ret) 4100 + return ret; 4101 + 4102 + mutex_lock(&trace_types_lock); 4103 + 4104 + if (current_trace->use_max_tr) { 4105 + ret = -EBUSY; 4106 + goto out; 4107 + } 4108 + 4109 + switch (val) { 4110 + case 0: 4111 + if (current_trace->allocated_snapshot) { 4112 + /* free spare buffer */ 4113 + ring_buffer_resize(max_tr.buffer, 1, 4114 + RING_BUFFER_ALL_CPUS); 4115 + set_buffer_entries(&max_tr, 1); 4116 + tracing_reset_online_cpus(&max_tr); 4117 + current_trace->allocated_snapshot = false; 4118 + } 4119 + break; 4120 + case 1: 4121 + if (!current_trace->allocated_snapshot) { 4122 + /* allocate spare buffer */ 4123 + ret = resize_buffer_duplicate_size(&max_tr, 4124 + &global_trace, RING_BUFFER_ALL_CPUS); 4125 + if (ret < 0) 4126 + break; 4127 + current_trace->allocated_snapshot = true; 4128 + } 4129 + 4130 + local_irq_disable(); 4131 + /* Now, we're going to swap */ 4132 + update_max_tr(&global_trace, current, smp_processor_id()); 4133 + local_irq_enable(); 4134 + break; 4135 + default: 4136 + if (current_trace->allocated_snapshot) 4137 + tracing_reset_online_cpus(&max_tr); 4138 + else 4139 + ret = -EINVAL; 4140 + break; 4141 + } 4142 + 4143 + if (ret >= 0) { 
4144 + *ppos += cnt; 4145 + ret = cnt; 4146 + } 4147 + out: 4148 + mutex_unlock(&trace_types_lock); 4149 + return ret; 4150 + } 4151 + #endif /* CONFIG_TRACER_SNAPSHOT */ 4152 + 4153 + 4081 4154 static const struct file_operations tracing_max_lat_fops = { 4082 4155 .open = tracing_open_generic, 4083 4156 .read = tracing_max_lat_read, ··· 4214 4125 .release = single_release, 4215 4126 .write = tracing_clock_write, 4216 4127 }; 4128 + 4129 + #ifdef CONFIG_TRACER_SNAPSHOT 4130 + static const struct file_operations snapshot_fops = { 4131 + .open = tracing_snapshot_open, 4132 + .read = seq_read, 4133 + .write = tracing_snapshot_write, 4134 + .llseek = tracing_seek, 4135 + .release = tracing_release, 4136 + }; 4137 + #endif /* CONFIG_TRACER_SNAPSHOT */ 4217 4138 4218 4139 struct ftrace_buffer_info { 4219 4140 struct trace_array *tr; ··· 4529 4430 cnt = ring_buffer_dropped_events_cpu(tr->buffer, cpu); 4530 4431 trace_seq_printf(s, "dropped events: %ld\n", cnt); 4531 4432 4433 + cnt = ring_buffer_read_events_cpu(tr->buffer, cpu); 4434 + trace_seq_printf(s, "read events: %ld\n", cnt); 4435 + 4532 4436 count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len); 4533 4437 4534 4438 kfree(s); ··· 4608 4506 4609 4507 static struct dentry *d_percpu; 4610 4508 4611 - struct dentry *tracing_dentry_percpu(void) 4509 + static struct dentry *tracing_dentry_percpu(void) 4612 4510 { 4613 4511 static int once; 4614 4512 struct dentry *d_tracer; ··· 5024 4922 &ftrace_update_tot_cnt, &tracing_dyn_info_fops); 5025 4923 #endif 5026 4924 4925 + #ifdef CONFIG_TRACER_SNAPSHOT 4926 + trace_create_file("snapshot", 0644, d_tracer, 4927 + (void *) TRACE_PIPE_ALL_CPU, &snapshot_fops); 4928 + #endif 4929 + 5027 4930 create_trace_options_dir(); 5028 4931 5029 4932 for_each_tracing_cpu(cpu) ··· 5137 5030 if (disable_tracing) 5138 5031 ftrace_kill(); 5139 5032 5033 + /* Simulate the iterator */ 5140 5034 trace_init_global_iter(&iter); 5141 5035 5142 5036 for_each_tracing_cpu(cpu) { ··· 5148 
5040 5149 5041 /* don't look at user memory in panic mode */ 5150 5042 trace_flags &= ~TRACE_ITER_SYM_USEROBJ; 5151 - 5152 - /* Simulate the iterator */ 5153 - iter.tr = &global_trace; 5154 - iter.trace = current_trace; 5155 5043 5156 5044 switch (oops_dump_mode) { 5157 5045 case DUMP_ALL: ··· 5293 5189 init_irq_work(&trace_work_wakeup, trace_wake_up); 5294 5190 5295 5191 register_tracer(&nop_trace); 5296 - current_trace = &nop_trace; 5192 + 5297 5193 /* All seems OK, enable tracing */ 5298 5194 tracing_disabled = 0; 5299 5195
+1
kernel/trace/trace.h
··· 287 287 struct tracer_flags *flags; 288 288 bool print_max; 289 289 bool use_max_tr; 290 + bool allocated_snapshot; 290 291 }; 291 292 292 293
+1 -1
kernel/trace/trace_clock.c
··· 84 84 local_irq_save(flags); 85 85 86 86 this_cpu = raw_smp_processor_id(); 87 - now = cpu_clock(this_cpu); 87 + now = sched_clock_cpu(this_cpu); 88 88 /* 89 89 * If in an NMI context then dont risk lockups and return the 90 90 * cpu_clock() time:
+7 -1
kernel/trace/trace_functions_graph.c
··· 191 191 192 192 ftrace_pop_return_trace(&trace, &ret, frame_pointer); 193 193 trace.rettime = trace_clock_local(); 194 - ftrace_graph_return(&trace); 195 194 barrier(); 196 195 current->curr_ret_stack--; 196 + 197 + /* 198 + * The trace should run after decrementing the ret counter 199 + * in case an interrupt were to come in. We don't want to 200 + * lose the interrupt if max_depth is set. 201 + */ 202 + ftrace_graph_return(&trace); 197 203 198 204 if (unlikely(!ret)) { 199 205 ftrace_graph_stop();
-6
samples/Kconfig
··· 5 5 6 6 if SAMPLES 7 7 8 - config SAMPLE_TRACEPOINTS 9 - tristate "Build tracepoints examples -- loadable modules only" 10 - depends on TRACEPOINTS && m 11 - help 12 - This build tracepoints example modules. 13 - 14 8 config SAMPLE_TRACE_EVENTS 15 9 tristate "Build trace_events examples -- loadable modules only" 16 10 depends on EVENT_TRACING && m
+1 -1
samples/Makefile
··· 1 1 # Makefile for Linux samples code 2 2 3 - obj-$(CONFIG_SAMPLES) += kobject/ kprobes/ tracepoints/ trace_events/ \ 3 + obj-$(CONFIG_SAMPLES) += kobject/ kprobes/ trace_events/ \ 4 4 hw_breakpoint/ kfifo/ kdb/ hidraw/ rpmsg/ seccomp/
-6
samples/tracepoints/Makefile
··· 1 - # builds the tracepoint example kernel modules; 2 - # then to use one (as root): insmod <module_name.ko> 3 - 4 - obj-$(CONFIG_SAMPLE_TRACEPOINTS) += tracepoint-sample.o 5 - obj-$(CONFIG_SAMPLE_TRACEPOINTS) += tracepoint-probe-sample.o 6 - obj-$(CONFIG_SAMPLE_TRACEPOINTS) += tracepoint-probe-sample2.o
-11
samples/tracepoints/tp-samples-trace.h
··· 1 - #ifndef _TP_SAMPLES_TRACE_H 2 - #define _TP_SAMPLES_TRACE_H 3 - 4 - #include <linux/proc_fs.h> /* for struct inode and struct file */ 5 - #include <linux/tracepoint.h> 6 - 7 - DECLARE_TRACE(subsys_event, 8 - TP_PROTO(struct inode *inode, struct file *file), 9 - TP_ARGS(inode, file)); 10 - DECLARE_TRACE_NOARGS(subsys_eventb); 11 - #endif
-57
samples/tracepoints/tracepoint-probe-sample.c
··· 1 - /* 2 - * tracepoint-probe-sample.c 3 - * 4 - * sample tracepoint probes. 5 - */ 6 - 7 - #include <linux/module.h> 8 - #include <linux/file.h> 9 - #include <linux/dcache.h> 10 - #include "tp-samples-trace.h" 11 - 12 - /* 13 - * Here the caller only guarantees locking for struct file and struct inode. 14 - * Locking must therefore be done in the probe to use the dentry. 15 - */ 16 - static void probe_subsys_event(void *ignore, 17 - struct inode *inode, struct file *file) 18 - { 19 - path_get(&file->f_path); 20 - dget(file->f_path.dentry); 21 - printk(KERN_INFO "Event is encountered with filename %s\n", 22 - file->f_path.dentry->d_name.name); 23 - dput(file->f_path.dentry); 24 - path_put(&file->f_path); 25 - } 26 - 27 - static void probe_subsys_eventb(void *ignore) 28 - { 29 - printk(KERN_INFO "Event B is encountered\n"); 30 - } 31 - 32 - static int __init tp_sample_trace_init(void) 33 - { 34 - int ret; 35 - 36 - ret = register_trace_subsys_event(probe_subsys_event, NULL); 37 - WARN_ON(ret); 38 - ret = register_trace_subsys_eventb(probe_subsys_eventb, NULL); 39 - WARN_ON(ret); 40 - 41 - return 0; 42 - } 43 - 44 - module_init(tp_sample_trace_init); 45 - 46 - static void __exit tp_sample_trace_exit(void) 47 - { 48 - unregister_trace_subsys_eventb(probe_subsys_eventb, NULL); 49 - unregister_trace_subsys_event(probe_subsys_event, NULL); 50 - tracepoint_synchronize_unregister(); 51 - } 52 - 53 - module_exit(tp_sample_trace_exit); 54 - 55 - MODULE_LICENSE("GPL"); 56 - MODULE_AUTHOR("Mathieu Desnoyers"); 57 - MODULE_DESCRIPTION("Tracepoint Probes Samples");
-44
samples/tracepoints/tracepoint-probe-sample2.c
··· 1 - /* 2 - * tracepoint-probe-sample2.c 3 - * 4 - * 2nd sample tracepoint probes. 5 - */ 6 - 7 - #include <linux/module.h> 8 - #include <linux/fs.h> 9 - #include "tp-samples-trace.h" 10 - 11 - /* 12 - * Here the caller only guarantees locking for struct file and struct inode. 13 - * Locking must therefore be done in the probe to use the dentry. 14 - */ 15 - static void probe_subsys_event(void *ignore, 16 - struct inode *inode, struct file *file) 17 - { 18 - printk(KERN_INFO "Event is encountered with inode number %lu\n", 19 - inode->i_ino); 20 - } 21 - 22 - static int __init tp_sample_trace_init(void) 23 - { 24 - int ret; 25 - 26 - ret = register_trace_subsys_event(probe_subsys_event, NULL); 27 - WARN_ON(ret); 28 - 29 - return 0; 30 - } 31 - 32 - module_init(tp_sample_trace_init); 33 - 34 - static void __exit tp_sample_trace_exit(void) 35 - { 36 - unregister_trace_subsys_event(probe_subsys_event, NULL); 37 - tracepoint_synchronize_unregister(); 38 - } 39 - 40 - module_exit(tp_sample_trace_exit); 41 - 42 - MODULE_LICENSE("GPL"); 43 - MODULE_AUTHOR("Mathieu Desnoyers"); 44 - MODULE_DESCRIPTION("Tracepoint Probes Samples");
-57
samples/tracepoints/tracepoint-sample.c
··· 1 - /* tracepoint-sample.c 2 - * 3 - * Executes a tracepoint when /proc/tracepoint-sample is opened. 4 - * 5 - * (C) Copyright 2007 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca> 6 - * 7 - * This file is released under the GPLv2. 8 - * See the file COPYING for more details. 9 - */ 10 - 11 - #include <linux/module.h> 12 - #include <linux/sched.h> 13 - #include <linux/proc_fs.h> 14 - #include "tp-samples-trace.h" 15 - 16 - DEFINE_TRACE(subsys_event); 17 - DEFINE_TRACE(subsys_eventb); 18 - 19 - struct proc_dir_entry *pentry_sample; 20 - 21 - static int my_open(struct inode *inode, struct file *file) 22 - { 23 - int i; 24 - 25 - trace_subsys_event(inode, file); 26 - for (i = 0; i < 10; i++) 27 - trace_subsys_eventb(); 28 - return -EPERM; 29 - } 30 - 31 - static const struct file_operations mark_ops = { 32 - .open = my_open, 33 - .llseek = noop_llseek, 34 - }; 35 - 36 - static int __init sample_init(void) 37 - { 38 - printk(KERN_ALERT "sample init\n"); 39 - pentry_sample = proc_create("tracepoint-sample", 0444, NULL, 40 - &mark_ops); 41 - if (!pentry_sample) 42 - return -EPERM; 43 - return 0; 44 - } 45 - 46 - static void __exit sample_exit(void) 47 - { 48 - printk(KERN_ALERT "sample exit\n"); 49 - remove_proc_entry("tracepoint-sample", NULL); 50 - } 51 - 52 - module_init(sample_init) 53 - module_exit(sample_exit) 54 - 55 - MODULE_LICENSE("GPL"); 56 - MODULE_AUTHOR("Mathieu Desnoyers"); 57 - MODULE_DESCRIPTION("Tracepoint sample");