Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  ftrace: prevent recursion
  tracing, doc: update mmiotrace documentation
  x86, mmiotrace: fix buffer overrun detection
  function tracing: fix wrong position computing of stack_trace

4 files changed, +27 -21
Documentation/tracers/mmiotrace.txt (+4 -2)
···
 $ cat /debug/tracing/trace_pipe > mydump.txt &
 Start X or whatever.
 $ echo "X is up" > /debug/tracing/trace_marker
-$ echo none > /debug/tracing/current_tracer
+$ echo nop > /debug/tracing/current_tracer
 Check for lost events.
 
···
 do.
 
 Shut down mmiotrace (requires root privileges):
-$ echo none > /debug/tracing/current_tracer
+$ echo nop > /debug/tracing/current_tracer
 The 'cat' process exits. If it does not, kill it by issuing 'fg' command and
 pressing ctrl+c.
 
···
 $ cat /debug/tracing/trace_entries
 gives you a number. Approximately double this number and write it back, for
 instance:
+$ echo 0 > /debug/tracing/tracing_enabled
 $ echo 128000 > /debug/tracing/trace_entries
+$ echo 1 > /debug/tracing/tracing_enabled
 Then start again from the top.
 
 If you are doing a trace for a driver project, e.g. Nouveau, you should also
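Note on the doc fix: ftrace's do-nothing tracer is named "nop", not "none", so writing "none" to current_tracer would not actually stop tracing. The added writes to tracing_enabled around the trace_entries write reflect that the ring buffer can apparently only be resized while tracing is disabled.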
kernel/trace/ring_buffer.c (+1 -1)
···
 out:
 	if (resched)
-		preempt_enable_notrace();
+		preempt_enable_no_resched_notrace();
 	else
 		preempt_enable_notrace();
 	return NULL;
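This one-liner is the "ftrace: prevent recursion" fix: the old failure path re-enabled preemption with the resched check in both branches, so a pending reschedule could be serviced from inside the tracer. A minimal sketch of the underlying idiom, with hypothetical trace_preempt_* helper names chosen for illustration (not necessarily the kernel's own helpers):

/*
 * Sketch only: why the error path above must not run the resched check.
 */
static inline int trace_preempt_disable(void)
{
	/* Remember whether a reschedule was already pending. */
	int resched = need_resched();

	preempt_disable_notrace();
	return resched;
}

static inline void trace_preempt_enable(int resched)
{
	if (resched)
		/*
		 * NEED_RESCHED was set before preemption was disabled.
		 * Re-enabling with the resched check would call
		 * schedule() from inside the tracer, and since the
		 * scheduler paths are themselves traced, that recurses.
		 */
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}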
kernel/trace/trace_mmiotrace.c (+7 -9)
···
 static struct trace_array *mmio_trace_array;
 static bool overrun_detected;
+static unsigned long prev_overruns;
 
 static void mmio_reset_data(struct trace_array *tr)
 {
 	int cpu;
 
 	overrun_detected = false;
+	prev_overruns = 0;
 	tr->time_start = ftrace_now(tr->cpu);
 
 	for_each_online_cpu(cpu)
···
 static unsigned long count_overruns(struct trace_iterator *iter)
 {
-	int cpu;
 	unsigned long cnt = 0;
-	/* FIXME: */
-#if 0
-	for_each_online_cpu(cpu) {
-		cnt += iter->overrun[cpu];
-		iter->overrun[cpu] = 0;
-	}
-#endif
-	(void)cpu;
+	unsigned long over = ring_buffer_overruns(iter->tr->buffer);
+
+	if (over > prev_overruns)
+		cnt = over - prev_overruns;
+	prev_overruns = over;
 	return cnt;
 }
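The fix replaces the dead "#if 0" per-cpu bookkeeping with a delta against the cumulative total reported by ring_buffer_overruns(). A standalone sketch of that cumulative-counter-to-delta scheme (plain userspace C with hypothetical names, not the kernel code):

#include <stdio.h>

static unsigned long prev_total;

/* Convert a monotonically growing total into a per-read count. */
static unsigned long overruns_since_last_read(unsigned long total)
{
	unsigned long cnt = 0;

	if (total > prev_total)		/* guard against reset/wrap */
		cnt = total - prev_total;
	prev_total = total;
	return cnt;
}

int main(void)
{
	/* Cumulative totals as a reader might observe them. */
	printf("%lu\n", overruns_since_last_read(0));	/* prints 0 */
	printf("%lu\n", overruns_since_last_read(5));	/* prints 5 */
	printf("%lu\n", overruns_since_last_read(12));	/* prints 7 */
	return 0;
}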
kernel/trace/trace_stack.c (+15 -9)
···
 static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	long i = (long)m->private;
+	long i;
 
 	(*pos)++;
 
-	i++;
+	if (v == SEQ_START_TOKEN)
+		i = 0;
+	else {
+		i = *(long *)v;
+		i++;
+	}
 
 	if (i >= max_stack_trace.nr_entries ||
 	    stack_dump_trace[i] == ULONG_MAX)
···
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
-	void *t = &m->private;
+	void *t = SEQ_START_TOKEN;
 	loff_t l = 0;
 
 	local_irq_disable();
 	__raw_spin_lock(&max_stack_lock);
+
+	if (*pos == 0)
+		return SEQ_START_TOKEN;
 
 	for (; t && l < *pos; t = t_next(m, t, &l))
 		;
···
 static int t_show(struct seq_file *m, void *v)
 {
-	long i = *(long *)v;
+	long i;
 	int size;
 
-	if (i < 0) {
+	if (v == SEQ_START_TOKEN) {
 		seq_printf(m, " Depth Size Location"
 			   " (%d entries)\n"
 			   " ----- ---- --------\n",
 			   max_stack_trace.nr_entries);
 		return 0;
 	}
+
+	i = *(long *)v;
 
 	if (i >= max_stack_trace.nr_entries ||
 	    stack_dump_trace[i] == ULONG_MAX)
···
 	int ret;
 
 	ret = seq_open(file, &stack_trace_seq_ops);
-	if (!ret) {
-		struct seq_file *m = file->private_data;
-		m->private = (void *)-1;
-	}
 
 	return ret;
 }
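The old iterator kept its cursor in m->private, starting at -1 so that t_show() would print the header, which is what miscomputed positions when the file was read in chunks. The fix switches to the standard seq_file SEQ_START_TOKEN convention: start() hands out the token exactly once at *pos == 0, show() prints the header when it sees the token, and next() treats the token as "before the first record". A generic sketch of that convention with made-up ex_* names (not the trace_stack.c code):

#include <linux/kernel.h>
#include <linux/seq_file.h>

static long ex_records[] = { 10, 20, 30 };

static void *ex_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)
		return SEQ_START_TOKEN;		/* header pseudo-record */
	return *pos <= ARRAY_SIZE(ex_records) ?
		&ex_records[*pos - 1] : NULL;
}

static void *ex_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;	/* the token counts as position 0 */
	return *pos <= ARRAY_SIZE(ex_records) ?
		&ex_records[*pos - 1] : NULL;
}

static int ex_show(struct seq_file *m, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(m, "value\n");		/* header line */
		return 0;
	}
	seq_printf(m, "%ld\n", *(long *)v);
	return 0;
}

static void ex_stop(struct seq_file *m, void *v)
{
}

static const struct seq_operations ex_seq_ops = {
	.start	= ex_start,
	.next	= ex_next,
	.show	= ex_show,
	.stop	= ex_stop,
};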