Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
ftrace: prevent recursion
tracing, doc: update mmiotrace documentation
x86, mmiotrace: fix buffer overrun detection
function tracing: fix wrong position computing of stack_trace

+27 -21
+4 -2
Documentation/tracers/mmiotrace.txt
@@ -37,6 +37,6 @@
 $ cat /debug/tracing/trace_pipe > mydump.txt &
 Start X or whatever.
 $ echo "X is up" > /debug/tracing/trace_marker
-$ echo none > /debug/tracing/current_tracer
+$ echo nop > /debug/tracing/current_tracer
 Check for lost events.
 
@@ -66,7 +66,7 @@
 do.
 
 Shut down mmiotrace (requires root privileges):
-$ echo none > /debug/tracing/current_tracer
+$ echo nop > /debug/tracing/current_tracer
 The 'cat' process exits. If it does not, kill it by issuing 'fg' command and
 pressing ctrl+c.
 
@@ -81,7 +81,9 @@
 $ cat /debug/tracing/trace_entries
 gives you a number. Approximately double this number and write it back, for
 instance:
+$ echo 0 > /debug/tracing/tracing_enabled
 $ echo 128000 > /debug/tracing/trace_entries
+$ echo 1 > /debug/tracing/tracing_enabled
 Then start again from the top.
 
 If you are doing a trace for a driver project, e.g. Nouveau, you should also
+1 -1
kernel/trace/ring_buffer.c
@@ -1215,7 +1215,7 @@
 
  out:
        if (resched)
-               preempt_enable_notrace();
+               preempt_enable_no_resched_notrace();
        else
                preempt_enable_notrace();
        return NULL;
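For context on this one-line fix: the reserve path snapshots need_resched() and then disables preemption with the notrace variant, so the failure path must honor that snapshot as well. Calling plain preempt_enable_notrace() with a reschedule pending can enter schedule(), and schedule() is itself traced, so the tracer would recurse into the very ring buffer code it just bailed out of. A condensed sketch of the pairing (abbreviated, not the verbatim kernel source):

        int resched;

        /* If we are tracing schedule, we do not want to recurse */
        resched = need_resched();
        preempt_disable_notrace();

        /* ... try to reserve space in the ring buffer ... */

 out:
        /*
         * With a reschedule pending, plain preempt_enable_notrace()
         * may call schedule(); schedule() is traced, so the tracer
         * would re-enter itself.  The no_resched variant drops the
         * preempt count without rescheduling.
         */
        if (resched)
                preempt_enable_no_resched_notrace();
        else
                preempt_enable_notrace();
        return NULL;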
+7 -9
kernel/trace/trace_mmiotrace.c
@@ -18,12 +18,14 @@
 
 static struct trace_array *mmio_trace_array;
 static bool overrun_detected;
+static unsigned long prev_overruns;
 
 static void mmio_reset_data(struct trace_array *tr)
 {
        int cpu;
 
        overrun_detected = false;
+       prev_overruns = 0;
        tr->time_start = ftrace_now(tr->cpu);
 
        for_each_online_cpu(cpu)
@@ -128,16 +130,12 @@
 
 static unsigned long count_overruns(struct trace_iterator *iter)
 {
-       int cpu;
        unsigned long cnt = 0;
-/* FIXME: */
-#if 0
-       for_each_online_cpu(cpu) {
-               cnt += iter->overrun[cpu];
-               iter->overrun[cpu] = 0;
-       }
-#endif
-       (void)cpu;
+       unsigned long over = ring_buffer_overruns(iter->tr->buffer);
+
+       if (over > prev_overruns)
+               cnt = over - prev_overruns;
+       prev_overruns = over;
        return cnt;
 }
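The old per-cpu accounting had been #if 0'ed out, so overruns were never counted at all; the fix instead derives the count from ring_buffer_overruns(), which returns a monotonically increasing total, by keeping the previous snapshot in prev_overruns (and zeroing it in mmio_reset_data() so the first read after a reset does not report a stale delta). A minimal user-space sketch of that delta bookkeeping; read_total_overruns() is a hypothetical stand-in for ring_buffer_overruns(iter->tr->buffer), fed simulated values here:

#include <stdio.h>

static unsigned long prev_overruns;

static unsigned long read_total_overruns(void)
{
        static unsigned long fake_totals[] = { 0, 3, 3, 10 };
        static int call;

        return fake_totals[call++];     /* simulated monotonic counter */
}

static unsigned long count_overruns(void)
{
        unsigned long cnt = 0;
        unsigned long over = read_total_overruns();

        if (over > prev_overruns)       /* tolerate a reset counter */
                cnt = over - prev_overruns;
        prev_overruns = over;
        return cnt;
}

int main(void)
{
        int i;

        for (i = 0; i < 4; i++)
                printf("lost this interval: %lu\n", count_overruns());
        return 0;                       /* prints 0, 3, 0, 7 */
}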
+15 -9
kernel/trace/trace_stack.c
@@ -184,11 +184,16 @@
 static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
-       long i = (long)m->private;
+       long i;
 
        (*pos)++;
 
-       i++;
+       if (v == SEQ_START_TOKEN)
+               i = 0;
+       else {
+               i = *(long *)v;
+               i++;
+       }
 
        if (i >= max_stack_trace.nr_entries ||
            stack_dump_trace[i] == ULONG_MAX)
@@ -201,11 +206,14 @@
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
-       void *t = &m->private;
+       void *t = SEQ_START_TOKEN;
        loff_t l = 0;
 
        local_irq_disable();
        __raw_spin_lock(&max_stack_lock);
 
+       if (*pos == 0)
+               return SEQ_START_TOKEN;
+
        for (; t && l < *pos; t = t_next(m, t, &l))
                ;
@@ -235,16 +243,18 @@
 
 static int t_show(struct seq_file *m, void *v)
 {
-       long i = *(long *)v;
+       long i;
        int size;
 
-       if (i < 0) {
+       if (v == SEQ_START_TOKEN) {
                seq_printf(m, "        Depth    Size   Location"
                           "    (%d entries)\n"
                           "        -----    ----   --------\n",
                           max_stack_trace.nr_entries);
                return 0;
        }
+
+       i = *(long *)v;
 
        if (i >= max_stack_trace.nr_entries ||
            stack_dump_trace[i] == ULONG_MAX)
@@ -275,10 +285,6 @@
        int ret;
 
        ret = seq_open(file, &stack_trace_seq_ops);
-       if (!ret) {
-               struct seq_file *m = file->private_data;
-               m->private = (void *)-1;
-       }
 
        return ret;
 }
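The position bug came from keeping the cursor in m->private and seeding it with -1 to mean "print the header", which drifted out of sync with *pos. The fix adopts the stock seq_file convention: t_start() returns SEQ_START_TOKEN at *pos == 0 so the header is emitted exactly once, and t_next()/t_show() dispatch on the token instead of a sentinel index. A small runnable user-space analogue of that start/next/show protocol (SEQ_START_TOKEN really is ((void *)1) in the kernel's seq_file.h; the sample data and driver loop are invented for illustration):

#include <stdio.h>

#define SEQ_START_TOKEN ((void *)1)

static long entries[] = { 100, 80, 50 };
#define NR_ENTRIES 3

static long cursor;     /* backing store for the iterator position */

static void *t_start(long *pos)
{
        if (*pos == 0)
                return SEQ_START_TOKEN; /* header pass comes first */
        cursor = *pos - 1;              /* entry i lives at position i + 1 */
        return cursor < NR_ENTRIES ? (void *)&cursor : NULL;
}

static void *t_next(void *v, long *pos)
{
        long i = (v == SEQ_START_TOKEN) ? 0 : *(long *)v + 1;

        (*pos)++;
        if (i >= NR_ENTRIES)
                return NULL;
        cursor = i;
        return &cursor;
}

static void t_show(void *v)
{
        if (v == SEQ_START_TOKEN) {
                printf("  Depth   Size\n");     /* printed exactly once */
                return;
        }
        printf("  %5ld   %4ld\n", *(long *)v, entries[*(long *)v]);
}

int main(void)
{
        long pos = 0;
        void *v;

        for (v = t_start(&pos); v; v = t_next(v, &pos))
                t_show(v);
        return 0;
}

Because the token, not a magic index, marks the header pass, a restarted read (seq_file calling t_start() again with a nonzero *pos) lands on the correct entry instead of shifting every subsequent position by one.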