Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sched, latencytop: incorporate review feedback from Andrew Morton

Andrew had some suggestions for the latencytop file; this patch takes care
of most of these:

* Add documentation
* Turn account_scheduler_latency into an inline function
* Don't report negative values to userspace
* Make the file operations struct const
* Fix a few checkpatch.pl warnings

Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Authored by Arjan van de Ven; committed by Ingo Molnar.
ad0b0fd5 f437e8b5

+80 -13
+9 -1
include/linux/latencytop.h
··· 9 9 #ifndef _INCLUDE_GUARD_LATENCYTOP_H_ 10 10 #define _INCLUDE_GUARD_LATENCYTOP_H_ 11 11 12 + #include <linux/compiler.h> 12 13 #ifdef CONFIG_LATENCYTOP 13 14 14 15 #define LT_SAVECOUNT 32 ··· 25 24 26 25 struct task_struct; 27 26 28 - void account_scheduler_latency(struct task_struct *task, int usecs, int inter); 27 + extern int latencytop_enabled; 28 + void __account_scheduler_latency(struct task_struct *task, int usecs, int inter); 29 + static inline void 30 + account_scheduler_latency(struct task_struct *task, int usecs, int inter) 31 + { 32 + if (unlikely(latencytop_enabled)) 33 + __account_scheduler_latency(task, usecs, inter); 34 + } 29 35 30 36 void clear_all_latency_tracing(struct task_struct *p); 31 37
+71 -12
kernel/latencytop.c
··· 9 9 * as published by the Free Software Foundation; version 2 10 10 * of the License. 11 11 */ 12 + 13 + /* 14 + * CONFIG_LATENCYTOP enables a kernel latency tracking infrastructure that is 15 + * used by the "latencytop" userspace tool. The latency that is tracked is not 16 + * the 'traditional' interrupt latency (which is primarily caused by something 17 + * else consuming CPU), but instead, it is the latency an application encounters 18 + * because the kernel sleeps on its behalf for various reasons. 19 + * 20 + * This code tracks 2 levels of statistics: 21 + * 1) System level latency 22 + * 2) Per process latency 23 + * 24 + * The latency is stored in fixed sized data structures in an accumulated form; 25 + * if the "same" latency cause is hit twice, this will be tracked as one entry 26 + * in the data structure. Both the count, total accumulated latency and maximum 27 + * latency are tracked in this data structure. When the fixed size structure is 28 + * full, no new causes are tracked until the buffer is flushed by writing to 29 + * the /proc file; the userspace tool does this on a regular basis. 30 + * 31 + * A latency cause is identified by a stringified backtrace at the point that 32 + * the scheduler gets invoked. The userland tool will use this string to 33 + * identify the cause of the latency in human readable form. 34 + * 35 + * The information is exported via /proc/latency_stats and /proc/<pid>/latency. 
36 + * These files look like this: 37 + * 38 + * Latency Top version : v0.1 39 + * 70 59433 4897 i915_irq_wait drm_ioctl vfs_ioctl do_vfs_ioctl sys_ioctl 40 + * | | | | 41 + * | | | +----> the stringified backtrace 42 + * | | +---------> The maximum latency for this entry in microseconds 43 + * | +--------------> The accumulated latency for this entry (microseconds) 44 + * +-------------------> The number of times this entry is hit 45 + * 46 + * (note: the average latency is the accumulated latency divided by the number 47 + * of times) 48 + */ 49 + 12 50 #include <linux/latencytop.h> 13 51 #include <linux/kallsyms.h> 14 52 #include <linux/seq_file.h> ··· 110 72 firstnonnull = i; 111 73 continue; 112 74 } 113 - for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) { 75 + for (q = 0; q < LT_BACKTRACEDEPTH; q++) { 114 76 unsigned long record = lat->backtrace[q]; 115 77 116 78 if (latency_record[i].backtrace[q] != record) { ··· 139 101 memcpy(&latency_record[i], lat, sizeof(struct latency_record)); 140 102 } 141 103 142 - static inline void store_stacktrace(struct task_struct *tsk, struct latency_record *lat) 104 + /* 105 + * Iterator to store a backtrace into a latency record entry 106 + */ 107 + static inline void store_stacktrace(struct task_struct *tsk, 108 + struct latency_record *lat) 143 109 { 144 110 struct stack_trace trace; 145 111 146 112 memset(&trace, 0, sizeof(trace)); 147 113 trace.max_entries = LT_BACKTRACEDEPTH; 148 114 trace.entries = &lat->backtrace[0]; 149 - trace.skip = 0; 150 115 save_stack_trace_tsk(tsk, &trace); 151 116 } 152 117 118 + /** 119 + * __account_scheduler_latency - record an occured latency 120 + * @tsk - the task struct of the task hitting the latency 121 + * @usecs - the duration of the latency in microseconds 122 + * @inter - 1 if the sleep was interruptible, 0 if uninterruptible 123 + * 124 + * This function is the main entry point for recording latency entries 125 + * as called by the scheduler. 
126 + * 127 + * This function has a few special cases to deal with normal 'non-latency' 128 + * sleeps: specifically, interruptible sleep longer than 5 msec is skipped 129 + * since this usually is caused by waiting for events via select() and co. 130 + * 131 + * Negative latencies (caused by time going backwards) are also explicitly 132 + * skipped. 133 + */ 153 134 void __sched 154 - account_scheduler_latency(struct task_struct *tsk, int usecs, int inter) 135 + __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter) 155 136 { 156 137 unsigned long flags; 157 138 int i, q; 158 139 struct latency_record lat; 159 140 160 - if (!latencytop_enabled) 161 - return; 162 - 163 141 /* Long interruptible waits are generally user requested... */ 164 142 if (inter && usecs > 5000) 143 + return; 144 + 145 + /* Negative sleeps are time going backwards */ 146 + /* Zero-time sleeps are non-interesting */ 147 + if (usecs <= 0) 165 148 return; 166 149 167 150 memset(&lat, 0, sizeof(lat)); ··· 202 143 if (tsk->latency_record_count >= LT_SAVECOUNT) 203 144 goto out_unlock; 204 145 205 - for (i = 0; i < LT_SAVECOUNT ; i++) { 146 + for (i = 0; i < LT_SAVECOUNT; i++) { 206 147 struct latency_record *mylat; 207 148 int same = 1; 208 149 209 150 mylat = &tsk->latency_record[i]; 210 - for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) { 151 + for (q = 0; q < LT_BACKTRACEDEPTH; q++) { 211 152 unsigned long record = lat.backtrace[q]; 212 153 213 154 if (mylat->backtrace[q] != record) { ··· 245 186 for (i = 0; i < MAXLR; i++) { 246 187 if (latency_record[i].backtrace[0]) { 247 188 int q; 248 - seq_printf(m, "%i %li %li ", 189 + seq_printf(m, "%i %lu %lu ", 249 190 latency_record[i].count, 250 191 latency_record[i].time, 251 192 latency_record[i].max); ··· 282 223 return single_open(filp, lstats_show, NULL); 283 224 } 284 225 285 - static struct file_operations lstats_fops = { 226 + static const struct file_operations lstats_fops = { 286 227 .open = lstats_open, 287 228 .read = 
seq_read, 288 229 .write = lstats_write, ··· 295 236 proc_create("latency_stats", 0644, NULL, &lstats_fops); 296 237 return 0; 297 238 } 298 - __initcall(init_lstats_procfs); 239 + device_initcall(init_lstats_procfs);