Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tracing: Buffer the output of seq_file in case of filled buffer

If the seq_read fills the buffer it will call s_start again on the next
iteration with the same position. This causes a problem with the
function_graph tracer because it consumes the iteration in order to
determine leaf functions.

What happens is that the iterator stores the entry, and the function
graph plugin will look at the next entry. If that next entry is a return
of the same function and task, then the function is a leaf and the
function_graph plugin calls ring_buffer_read which moves the ring buffer
iterator forward (the trace iterator still points to the function start
entry).

The copying of the trace_seq to the seq_file buffer will fail if the
seq_file buffer is full. The seq_read will not show this entry.
The next read by userspace will cause seq_read to again call s_start
which will reuse the trace iterator entry (the function start entry).
But the function return entry was already consumed. The function graph
plugin will think that this entry is a nested function and not a leaf.

To solve this, the trace code now checks the return status of the
seq_printf (trace_print_seq). If the writing to the seq_file buffer
fails, we set a flag in the iterator (leftover) and we do not reset
the trace_seq buffer. On the next call to s_start, we check the leftover
flag, and if it is set, we just reuse the trace_seq buffer and do not
call into the plugin print functions.

Before this patch:

2) | fput() {
2) | __fput() {
2) 0.550 us | inotify_inode_queue_event();
2) | __fsnotify_parent() {
2) 0.540 us | inotify_dentry_parent_queue_event();

After the patch:

2) | fput() {
2) | __fput() {
2) 0.550 us | inotify_inode_queue_event();
2) 0.548 us | __fsnotify_parent();
2) 0.540 us | inotify_dentry_parent_queue_event();

[
Updated the patch to fix a missing return 0 from the trace_print_seq()
stub when CONFIG_TRACING is disabled.

Reported-by: Ingo Molnar <mingo@elte.hu>
]

Reported-by: Jiri Olsa <jolsa@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>

authored by

Steven Rostedt and committed by
Steven Rostedt
a63ce5b3 29bf4a5e

+47 -8
+1
include/linux/ftrace_event.h
··· 57 57 /* The below is zeroed out in pipe_read */ 58 58 struct trace_seq seq; 59 59 struct trace_entry *ent; 60 + int leftover; 60 61 int cpu; 61 62 u64 ts; 62 63
+3 -2
include/linux/trace_seq.h
··· 33 33 __attribute__ ((format (printf, 2, 0))); 34 34 extern int 35 35 trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); 36 - extern void trace_print_seq(struct seq_file *m, struct trace_seq *s); 36 + extern int trace_print_seq(struct seq_file *m, struct trace_seq *s); 37 37 extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, 38 38 size_t cnt); 39 39 extern int trace_seq_puts(struct trace_seq *s, const char *str); ··· 55 55 return 0; 56 56 } 57 57 58 - static inline void trace_print_seq(struct seq_file *m, struct trace_seq *s) 58 + static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s) 59 59 { 60 + return 0; 60 61 } 61 62 static inline ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, 62 63 size_t cnt)
+32 -3
kernel/trace/trace.c
··· 1516 1516 int i = (int)*pos; 1517 1517 void *ent; 1518 1518 1519 + WARN_ON_ONCE(iter->leftover); 1520 + 1519 1521 (*pos)++; 1520 1522 1521 1523 /* can't go backwards */ ··· 1616 1614 ; 1617 1615 1618 1616 } else { 1619 - l = *pos - 1; 1620 - p = s_next(m, p, &l); 1617 + /* 1618 + * If we overflowed the seq_file before, then we want 1619 + * to just reuse the trace_seq buffer again. 1620 + */ 1621 + if (iter->leftover) 1622 + p = iter; 1623 + else { 1624 + l = *pos - 1; 1625 + p = s_next(m, p, &l); 1626 + } 1621 1627 } 1622 1628 1623 1629 trace_event_read_lock(); ··· 1933 1923 static int s_show(struct seq_file *m, void *v) 1934 1924 { 1935 1925 struct trace_iterator *iter = v; 1926 + int ret; 1936 1927 1937 1928 if (iter->ent == NULL) { 1938 1929 if (iter->tr) { ··· 1953 1942 if (!(trace_flags & TRACE_ITER_VERBOSE)) 1954 1943 print_func_help_header(m); 1955 1944 } 1945 + } else if (iter->leftover) { 1946 + /* 1947 + * If we filled the seq_file buffer earlier, we 1948 + * want to just show it now. 1949 + */ 1950 + ret = trace_print_seq(m, &iter->seq); 1951 + 1952 + /* ret should this time be zero, but you never know */ 1953 + iter->leftover = ret; 1954 + 1956 1955 } else { 1957 1956 print_trace_line(iter); 1958 - trace_print_seq(m, &iter->seq); 1957 + ret = trace_print_seq(m, &iter->seq); 1958 + /* 1959 + * If we overflow the seq_file buffer, then it will 1960 + * ask us for this data again at start up. 1961 + * Use that instead. 1962 + * ret is 0 if seq_file write succeeded. 1963 + * -1 otherwise. 1964 + */ 1965 + iter->leftover = ret; 1959 1966 } 1960 1967 1961 1968 return 0;
+11 -3
kernel/trace/trace_output.c
··· 23 23 24 24 static int next_event_type = __TRACE_LAST_TYPE + 1; 25 25 26 - void trace_print_seq(struct seq_file *m, struct trace_seq *s) 26 + int trace_print_seq(struct seq_file *m, struct trace_seq *s) 27 27 { 28 28 int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len; 29 + int ret; 29 30 30 - seq_write(m, s->buffer, len); 31 + ret = seq_write(m, s->buffer, len); 31 32 32 - trace_seq_init(s); 33 + /* 34 + * Only reset this buffer if we successfully wrote to the 35 + * seq_file buffer. 36 + */ 37 + if (!ret) 38 + trace_seq_init(s); 39 + 40 + return ret; 33 41 } 34 42 35 43 enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)