ring-buffer: Move zeroing out excess in page to ring buffer code

Currently the trace splice code zeros out the excess bytes in the page before
sending it off to userspace.

This is done to make sure userspace does not get anything it should not
when reading the pages, because the excess data is never initialized
to zero before writing (for performance reasons).

But the splice code has no business doing this work; it should be
done by the ring buffer. Moreover, with the latest changes for recording
lost events, the splice code gets it wrong anyway.

Move the zeroing out of excess bytes into the ring buffer code.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>

Authored and committed by Steven Rostedt (2711ca23, b3230c8b)

+9 -8

kernel/trace/ring_buffer.c (+9 -2)
···
 	ret = read;
 
 	cpu_buffer->lost_events = 0;
+
+	commit = local_read(&bpage->commit);
 	/*
 	 * Set a flag in the commit field if we lost events
 	 */
 	if (missed_events) {
-		commit = local_read(&bpage->commit);
-
 		/* If there is room at the end of the page to save the
 		 * missed events, then record it there.
 		 */
···
 			memcpy(&bpage->data[commit], &missed_events,
 			       sizeof(missed_events));
 			local_add(RB_MISSED_STORED, &bpage->commit);
+			commit += sizeof(missed_events);
 		}
 		local_add(RB_MISSED_EVENTS, &bpage->commit);
 	}
+
+	/*
+	 * This page may be off to user land. Zero it out here.
+	 */
+	if (commit < BUF_PAGE_SIZE)
+		memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
 
  out_unlock:
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
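Distilled out of the kernel context, the new read-side behavior amounts to
the standalone sketch below. The helper name finish_read_page and the
BUF_PAGE_SIZE value are illustrative stand-ins, not kernel API:

#include <string.h>

/* Illustrative stand-in for the kernel's usable bytes per sub-buffer page. */
#define BUF_PAGE_SIZE 4080

/*
 * Sketch of the pattern ring_buffer_read_page() now follows: 'commit'
 * marks the end of valid event data; if a missed-events count was
 * appended right after the data, step past it, then zero everything up
 * to the end of the page so stale bytes from a previous use of the page
 * never reach whoever reads, maps, or splices it.
 */
static void finish_read_page(unsigned char *data, size_t commit,
			     size_t stored_count_bytes)
{
	commit += stored_count_bytes;	/* 0 if no count was stored */
	if (commit < BUF_PAGE_SIZE)
		memset(&data[commit], 0, BUF_PAGE_SIZE - commit);
}

Doing this here, under the reader lock and with commit already in hand, is
what lets the splice path in trace.c drop its own zeroing below.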
kernel/trace/trace.c (-6)
···
 		     size_t count, loff_t *ppos)
 {
 	struct ftrace_buffer_info *info = filp->private_data;
-	unsigned int pos;
 	ssize_t ret;
 	size_t size;
 
···
 	trace_access_unlock(info->cpu);
 	if (ret < 0)
 		return 0;
-
-	pos = ring_buffer_page_len(info->spare);
-
-	if (pos < PAGE_SIZE)
-		memset(info->spare + pos, 0, PAGE_SIZE - pos);
 
  read:
 	size = PAGE_SIZE - info->read;
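For completeness, a hedged sketch of how a consumer that receives such a
page can interpret it. The RB_MISSED_* bit values mirror those in
kernel/trace/ring_buffer.c at the time of this change; the struct layout is
the user-space view of buffer_data_page (assuming reader and kernel agree
on word size), and the helper page_data_len() is hypothetical:

#include <stdint.h>
#include <string.h>

/* Flag bits the ring buffer sets in the high bits of the commit field
 * (values as in kernel/trace/ring_buffer.c at the time of this commit). */
#define RB_MISSED_EVENTS	(1UL << 31)
#define RB_MISSED_STORED	(1UL << 30)
#define RB_MISSED_FLAGS		(RB_MISSED_EVENTS | RB_MISSED_STORED)

struct buffer_data_page {
	uint64_t	time_stamp;	/* timestamp of the first event */
	unsigned long	commit;		/* data length plus RB_MISSED_* flags */
	unsigned char	data[];		/* the events themselves */
};

/* Hypothetical reader helper: return the length of valid event data and,
 * when the kernel appended it right after the data, the missed-events
 * count. */
static unsigned long page_data_len(const struct buffer_data_page *bpage,
				   unsigned long *missed)
{
	unsigned long len = bpage->commit & ~RB_MISSED_FLAGS;

	*missed = 0;
	if (bpage->commit & RB_MISSED_STORED)
		memcpy(missed, &bpage->data[len], sizeof(*missed));
	return len;
}

Everything past len (apart from a stored count) is now guaranteed to be
zero, which is exactly the property the ring buffer, rather than the splice
code, enforces after this change.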