function tracing: fix wrong pos computation when the read buffer is filled

Impact: make the output of available_filter_functions complete

phenomenon:

The first value in dyn_ftrace_total_info does not match
`cat available_filter_functions | wc -l`; the two should be equal.
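
A quick way to check is to read both files and compare the counts. The
following is a minimal sketch, assuming debugfs is mounted at
/sys/kernel/debug and that the first field of dyn_ftrace_total_info is
the entry count:

/* check_ftrace_count.c - compare dyn_ftrace_total_info with the number
 * of lines in available_filter_functions. Illustrative only; the paths
 * assume debugfs is mounted at /sys/kernel/debug.
 */
#include <stdio.h>

#define TRACING "/sys/kernel/debug/tracing/"

int main(void)
{
	FILE *f;
	long total = 0, lines = 0;
	int c;

	f = fopen(TRACING "dyn_ftrace_total_info", "r");
	if (!f || fscanf(f, "%ld", &total) != 1) {
		perror("dyn_ftrace_total_info");
		return 1;
	}
	fclose(f);

	f = fopen(TRACING "available_filter_functions", "r");
	if (!f) {
		perror("available_filter_functions");
		return 1;
	}
	while ((c = fgetc(f)) != EOF)
		if (c == '\n')
			lines++;
	fclose(f);

	printf("total_info: %ld, listed: %ld%s\n", total, lines,
	       total == lines ? " (match)" : " (MISMATCH)");
	return 0;
}

On an affected kernel the listed count comes up short by one function
for every read buffer that was filled to overflow.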

root cause:

When t_show() prints a function with seq_printf() and the current
function record overflows the read buffer, that record is not copied
to user space; it is simply dropped, so the function never appears in
the output.

So on every fill of the read buffer, the last function, the one that
overflowed it, is dropped.

The same applies to set_ftrace_filter whenever its contents exceed
the size of the read buffer.
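
The drop can be mimicked entirely in user space. This is a minimal
sketch, not the kernel's seq_file implementation; the record names,
buffer size, and fill_buffer() helper are made up for illustration.
A record that does not fit in the current buffer is either dropped
(the buggy path) or retried in the next buffer after stepping the
position back (the fixed path):

/* seq_drop_demo.c - user-space analogue of the t_show() bug. */
#include <stdio.h>
#include <string.h>

#define NRECS	10
#define BUFSZ	32	/* deliberately small "read buffer" */

static long fill_buffer(long pos, int fixed)
{
	char buf[BUFSZ];
	size_t used = 0;

	while (pos < NRECS) {
		char rec[32];
		int len = snprintf(rec, sizeof(rec), "func%ld\n", pos);

		pos++;			/* iterator already advanced */
		if (used + len >= sizeof(buf)) {
			if (fixed)
				pos--;	/* retry this record next time */
			break;		/* buggy path just drops it */
		}
		memcpy(buf + used, rec, len);
		used += len;
	}
	fwrite(buf, 1, used, stdout);	/* hand buffer to "user space" */
	return pos;
}

int main(void)
{
	long pos;
	int fixed;

	for (fixed = 0; fixed <= 1; fixed++) {
		printf("--- %s ---\n", fixed ? "fixed" : "buggy");
		for (pos = 0; pos < NRECS; )
			pos = fill_buffer(pos, fixed);
	}
	return 0;
}

The buggy pass prints func0-func4 and func6-func9, silently losing
func5; the fixed pass retries func5 at the start of the next buffer,
so all ten records come out.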

fix:

Check the return value of seq_printf(): if it is negative, the record
was not printed. In that case, decrement the position so the function
is printed again on the next read, into the next buffer.

A second small fix reports the correct number of allocated pages: the
allocation loop adds cnt pages on top of the initial page, so the
message should print cnt + 1.

Signed-off-by: walimis <walimisdev@gmail.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Authored by walimis and committed by Ingo Molnar · 5821e1b7 ee51a1de

+15 -11
kernel/trace/ftrace.c
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -673,7 +673,7 @@
 
 	cnt = num_to_init / ENTRIES_PER_PAGE;
 	pr_info("ftrace: allocating %ld entries in %d pages\n",
-		num_to_init, cnt);
+		num_to_init, cnt + 1);
 
 	for (i = 0; i < cnt; i++) {
 		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
@@ -753,13 +753,11 @@
 	void *p = NULL;
 	loff_t l = -1;
 
-	if (*pos != iter->pos) {
-		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
-			;
-	} else {
-		l = *pos;
-		p = t_next(m, p, &l);
-	}
+	if (*pos > iter->pos)
+		*pos = iter->pos;
+
+	l = *pos;
+	p = t_next(m, p, &l);
 
 	return p;
 }
@@ -768,15 +766,21 @@
 
 static int t_show(struct seq_file *m, void *v)
 {
+	struct ftrace_iterator *iter = m->private;
 	struct dyn_ftrace *rec = v;
 	char str[KSYM_SYMBOL_LEN];
+	int ret = 0;
 
 	if (!rec)
 		return 0;
 
 	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
 
-	seq_printf(m, "%s\n", str);
+	ret = seq_printf(m, "%s\n", str);
+	if (ret < 0) {
+		iter->pos--;
+		iter->idx--;
+	}
 
 	return 0;
 }
@@ -808,7 +812,7 @@
 		return -ENOMEM;
 
 	iter->pg = ftrace_pages_start;
-	iter->pos = -1;
+	iter->pos = 0;
 
 	ret = seq_open(file, &show_ftrace_seq_ops);
 	if (!ret) {
@@ -895,7 +899,7 @@
 
 	if (file->f_mode & FMODE_READ) {
 		iter->pg = ftrace_pages_start;
-		iter->pos = -1;
+		iter->pos = 0;
 		iter->flags = enable ? FTRACE_ITER_FILTER :
 			FTRACE_ITER_NOTRACE;
 