Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bpf: Fix nested bpf_bprintf_prepare with more per-cpu buffers

The bpf_seq_printf, bpf_trace_printk and bpf_snprintf helpers share one
per-cpu buffer that they use to store temporary data (arguments to
bprintf). They "get" that buffer with try_get_fmt_tmp_buf and "put" it
by the end of their scope with bpf_bprintf_cleanup.

If one of these helpers gets called within the scope of one of these
helpers, for example: a first bpf program gets called, uses
bpf_trace_printk which calls raw_spin_lock_irqsave which is traced by
another bpf program that calls bpf_snprintf, then the second "get"
fails. Essentially, these helpers are not re-entrant. They would return
-EBUSY and print a warning message once.

This patch triples the number of bprintf buffers to allow three levels
of nesting. This is very similar to what was done for tracepoints in
commit 9594dc3c7e71 ("bpf: fix nested bpf tracepoints with per-cpu data").

Fixes: d9c9e4db186a ("bpf: Factorize bpf_trace_printk and bpf_seq_printf")
Reported-by: syzbot+63122d0bc347f18c1884@syzkaller.appspotmail.com
Signed-off-by: Florent Revest <revest@chromium.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20210511081054.2125874-1-revest@chromium.org

Authored by Florent Revest and committed by Alexei Starovoitov
(commit e2d5b2bb, parent 35e3815f)

+14 -13
kernel/bpf/helpers.c
@@ -696,34 +696,35 @@
  */
 #define MAX_PRINTF_BUF_LEN 512
 
-struct bpf_printf_buf {
-	char tmp_buf[MAX_PRINTF_BUF_LEN];
+/* Support executing three nested bprintf helper calls on a given CPU */
+struct bpf_bprintf_buffers {
+	char tmp_bufs[3][MAX_PRINTF_BUF_LEN];
 };
-static DEFINE_PER_CPU(struct bpf_printf_buf, bpf_printf_buf);
-static DEFINE_PER_CPU(int, bpf_printf_buf_used);
+static DEFINE_PER_CPU(struct bpf_bprintf_buffers, bpf_bprintf_bufs);
+static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);
 
 static int try_get_fmt_tmp_buf(char **tmp_buf)
 {
-	struct bpf_printf_buf *bufs;
-	int used;
+	struct bpf_bprintf_buffers *bufs;
+	int nest_level;
 
 	preempt_disable();
-	used = this_cpu_inc_return(bpf_printf_buf_used);
-	if (WARN_ON_ONCE(used > 1)) {
-		this_cpu_dec(bpf_printf_buf_used);
+	nest_level = this_cpu_inc_return(bpf_bprintf_nest_level);
+	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bufs->tmp_bufs))) {
+		this_cpu_dec(bpf_bprintf_nest_level);
 		preempt_enable();
 		return -EBUSY;
 	}
-	bufs = this_cpu_ptr(&bpf_printf_buf);
-	*tmp_buf = bufs->tmp_buf;
+	bufs = this_cpu_ptr(&bpf_bprintf_bufs);
+	*tmp_buf = bufs->tmp_bufs[nest_level - 1];
 
 	return 0;
 }
 
 void bpf_bprintf_cleanup(void)
 {
-	if (this_cpu_read(bpf_printf_buf_used)) {
-		this_cpu_dec(bpf_printf_buf_used);
+	if (this_cpu_read(bpf_bprintf_nest_level)) {
+		this_cpu_dec(bpf_bprintf_nest_level);
 		preempt_enable();
 	}
 }