Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'bpf-fix-and-test-aux-usage-after-do_check_insn'

Luis Gerhorst says:

====================
bpf: Fix and test aux usage after do_check_insn()

Fix cur_aux()->nospec_result test after do_check_insn() referring to the
to-be-analyzed (potentially unsafe) instruction, not the
already-analyzed (safe) instruction. This might allow an unsafe insn to
slip through on a speculative path. Create some tests from the
reproducer [1].

Commit d6f1c85f2253 ("bpf: Fall back to nospec for Spectre v1") should
not be in any stable kernel yet, therefore bpf-next should suffice.

[1] https://lore.kernel.org/bpf/685b3c1b.050a0220.2303ee.0010.GAE@google.com/

Changes since v2:
- Use insn_aux variable instead of introducing prev_aux() as suggested
by Eduard (and therefore also drop patch 1)
- v2: https://lore.kernel.org/bpf/20250628145016.784256-1-luis.gerhorst@fau.de/

Changes since v1:
- Fix compiler error due to missed rename of prev_insn_idx in first
patch
- v1: https://lore.kernel.org/bpf/20250628125927.763088-1-luis.gerhorst@fau.de/

Changes since RFC:
- Introduce prev_aux() as suggested by Alexei. For this, we must move
the env->prev_insn_idx assignment to happen directly after
do_check_insn(), for which I have created a separate commit. This
patch could be simplified by using a local prev_aux variable as
suggested by Eduard, but I figured one might find the new
assignment-strategy easier to understand (before, prev_insn_idx and
env->prev_insn_idx were out-of-sync for the latter part of the loop).
Also, like this we do not have an additional prev_* variable that must
be kept in-sync and the local variable's usage (old prev_insn_idx, new
tmp) is much more local. If you think it would be better to not take
the risk and keep the fix simple by just introducing the prev_aux
variable, let me know.
- Change WARN_ON_ONCE() to verifier_bug_if() as suggested by Alexei
- Change assertion to check instruction is BPF_JMP[32] as suggested by
Eduard
- RFC: https://lore.kernel.org/bpf/8734bmoemx.fsf@fau.de/
====================

Link: https://patch.msgid.link/20250705190908.1756862-1-luis.gerhorst@fau.de
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

+167 -5
+14 -5
kernel/bpf/verifier.c
··· 19953 19953 19954 19954 for (;;) { 19955 19955 struct bpf_insn *insn; 19956 + struct bpf_insn_aux_data *insn_aux; 19956 19957 int err; 19957 19958 19958 19959 /* reset current history entry on each new instruction */ ··· 19967 19966 } 19968 19967 19969 19968 insn = &insns[env->insn_idx]; 19969 + insn_aux = &env->insn_aux_data[env->insn_idx]; 19970 19970 19971 19971 if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { 19972 19972 verbose(env, ··· 20044 20042 /* Reduce verification complexity by stopping speculative path 20045 20043 * verification when a nospec is encountered. 20046 20044 */ 20047 - if (state->speculative && cur_aux(env)->nospec) 20045 + if (state->speculative && insn_aux->nospec) 20048 20046 goto process_bpf_exit; 20049 20047 20050 20048 err = do_check_insn(env, &do_print_state); ··· 20052 20050 /* Prevent this speculative path from ever reaching the 20053 20051 * insn that would have been unsafe to execute. 20054 20052 */ 20055 - cur_aux(env)->nospec = true; 20053 + insn_aux->nospec = true; 20056 20054 /* If it was an ADD/SUB insn, potentially remove any 20057 20055 * markings for alu sanitization. 20058 20056 */ 20059 - cur_aux(env)->alu_state = 0; 20057 + insn_aux->alu_state = 0; 20060 20058 goto process_bpf_exit; 20061 20059 } else if (err < 0) { 20062 20060 return err; ··· 20065 20063 } 20066 20064 WARN_ON_ONCE(err); 20067 20065 20068 - if (state->speculative && cur_aux(env)->nospec_result) { 20066 + if (state->speculative && insn_aux->nospec_result) { 20069 20067 /* If we are on a path that performed a jump-op, this 20070 20068 * may skip a nospec patched-in after the jump. This can 20071 20069 * currently never happen because nospec_result is only ··· 20074 20072 * never skip the following insn. Still, add a warning 20075 20073 * to document this in case nospec_result is used 20076 20074 * elsewhere in the future. 20075 + * 20076 + * All non-branch instructions have a single 20077 + * fall-through edge. For these, nospec_result should 20078 + * already work. 20077 20079 */ 20078 - WARN_ON_ONCE(env->insn_idx != prev_insn_idx + 1); 20080 + if (verifier_bug_if(BPF_CLASS(insn->code) == BPF_JMP || 20081 + BPF_CLASS(insn->code) == BPF_JMP32, env, 20082 + "speculation barrier after jump instruction may not have the desired effect")) 20083 + return -EFAULT; 20079 20084 process_bpf_exit: 20080 20085 mark_verifier_state_scratched(env); 20081 20086 err = update_branch_counts(env, env->cur_state);
+4
tools/testing/selftests/bpf/progs/bpf_misc.h
··· 237 237 #define SPEC_V1 238 238 #endif 239 239 240 + #if defined(__TARGET_ARCH_x86) 241 + #define SPEC_V4 242 + #endif 243 + 240 244 #endif
+149
tools/testing/selftests/bpf/progs/verifier_unpriv.c
··· 801 801 : __clobber_all); 802 802 } 803 803 804 + SEC("socket") 805 + __description("unpriv: ldimm64 before Spectre v4 barrier") 806 + __success __success_unpriv 807 + __retval(0) 808 + #ifdef SPEC_V4 809 + __xlated_unpriv("r1 = 0x2020200005642020") /* should not matter */ 810 + __xlated_unpriv("*(u64 *)(r10 -8) = r1") 811 + __xlated_unpriv("nospec") 812 + #endif 813 + __naked void unpriv_ldimm64_spectre_v4(void) 814 + { 815 + asm volatile (" \ 816 + r1 = 0x2020200005642020 ll; \ 817 + *(u64 *)(r10 -8) = r1; \ 818 + r0 = 0; \ 819 + exit; \ 820 + " ::: __clobber_all); 821 + } 822 + 823 + SEC("socket") 824 + __description("unpriv: Spectre v1 and v4 barrier") 825 + __success __success_unpriv 826 + __retval(0) 827 + #ifdef SPEC_V1 828 + #ifdef SPEC_V4 829 + /* starts with r0 == r8 == r9 == 0 */ 830 + __xlated_unpriv("if r8 != 0x0 goto pc+1") 831 + __xlated_unpriv("goto pc+2") 832 + __xlated_unpriv("if r9 == 0x0 goto pc+4") 833 + __xlated_unpriv("r2 = r0") 834 + /* Following nospec required to prevent following dangerous `*(u64 *)(NOT_FP -64) 835 + * = r1` iff `if r9 == 0 goto pc+4` was mispredicted because of Spectre v1. The 836 + * test therefore ensures the Spectre-v4--induced nospec does not prevent the 837 + * Spectre-v1--induced speculative path from being fully analyzed. 838 + */ 839 + __xlated_unpriv("nospec") /* Spectre v1 */ 840 + __xlated_unpriv("*(u64 *)(r2 -64) = r1") /* could be used to leak r2 */ 841 + __xlated_unpriv("nospec") /* Spectre v4 */ 842 + #endif 843 + #endif 844 + __naked void unpriv_spectre_v1_and_v4(void) 845 + { 846 + asm volatile (" \ 847 + r1 = 0; \ 848 + *(u64*)(r10 - 8) = r1; \ 849 + r2 = r10; \ 850 + r2 += -8; \ 851 + r1 = %[map_hash_8b] ll; \ 852 + call %[bpf_map_lookup_elem]; \ 853 + r8 = r0; \ 854 + r2 = r10; \ 855 + r2 += -8; \ 856 + r1 = %[map_hash_8b] ll; \ 857 + call %[bpf_map_lookup_elem]; \ 858 + r9 = r0; \ 859 + r0 = r10; \ 860 + r1 = 0; \ 861 + r2 = r10; \ 862 + if r8 != 0 goto l0_%=; \ 863 + if r9 != 0 goto l0_%=; \ 864 + r0 = 0; \ 865 + l0_%=: if r8 != 0 goto l1_%=; \ 866 + goto l2_%=; \ 867 + l1_%=: if r9 == 0 goto l3_%=; \ 868 + r2 = r0; \ 869 + l2_%=: *(u64 *)(r2 -64) = r1; \ 870 + l3_%=: r0 = 0; \ 871 + exit; \ 872 + " : 873 + : __imm(bpf_map_lookup_elem), 874 + __imm_addr(map_hash_8b) 875 + : __clobber_all); 876 + } 877 + 878 + SEC("socket") 879 + __description("unpriv: Spectre v1 and v4 barrier (simple)") 880 + __success __success_unpriv 881 + __retval(0) 882 + #ifdef SPEC_V1 883 + #ifdef SPEC_V4 884 + __xlated_unpriv("if r8 != 0x0 goto pc+1") 885 + __xlated_unpriv("goto pc+2") 886 + __xlated_unpriv("goto pc-1") /* if r9 == 0 goto l3_%= */ 887 + __xlated_unpriv("goto pc-1") /* r2 = r0 */ 888 + __xlated_unpriv("nospec") 889 + __xlated_unpriv("*(u64 *)(r2 -64) = r1") 890 + __xlated_unpriv("nospec") 891 + #endif 892 + #endif 893 + __naked void unpriv_spectre_v1_and_v4_simple(void) 894 + { 895 + asm volatile (" \ 896 + r8 = 0; \ 897 + r9 = 0; \ 898 + r0 = r10; \ 899 + r1 = 0; \ 900 + r2 = r10; \ 901 + if r8 != 0 goto l0_%=; \ 902 + if r9 != 0 goto l0_%=; \ 903 + r0 = 0; \ 904 + l0_%=: if r8 != 0 goto l1_%=; \ 905 + goto l2_%=; \ 906 + l1_%=: if r9 == 0 goto l3_%=; \ 907 + r2 = r0; \ 908 + l2_%=: *(u64 *)(r2 -64) = r1; \ 909 + l3_%=: r0 = 0; \ 910 + exit; \ 911 + " ::: __clobber_all); 912 + } 913 + 914 + SEC("socket") 915 + __description("unpriv: ldimm64 before Spectre v1 and v4 barrier (simple)") 916 + __success __success_unpriv 917 + __retval(0) 918 + #ifdef SPEC_V1 919 + #ifdef SPEC_V4 920 + __xlated_unpriv("if r8 != 0x0 goto pc+1") 921 + __xlated_unpriv("goto pc+4") 922 + __xlated_unpriv("goto pc-1") /* if r9 == 0 goto l3_%= */ 923 + __xlated_unpriv("goto pc-1") /* r2 = r0 */ 924 + __xlated_unpriv("goto pc-1") /* r1 = 0x2020200005642020 ll */ 925 + __xlated_unpriv("goto pc-1") /* second part of ldimm64 */ 926 + __xlated_unpriv("nospec") 927 + __xlated_unpriv("*(u64 *)(r2 -64) = r1") 928 + __xlated_unpriv("nospec") 929 + #endif 930 + #endif 931 + __naked void unpriv_ldimm64_spectre_v1_and_v4_simple(void) 932 + { 933 + asm volatile (" \ 934 + r8 = 0; \ 935 + r9 = 0; \ 936 + r0 = r10; \ 937 + r1 = 0; \ 938 + r2 = r10; \ 939 + if r8 != 0 goto l0_%=; \ 940 + if r9 != 0 goto l0_%=; \ 941 + r0 = 0; \ 942 + l0_%=: if r8 != 0 goto l1_%=; \ 943 + goto l2_%=; \ 944 + l1_%=: if r9 == 0 goto l3_%=; \ 945 + r2 = r0; \ 946 + r1 = 0x2020200005642020 ll; \ 947 + l2_%=: *(u64 *)(r2 -64) = r1; \ 948 + l3_%=: r0 = 0; \ 949 + exit; \ 950 + " ::: __clobber_all); 951 + } 952 + 804 953 char _license[] SEC("license") = "GPL";