Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: Add Spectre v4 tests

Add the following tests:

1. A test with an (unimportant) ldimm64 (16 byte insn) and a
Spectre-v4--induced nospec that clarifies and serves as a basic
Spectre v4 test.

2. Make sure a Spectre v4 nospec_result does not prevent a Spectre v1
nospec from being added before the dangerous instruction (tests that
[1] is fixed).

3. Combine the two, which is the combination that triggers the warning
in [2]. This is because the unanalyzed stack write has nospec_result
set, but the ldimm64 (which was just analyzed) had incremented
insn_idx by 2. That violates the assertion that nospec_result is only
used after insns that increment insn_idx by 1 (i.e., stack writes).

[1] https://lore.kernel.org/bpf/4266fd5de04092aa4971cbef14f1b4b96961f432.camel@gmail.com/
[2] https://lore.kernel.org/bpf/685b3c1b.050a0220.2303ee.0010.GAE@google.com/

Signed-off-by: Luis Gerhorst <luis.gerhorst@fau.de>
Link: https://lore.kernel.org/r/20250705190908.1756862-3-luis.gerhorst@fau.de
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Authored by Luis Gerhorst and committed by Alexei Starovoitov
(commit 92974cef, parent dadb5910)

+153
+4
tools/testing/selftests/bpf/progs/bpf_misc.h
···
237 237     #define SPEC_V1
238 238     #endif
239 239
    240  +  #if defined(__TARGET_ARCH_x86)
    241  +  #define SPEC_V4
    242  +  #endif
    243  +
240 244     #endif
+149
tools/testing/selftests/bpf/progs/verifier_unpriv.c
···
801 801         : __clobber_all);
802 802     }
803 803
    804  +  SEC("socket")
    805  +  __description("unpriv: ldimm64 before Spectre v4 barrier")
    806  +  __success __success_unpriv
    807  +  __retval(0)
    808  +  #ifdef SPEC_V4
    809  +  __xlated_unpriv("r1 = 0x2020200005642020") /* should not matter */
    810  +  __xlated_unpriv("*(u64 *)(r10 -8) = r1")
    811  +  __xlated_unpriv("nospec")
    812  +  #endif
    813  +  __naked void unpriv_ldimm64_spectre_v4(void)
    814  +  {
    815  +      asm volatile ("                  \
    816  +      r1 = 0x2020200005642020 ll;      \
    817  +      *(u64 *)(r10 -8) = r1;           \
    818  +      r0 = 0;                          \
    819  +      exit;                            \
    820  +  "   ::: __clobber_all);
    821  +  }
    822  +
    823  +  SEC("socket")
    824  +  __description("unpriv: Spectre v1 and v4 barrier")
    825  +  __success __success_unpriv
    826  +  __retval(0)
    827  +  #ifdef SPEC_V1
    828  +  #ifdef SPEC_V4
    829  +  /* starts with r0 == r8 == r9 == 0 */
    830  +  __xlated_unpriv("if r8 != 0x0 goto pc+1")
    831  +  __xlated_unpriv("goto pc+2")
    832  +  __xlated_unpriv("if r9 == 0x0 goto pc+4")
    833  +  __xlated_unpriv("r2 = r0")
    834  +  /* Following nospec required to prevent following dangerous `*(u64 *)(NOT_FP -64)
    835  +   * = r1` iff `if r9 == 0 goto pc+4` was mispredicted because of Spectre v1. The
    836  +   * test therefore ensures the Spectre-v4--induced nospec does not prevent the
    837  +   * Spectre-v1--induced speculative path from being fully analyzed.
    838  +   */
    839  +  __xlated_unpriv("nospec") /* Spectre v1 */
    840  +  __xlated_unpriv("*(u64 *)(r2 -64) = r1") /* could be used to leak r2 */
    841  +  __xlated_unpriv("nospec") /* Spectre v4 */
    842  +  #endif
    843  +  #endif
    844  +  __naked void unpriv_spectre_v1_and_v4(void)
    845  +  {
    846  +      asm volatile ("                  \
    847  +      r1 = 0;                          \
    848  +      *(u64*)(r10 - 8) = r1;           \
    849  +      r2 = r10;                        \
    850  +      r2 += -8;                        \
    851  +      r1 = %[map_hash_8b] ll;          \
    852  +      call %[bpf_map_lookup_elem];     \
    853  +      r8 = r0;                         \
    854  +      r2 = r10;                        \
    855  +      r2 += -8;                        \
    856  +      r1 = %[map_hash_8b] ll;          \
    857  +      call %[bpf_map_lookup_elem];     \
    858  +      r9 = r0;                         \
    859  +      r0 = r10;                        \
    860  +      r1 = 0;                          \
    861  +      r2 = r10;                        \
    862  +      if r8 != 0 goto l0_%=;           \
    863  +      if r9 != 0 goto l0_%=;           \
    864  +      r0 = 0;                          \
    865  +  l0_%=:  if r8 != 0 goto l1_%=;       \
    866  +      goto l2_%=;                      \
    867  +  l1_%=:  if r9 == 0 goto l3_%=;       \
    868  +      r2 = r0;                         \
    869  +  l2_%=:  *(u64 *)(r2 -64) = r1;       \
    870  +  l3_%=:  r0 = 0;                      \
    871  +      exit;                            \
    872  +  "   :
    873  +      : __imm(bpf_map_lookup_elem),
    874  +        __imm_addr(map_hash_8b)
    875  +      : __clobber_all);
    876  +  }
    877  +
    878  +  SEC("socket")
    879  +  __description("unpriv: Spectre v1 and v4 barrier (simple)")
    880  +  __success __success_unpriv
    881  +  __retval(0)
    882  +  #ifdef SPEC_V1
    883  +  #ifdef SPEC_V4
    884  +  __xlated_unpriv("if r8 != 0x0 goto pc+1")
    885  +  __xlated_unpriv("goto pc+2")
    886  +  __xlated_unpriv("goto pc-1") /* if r9 == 0 goto l3_%= */
    887  +  __xlated_unpriv("goto pc-1") /* r2 = r0 */
    888  +  __xlated_unpriv("nospec")
    889  +  __xlated_unpriv("*(u64 *)(r2 -64) = r1")
    890  +  __xlated_unpriv("nospec")
    891  +  #endif
    892  +  #endif
    893  +  __naked void unpriv_spectre_v1_and_v4_simple(void)
    894  +  {
    895  +      asm volatile ("                  \
    896  +      r8 = 0;                          \
    897  +      r9 = 0;                          \
    898  +      r0 = r10;                        \
    899  +      r1 = 0;                          \
    900  +      r2 = r10;                        \
    901  +      if r8 != 0 goto l0_%=;           \
    902  +      if r9 != 0 goto l0_%=;           \
    903  +      r0 = 0;                          \
    904  +  l0_%=:  if r8 != 0 goto l1_%=;       \
    905  +      goto l2_%=;                      \
    906  +  l1_%=:  if r9 == 0 goto l3_%=;       \
    907  +      r2 = r0;                         \
    908  +  l2_%=:  *(u64 *)(r2 -64) = r1;       \
    909  +  l3_%=:  r0 = 0;                      \
    910  +      exit;                            \
    911  +  "   ::: __clobber_all);
    912  +  }
    913  +
    914  +  SEC("socket")
    915  +  __description("unpriv: ldimm64 before Spectre v1 and v4 barrier (simple)")
    916  +  __success __success_unpriv
    917  +  __retval(0)
    918  +  #ifdef SPEC_V1
    919  +  #ifdef SPEC_V4
    920  +  __xlated_unpriv("if r8 != 0x0 goto pc+1")
    921  +  __xlated_unpriv("goto pc+4")
    922  +  __xlated_unpriv("goto pc-1") /* if r9 == 0 goto l3_%= */
    923  +  __xlated_unpriv("goto pc-1") /* r2 = r0 */
    924  +  __xlated_unpriv("goto pc-1") /* r1 = 0x2020200005642020 ll */
    925  +  __xlated_unpriv("goto pc-1") /* second part of ldimm64 */
    926  +  __xlated_unpriv("nospec")
    927  +  __xlated_unpriv("*(u64 *)(r2 -64) = r1")
    928  +  __xlated_unpriv("nospec")
    929  +  #endif
    930  +  #endif
    931  +  __naked void unpriv_ldimm64_spectre_v1_and_v4_simple(void)
    932  +  {
    933  +      asm volatile ("                  \
    934  +      r8 = 0;                          \
    935  +      r9 = 0;                          \
    936  +      r0 = r10;                        \
    937  +      r1 = 0;                          \
    938  +      r2 = r10;                        \
    939  +      if r8 != 0 goto l0_%=;           \
    940  +      if r9 != 0 goto l0_%=;           \
    941  +      r0 = 0;                          \
    942  +  l0_%=:  if r8 != 0 goto l1_%=;       \
    943  +      goto l2_%=;                      \
    944  +  l1_%=:  if r9 == 0 goto l3_%=;       \
    945  +      r2 = r0;                         \
    946  +      r1 = 0x2020200005642020 ll;      \
    947  +  l2_%=:  *(u64 *)(r2 -64) = r1;       \
    948  +  l3_%=:  r0 = 0;                      \
    949  +      exit;                            \
    950  +  "   ::: __clobber_all);
    951  +  }
    952  +
804 953     char _license[] SEC("license") = "GPL";