Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Pull bpf fixes from Alexei Starovoitov:

- Fix how linked registers track zero extension of subregisters (Daniel
Borkmann)

- Fix unsound scalar fork for OR instructions (Daniel Wade)

- Fix exception exit lock check for subprogs (Ihor Solodrai)

- Fix undefined behavior in interpreter for SDIV/SMOD instructions
(Jenny Guanni Qu)

- Release module's BTF when module is unloaded (Kumar Kartikeya
Dwivedi)

- Fix constant blinding for PROBE_MEM32 instructions (Sachin Kumar)

- Reset register ID for END instructions to prevent incorrect value
tracking (Yazhou Tang)

* tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
selftests/bpf: Add test cases for sync_linked_regs regarding zext propagation
bpf: Fix sync_linked_regs regarding BPF_ADD_CONST32 zext propagation
selftests/bpf: Add tests for maybe_fork_scalars() OR vs AND handling
bpf: Fix unsound scalar forking in maybe_fork_scalars() for BPF_OR
selftests/bpf: Add tests for sdiv32/smod32 with INT_MIN dividend
bpf: Fix undefined behavior in interpreter sdiv/smod for INT_MIN
selftests/bpf: Add tests for bpf_throw lock leak from subprogs
bpf: Fix exception exit lock checking for subprogs
bpf: Release module BTF IDR before module unload
selftests/bpf: Fix pkg-config call on static builds
bpf: Fix constant blinding for PROBE_MEM32 stores
selftests/bpf: Add test for BPF_END register ID reset
bpf: Reset register ID for BPF_END value tracking

+416 -24
+20 -4
kernel/bpf/btf.c
··· 1787 1787 * of the _bh() version. 1788 1788 */ 1789 1789 spin_lock_irqsave(&btf_idr_lock, flags); 1790 - idr_remove(&btf_idr, btf->id); 1790 + if (btf->id) { 1791 + idr_remove(&btf_idr, btf->id); 1792 + /* 1793 + * Clear the id here to make this function idempotent, since it will get 1794 + * called a couple of times for module BTFs: on module unload, and then 1795 + * the final btf_put(). btf_alloc_id() starts IDs with 1, so we can use 1796 + * 0 as sentinel value. 1797 + */ 1798 + WRITE_ONCE(btf->id, 0); 1799 + } 1791 1800 spin_unlock_irqrestore(&btf_idr_lock, flags); 1792 1801 } 1793 1802 ··· 8124 8115 { 8125 8116 const struct btf *btf = filp->private_data; 8126 8117 8127 - seq_printf(m, "btf_id:\t%u\n", btf->id); 8118 + seq_printf(m, "btf_id:\t%u\n", READ_ONCE(btf->id)); 8128 8119 } 8129 8120 #endif 8130 8121 ··· 8206 8197 if (copy_from_user(&info, uinfo, info_copy)) 8207 8198 return -EFAULT; 8208 8199 8209 - info.id = btf->id; 8200 + info.id = READ_ONCE(btf->id); 8210 8201 ubtf = u64_to_user_ptr(info.btf); 8211 8202 btf_copy = min_t(u32, btf->data_size, info.btf_size); 8212 8203 if (copy_to_user(ubtf, btf->data, btf_copy)) ··· 8269 8260 8270 8261 u32 btf_obj_id(const struct btf *btf) 8271 8262 { 8272 - return btf->id; 8263 + return READ_ONCE(btf->id); 8273 8264 } 8274 8265 8275 8266 bool btf_is_kernel(const struct btf *btf) ··· 8391 8382 if (btf_mod->module != module) 8392 8383 continue; 8393 8384 8385 + /* 8386 + * For modules, we do the freeing of BTF IDR as soon as 8387 + * module goes away to disable BTF discovery, since the 8388 + * btf_try_get_module() on such BTFs will fail. This may 8389 + * be called again on btf_put(), but it's ok to do so. 8390 + */ 8391 + btf_free_id(btf_mod->btf); 8394 8392 list_del(&btf_mod->list); 8395 8393 if (btf_mod->sysfs_attr) 8396 8394 sysfs_remove_bin_file(btf_kobj, btf_mod->sysfs_attr);
+35 -8
kernel/bpf/core.c
··· 1422 1422 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1423 1423 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off); 1424 1424 break; 1425 + 1426 + case BPF_ST | BPF_PROBE_MEM32 | BPF_DW: 1427 + case BPF_ST | BPF_PROBE_MEM32 | BPF_W: 1428 + case BPF_ST | BPF_PROBE_MEM32 | BPF_H: 1429 + case BPF_ST | BPF_PROBE_MEM32 | BPF_B: 1430 + *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ 1431 + from->imm); 1432 + *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1433 + /* 1434 + * Cannot use BPF_STX_MEM() macro here as it 1435 + * hardcodes BPF_MEM mode, losing PROBE_MEM32 1436 + * and breaking arena addressing in the JIT. 1437 + */ 1438 + *to++ = (struct bpf_insn) { 1439 + .code = BPF_STX | BPF_PROBE_MEM32 | 1440 + BPF_SIZE(from->code), 1441 + .dst_reg = from->dst_reg, 1442 + .src_reg = BPF_REG_AX, 1443 + .off = from->off, 1444 + }; 1445 + break; 1425 1446 } 1426 1447 out: 1427 1448 return to - to_buff; ··· 1757 1736 } 1758 1737 1759 1738 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 1739 + /* Absolute value of s32 without undefined behavior for S32_MIN */ 1740 + static u32 abs_s32(s32 x) 1741 + { 1742 + return x >= 0 ? 
(u32)x : -(u32)x; 1743 + } 1744 + 1760 1745 /** 1761 1746 * ___bpf_prog_run - run eBPF program on a given context 1762 1747 * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers ··· 1927 1900 DST = do_div(AX, (u32) SRC); 1928 1901 break; 1929 1902 case 1: 1930 - AX = abs((s32)DST); 1931 - AX = do_div(AX, abs((s32)SRC)); 1903 + AX = abs_s32((s32)DST); 1904 + AX = do_div(AX, abs_s32((s32)SRC)); 1932 1905 if ((s32)DST < 0) 1933 1906 DST = (u32)-AX; 1934 1907 else ··· 1955 1928 DST = do_div(AX, (u32) IMM); 1956 1929 break; 1957 1930 case 1: 1958 - AX = abs((s32)DST); 1959 - AX = do_div(AX, abs((s32)IMM)); 1931 + AX = abs_s32((s32)DST); 1932 + AX = do_div(AX, abs_s32((s32)IMM)); 1960 1933 if ((s32)DST < 0) 1961 1934 DST = (u32)-AX; 1962 1935 else ··· 1982 1955 DST = (u32) AX; 1983 1956 break; 1984 1957 case 1: 1985 - AX = abs((s32)DST); 1986 - do_div(AX, abs((s32)SRC)); 1958 + AX = abs_s32((s32)DST); 1959 + do_div(AX, abs_s32((s32)SRC)); 1987 1960 if (((s32)DST < 0) == ((s32)SRC < 0)) 1988 1961 DST = (u32)AX; 1989 1962 else ··· 2009 1982 DST = (u32) AX; 2010 1983 break; 2011 1984 case 1: 2012 - AX = abs((s32)DST); 2013 - do_div(AX, abs((s32)IMM)); 1985 + AX = abs_s32((s32)DST); 1986 + do_div(AX, abs_s32((s32)IMM)); 2014 1987 if (((s32)DST < 0) == ((s32)IMM < 0)) 2015 1988 DST = (u32)AX; 2016 1989 else
+25 -8
kernel/bpf/verifier.c
··· 15910 15910 /* Apply bswap if alu64 or switch between big-endian and little-endian machines */ 15911 15911 bool need_bswap = alu64 || (to_le == is_big_endian); 15912 15912 15913 + /* 15914 + * If the register is mutated, manually reset its scalar ID to break 15915 + * any existing ties and avoid incorrect bounds propagation. 15916 + */ 15917 + if (need_bswap || insn->imm == 16 || insn->imm == 32) 15918 + dst_reg->id = 0; 15919 + 15913 15920 if (need_bswap) { 15914 15921 if (insn->imm == 16) 15915 15922 dst_reg->var_off = tnum_bswap16(dst_reg->var_off); ··· 15999 15992 else 16000 15993 return 0; 16001 15994 16002 - branch = push_stack(env, env->insn_idx + 1, env->insn_idx, false); 15995 + branch = push_stack(env, env->insn_idx, env->insn_idx, false); 16003 15996 if (IS_ERR(branch)) 16004 15997 return PTR_ERR(branch); 16005 15998 ··· 17415 17408 continue; 17416 17409 if ((reg->id & ~BPF_ADD_CONST) != (known_reg->id & ~BPF_ADD_CONST)) 17417 17410 continue; 17411 + /* 17412 + * Skip mixed 32/64-bit links: the delta relationship doesn't 17413 + * hold across different ALU widths. 17414 + */ 17415 + if (((reg->id ^ known_reg->id) & BPF_ADD_CONST) == BPF_ADD_CONST) 17416 + continue; 17418 17417 if ((!(reg->id & BPF_ADD_CONST) && !(known_reg->id & BPF_ADD_CONST)) || 17419 17418 reg->off == known_reg->off) { 17420 17419 s32 saved_subreg_def = reg->subreg_def; ··· 17448 17435 scalar32_min_max_add(reg, &fake_reg); 17449 17436 scalar_min_max_add(reg, &fake_reg); 17450 17437 reg->var_off = tnum_add(reg->var_off, fake_reg.var_off); 17451 - if (known_reg->id & BPF_ADD_CONST32) 17438 + if ((reg->id | known_reg->id) & BPF_ADD_CONST32) 17452 17439 zext_32_to_64(reg); 17453 17440 reg_bounds_sync(reg); 17454 17441 } ··· 19876 19863 * Also verify that new value satisfies old value range knowledge. 
19877 19864 */ 19878 19865 19879 - /* ADD_CONST mismatch: different linking semantics */ 19880 - if ((rold->id & BPF_ADD_CONST) && !(rcur->id & BPF_ADD_CONST)) 19881 - return false; 19882 - 19883 - if (rold->id && !(rold->id & BPF_ADD_CONST) && (rcur->id & BPF_ADD_CONST)) 19866 + /* 19867 + * ADD_CONST flags must match exactly: BPF_ADD_CONST32 and 19868 + * BPF_ADD_CONST64 have different linking semantics in 19869 + * sync_linked_regs() (alu32 zero-extends, alu64 does not), 19870 + * so pruning across different flag types is unsafe. 19871 + */ 19872 + if (rold->id && 19873 + (rold->id & BPF_ADD_CONST) != (rcur->id & BPF_ADD_CONST)) 19884 19874 return false; 19885 19875 19886 19876 /* Both have offset linkage: offsets must match */ ··· 20920 20904 * state when it exits. 20921 20905 */ 20922 20906 int err = check_resource_leak(env, exception_exit, 20923 - !env->cur_state->curframe, 20907 + exception_exit || !env->cur_state->curframe, 20908 + exception_exit ? "bpf_throw" : 20924 20909 "BPF_EXIT instruction in main prog"); 20925 20910 if (err) 20926 20911 return err;
+1 -1
tools/testing/selftests/bpf/Makefile
··· 409 409 CC="$(HOSTCC)" LD="$(HOSTLD)" AR="$(HOSTAR)" \ 410 410 LIBBPF_INCLUDE=$(HOST_INCLUDE_DIR) \ 411 411 EXTRA_LDFLAGS='$(SAN_LDFLAGS) $(EXTRA_LDFLAGS)' \ 412 - HOSTPKG_CONFIG=$(PKG_CONFIG) \ 412 + HOSTPKG_CONFIG='$(PKG_CONFIG)' \ 413 413 OUTPUT=$(HOST_BUILD_DIR)/resolve_btfids/ BPFOBJ=$(HOST_BPFOBJ) 414 414 415 415 # Get Clang's default includes on this system, as opposed to those seen by
+53 -3
tools/testing/selftests/bpf/progs/exceptions_fail.c
··· 8 8 #include "bpf_experimental.h" 9 9 10 10 extern void bpf_rcu_read_lock(void) __ksym; 11 + extern void bpf_rcu_read_unlock(void) __ksym; 12 + extern void bpf_preempt_disable(void) __ksym; 13 + extern void bpf_preempt_enable(void) __ksym; 14 + extern void bpf_local_irq_save(unsigned long *) __ksym; 15 + extern void bpf_local_irq_restore(unsigned long *) __ksym; 11 16 12 17 #define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8))) 13 18 ··· 136 131 } 137 132 138 133 SEC("?tc") 139 - __failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_rcu_read_lock-ed region") 134 + __failure __msg("bpf_throw cannot be used inside bpf_rcu_read_lock-ed region") 140 135 int reject_with_rcu_read_lock(void *ctx) 141 136 { 142 137 bpf_rcu_read_lock(); ··· 152 147 } 153 148 154 149 SEC("?tc") 155 - __failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_rcu_read_lock-ed region") 150 + __failure __msg("bpf_throw cannot be used inside bpf_rcu_read_lock-ed region") 156 151 int reject_subprog_with_rcu_read_lock(void *ctx) 157 152 { 158 153 bpf_rcu_read_lock(); 159 - return throwing_subprog(ctx); 154 + throwing_subprog(ctx); 155 + bpf_rcu_read_unlock(); 156 + return 0; 160 157 } 161 158 162 159 static bool rbless(struct bpf_rb_node *n1, const struct bpf_rb_node *n2) ··· 350 343 bpf_loop(5, loop_cb1, NULL, 0); 351 344 else 352 345 bpf_loop(5, loop_cb2, NULL, 0); 346 + return 0; 347 + } 348 + 349 + __noinline static int always_throws(void) 350 + { 351 + bpf_throw(0); 352 + return 0; 353 + } 354 + 355 + __noinline static int rcu_lock_then_throw(void) 356 + { 357 + bpf_rcu_read_lock(); 358 + bpf_throw(0); 359 + return 0; 360 + } 361 + 362 + SEC("?tc") 363 + __failure __msg("bpf_throw cannot be used inside bpf_rcu_read_lock-ed region") 364 + int reject_subprog_rcu_lock_throw(void *ctx) 365 + { 366 + rcu_lock_then_throw(); 367 + return 0; 368 + } 369 + 370 + SEC("?tc") 371 + __failure __msg("bpf_throw cannot be used inside 
bpf_preempt_disable-ed region") 372 + int reject_subprog_throw_preempt_lock(void *ctx) 373 + { 374 + bpf_preempt_disable(); 375 + always_throws(); 376 + bpf_preempt_enable(); 377 + return 0; 378 + } 379 + 380 + SEC("?tc") 381 + __failure __msg("bpf_throw cannot be used inside bpf_local_irq_save-ed region") 382 + int reject_subprog_throw_irq_lock(void *ctx) 383 + { 384 + unsigned long flags; 385 + 386 + bpf_local_irq_save(&flags); 387 + always_throws(); 388 + bpf_local_irq_restore(&flags); 353 389 return 0; 354 390 } 355 391
+94
tools/testing/selftests/bpf/progs/verifier_bounds.c
··· 2037 2037 : __clobber_all); 2038 2038 } 2039 2039 2040 + SEC("socket") 2041 + __description("maybe_fork_scalars: OR with constant rejects OOB") 2042 + __failure __msg("invalid access to map value") 2043 + __naked void or_scalar_fork_rejects_oob(void) 2044 + { 2045 + asm volatile (" \ 2046 + r1 = 0; \ 2047 + *(u64*)(r10 - 8) = r1; \ 2048 + r2 = r10; \ 2049 + r2 += -8; \ 2050 + r1 = %[map_hash_8b] ll; \ 2051 + call %[bpf_map_lookup_elem]; \ 2052 + if r0 == 0 goto l0_%=; \ 2053 + r9 = r0; \ 2054 + r6 = *(u64*)(r9 + 0); \ 2055 + r6 s>>= 63; \ 2056 + r6 |= 8; \ 2057 + /* r6 is -1 (current) or 8 (pushed) */ \ 2058 + if r6 s< 0 goto l0_%=; \ 2059 + /* pushed path: r6 = 8, OOB for value_size=8 */ \ 2060 + r9 += r6; \ 2061 + r0 = *(u8*)(r9 + 0); \ 2062 + l0_%=: r0 = 0; \ 2063 + exit; \ 2064 + " : 2065 + : __imm(bpf_map_lookup_elem), 2066 + __imm_addr(map_hash_8b) 2067 + : __clobber_all); 2068 + } 2069 + 2070 + SEC("socket") 2071 + __description("maybe_fork_scalars: AND with constant still works") 2072 + __success __retval(0) 2073 + __naked void and_scalar_fork_still_works(void) 2074 + { 2075 + asm volatile (" \ 2076 + r1 = 0; \ 2077 + *(u64*)(r10 - 8) = r1; \ 2078 + r2 = r10; \ 2079 + r2 += -8; \ 2080 + r1 = %[map_hash_8b] ll; \ 2081 + call %[bpf_map_lookup_elem]; \ 2082 + if r0 == 0 goto l0_%=; \ 2083 + r9 = r0; \ 2084 + r6 = *(u64*)(r9 + 0); \ 2085 + r6 s>>= 63; \ 2086 + r6 &= 4; \ 2087 + /* \ 2088 + * r6 is 0 (pushed, 0&4==0) or 4 (current) \ 2089 + * both within value_size=8 \ 2090 + */ \ 2091 + if r6 s< 0 goto l0_%=; \ 2092 + r9 += r6; \ 2093 + r0 = *(u8*)(r9 + 0); \ 2094 + l0_%=: r0 = 0; \ 2095 + exit; \ 2096 + " : 2097 + : __imm(bpf_map_lookup_elem), 2098 + __imm_addr(map_hash_8b) 2099 + : __clobber_all); 2100 + } 2101 + 2102 + SEC("socket") 2103 + __description("maybe_fork_scalars: OR with constant allows in-bounds") 2104 + __success __retval(0) 2105 + __naked void or_scalar_fork_allows_inbounds(void) 2106 + { 2107 + asm volatile (" \ 2108 + r1 = 0; \ 2109 + 
*(u64*)(r10 - 8) = r1; \ 2110 + r2 = r10; \ 2111 + r2 += -8; \ 2112 + r1 = %[map_hash_8b] ll; \ 2113 + call %[bpf_map_lookup_elem]; \ 2114 + if r0 == 0 goto l0_%=; \ 2115 + r9 = r0; \ 2116 + r6 = *(u64*)(r9 + 0); \ 2117 + r6 s>>= 63; \ 2118 + r6 |= 4; \ 2119 + /* \ 2120 + * r6 is -1 (current) or 4 (pushed) \ 2121 + * pushed path: r6 = 4, within value_size=8 \ 2122 + */ \ 2123 + if r6 s< 0 goto l0_%=; \ 2124 + r9 += r6; \ 2125 + r0 = *(u8*)(r9 + 0); \ 2126 + l0_%=: r0 = 0; \ 2127 + exit; \ 2128 + " : 2129 + : __imm(bpf_map_lookup_elem), 2130 + __imm_addr(map_hash_8b) 2131 + : __clobber_all); 2132 + } 2133 + 2040 2134 char _license[] SEC("license") = "GPL";
+22
tools/testing/selftests/bpf/progs/verifier_bswap.c
··· 91 91 BSWAP_RANGE_TEST(le64_range, "le64", 0x3f00, 0x3f000000000000) 92 92 #endif 93 93 94 + SEC("socket") 95 + __description("BSWAP, reset reg id") 96 + __failure __msg("math between fp pointer and register with unbounded min value is not allowed") 97 + __naked void bswap_reset_reg_id(void) 98 + { 99 + asm volatile (" \ 100 + call %[bpf_ktime_get_ns]; \ 101 + r1 = r0; \ 102 + r0 = be16 r0; \ 103 + if r0 != 1 goto l0_%=; \ 104 + r2 = r10; \ 105 + r2 += -512; \ 106 + r2 += r1; \ 107 + *(u8 *)(r2 + 0) = 0; \ 108 + l0_%=: \ 109 + r0 = 0; \ 110 + exit; \ 111 + " : 112 + : __imm(bpf_ktime_get_ns) 113 + : __clobber_all); 114 + } 115 + 94 116 #else 95 117 96 118 SEC("socket")
+108
tools/testing/selftests/bpf/progs/verifier_linked_scalars.c
··· 348 348 : __clobber_all); 349 349 } 350 350 351 + /* 352 + * Test that sync_linked_regs() checks reg->id (the linked target register) 353 + * for BPF_ADD_CONST32 rather than known_reg->id (the branch register). 354 + */ 355 + SEC("socket") 356 + __success 357 + __naked void scalars_alu32_zext_linked_reg(void) 358 + { 359 + asm volatile (" \ 360 + call %[bpf_get_prandom_u32]; \ 361 + w6 = w0; /* r6 in [0, 0xFFFFFFFF] */ \ 362 + r7 = r6; /* linked: same id as r6 */ \ 363 + w7 += 1; /* alu32: r7.id |= BPF_ADD_CONST32 */ \ 364 + r8 = 0xFFFFffff ll; \ 365 + if r6 < r8 goto l0_%=; \ 366 + /* r6 in [0xFFFFFFFF, 0xFFFFFFFF] */ \ 367 + /* sync_linked_regs: known_reg=r6, reg=r7 */ \ 368 + /* CPU: w7 = (u32)(0xFFFFFFFF + 1) = 0, zext -> r7 = 0 */ \ 369 + /* With fix: r7 64-bit = [0, 0] (zext applied) */ \ 370 + /* Without fix: r7 64-bit = [0x100000000] (no zext) */ \ 371 + r7 >>= 32; \ 372 + if r7 == 0 goto l0_%=; \ 373 + r0 /= 0; /* unreachable with fix */ \ 374 + l0_%=: \ 375 + r0 = 0; \ 376 + exit; \ 377 + " : 378 + : __imm(bpf_get_prandom_u32) 379 + : __clobber_all); 380 + } 381 + 382 + /* 383 + * Test that sync_linked_regs() skips propagation when one register used 384 + * alu32 (BPF_ADD_CONST32) and the other used alu64 (BPF_ADD_CONST64). 385 + * The delta relationship doesn't hold across different ALU widths. 
386 + */ 387 + SEC("socket") 388 + __failure __msg("div by zero") 389 + __naked void scalars_alu32_alu64_cross_type(void) 390 + { 391 + asm volatile (" \ 392 + call %[bpf_get_prandom_u32]; \ 393 + w6 = w0; /* r6 in [0, 0xFFFFFFFF] */ \ 394 + r7 = r6; /* linked: same id as r6 */ \ 395 + w7 += 1; /* alu32: BPF_ADD_CONST32, delta = 1 */ \ 396 + r8 = r6; /* linked: same id as r6 */ \ 397 + r8 += 2; /* alu64: BPF_ADD_CONST64, delta = 2 */ \ 398 + r9 = 0xFFFFffff ll; \ 399 + if r7 < r9 goto l0_%=; \ 400 + /* r7 = 0xFFFFFFFF */ \ 401 + /* sync: known_reg=r7 (ADD_CONST32), reg=r8 (ADD_CONST64) */ \ 402 + /* Without fix: r8 = zext(0xFFFFFFFF + 1) = 0 */ \ 403 + /* With fix: r8 stays [2, 0x100000001] (r8 >= 2) */ \ 404 + if r8 > 0 goto l1_%=; \ 405 + goto l0_%=; \ 406 + l1_%=: \ 407 + r0 /= 0; /* div by zero */ \ 408 + l0_%=: \ 409 + r0 = 0; \ 410 + exit; \ 411 + " : 412 + : __imm(bpf_get_prandom_u32) 413 + : __clobber_all); 414 + } 415 + 416 + /* 417 + * Test that regsafe() prevents pruning when two paths reach the same program 418 + * point with linked registers carrying different ADD_CONST flags (one 419 + * BPF_ADD_CONST32 from alu32, another BPF_ADD_CONST64 from alu64). 420 + */ 421 + SEC("socket") 422 + __failure __msg("div by zero") 423 + __flag(BPF_F_TEST_STATE_FREQ) 424 + __naked void scalars_alu32_alu64_regsafe_pruning(void) 425 + { 426 + asm volatile (" \ 427 + call %[bpf_get_prandom_u32]; \ 428 + w6 = w0; /* r6 in [0, 0xFFFFFFFF] */ \ 429 + r7 = r6; /* linked: same id as r6 */ \ 430 + /* Get another random value for the path branch */ \ 431 + call %[bpf_get_prandom_u32]; \ 432 + if r0 > 0 goto l_pathb_%=; \ 433 + /* Path A: alu32 */ \ 434 + w7 += 1; /* BPF_ADD_CONST32, delta = 1 */\ 435 + goto l_merge_%=; \ 436 + l_pathb_%=: \ 437 + /* Path B: alu64 */ \ 438 + r7 += 1; /* BPF_ADD_CONST64, delta = 1 */\ 439 + l_merge_%=: \ 440 + /* Merge point: regsafe() compares path B against cached path A. 
*/ \ 441 + /* Narrow r6 to trigger sync_linked_regs for r7 */ \ 442 + r9 = 0xFFFFffff ll; \ 443 + if r6 < r9 goto l0_%=; \ 444 + /* r6 = 0xFFFFFFFF */ \ 445 + /* sync: r7 = 0xFFFFFFFF + 1 = 0x100000000 */ \ 446 + /* Path A: zext -> r7 = 0 */ \ 447 + /* Path B: no zext -> r7 = 0x100000000 */ \ 448 + r7 >>= 32; \ 449 + if r7 == 0 goto l0_%=; \ 450 + r0 /= 0; /* div by zero on path B */ \ 451 + l0_%=: \ 452 + r0 = 0; \ 453 + exit; \ 454 + " : 455 + : __imm(bpf_get_prandom_u32) 456 + : __clobber_all); 457 + } 458 + 351 459 SEC("socket") 352 460 __success 353 461 void alu32_negative_offset(void)
+58
tools/testing/selftests/bpf/progs/verifier_sdiv.c
··· 1209 1209 : __clobber_all); 1210 1210 } 1211 1211 1212 + SEC("socket") 1213 + __description("SDIV32, INT_MIN divided by 2, imm") 1214 + __success __success_unpriv __retval(-1073741824) 1215 + __naked void sdiv32_int_min_div_2_imm(void) 1216 + { 1217 + asm volatile (" \ 1218 + w0 = %[int_min]; \ 1219 + w0 s/= 2; \ 1220 + exit; \ 1221 + " : 1222 + : __imm_const(int_min, INT_MIN) 1223 + : __clobber_all); 1224 + } 1225 + 1226 + SEC("socket") 1227 + __description("SDIV32, INT_MIN divided by 2, reg") 1228 + __success __success_unpriv __retval(-1073741824) 1229 + __naked void sdiv32_int_min_div_2_reg(void) 1230 + { 1231 + asm volatile (" \ 1232 + w0 = %[int_min]; \ 1233 + w1 = 2; \ 1234 + w0 s/= w1; \ 1235 + exit; \ 1236 + " : 1237 + : __imm_const(int_min, INT_MIN) 1238 + : __clobber_all); 1239 + } 1240 + 1241 + SEC("socket") 1242 + __description("SMOD32, INT_MIN modulo 2, imm") 1243 + __success __success_unpriv __retval(0) 1244 + __naked void smod32_int_min_mod_2_imm(void) 1245 + { 1246 + asm volatile (" \ 1247 + w0 = %[int_min]; \ 1248 + w0 s%%= 2; \ 1249 + exit; \ 1250 + " : 1251 + : __imm_const(int_min, INT_MIN) 1252 + : __clobber_all); 1253 + } 1254 + 1255 + SEC("socket") 1256 + __description("SMOD32, INT_MIN modulo -2, imm") 1257 + __success __success_unpriv __retval(0) 1258 + __naked void smod32_int_min_mod_neg2_imm(void) 1259 + { 1260 + asm volatile (" \ 1261 + w0 = %[int_min]; \ 1262 + w0 s%%= -2; \ 1263 + exit; \ 1264 + " : 1265 + : __imm_const(int_min, INT_MIN) 1266 + : __clobber_all); 1267 + } 1268 + 1269 + 1212 1270 #else 1213 1271 1214 1272 SEC("socket")