Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

bpf: Rename BPF_XADD and prepare to encode other atomics in .imm

A subsequent patch will add additional atomic operations. These new
operations will use the same opcode field as the existing XADD, with
the immediate discriminating different operations.

In preparation, rename the instruction mode to BPF_ATOMIC and start
calling the zero immediate BPF_ADD.

This is possible (it doesn't break existing valid BPF progs) because the
immediate field is currently reserved MBZ (must be zero) and BPF_ADD is zero.

All uses are removed from the tree, but the BPF_XADD definition is
kept around to avoid breaking builds for people including kernel
headers.

Signed-off-by: Brendan Jackman <jackmanb@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Björn Töpel <bjorn.topel@gmail.com>
Link: https://lore.kernel.org/bpf/20210114181751.768687-5-jackmanb@google.com
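
A minimal, self-contained sketch of why the rename is binary-compatible
(editorial, not part of the patch; constants and the struct layout are copied
from the UAPI header touched below): the old XADD spelling and the new
BPF_ATOMIC | BPF_ADD spelling encode the identical instruction.

#include <assert.h>
#include <stdint.h>

/* Constants as defined in include/uapi/linux/bpf.h. */
#define BPF_STX    0x03
#define BPF_W      0x00
#define BPF_ADD    0x00
#define BPF_XADD   0xc0 /* legacy name */
#define BPF_ATOMIC 0xc0 /* new name; the operation now lives in insn->imm */

struct bpf_insn {
	uint8_t code;      /* opcode */
	uint8_t dst_reg:4; /* dest register */
	uint8_t src_reg:4; /* source register */
	int16_t off;       /* signed offset */
	int32_t imm;       /* signed immediate constant */
};

int main(void)
{
	/* Old spelling: the immediate was reserved and had to be zero. */
	struct bpf_insn old_xadd = {
		.code = BPF_STX | BPF_W | BPF_XADD,
		.dst_reg = 1, .src_reg = 2, .off = 0, .imm = 0,
	};
	/* New spelling: same bytes, the immediate now names the operation. */
	struct bpf_insn new_atomic = {
		.code = BPF_STX | BPF_W | BPF_ATOMIC,
		.dst_reg = 1, .src_reg = 2, .off = 0, .imm = BPF_ADD,
	};
	assert(old_xadd.code == new_atomic.code &&
	       old_xadd.imm == new_atomic.imm);
	return 0;
}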

Authored by Brendan Jackman, committed by Alexei Starovoitov
commit 91c960b0 (parent e5f02cac)

+291 -152
+19 -11
Documentation/networking/filter.rst
···
 Mode modifier is one of::

-  BPF_IMM   0x00 /* used for 32-bit mov in classic BPF and 64-bit in eBPF */
-  BPF_ABS   0x20
-  BPF_IND   0x40
-  BPF_MEM   0x60
-  BPF_LEN   0x80 /* classic BPF only, reserved in eBPF */
-  BPF_MSH   0xa0 /* classic BPF only, reserved in eBPF */
-  BPF_XADD  0xc0 /* eBPF only, exclusive add */
+  BPF_IMM     0x00 /* used for 32-bit mov in classic BPF and 64-bit in eBPF */
+  BPF_ABS     0x20
+  BPF_IND     0x40
+  BPF_MEM     0x60
+  BPF_LEN     0x80 /* classic BPF only, reserved in eBPF */
+  BPF_MSH     0xa0 /* classic BPF only, reserved in eBPF */
+  BPF_ATOMIC  0xc0 /* eBPF only, atomic operations */

 eBPF has two non-generic instructions: (BPF_ABS | <size> | BPF_LD) and
 (BPF_IND | <size> | BPF_LD) which are used to access packet data.
···
   BPF_MEM | <size> | BPF_STX:  *(size *) (dst_reg + off) = src_reg
   BPF_MEM | <size> | BPF_ST:   *(size *) (dst_reg + off) = imm32
   BPF_MEM | <size> | BPF_LDX:  dst_reg = *(size *) (src_reg + off)
-  BPF_XADD | BPF_W  | BPF_STX: lock xadd *(u32 *)(dst_reg + off16) += src_reg
-  BPF_XADD | BPF_DW | BPF_STX: lock xadd *(u64 *)(dst_reg + off16) += src_reg

-Where size is one of: BPF_B or BPF_H or BPF_W or BPF_DW. Note that 1 and
-2 byte atomic increments are not supported.
+Where size is one of: BPF_B or BPF_H or BPF_W or BPF_DW.
+
+It also includes atomic operations, which use the immediate field for extra
+encoding.
+
+   .imm = BPF_ADD, .code = BPF_ATOMIC | BPF_W  | BPF_STX: lock xadd *(u32 *)(dst_reg + off16) += src_reg
+   .imm = BPF_ADD, .code = BPF_ATOMIC | BPF_DW | BPF_STX: lock xadd *(u64 *)(dst_reg + off16) += src_reg
+
+Note that 1 and 2 byte atomic operations are not supported.
+
+You may encounter BPF_XADD - this is a legacy name for BPF_ATOMIC, referring to
+the exclusive-add operation encoded when the immediate field is zero.

 eBPF has one 16-byte instruction: BPF_LD | BPF_DW | BPF_IMM which consists
 of two consecutive ``struct bpf_insn`` 8-byte blocks and interpreted as single
+3 -4
arch/arm/net/bpf_jit_32.c
···
 		}
 		emit_str_r(dst_lo, tmp2, off, ctx, BPF_SIZE(code));
 		break;
-	/* STX XADD: lock *(u32 *)(dst + off) += src */
-	case BPF_STX | BPF_XADD | BPF_W:
-	/* STX XADD: lock *(u64 *)(dst + off) += src */
-	case BPF_STX | BPF_XADD | BPF_DW:
+	/* Atomic ops */
+	case BPF_STX | BPF_ATOMIC | BPF_W:
+	case BPF_STX | BPF_ATOMIC | BPF_DW:
 		goto notyet;
 	/* STX: *(size *)(dst + off) = src */
 	case BPF_STX | BPF_MEM | BPF_W:
+12 -4
arch/arm64/net/bpf_jit_comp.c
···
 		}
 		break;

-	/* STX XADD: lock *(u32 *)(dst + off) += src */
-	case BPF_STX | BPF_XADD | BPF_W:
-	/* STX XADD: lock *(u64 *)(dst + off) += src */
-	case BPF_STX | BPF_XADD | BPF_DW:
+	case BPF_STX | BPF_ATOMIC | BPF_W:
+	case BPF_STX | BPF_ATOMIC | BPF_DW:
+		if (insn->imm != BPF_ADD) {
+			pr_err_once("unknown atomic op code %02x\n", insn->imm);
+			return -EINVAL;
+		}
+
+		/* STX XADD: lock *(u32 *)(dst + off) += src
+		 * and
+		 * STX XADD: lock *(u64 *)(dst + off) += src
+		 */
+
 		if (!off) {
 			reg = dst;
 		} else {
+8 -3
arch/mips/net/ebpf_jit.c
···
 	case BPF_STX | BPF_H | BPF_MEM:
 	case BPF_STX | BPF_W | BPF_MEM:
 	case BPF_STX | BPF_DW | BPF_MEM:
-	case BPF_STX | BPF_W | BPF_XADD:
-	case BPF_STX | BPF_DW | BPF_XADD:
+	case BPF_STX | BPF_W | BPF_ATOMIC:
+	case BPF_STX | BPF_DW | BPF_ATOMIC:
 		if (insn->dst_reg == BPF_REG_10) {
 			ctx->flags |= EBPF_SEEN_FP;
 			dst = MIPS_R_SP;
···
 		src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
 		if (src < 0)
 			return src;
-		if (BPF_MODE(insn->code) == BPF_XADD) {
+		if (BPF_MODE(insn->code) == BPF_ATOMIC) {
+			if (insn->imm != BPF_ADD) {
+				pr_err("ATOMIC OP %02x NOT HANDLED\n", insn->imm);
+				return -EINVAL;
+			}
+
 			/*
 			 * If mem_off does not fit within the 9 bit ll/sc
 			 * instruction immediate field, use a temp reg.
+20 -5
arch/powerpc/net/bpf_jit_comp64.c
···
 			break;

 		/*
-		 * BPF_STX XADD (atomic_add)
+		 * BPF_STX ATOMIC (atomic ops)
 		 */
-		/* *(u32 *)(dst + off) += src */
-		case BPF_STX | BPF_XADD | BPF_W:
+		case BPF_STX | BPF_ATOMIC | BPF_W:
+			if (insn->imm != BPF_ADD) {
+				pr_err_ratelimited(
+					"eBPF filter atomic op code %02x (@%d) unsupported\n",
+					code, i);
+				return -ENOTSUPP;
+			}
+
+			/* *(u32 *)(dst + off) += src */
+
 			/* Get EA into TMP_REG_1 */
 			EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
 			tmp_idx = ctx->idx * 4;
···
 			/* we're done if this succeeded */
 			PPC_BCC_SHORT(COND_NE, tmp_idx);
 			break;
-		/* *(u64 *)(dst + off) += src */
-		case BPF_STX | BPF_XADD | BPF_DW:
+		case BPF_STX | BPF_ATOMIC | BPF_DW:
+			if (insn->imm != BPF_ADD) {
+				pr_err_ratelimited(
+					"eBPF filter atomic op code %02x (@%d) unsupported\n",
+					code, i);
+				return -ENOTSUPP;
+			}
+			/* *(u64 *)(dst + off) += src */
+
 			EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
 			tmp_idx = ctx->idx * 4;
 			EMIT(PPC_RAW_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0));
+16 -4
arch/riscv/net/bpf_jit_comp32.c
···
 	const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
 	const s8 *rs = bpf_get_reg64(src, tmp2, ctx);

-	if (mode == BPF_XADD && size != BPF_W)
+	if (mode == BPF_ATOMIC && size != BPF_W)
 		return -1;

 	emit_imm(RV_REG_T0, off, ctx);
···
 	case BPF_MEM:
 		emit(rv_sw(RV_REG_T0, 0, lo(rs)), ctx);
 		break;
-	case BPF_XADD:
+	case BPF_ATOMIC: /* Only BPF_ADD supported */
 		emit(rv_amoadd_w(RV_REG_ZERO, lo(rs), RV_REG_T0, 0, 0),
 		     ctx);
 		break;
···
 	case BPF_STX | BPF_MEM | BPF_H:
 	case BPF_STX | BPF_MEM | BPF_W:
 	case BPF_STX | BPF_MEM | BPF_DW:
-	case BPF_STX | BPF_XADD | BPF_W:
 		if (BPF_CLASS(code) == BPF_ST) {
 			emit_imm32(tmp2, imm, ctx);
 			src = tmp2;
···
 			return -1;
 		break;

+	case BPF_STX | BPF_ATOMIC | BPF_W:
+		if (insn->imm != BPF_ADD) {
+			pr_info_once(
+				"bpf-jit: not supported: atomic operation %02x ***\n",
+				insn->imm);
+			return -EFAULT;
+		}
+
+		if (emit_store_r64(dst, src, off, ctx, BPF_SIZE(code),
+				   BPF_MODE(code)))
+			return -1;
+		break;
+
 	/* No hardware support for 8-byte atomics in RV32. */
-	case BPF_STX | BPF_XADD | BPF_DW:
+	case BPF_STX | BPF_ATOMIC | BPF_DW:
 		/* Fallthrough. */

 notsupported:
+12 -4
arch/riscv/net/bpf_jit_comp64.c
···
 		emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
 		emit_sd(RV_REG_T1, 0, rs, ctx);
 		break;
-	/* STX XADD: lock *(u32 *)(dst + off) += src */
-	case BPF_STX | BPF_XADD | BPF_W:
-	/* STX XADD: lock *(u64 *)(dst + off) += src */
-	case BPF_STX | BPF_XADD | BPF_DW:
+	case BPF_STX | BPF_ATOMIC | BPF_W:
+	case BPF_STX | BPF_ATOMIC | BPF_DW:
+		if (insn->imm != BPF_ADD) {
+			pr_err("bpf-jit: not supported: atomic operation %02x ***\n",
+			       insn->imm);
+			return -EINVAL;
+		}
+
+		/* atomic_add: lock *(u32 *)(dst + off) += src
+		 * atomic_add: lock *(u64 *)(dst + off) += src
+		 */
+
 		if (off) {
 			if (is_12b_int(off)) {
 				emit_addi(RV_REG_T1, rd, off, ctx);
+16 -11
arch/s390/net/bpf_jit_comp.c
···
 		jit->seen |= SEEN_MEM;
 		break;
 	/*
-	 * BPF_STX XADD (atomic_add)
+	 * BPF_ATOMIC
 	 */
-	case BPF_STX | BPF_XADD | BPF_W: /* *(u32 *)(dst + off) += src */
-		/* laal %w0,%src,off(%dst) */
-		EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W0, src_reg,
-			      dst_reg, off);
-		jit->seen |= SEEN_MEM;
-		break;
-	case BPF_STX | BPF_XADD | BPF_DW: /* *(u64 *)(dst + off) += src */
-		/* laalg %w0,%src,off(%dst) */
-		EMIT6_DISP_LH(0xeb000000, 0x00ea, REG_W0, src_reg,
-			      dst_reg, off);
+	case BPF_STX | BPF_ATOMIC | BPF_DW:
+	case BPF_STX | BPF_ATOMIC | BPF_W:
+		if (insn->imm != BPF_ADD) {
+			pr_err("Unknown atomic operation %02x\n", insn->imm);
+			return -1;
+		}
+
+		/* *(u32/u64 *)(dst + off) += src
+		 *
+		 * BPF_W:  laal  %w0,%src,off(%dst)
+		 * BPF_DW: laalg %w0,%src,off(%dst)
+		 */
+		EMIT6_DISP_LH(0xeb000000,
+			      BPF_SIZE(insn->code) == BPF_W ? 0x00fa : 0x00ea,
+			      REG_W0, src_reg, dst_reg, off);
 		jit->seen |= SEEN_MEM;
 		break;
 	/*
+14 -3
arch/sparc/net/bpf_jit_comp_64.c
···
 		break;
 	}

-	/* STX XADD: lock *(u32 *)(dst + off) += src */
-	case BPF_STX | BPF_XADD | BPF_W: {
+	case BPF_STX | BPF_ATOMIC | BPF_W: {
 		const u8 tmp = bpf2sparc[TMP_REG_1];
 		const u8 tmp2 = bpf2sparc[TMP_REG_2];
 		const u8 tmp3 = bpf2sparc[TMP_REG_3];
+
+		if (insn->imm != BPF_ADD) {
+			pr_err_once("unknown atomic op %02x\n", insn->imm);
+			return -EINVAL;
+		}
+
+		/* lock *(u32 *)(dst + off) += src */

 		if (insn->dst_reg == BPF_REG_FP)
 			ctx->saw_frame_pointer = true;
···
 		break;
 	}
 	/* STX XADD: lock *(u64 *)(dst + off) += src */
-	case BPF_STX | BPF_XADD | BPF_DW: {
+	case BPF_STX | BPF_ATOMIC | BPF_DW: {
 		const u8 tmp = bpf2sparc[TMP_REG_1];
 		const u8 tmp2 = bpf2sparc[TMP_REG_2];
 		const u8 tmp3 = bpf2sparc[TMP_REG_3];
+
+		if (insn->imm != BPF_ADD) {
+			pr_err_once("unknown atomic op %02x\n", insn->imm);
+			return -EINVAL;
+		}

 		if (insn->dst_reg == BPF_REG_FP)
 			ctx->saw_frame_pointer = true;
+34 -12
arch/x86/net/bpf_jit_comp.c
···
 	*pprog = prog;
 }

+static int emit_atomic(u8 **pprog, u8 atomic_op,
+		       u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
+{
+	u8 *prog = *pprog;
+	int cnt = 0;
+
+	EMIT1(0xF0); /* lock prefix */
+
+	maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);
+
+	/* emit opcode */
+	switch (atomic_op) {
+	case BPF_ADD:
+		/* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
+		EMIT1(simple_alu_opcodes[atomic_op]);
+		break;
+	default:
+		pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
+		return -EFAULT;
+	}
+
+	emit_insn_suffix(&prog, dst_reg, src_reg, off);
+
+	*pprog = prog;
+	return 0;
+}
+
 static bool ex_handler_bpf(const struct exception_table_entry *x,
 			   struct pt_regs *regs, int trapnr,
 			   unsigned long error_code, unsigned long fault_addr)
···
 	int i, cnt = 0, excnt = 0;
 	int proglen = 0;
 	u8 *prog = temp;
+	int err;

 	detect_reg_usage(insn, insn_cnt, callee_regs_used,
 			 &tail_call_seen);
···
 			}
 			break;

-			/* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
-		case BPF_STX | BPF_XADD | BPF_W:
-			/* Emit 'lock add dword ptr [rax + off], eax' */
-			if (is_ereg(dst_reg) || is_ereg(src_reg))
-				EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
-			else
-				EMIT2(0xF0, 0x01);
-			goto xadd;
-		case BPF_STX | BPF_XADD | BPF_DW:
-			EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
-xadd:			emit_modrm_dstoff(&prog, dst_reg, src_reg, insn->off);
+		case BPF_STX | BPF_ATOMIC | BPF_W:
+		case BPF_STX | BPF_ATOMIC | BPF_DW:
+			err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
+					  insn->off, BPF_SIZE(insn->code));
+			if (err)
+				return err;
 			break;

 			/* call */
+2 -4
arch/x86/net/bpf_jit_comp32.c
···
 			return -EFAULT;
 		}
 		break;
-	/* STX XADD: lock *(u32 *)(dst + off) += src */
-	case BPF_STX | BPF_XADD | BPF_W:
-	/* STX XADD: lock *(u64 *)(dst + off) += src */
-	case BPF_STX | BPF_XADD | BPF_DW:
+	case BPF_STX | BPF_ATOMIC | BPF_W:
+	case BPF_STX | BPF_ATOMIC | BPF_DW:
 		goto notyet;
 	case BPF_JMP | BPF_EXIT:
 		if (seen_exit) {
+10 -4
drivers/net/ethernet/netronome/nfp/bpf/jit.c
···
 	return 0;
 }

-static int mem_xadd4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int mem_atomic4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
+	if (meta->insn.imm != BPF_ADD)
+		return -EOPNOTSUPP;
+
 	return mem_xadd(nfp_prog, meta, false);
 }

-static int mem_xadd8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int mem_atomic8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 {
+	if (meta->insn.imm != BPF_ADD)
+		return -EOPNOTSUPP;
+
 	return mem_xadd(nfp_prog, meta, true);
 }
···
 	[BPF_STX | BPF_MEM | BPF_H] =	mem_stx2,
 	[BPF_STX | BPF_MEM | BPF_W] =	mem_stx4,
 	[BPF_STX | BPF_MEM | BPF_DW] =	mem_stx8,
-	[BPF_STX | BPF_XADD | BPF_W] =	mem_xadd4,
-	[BPF_STX | BPF_XADD | BPF_DW] =	mem_xadd8,
+	[BPF_STX | BPF_ATOMIC | BPF_W] =	mem_atomic4,
+	[BPF_STX | BPF_ATOMIC | BPF_DW] =	mem_atomic8,
 	[BPF_ST | BPF_MEM | BPF_B] =	mem_st1,
 	[BPF_ST | BPF_MEM | BPF_H] =	mem_st2,
 	[BPF_ST | BPF_MEM | BPF_W] =	mem_st4,
+2 -2
drivers/net/ethernet/netronome/nfp/bpf/main.h
···
 	return is_mbpf_classic_store(meta) && meta->ptr.type == PTR_TO_PACKET;
 }

-static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta)
+static inline bool is_mbpf_atomic(const struct nfp_insn_meta *meta)
 {
-	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD);
+	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_ATOMIC);
 }

 static inline bool is_mbpf_mul(const struct nfp_insn_meta *meta)
+10 -5
drivers/net/ethernet/netronome/nfp/bpf/verifier.c
···
 		pr_vlog(env, "map writes not supported\n");
 		return -EOPNOTSUPP;
 	}
-	if (is_mbpf_xadd(meta)) {
+	if (is_mbpf_atomic(meta)) {
 		err = nfp_bpf_map_mark_used(env, meta, reg,
 					    NFP_MAP_USE_ATOMIC_CNT);
 		if (err)
···
 }

 static int
-nfp_bpf_check_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
-		   struct bpf_verifier_env *env)
+nfp_bpf_check_atomic(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+		     struct bpf_verifier_env *env)
 {
 	const struct bpf_reg_state *sreg = cur_regs(env) + meta->insn.src_reg;
 	const struct bpf_reg_state *dreg = cur_regs(env) + meta->insn.dst_reg;
+
+	if (meta->insn.imm != BPF_ADD) {
+		pr_vlog(env, "atomic op not implemented: %d\n", meta->insn.imm);
+		return -EOPNOTSUPP;
+	}

 	if (dreg->type != PTR_TO_MAP_VALUE) {
 		pr_vlog(env, "atomic add not to a map value pointer: %d\n",
···
 	if (is_mbpf_store(meta))
 		return nfp_bpf_check_store(nfp_prog, meta, env);

-	if (is_mbpf_xadd(meta))
-		return nfp_bpf_check_xadd(nfp_prog, meta, env);
+	if (is_mbpf_atomic(meta))
+		return nfp_bpf_check_atomic(nfp_prog, meta, env);

 	if (is_mbpf_alu(meta))
 		return nfp_bpf_check_alu(nfp_prog, meta, env);
+12 -4
include/linux/filter.h
···
 		.off   = OFF,					\
 		.imm   = 0 })

-/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */

-#define BPF_STX_XADD(SIZE, DST, SRC, OFF)			\
+/*
+ * Atomic operations:
+ *
+ *   BPF_ADD       *(uint *) (dst_reg + off16) += src_reg
+ */
+
+#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF)			\
 	((struct bpf_insn) {					\
-		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,	\
+		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC,	\
 		.dst_reg = DST,					\
 		.src_reg = SRC,					\
 		.off   = OFF,					\
-		.imm   = 0 })
+		.imm   = OP })
+
+/* Legacy alias */
+#define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF)

 /* Memory store, *(uint *) (dst_reg + off16) = imm32 */
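A short usage sketch (editorial; call sites using the new macro appear in the
selftest and samples hunks later in this diff), assuming a kernel-tree
compilation unit with this header on the include path. The legacy macro now
forwards to the new one, which is what keeps existing BPF_STX_XADD callers
building:

#include <linux/filter.h>	/* the header patched above */

/* Both initializers expand to byte-identical instructions. */
struct bpf_insn legacy = BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8);
struct bpf_insn modern = BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_0, -8);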
+3 -2
include/uapi/linux/bpf.h
···

 /* ld/ldx fields */
 #define BPF_DW		0x18	/* double word (64-bit) */
-#define BPF_XADD	0xc0	/* exclusive add */
+#define BPF_ATOMIC	0xc0	/* atomic memory ops - op type in immediate */
+#define BPF_XADD	0xc0	/* exclusive add - legacy name */

 /* alu/jmp fields */
 #define BPF_MOV		0xb0	/* mov reg to reg */
···
  *		running simultaneously.
  *
  *		A user should care about the synchronization by himself.
- *		For example, by using the **BPF_STX_XADD** instruction to alter
+ *		For example, by using the **BPF_ATOMIC** instructions to alter
  *		the shared data.
  *	Return
  *		A pointer to the local storage area.
+22 -9
kernel/bpf/core.c
···
 	INSN_3(STX, MEM,  H),			\
 	INSN_3(STX, MEM,  W),			\
 	INSN_3(STX, MEM,  DW),			\
-	INSN_3(STX, XADD, W),			\
-	INSN_3(STX, XADD, DW),			\
+	INSN_3(STX, ATOMIC, W),			\
+	INSN_3(STX, ATOMIC, DW),		\
 	/*   Immediate based. */		\
 	INSN_3(ST, MEM, B),			\
 	INSN_3(ST, MEM, H),			\
···
 	LDX_PROBE(DW, 8)
 #undef LDX_PROBE

-	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
-		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
-			   (DST + insn->off));
+	STX_ATOMIC_W:
+		switch (IMM) {
+		case BPF_ADD:
+			/* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
+			atomic_add((u32) SRC, (atomic_t *)(unsigned long)
+				   (DST + insn->off));
+			break;
+		default:
+			goto default_label;
+		}
 		CONT;
-	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
-		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
-			     (DST + insn->off));
+	STX_ATOMIC_DW:
+		switch (IMM) {
+		case BPF_ADD:
+			/* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
+			atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
+				     (DST + insn->off));
+			break;
+		default:
+			goto default_label;
+		}
 		CONT;

 default_label:
···
 	 *
 	 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
 	 */
-	pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
+	pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
+		insn->code, insn->imm);
 	BUG_ON(1);
 	return 0;
 }
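The interpreter hunk above is the crux of the series: dispatch now keys on the
immediate as well as the opcode. A self-contained userspace model of the new
STX_ATOMIC_W dispatch (editorial; the BPF_AND case is hypothetical here,
standing in for operations a subsequent patch adds - this patch implements
only BPF_ADD):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define BPF_ADD 0x00
#define BPF_AND 0x50	/* BPF ALU-op value; not yet a valid atomic here */

/* Model of STX_ATOMIC_W: the immediate selects the atomic operation. */
static int atomic_w(int32_t imm, _Atomic uint32_t *dst, uint32_t src)
{
	switch (imm) {
	case BPF_ADD:
		atomic_fetch_add(dst, src);	/* lock xadd *(u32 *)dst += src */
		return 0;
	case BPF_AND:	/* hypothetical follow-up operation */
		atomic_fetch_and(dst, src);
		return 0;
	default:
		return -1;	/* unknown atomic op: reject, as default_label does */
	}
}

int main(void)
{
	_Atomic uint32_t v = 0x12;

	atomic_w(BPF_ADD, &v, 0x10);
	printf("0x%x\n", (unsigned)v);	/* 0x22, matching the selftest below */
	return 0;
}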
+4 -2
kernel/bpf/disasm.c
···
 			bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
 			insn->dst_reg,
 			insn->off, insn->src_reg);
-		else if (BPF_MODE(insn->code) == BPF_XADD)
+		else if (BPF_MODE(insn->code) == BPF_ATOMIC &&
+			 insn->imm == BPF_ADD) {
 			verbose(cbs->private_data, "(%02x) lock *(%s *)(r%d %+d) += r%d\n",
 				insn->code,
 				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
 				insn->dst_reg, insn->off,
 				insn->src_reg);
-		else
+		} else {
 			verbose(cbs->private_data, "BUG_%02x\n", insn->code);
+		}
 	} else if (class == BPF_ST) {
 		if (BPF_MODE(insn->code) != BPF_MEM) {
 			verbose(cbs->private_data, "BUG_st_%02x\n", insn->code);
+14 -10
kernel/bpf/verifier.c
···
 	return err;
 }

-static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
+static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
 {
 	int err;

-	if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
-	    insn->imm != 0) {
-		verbose(env, "BPF_XADD uses reserved fields\n");
+	if (insn->imm != BPF_ADD) {
+		verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm);
+		return -EINVAL;
+	}
+
+	if (BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) {
+		verbose(env, "invalid atomic operand size\n");
 		return -EINVAL;
 	}
···
 	    is_pkt_reg(env, insn->dst_reg) ||
 	    is_flow_key_reg(env, insn->dst_reg) ||
 	    is_sk_reg(env, insn->dst_reg)) {
-		verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
+		verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n",
 			insn->dst_reg,
 			reg_type_str[reg_state(env, insn->dst_reg)->type]);
 		return -EACCES;
 	}

-	/* check whether atomic_add can read the memory */
+	/* check whether we can read the memory */
 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
 			       BPF_SIZE(insn->code), BPF_READ, -1, true);
 	if (err)
 		return err;

-	/* check whether atomic_add can write into the same memory */
+	/* check whether we can write into the same memory */
 	return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
 				BPF_SIZE(insn->code), BPF_WRITE, -1, true);
 }
···
 	} else if (class == BPF_STX) {
 		enum bpf_reg_type *prev_dst_type, dst_reg_type;

-		if (BPF_MODE(insn->code) == BPF_XADD) {
-			err = check_xadd(env, env->insn_idx, insn);
+		if (BPF_MODE(insn->code) == BPF_ATOMIC) {
+			err = check_atomic(env, env->insn_idx, insn);
 			if (err)
 				return err;
 			env->insn_idx++;
···

 	if (BPF_CLASS(insn->code) == BPF_STX &&
 	    ((BPF_MODE(insn->code) != BPF_MEM &&
-	      BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
+	      BPF_MODE(insn->code) != BPF_ATOMIC) || insn->imm != 0)) {
 		verbose(env, "BPF_STX uses reserved fields\n");
 		return -EINVAL;
 	}
+7 -7
lib/test_bpf.c
···
 		{ { 0, 0xffffffff } },
 		.stack_depth = 40,
 	},
-	/* BPF_STX | BPF_XADD | BPF_W/DW */
+	/* BPF_STX | BPF_ATOMIC | BPF_W/DW */
 	{
 		"STX_XADD_W: Test: 0x12 + 0x10 = 0x22",
 		.u.insns_int = {
 			BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
 			BPF_ST_MEM(BPF_W, R10, -40, 0x10),
-			BPF_STX_XADD(BPF_W, R10, R0, -40),
+			BPF_ATOMIC_OP(BPF_W, BPF_ADD, R10, R0, -40),
 			BPF_LDX_MEM(BPF_W, R0, R10, -40),
 			BPF_EXIT_INSN(),
 		},
···
 			BPF_ALU64_REG(BPF_MOV, R1, R10),
 			BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
 			BPF_ST_MEM(BPF_W, R10, -40, 0x10),
-			BPF_STX_XADD(BPF_W, R10, R0, -40),
+			BPF_ATOMIC_OP(BPF_W, BPF_ADD, R10, R0, -40),
 			BPF_ALU64_REG(BPF_MOV, R0, R10),
 			BPF_ALU64_REG(BPF_SUB, R0, R1),
 			BPF_EXIT_INSN(),
···
 		.u.insns_int = {
 			BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
 			BPF_ST_MEM(BPF_W, R10, -40, 0x10),
-			BPF_STX_XADD(BPF_W, R10, R0, -40),
+			BPF_ATOMIC_OP(BPF_W, BPF_ADD, R10, R0, -40),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
···
 		.u.insns_int = {
 			BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
 			BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
-			BPF_STX_XADD(BPF_DW, R10, R0, -40),
+			BPF_ATOMIC_OP(BPF_DW, BPF_ADD, R10, R0, -40),
 			BPF_LDX_MEM(BPF_DW, R0, R10, -40),
 			BPF_EXIT_INSN(),
 		},
···
 			BPF_ALU64_REG(BPF_MOV, R1, R10),
 			BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
 			BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
-			BPF_STX_XADD(BPF_DW, R10, R0, -40),
+			BPF_ATOMIC_OP(BPF_DW, BPF_ADD, R10, R0, -40),
 			BPF_ALU64_REG(BPF_MOV, R0, R10),
 			BPF_ALU64_REG(BPF_SUB, R0, R1),
 			BPF_EXIT_INSN(),
···
 		.u.insns_int = {
 			BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
 			BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
-			BPF_STX_XADD(BPF_DW, R10, R0, -40),
+			BPF_ATOMIC_OP(BPF_DW, BPF_ADD, R10, R0, -40),
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
+2 -2
samples/bpf/bpf_insn.h
···

 #define BPF_STX_XADD(SIZE, DST, SRC, OFF)			\
 	((struct bpf_insn) {					\
-		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,	\
+		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC,	\
 		.dst_reg = DST,					\
 		.src_reg = SRC,					\
 		.off   = OFF,					\
-		.imm   = 0 })
+		.imm   = BPF_ADD })

 /* Memory store, *(uint *) (dst_reg + off16) = imm32 */
+4 -4
samples/bpf/cookie_uid_helper_example.c
···
 		 */
 		BPF_MOV64_REG(BPF_REG_9, BPF_REG_0),
 		BPF_MOV64_IMM(BPF_REG_1, 1),
-		BPF_STX_XADD(BPF_DW, BPF_REG_9, BPF_REG_1,
-				offsetof(struct stats, packets)),
+		BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_9, BPF_REG_1,
+			      offsetof(struct stats, packets)),
 		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
 				offsetof(struct __sk_buff, len)),
-		BPF_STX_XADD(BPF_DW, BPF_REG_9, BPF_REG_1,
-				offsetof(struct stats, bytes)),
+		BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_9, BPF_REG_1,
+			      offsetof(struct stats, bytes)),
 		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
 				offsetof(struct __sk_buff, len)),
 		BPF_EXIT_INSN(),
+1 -1
samples/bpf/sock_example.c
···
 		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
 		BPF_MOV64_IMM(BPF_REG_1, 1), /* r1 = 1 */
-		BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
+		BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
 		BPF_MOV64_IMM(BPF_REG_0, 0), /* r0 = 0 */
 		BPF_EXIT_INSN(),
+3 -2
samples/bpf/test_cgrp2_attach.c
···
 		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
 		BPF_MOV64_IMM(BPF_REG_1, 1), /* r1 = 1 */
-		BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
+		BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),

 		/* Count bytes */
 		BPF_MOV64_IMM(BPF_REG_0, MAP_KEY_BYTES), /* r0 = 1 */
···
 		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
 		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6, offsetof(struct __sk_buff, len)), /* r1 = skb->len */
-		BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
+
+		BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),

 		BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
 		BPF_EXIT_INSN(),
+11 -4
tools/include/linux/filter.h
···
 		.off   = OFF,					\
 		.imm   = 0 })

-/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
+/*
+ * Atomic operations:
+ *
+ *   BPF_ADD       *(uint *) (dst_reg + off16) += src_reg
+ */

-#define BPF_STX_XADD(SIZE, DST, SRC, OFF)			\
+#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF)			\
 	((struct bpf_insn) {					\
-		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,	\
+		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC,	\
 		.dst_reg = DST,					\
 		.src_reg = SRC,					\
 		.off   = OFF,					\
-		.imm   = 0 })
+		.imm   = OP })
+
+/* Legacy alias */
+#define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF)

 /* Memory store, *(uint *) (dst_reg + off16) = imm32 */
+3 -2
tools/include/uapi/linux/bpf.h
···

 /* ld/ldx fields */
 #define BPF_DW		0x18	/* double word (64-bit) */
-#define BPF_XADD	0xc0	/* exclusive add */
+#define BPF_ATOMIC	0xc0	/* atomic memory ops - op type in immediate */
+#define BPF_XADD	0xc0	/* exclusive add - legacy name */

 /* alu/jmp fields */
 #define BPF_MOV		0xb0	/* mov reg to reg */
···
  *		running simultaneously.
  *
  *		A user should care about the synchronization by himself.
- *		For example, by using the **BPF_STX_XADD** instruction to alter
+ *		For example, by using the **BPF_ATOMIC** instructions to alter
  *		the shared data.
  *	Return
  *		A pointer to the local storage area.
+2 -2
tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c
···
 		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
 		BPF_MOV64_IMM(BPF_REG_1, val), /* r1 = 1 */
-		BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_0, BPF_REG_1, 0, 0), /* xadd r0 += r1 */
+		BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),

 		BPF_LD_MAP_FD(BPF_REG_1, cgroup_storage_fd),
 		BPF_MOV64_IMM(BPF_REG_2, 0),
 		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
 		BPF_MOV64_IMM(BPF_REG_1, val),
-		BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_0, BPF_REG_1, 0, 0),
+		BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),

 		BPF_LD_MAP_FD(BPF_REG_1, percpu_cgroup_storage_fd),
 		BPF_MOV64_IMM(BPF_REG_2, 0),
+1 -1
tools/testing/selftests/bpf/test_cgroup_storage.c
···
 		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
 			     BPF_FUNC_get_local_storage),
 		BPF_MOV64_IMM(BPF_REG_1, 1),
-		BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+		BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),
 		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
 		BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x1),
 		BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+3 -4
tools/testing/selftests/bpf/verifier/ctx.c
···
 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 },
 {
-	"context stores via XADD",
+	"context stores via BPF_ATOMIC",
 	.insns = {
 	BPF_MOV64_IMM(BPF_REG_0, 0),
-	BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
-		     BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
+	BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_1, BPF_REG_0, offsetof(struct __sk_buff, mark)),
 	BPF_EXIT_INSN(),
 	},
-	.errstr = "BPF_XADD stores into R1 ctx is not allowed",
+	.errstr = "BPF_ATOMIC stores into R1 ctx is not allowed",
 	.result = REJECT,
 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 },
+2 -2
tools/testing/selftests/bpf/verifier/direct_packet_access.c
···
 	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
 	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
-	BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
+	BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_4, BPF_REG_5, 0),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
 	BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
 	BPF_MOV64_IMM(BPF_REG_0, 0),
···
 	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
 	BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
-	BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
+	BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_4, -8),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
 	BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
 	BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
+5 -5
tools/testing/selftests/bpf/verifier/leak_ptr.c
···
 	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
 		    offsetof(struct __sk_buff, cb[0])),
 	BPF_LD_MAP_FD(BPF_REG_2, 0),
-	BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
-		     offsetof(struct __sk_buff, cb[0])),
+	BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_1, BPF_REG_2,
+		      offsetof(struct __sk_buff, cb[0])),
 	BPF_EXIT_INSN(),
 	},
 	.errstr_unpriv = "R2 leaks addr into mem",
 	.result_unpriv = REJECT,
 	.result = REJECT,
-	.errstr = "BPF_XADD stores into R1 ctx is not allowed",
+	.errstr = "BPF_ATOMIC stores into R1 ctx is not allowed",
 },
 {
 	"leak pointer into ctx 2",
···
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
 		    offsetof(struct __sk_buff, cb[0])),
-	BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
-		     offsetof(struct __sk_buff, cb[0])),
+	BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_1, BPF_REG_10,
+		      offsetof(struct __sk_buff, cb[0])),
 	BPF_EXIT_INSN(),
 	},
 	.errstr_unpriv = "R10 leaks addr into mem",
 	.result_unpriv = REJECT,
 	.result = REJECT,
-	.errstr = "BPF_XADD stores into R1 ctx is not allowed",
+	.errstr = "BPF_ATOMIC stores into R1 ctx is not allowed",
 },
 {
 	"leak pointer into ctx 3",
···
 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
 	BPF_MOV64_IMM(BPF_REG_3, 0),
 	BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
-	BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+	BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_6, 0),
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_EXIT_INSN(),
 	},
+2 -2
tools/testing/selftests/bpf/verifier/meta_access.c
···
 	BPF_MOV64_IMM(BPF_REG_5, 42),
 	BPF_MOV64_IMM(BPF_REG_6, 24),
 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
-	BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+	BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_6, -8),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
 	BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
 	BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5),
···
 	BPF_MOV64_IMM(BPF_REG_5, 42),
 	BPF_MOV64_IMM(BPF_REG_6, 24),
 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
-	BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+	BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_6, -8),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8),
 	BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6),
 	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5),
+2 -1
tools/testing/selftests/bpf/verifier/unpriv.c
···
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
 	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
 	BPF_MOV64_IMM(BPF_REG_0, 1),
-	BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10, BPF_REG_0, -8, 0),
+	BPF_RAW_INSN(BPF_STX | BPF_ATOMIC | BPF_DW,
+		     BPF_REG_10, BPF_REG_0, -8, BPF_ADD),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc),
 	BPF_EXIT_INSN(),
+1 -1
tools/testing/selftests/bpf/verifier/value_illegal_alu.c
···
 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
 	BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
-	BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
+	BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_2, BPF_REG_3, 0),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
 	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
 	BPF_EXIT_INSN(),
+9 -9
tools/testing/selftests/bpf/verifier/xadd.c
···
 	.insns = {
 	BPF_MOV64_IMM(BPF_REG_0, 1),
 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
-	BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
+	BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_10, BPF_REG_0, -7),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
 	BPF_EXIT_INSN(),
 	},
···
 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
 	BPF_EXIT_INSN(),
 	BPF_MOV64_IMM(BPF_REG_1, 1),
-	BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
+	BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_0, BPF_REG_1, 3),
 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
 	BPF_EXIT_INSN(),
 	},
···
 	BPF_MOV64_IMM(BPF_REG_0, 1),
 	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
 	BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
-	BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
-	BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
+	BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_2, BPF_REG_0, 1),
+	BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_2, BPF_REG_0, 2),
 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
 	BPF_EXIT_INSN(),
 	},
 	.result = REJECT,
-	.errstr = "BPF_XADD stores into R2 pkt is not allowed",
+	.errstr = "BPF_ATOMIC stores into R2 pkt is not allowed",
 	.prog_type = BPF_PROG_TYPE_XDP,
 	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
···
 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
 	BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
-	BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
-	BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+	BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
+	BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
 	BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
 	BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
···
 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
 	BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
 	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
-	BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
-	BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+	BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
+	BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_10, BPF_REG_0, -8),
 	BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
 	BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
 	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),