Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Alexei Starovoitov says:

====================
pull-request: bpf 2021-02-26

1) Fix for bpf atomic insns with src_reg=r0, from Brendan.

2) Fix use-after-free due to bpf_prog_clone, from Cong.

3) Drop imprecise verifier log message, from Dmitrii.

4) Remove incorrect blank line in bpf helper description, from Hangbin.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
selftests/bpf: No need to drop the packet when there is no geneve opt
bpf: Remove blank line in bpf helper description comment
tools/resolve_btfids: Fix build error with older host toolchains
selftests/bpf: Fix a compiler warning in global func test
bpf: Drop imprecise log message
bpf: Clear percpu pointers in bpf_prog_clone_free()
bpf: Fix a warning message in mark_ptr_not_null_reg()
bpf, x86: Fix BPF_FETCH atomic and/or/xor with r0 as src
====================

Link: https://lore.kernel.org/r/20210226193737.57004-1-alexei.starovoitov@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+41 -13
+7 -3
arch/x86/net/bpf_jit_comp.c
··· 1349 1349 insn->imm == (BPF_XOR | BPF_FETCH)) { 1350 1350 u8 *branch_target; 1351 1351 bool is64 = BPF_SIZE(insn->code) == BPF_DW; 1352 + u32 real_src_reg = src_reg; 1352 1353 1353 1354 /* 1354 1355 * Can't be implemented with a single x86 insn. ··· 1358 1357 1359 1358 /* Will need RAX as a CMPXCHG operand so save R0 */ 1360 1359 emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0); 1360 + if (src_reg == BPF_REG_0) 1361 + real_src_reg = BPF_REG_AX; 1362 + 1361 1363 branch_target = prog; 1362 1364 /* Load old value */ 1363 1365 emit_ldx(&prog, BPF_SIZE(insn->code), ··· 1370 1366 * put the result in the AUX_REG. 1371 1367 */ 1372 1368 emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0); 1373 - maybe_emit_mod(&prog, AUX_REG, src_reg, is64); 1369 + maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64); 1374 1370 EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)], 1375 - add_2reg(0xC0, AUX_REG, src_reg)); 1371 + add_2reg(0xC0, AUX_REG, real_src_reg)); 1376 1372 /* Attempt to swap in new value */ 1377 1373 err = emit_atomic(&prog, BPF_CMPXCHG, 1378 1374 dst_reg, AUX_REG, insn->off, ··· 1385 1381 */ 1386 1382 EMIT2(X86_JNE, -(prog - branch_target) - 2); 1387 1383 /* Return the pre-modification value */ 1388 - emit_mov_reg(&prog, is64, src_reg, BPF_REG_0); 1384 + emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0); 1389 1385 /* Restore R0 after clobbering RAX */ 1390 1386 emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX); 1391 1387 break;
-1
include/uapi/linux/bpf.h
··· 3850 3850 * 3851 3851 * long bpf_check_mtu(void *ctx, u32 ifindex, u32 *mtu_len, s32 len_diff, u64 flags) 3852 3852 * Description 3853 - 3854 3853 * Check ctx packet size against exceeding MTU of net device (based 3855 3854 * on *ifindex*). This helper will likely be used in combination 3856 3855 * with helpers that adjust/change the packet size.
-2
kernel/bpf/btf.c
··· 4321 4321 * is not supported yet. 4322 4322 * BPF_PROG_TYPE_RAW_TRACEPOINT is fine. 4323 4323 */ 4324 - if (log->level & BPF_LOG_LEVEL) 4325 - bpf_log(log, "arg#%d type is not a struct\n", arg); 4326 4324 return NULL; 4327 4325 } 4328 4326 tname = btf_name_by_offset(btf, t->name_off);
+2
kernel/bpf/core.c
··· 1118 1118 * clone is guaranteed to not be locked. 1119 1119 */ 1120 1120 fp->aux = NULL; 1121 + fp->stats = NULL; 1122 + fp->active = NULL; 1121 1123 __bpf_prog_free(fp); 1122 1124 } 1123 1125
+1 -1
kernel/bpf/verifier.c
··· 1120 1120 reg->type = PTR_TO_RDWR_BUF; 1121 1121 break; 1122 1122 default: 1123 - WARN_ON("unknown nullable register type"); 1123 + WARN_ONCE(1, "unknown nullable register type"); 1124 1124 } 1125 1125 } 1126 1126
+5
tools/bpf/resolve_btfids/main.c
··· 260 260 return btf_id__add(root, id, false); 261 261 } 262 262 263 + /* Older libelf.h and glibc elf.h might not yet define the ELF compression types. */ 264 + #ifndef SHF_COMPRESSED 265 + #define SHF_COMPRESSED (1 << 11) /* Section with compressed data. */ 266 + #endif 267 + 263 268 /* 264 269 * The data of compressed section should be aligned to 4 265 270 * (for 32bit) or 8 (for 64 bit) bytes. The binutils ld
-1
tools/include/uapi/linux/bpf.h
··· 3850 3850 * 3851 3851 * long bpf_check_mtu(void *ctx, u32 ifindex, u32 *mtu_len, s32 len_diff, u64 flags) 3852 3852 * Description 3853 - 3854 3853 * Check ctx packet size against exceeding MTU of net device (based 3855 3854 * on *ifindex*). This helper will likely be used in combination 3856 3855 * with helpers that adjust/change the packet size.
+1 -1
tools/testing/selftests/bpf/progs/test_global_func11.c
··· 15 15 SEC("cgroup_skb/ingress") 16 16 int test_cls(struct __sk_buff *skb) 17 17 { 18 - return foo(skb); 18 + return foo((const void *)skb); 19 19 }
+2 -4
tools/testing/selftests/bpf/progs/test_tunnel_kern.c
··· 446 446 } 447 447 448 448 ret = bpf_skb_get_tunnel_opt(skb, &gopt, sizeof(gopt)); 449 - if (ret < 0) { 450 - ERROR(ret); 451 - return TC_ACT_SHOT; 452 - } 449 + if (ret < 0) 450 + gopt.opt_class = 0; 453 451 454 452 bpf_trace_printk(fmt, sizeof(fmt), 455 453 key.tunnel_id, key.remote_ipv4, gopt.opt_class);
+23
tools/testing/selftests/bpf/verifier/atomic_and.c
··· 75 75 }, 76 76 .result = ACCEPT, 77 77 }, 78 + { 79 + "BPF_ATOMIC_AND with fetch - r0 as source reg", 80 + .insns = { 81 + /* val = 0x110; */ 82 + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0x110), 83 + /* old = atomic_fetch_and(&val, 0x011); */ 84 + BPF_MOV64_IMM(BPF_REG_0, 0x011), 85 + BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_10, BPF_REG_0, -8), 86 + /* if (old != 0x110) exit(3); */ 87 + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0x110, 2), 88 + BPF_MOV64_IMM(BPF_REG_0, 3), 89 + BPF_EXIT_INSN(), 90 + /* if (val != 0x010) exit(2); */ 91 + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8), 92 + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x010, 2), 93 + BPF_MOV64_IMM(BPF_REG_1, 2), 94 + BPF_EXIT_INSN(), 95 + /* exit(0); */ 96 + BPF_MOV64_IMM(BPF_REG_0, 0), 97 + BPF_EXIT_INSN(), 98 + }, 99 + .result = ACCEPT, 100 + },