Merge tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Pull bpf fixes from Alexei Starovoitov:

- Mark migrate_disable/enable() as always_inline to avoid issues with
partial inlining (Yonghong Song)

- Fix powerpc stack register definition in libbpf bpf_tracing.h (Andrii
Nakryiko)

- Reject negative head_room in __bpf_skb_change_head (Daniel Borkmann)

- Conditionally include dynptr copy kfuncs (Malin Jonsson)

- Sync pending IRQ work before freeing BPF ring buffer (Noorain Eqbal)

- Do not audit capability check in x86 do_jit() (Ondrej Mosnacek)

- Fix arm64 JIT of BPF_ST insn when it writes into arena memory
(Puranjay Mohan)

* tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
bpf/arm64: Fix BPF_ST into arena memory
bpf: Make migrate_disable always inline to avoid partial inlining
bpf: Reject negative head_room in __bpf_skb_change_head
bpf: Conditionally include dynptr copy kfuncs
libbpf: Fix powerpc's stack register definition in bpf_tracing.h
bpf: Do not audit capability check in do_jit()
bpf: Sync pending IRQ work before freeing ring buffer

7 files changed, 13 insertions(+), 7 deletions(-)
arch/arm64/net/bpf_jit_comp.c (+3 -2)

@@ -1213,6 +1213,7 @@
 	const u8 src = bpf2a64[insn->src_reg];
 	const u8 tmp = bpf2a64[TMP_REG_1];
 	const u8 tmp2 = bpf2a64[TMP_REG_2];
+	const u8 tmp3 = bpf2a64[TMP_REG_3];
 	const u8 fp = bpf2a64[BPF_REG_FP];
 	const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
 	const u8 priv_sp = bpf2a64[PRIVATE_SP];

@@ -1757,8 +1758,8 @@
 	case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
 	case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
 		if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
-			emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx);
-			dst = tmp2;
+			emit(A64_ADD(1, tmp3, dst, arena_vm_base), ctx);
+			dst = tmp3;
 		}
 		if (dst == fp) {
 			dst_adj = ctx->priv_sp_used ? priv_sp : A64_SP;
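Background on the register change: farther down the same BPF_ST path, the JIT reuses tmp2 as a scratch register whenever the store offset does not fit the load/store immediate encoding, so parking the translated arena address in tmp2 let it be clobbered before the store was emitted. A simplified sketch of that downstream pattern (paraphrased, not a verbatim excerpt of the JIT):

  /* tmp holds the immediate being stored; tmp2 doubles as scratch for
   * wide offsets -- anything parked there earlier is lost */
  emit_a64_mov_i(1, tmp, imm, ctx);              /* value to store */
  if (!is_lsi_offset(off_adj, 2)) {
          emit_a64_mov_i(1, tmp2, off, ctx);     /* clobbers tmp2 */
          emit(A64_STR32(tmp, dst, tmp2), ctx);  /* wrong address if dst == tmp2 */
  }

Holding the arena base in tmp3 keeps the address live across that sequence.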
arch/x86/net/bpf_jit_comp.c (+1 -1)

@@ -2701,7 +2701,7 @@
 		/* Update cleanup_addr */
 		ctx->cleanup_addr = proglen;
 		if (bpf_prog_was_classic(bpf_prog) &&
-		    !capable(CAP_SYS_ADMIN)) {
+		    !ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN)) {
 			u8 *ip = image + addrs[i - 1];

 			if (emit_spectre_bhb_barrier(&prog, ip, bpf_prog))
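The capability check above only decides whether to emit the Spectre-BHB barrier for a classic BPF program loaded by an unprivileged task; it grants nothing. With plain capable(), every JIT of such a program could log a spurious audit denial. A hedged sketch of the distinction (the helper called in the body is hypothetical):

  /* capable() may record an audit event when the check fails;
   * ns_capable_noaudit() runs the same test silently, which fits
   * checks whose result merely tunes behavior rather than gates
   * access. */
  if (!ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN))
          apply_unprivileged_hardening();        /* hypothetical helper */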
include/linux/sched.h (+2 -2)

@@ -2407,12 +2407,12 @@
  * be defined in kernel/sched/core.c.
  */
 #ifndef INSTANTIATE_EXPORTED_MIGRATE_DISABLE
-static inline void migrate_disable(void)
+static __always_inline void migrate_disable(void)
 {
 	__migrate_disable();
 }

-static inline void migrate_enable(void)
+static __always_inline void migrate_enable(void)
 {
 	__migrate_enable();
 }
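Context for the attribute change: `inline` is only a hint, and the compiler may partially inline a function -- expanding part of it while emitting the rest as an out-of-line `.part.N` clone -- which is the situation the fix rules out for the migrate_disable()/migrate_enable() pair. A hedged userspace illustration (hypothetical function name) of what __always_inline guarantees:

  #include <stdio.h>

  /* always_inline turns the hint into a guarantee: the compiler must
   * expand the body at every call site and cannot split off a
   * partially inlined out-of-line clone. */
  static inline __attribute__((__always_inline__)) void critical_enter(void)
  {
          puts("enter");          /* stand-in for __migrate_disable() */
  }

  int main(void)
  {
          critical_enter();
          return 0;
  }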
kernel/bpf/helpers.c (+2)

@@ -4345,6 +4345,7 @@
 BTF_ID_FLAGS(func, bpf_iter_kmem_cache_destroy, KF_ITER_DESTROY | KF_SLEEPABLE)
 BTF_ID_FLAGS(func, bpf_local_irq_save)
 BTF_ID_FLAGS(func, bpf_local_irq_restore)
+#ifdef CONFIG_BPF_EVENTS
 BTF_ID_FLAGS(func, bpf_probe_read_user_dynptr)
 BTF_ID_FLAGS(func, bpf_probe_read_kernel_dynptr)
 BTF_ID_FLAGS(func, bpf_probe_read_user_str_dynptr)

@@ -4353,6 +4354,7 @@
 BTF_ID_FLAGS(func, bpf_copy_from_user_str_dynptr, KF_SLEEPABLE)
 BTF_ID_FLAGS(func, bpf_copy_from_user_task_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
 BTF_ID_FLAGS(func, bpf_copy_from_user_task_str_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
+#endif
 #ifdef CONFIG_DMA_SHARED_BUFFER
 BTF_ID_FLAGS(func, bpf_iter_dmabuf_new, KF_ITER_NEW | KF_SLEEPABLE)
 BTF_ID_FLAGS(func, bpf_iter_dmabuf_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE)
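The dynptr copy kfuncs are implemented on the tracing side of the tree, which is only compiled when CONFIG_BPF_EVENTS is set, so registering their BTF IDs unconditionally left the ID set referring to symbols that do not exist in CONFIG_BPF_EVENTS=n builds. A hedged sketch of the general pattern, with a hypothetical kfunc and config symbol:

  /* The BTF_ID_FLAGS() registration must live under the same config
   * as the kfunc definition, or resolve_btfids has no symbol to bind
   * the ID to. */
  #ifdef CONFIG_FOO
  __bpf_kfunc int bpf_foo(void)          /* built only with CONFIG_FOO */
  {
          return 0;
  }
  #endif

  /* ...and in the kfunc ID set: */
  #ifdef CONFIG_FOO
  BTF_ID_FLAGS(func, bpf_foo)
  #endif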
kernel/bpf/ringbuf.c (+2)

@@ -216,6 +216,8 @@

 static void bpf_ringbuf_free(struct bpf_ringbuf *rb)
 {
+	irq_work_sync(&rb->work);
+
 	/* copy pages pointer and nr_pages to local variable, as we are going
 	 * to unmap rb itself with vunmap() below
 	 */
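The ring buffer wakes up consumers through rb->work, which the commit path schedules with irq_work_queue(); freeing the map while that work is still pending leaves the callback to run against freed memory. The sync pairs teardown with the producer side, roughly:

  irq_work_queue(&rb->work);     /* commit path: schedule consumer wakeup */
  ...
  irq_work_sync(&rb->work);      /* free path: wait out any pending wakeup */
  /* only now is it safe to vunmap() rb and free its pages */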
net/core/filter.c (+2 -1)

@@ -3877,7 +3877,8 @@
 	u32 new_len = skb->len + head_room;
 	int ret;

-	if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) ||
+	if (unlikely(flags || (int)head_room < 0 ||
+		     (!skb_is_gso(skb) && new_len > max_len) ||
 		     new_len < skb->len))
 		return -EINVAL;

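Why the cast is enough: head_room is u32, so a negative argument arrives with the top bit set, and for GSO skbs the new_len > max_len comparison is skipped entirely. A worked example of the bypass the new check closes (values chosen for illustration):

  /* Assume skb->len = 4 and a caller passing head_room = -8, which
   * arrives as 0xFFFFFFF8:
   *     new_len = 4 + 0xFFFFFFF8 = 0xFFFFFFFC   (u32 wrap-around)
   * new_len >= skb->len, and on a GSO skb the max_len test is skipped,
   * so the old condition let the bogus head_room reach the expansion
   * path.  (int)head_room < 0 rejects it before the arithmetic is
   * trusted. */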
tools/lib/bpf/bpf_tracing.h (+1 -1)

@@ -311,7 +311,7 @@
 #define __PT_RET_REG regs[31]
 #define __PT_FP_REG __unsupported__
 #define __PT_RC_REG gpr[3]
-#define __PT_SP_REG sp
+#define __PT_SP_REG gpr[1]
 #define __PT_IP_REG nip

 #elif defined(bpf_target_sparc)
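On powerpc, struct pt_regs has no `sp` member; the stack pointer is general-purpose register r1, i.e. gpr[1], so any program using the PT_REGS_SP() accessor failed to build on that target. A hedged usage sketch (attach point and program name are placeholders):

  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>

  SEC("kprobe/do_sys_openat2")
  int BPF_KPROBE(show_sp)
  {
          /* PT_REGS_SP() now expands to ctx->gpr[1] on powerpc */
          unsigned long sp = PT_REGS_SP(ctx);

          bpf_printk("sp=%lx", sp);
          return 0;
  }

  char LICENSE[] SEC("license") = "GPL";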