Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Pull bpf fixes from Daniel Borkmann:

- Fix BPF verifier to not affect subreg_def marks in its range
propagation (Eduard Zingerman)

- Fix a truncation bug in the BPF verifier's handling of
coerce_reg_to_size_sx (Dimitar Kanaliev)

- Fix the BPF verifier's delta propagation between linked registers
under 32-bit addition (Daniel Borkmann)

- Fix a NULL pointer dereference in BPF devmap due to missing rxq
information (Florian Kauer)

- Fix a memory leak in bpf_core_apply (Jiri Olsa)

- Fix a UBSAN-reported array-index-out-of-bounds in BTF parsing for
arrays of nested structs (Hou Tao)

- Fix build ID fetching where memory areas backing the file were
created with memfd_secret (Andrii Nakryiko)

- Fix BPF task iterator tid filtering which was incorrectly using pid
instead of tid (Jordan Rome)

- Several fixes for BPF sockmap and BPF sockhash redirection in
combination with vsocks (Michal Luczaj)

- Fix riscv BPF JIT and make BPF_CMPXCHG fully ordered (Andrea Parri)

- Fix riscv BPF JIT under CONFIG_CFI_CLANG to prevent the possibility
of an infinite BPF tailcall (Pu Lehui)

- Fix a build warning from resolve_btfids that bpf_lsm_key_free cannot
be resolved (Thomas Weißschuh)

- Fix a bug in kfunc BTF caching for modules where the wrong BTF object
was returned (Toke Høiland-Jørgensen)

- Fix a BPF selftest compilation error in cgroup-related tests with
musl libc (Tony Ambardar)

- Several fixes to BPF link info dumps to fill missing fields (Tyrone
Wu)

- Add BPF selftests for kfuncs from multiple modules, checking that the
correct kfuncs are called (Simon Sundberg)

- Ensure that internal and user-facing bpf_redirect flags don't overlap
(Toke Høiland-Jørgensen)

- Switch to use kvzmalloc to allocate BPF verifier environment (Rik van
Riel)

- Use raw_spinlock_t in BPF ringbuf to fix a sleep in atomic splat
under RT (Wander Lairson Costa)

* tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf: (38 commits)
lib/buildid: Handle memfd_secret() files in build_id_parse()
selftests/bpf: Add test case for delta propagation
bpf: Fix print_reg_state's constant scalar dump
bpf: Fix incorrect delta propagation between linked registers
bpf: Properly test iter/task tid filtering
bpf: Fix iter/task tid filtering
riscv, bpf: Make BPF_CMPXCHG fully ordered
bpf, vsock: Drop static vsock_bpf_prot initialization
vsock: Update msg_count on read_skb()
vsock: Update rx_bytes on read_skb()
bpf, sockmap: SK_DROP on attempted redirects of unsupported af_vsock
selftests/bpf: Add asserts for netfilter link info
bpf: Fix link info netfilter flags to populate defrag flag
selftests/bpf: Add test for sign extension in coerce_subreg_to_size_sx()
selftests/bpf: Add test for truncation after sign extension in coerce_reg_to_size_sx()
bpf: Fix truncation bug in coerce_reg_to_size_sx()
selftests/bpf: Assert link info uprobe_multi count & path_size if unset
bpf: Fix unpopulated path_size when uprobe_multi fields unset
selftests/bpf: Fix cross-compiling urandom_read
selftests/bpf: Add test for kfunc module order
...

+849 -134
+5 -3
arch/riscv/net/bpf_jit_comp64.c
··· 18 18 #define RV_MAX_REG_ARGS 8 19 19 #define RV_FENTRY_NINSNS 2 20 20 #define RV_FENTRY_NBYTES (RV_FENTRY_NINSNS * 4) 21 + #define RV_KCFI_NINSNS (IS_ENABLED(CONFIG_CFI_CLANG) ? 1 : 0) 21 22 /* imm that allows emit_imm to emit max count insns */ 22 23 #define RV_MAX_COUNT_IMM 0x7FFF7FF7FF7FF7FF 23 24 ··· 272 271 if (!is_tail_call) 273 272 emit_addiw(RV_REG_A0, RV_REG_A5, 0, ctx); 274 273 emit_jalr(RV_REG_ZERO, is_tail_call ? RV_REG_T3 : RV_REG_RA, 275 - is_tail_call ? (RV_FENTRY_NINSNS + 1) * 4 : 0, /* skip reserved nops and TCC init */ 274 + /* kcfi, fentry and TCC init insns will be skipped on tailcall */ 275 + is_tail_call ? (RV_KCFI_NINSNS + RV_FENTRY_NINSNS + 1) * 4 : 0, 276 276 ctx); 277 277 } 278 278 ··· 550 548 rv_lr_w(r0, 0, rd, 0, 0), ctx); 551 549 jmp_offset = ninsns_rvoff(8); 552 550 emit(rv_bne(RV_REG_T2, r0, jmp_offset >> 1), ctx); 553 - emit(is64 ? rv_sc_d(RV_REG_T3, rs, rd, 0, 0) : 554 - rv_sc_w(RV_REG_T3, rs, rd, 0, 0), ctx); 551 + emit(is64 ? rv_sc_d(RV_REG_T3, rs, rd, 0, 1) : 552 + rv_sc_w(RV_REG_T3, rs, rd, 0, 1), ctx); 555 553 jmp_offset = ninsns_rvoff(-6); 556 554 emit(rv_bne(RV_REG_T3, 0, jmp_offset >> 1), ctx); 557 555 emit(rv_fence(0x3, 0x3), ctx);
+5
include/net/sock.h
··· 2717 2717 return sk->sk_family == AF_UNIX && sk->sk_type == SOCK_STREAM; 2718 2718 } 2719 2719 2720 + static inline bool sk_is_vsock(const struct sock *sk) 2721 + { 2722 + return sk->sk_family == AF_VSOCK; 2723 + } 2724 + 2720 2725 /** 2721 2726 * sk_eat_skb - Release a skb if it is no longer needed 2722 2727 * @sk: socket to eat this skb from
+5 -8
include/uapi/linux/bpf.h
··· 6047 6047 BPF_F_MARK_ENFORCE = (1ULL << 6), 6048 6048 }; 6049 6049 6050 - /* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */ 6051 - enum { 6052 - BPF_F_INGRESS = (1ULL << 0), 6053 - }; 6054 - 6055 6050 /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */ 6056 6051 enum { 6057 6052 BPF_F_TUNINFO_IPV6 = (1ULL << 0), ··· 6193 6198 BPF_F_BPRM_SECUREEXEC = (1ULL << 0), 6194 6199 }; 6195 6200 6196 - /* Flags for bpf_redirect_map helper */ 6201 + /* Flags for bpf_redirect and bpf_redirect_map helpers */ 6197 6202 enum { 6198 - BPF_F_BROADCAST = (1ULL << 3), 6199 - BPF_F_EXCLUDE_INGRESS = (1ULL << 4), 6203 + BPF_F_INGRESS = (1ULL << 0), /* used for skb path */ 6204 + BPF_F_BROADCAST = (1ULL << 3), /* used for XDP path */ 6205 + BPF_F_EXCLUDE_INGRESS = (1ULL << 4), /* used for XDP path */ 6206 + #define BPF_F_REDIRECT_FLAGS (BPF_F_INGRESS | BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS) 6200 6207 }; 6201 6208 6202 6209 #define __bpf_md_ptr(type, name) \
-4
kernel/bpf/bpf_lsm.c
··· 339 339 BTF_ID(func, bpf_lsm_path_chown) 340 340 #endif /* CONFIG_SECURITY_PATH */ 341 341 342 - #ifdef CONFIG_KEYS 343 - BTF_ID(func, bpf_lsm_key_free) 344 - #endif /* CONFIG_KEYS */ 345 - 346 342 BTF_ID(func, bpf_lsm_mmap_file) 347 343 BTF_ID(func, bpf_lsm_netlink_send) 348 344 BTF_ID(func, bpf_lsm_path_notify)
+11 -4
kernel/bpf/btf.c
··· 3523 3523 * (i + 1) * elem_size 3524 3524 * where i is the repeat index and elem_size is the size of an element. 3525 3525 */ 3526 - static int btf_repeat_fields(struct btf_field_info *info, 3526 + static int btf_repeat_fields(struct btf_field_info *info, int info_cnt, 3527 3527 u32 field_cnt, u32 repeat_cnt, u32 elem_size) 3528 3528 { 3529 3529 u32 i, j; ··· 3542 3542 return -EINVAL; 3543 3543 } 3544 3544 } 3545 + 3546 + /* The type of struct size or variable size is u32, 3547 + * so the multiplication will not overflow. 3548 + */ 3549 + if (field_cnt * (repeat_cnt + 1) > info_cnt) 3550 + return -E2BIG; 3545 3551 3546 3552 cur = field_cnt; 3547 3553 for (i = 0; i < repeat_cnt; i++) { ··· 3593 3587 info[i].off += off; 3594 3588 3595 3589 if (nelems > 1) { 3596 - err = btf_repeat_fields(info, ret, nelems - 1, t->size); 3590 + err = btf_repeat_fields(info, info_cnt, ret, nelems - 1, t->size); 3597 3591 if (err == 0) 3598 3592 ret *= nelems; 3599 3593 else ··· 3687 3681 3688 3682 if (ret == BTF_FIELD_IGNORE) 3689 3683 return 0; 3690 - if (nelems > info_cnt) 3684 + if (!info_cnt) 3691 3685 return -E2BIG; 3692 3686 if (nelems > 1) { 3693 - ret = btf_repeat_fields(info, 1, nelems - 1, sz); 3687 + ret = btf_repeat_fields(info, info_cnt, 1, nelems - 1, sz); 3694 3688 if (ret < 0) 3695 3689 return ret; 3696 3690 } ··· 8967 8961 if (!type) { 8968 8962 bpf_log(ctx->log, "relo #%u: bad type id %u\n", 8969 8963 relo_idx, relo->type_id); 8964 + kfree(specs); 8970 8965 return -EINVAL; 8971 8966 } 8972 8967
+7 -4
kernel/bpf/devmap.c
··· 333 333 334 334 static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog, 335 335 struct xdp_frame **frames, int n, 336 - struct net_device *dev) 336 + struct net_device *tx_dev, 337 + struct net_device *rx_dev) 337 338 { 338 - struct xdp_txq_info txq = { .dev = dev }; 339 + struct xdp_txq_info txq = { .dev = tx_dev }; 340 + struct xdp_rxq_info rxq = { .dev = rx_dev }; 339 341 struct xdp_buff xdp; 340 342 int i, nframes = 0; 341 343 ··· 348 346 349 347 xdp_convert_frame_to_buff(xdpf, &xdp); 350 348 xdp.txq = &txq; 349 + xdp.rxq = &rxq; 351 350 352 351 act = bpf_prog_run_xdp(xdp_prog, &xdp); 353 352 switch (act) { ··· 363 360 bpf_warn_invalid_xdp_action(NULL, xdp_prog, act); 364 361 fallthrough; 365 362 case XDP_ABORTED: 366 - trace_xdp_exception(dev, xdp_prog, act); 363 + trace_xdp_exception(tx_dev, xdp_prog, act); 367 364 fallthrough; 368 365 case XDP_DROP: 369 366 xdp_return_frame_rx_napi(xdpf); ··· 391 388 } 392 389 393 390 if (bq->xdp_prog) { 394 - to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev); 391 + to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev, bq->dev_rx); 395 392 if (!to_send) 396 393 goto out; 397 394 }
+1 -2
kernel/bpf/log.c
··· 688 688 if (t == SCALAR_VALUE && reg->precise) 689 689 verbose(env, "P"); 690 690 if (t == SCALAR_VALUE && tnum_is_const(reg->var_off)) { 691 - /* reg->off should be 0 for SCALAR_VALUE */ 692 - verbose_snum(env, reg->var_off.value + reg->off); 691 + verbose_snum(env, reg->var_off.value); 693 692 return; 694 693 } 695 694
+6 -6
kernel/bpf/ringbuf.c
··· 29 29 u64 mask; 30 30 struct page **pages; 31 31 int nr_pages; 32 - spinlock_t spinlock ____cacheline_aligned_in_smp; 32 + raw_spinlock_t spinlock ____cacheline_aligned_in_smp; 33 33 /* For user-space producer ring buffers, an atomic_t busy bit is used 34 34 * to synchronize access to the ring buffers in the kernel, rather than 35 35 * the spinlock that is used for kernel-producer ring buffers. This is ··· 173 173 if (!rb) 174 174 return NULL; 175 175 176 - spin_lock_init(&rb->spinlock); 176 + raw_spin_lock_init(&rb->spinlock); 177 177 atomic_set(&rb->busy, 0); 178 178 init_waitqueue_head(&rb->waitq); 179 179 init_irq_work(&rb->work, bpf_ringbuf_notify); ··· 421 421 cons_pos = smp_load_acquire(&rb->consumer_pos); 422 422 423 423 if (in_nmi()) { 424 - if (!spin_trylock_irqsave(&rb->spinlock, flags)) 424 + if (!raw_spin_trylock_irqsave(&rb->spinlock, flags)) 425 425 return NULL; 426 426 } else { 427 - spin_lock_irqsave(&rb->spinlock, flags); 427 + raw_spin_lock_irqsave(&rb->spinlock, flags); 428 428 } 429 429 430 430 pend_pos = rb->pending_pos; ··· 450 450 */ 451 451 if (new_prod_pos - cons_pos > rb->mask || 452 452 new_prod_pos - pend_pos > rb->mask) { 453 - spin_unlock_irqrestore(&rb->spinlock, flags); 453 + raw_spin_unlock_irqrestore(&rb->spinlock, flags); 454 454 return NULL; 455 455 } 456 456 ··· 462 462 /* pairs with consumer's smp_load_acquire() */ 463 463 smp_store_release(&rb->producer_pos, new_prod_pos); 464 464 465 - spin_unlock_irqrestore(&rb->spinlock, flags); 465 + raw_spin_unlock_irqrestore(&rb->spinlock, flags); 466 466 467 467 return (void *)hdr + BPF_RINGBUF_HDR_SZ; 468 468 }
+23 -8
kernel/bpf/syscall.c
··· 3565 3565 } 3566 3566 3567 3567 static int bpf_perf_link_fill_common(const struct perf_event *event, 3568 - char __user *uname, u32 ulen, 3568 + char __user *uname, u32 *ulenp, 3569 3569 u64 *probe_offset, u64 *probe_addr, 3570 3570 u32 *fd_type, unsigned long *missed) 3571 3571 { 3572 3572 const char *buf; 3573 - u32 prog_id; 3573 + u32 prog_id, ulen; 3574 3574 size_t len; 3575 3575 int err; 3576 3576 3577 + ulen = *ulenp; 3577 3578 if (!ulen ^ !uname) 3578 3579 return -EINVAL; 3579 3580 ··· 3582 3581 probe_offset, probe_addr, missed); 3583 3582 if (err) 3584 3583 return err; 3585 - if (!uname) 3586 - return 0; 3584 + 3587 3585 if (buf) { 3588 3586 len = strlen(buf); 3587 + *ulenp = len + 1; 3588 + } else { 3589 + *ulenp = 1; 3590 + } 3591 + if (!uname) 3592 + return 0; 3593 + 3594 + if (buf) { 3589 3595 err = bpf_copy_to_user(uname, buf, ulen, len); 3590 3596 if (err) 3591 3597 return err; ··· 3617 3609 3618 3610 uname = u64_to_user_ptr(info->perf_event.kprobe.func_name); 3619 3611 ulen = info->perf_event.kprobe.name_len; 3620 - err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr, 3612 + err = bpf_perf_link_fill_common(event, uname, &ulen, &offset, &addr, 3621 3613 &type, &missed); 3622 3614 if (err) 3623 3615 return err; ··· 3625 3617 info->perf_event.type = BPF_PERF_EVENT_KRETPROBE; 3626 3618 else 3627 3619 info->perf_event.type = BPF_PERF_EVENT_KPROBE; 3628 - 3620 + info->perf_event.kprobe.name_len = ulen; 3629 3621 info->perf_event.kprobe.offset = offset; 3630 3622 info->perf_event.kprobe.missed = missed; 3631 3623 if (!kallsyms_show_value(current_cred())) ··· 3647 3639 3648 3640 uname = u64_to_user_ptr(info->perf_event.uprobe.file_name); 3649 3641 ulen = info->perf_event.uprobe.name_len; 3650 - err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr, 3642 + err = bpf_perf_link_fill_common(event, uname, &ulen, &offset, &addr, 3651 3643 &type, NULL); 3652 3644 if (err) 3653 3645 return err; ··· 3656 3648 info->perf_event.type = 
BPF_PERF_EVENT_URETPROBE; 3657 3649 else 3658 3650 info->perf_event.type = BPF_PERF_EVENT_UPROBE; 3651 + info->perf_event.uprobe.name_len = ulen; 3659 3652 info->perf_event.uprobe.offset = offset; 3660 3653 info->perf_event.uprobe.cookie = event->bpf_cookie; 3661 3654 return 0; ··· 3682 3673 { 3683 3674 char __user *uname; 3684 3675 u32 ulen; 3676 + int err; 3685 3677 3686 3678 uname = u64_to_user_ptr(info->perf_event.tracepoint.tp_name); 3687 3679 ulen = info->perf_event.tracepoint.name_len; 3680 + err = bpf_perf_link_fill_common(event, uname, &ulen, NULL, NULL, NULL, NULL); 3681 + if (err) 3682 + return err; 3683 + 3688 3684 info->perf_event.type = BPF_PERF_EVENT_TRACEPOINT; 3685 + info->perf_event.tracepoint.name_len = ulen; 3689 3686 info->perf_event.tracepoint.cookie = event->bpf_cookie; 3690 - return bpf_perf_link_fill_common(event, uname, ulen, NULL, NULL, NULL, NULL); 3687 + return 0; 3691 3688 } 3692 3689 3693 3690 static int bpf_perf_link_fill_perf_event(const struct perf_event *event,
+1 -1
kernel/bpf/task_iter.c
··· 99 99 rcu_read_lock(); 100 100 pid = find_pid_ns(common->pid, common->ns); 101 101 if (pid) { 102 - task = get_pid_task(pid, PIDTYPE_TGID); 102 + task = get_pid_task(pid, PIDTYPE_PID); 103 103 *tid = common->pid; 104 104 } 105 105 rcu_read_unlock();
+24 -12
kernel/bpf/verifier.c
··· 2750 2750 b->module = mod; 2751 2751 b->offset = offset; 2752 2752 2753 + /* sort() reorders entries by value, so b may no longer point 2754 + * to the right entry after this 2755 + */ 2753 2756 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), 2754 2757 kfunc_btf_cmp_by_off, NULL); 2758 + } else { 2759 + btf = b->btf; 2755 2760 } 2756 - return b->btf; 2761 + 2762 + return btf; 2757 2763 } 2758 2764 2759 2765 void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab) ··· 6339 6333 6340 6334 /* both of s64_max/s64_min positive or negative */ 6341 6335 if ((s64_max >= 0) == (s64_min >= 0)) { 6342 - reg->smin_value = reg->s32_min_value = s64_min; 6343 - reg->smax_value = reg->s32_max_value = s64_max; 6344 - reg->umin_value = reg->u32_min_value = s64_min; 6345 - reg->umax_value = reg->u32_max_value = s64_max; 6336 + reg->s32_min_value = reg->smin_value = s64_min; 6337 + reg->s32_max_value = reg->smax_value = s64_max; 6338 + reg->u32_min_value = reg->umin_value = s64_min; 6339 + reg->u32_max_value = reg->umax_value = s64_max; 6346 6340 reg->var_off = tnum_range(s64_min, s64_max); 6347 6341 return; 6348 6342 } ··· 14270 14264 * r1 += 0x1 14271 14265 * if r2 < 1000 goto ... 14272 14266 * use r1 in memory access 14273 - * So remember constant delta between r2 and r1 and update r1 after 14274 - * 'if' condition. 14267 + * So for 64-bit alu remember constant delta between r2 and r1 and 14268 + * update r1 after 'if' condition. 
14275 14269 */ 14276 - if (env->bpf_capable && BPF_OP(insn->code) == BPF_ADD && 14277 - dst_reg->id && is_reg_const(src_reg, alu32)) { 14278 - u64 val = reg_const_value(src_reg, alu32); 14270 + if (env->bpf_capable && 14271 + BPF_OP(insn->code) == BPF_ADD && !alu32 && 14272 + dst_reg->id && is_reg_const(src_reg, false)) { 14273 + u64 val = reg_const_value(src_reg, false); 14279 14274 14280 14275 if ((dst_reg->id & BPF_ADD_CONST) || 14281 14276 /* prevent overflow in sync_linked_regs() later */ ··· 15333 15326 continue; 15334 15327 if ((!(reg->id & BPF_ADD_CONST) && !(known_reg->id & BPF_ADD_CONST)) || 15335 15328 reg->off == known_reg->off) { 15329 + s32 saved_subreg_def = reg->subreg_def; 15330 + 15336 15331 copy_register_state(reg, known_reg); 15332 + reg->subreg_def = saved_subreg_def; 15337 15333 } else { 15334 + s32 saved_subreg_def = reg->subreg_def; 15338 15335 s32 saved_off = reg->off; 15339 15336 15340 15337 fake_reg.type = SCALAR_VALUE; ··· 15351 15340 * otherwise another sync_linked_regs() will be incorrect. 15352 15341 */ 15353 15342 reg->off = saved_off; 15343 + reg->subreg_def = saved_subreg_def; 15354 15344 15355 15345 scalar32_min_max_add(reg, &fake_reg); 15356 15346 scalar_min_max_add(reg, &fake_reg); ··· 22322 22310 /* 'struct bpf_verifier_env' can be global, but since it's not small, 22323 22311 * allocate/free it every time bpf_check() is called 22324 22312 */ 22325 - env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); 22313 + env = kvzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); 22326 22314 if (!env) 22327 22315 return -ENOMEM; 22328 22316 ··· 22558 22546 mutex_unlock(&bpf_verifier_lock); 22559 22547 vfree(env->insn_aux_data); 22560 22548 err_free_env: 22561 - kfree(env); 22549 + kvfree(env); 22562 22550 return ret; 22563 22551 }
+17 -19
kernel/trace/bpf_trace.c
··· 3133 3133 struct bpf_uprobe_multi_link *umulti_link; 3134 3134 u32 ucount = info->uprobe_multi.count; 3135 3135 int err = 0, i; 3136 - long left; 3136 + char *p, *buf; 3137 + long left = 0; 3137 3138 3138 3139 if (!upath ^ !upath_size) 3139 3140 return -EINVAL; ··· 3148 3147 info->uprobe_multi.pid = umulti_link->task ? 3149 3148 task_pid_nr_ns(umulti_link->task, task_active_pid_ns(current)) : 0; 3150 3149 3151 - if (upath) { 3152 - char *p, *buf; 3153 - 3154 - upath_size = min_t(u32, upath_size, PATH_MAX); 3155 - 3156 - buf = kmalloc(upath_size, GFP_KERNEL); 3157 - if (!buf) 3158 - return -ENOMEM; 3159 - p = d_path(&umulti_link->path, buf, upath_size); 3160 - if (IS_ERR(p)) { 3161 - kfree(buf); 3162 - return PTR_ERR(p); 3163 - } 3164 - upath_size = buf + upath_size - p; 3165 - left = copy_to_user(upath, p, upath_size); 3150 + upath_size = upath_size ? min_t(u32, upath_size, PATH_MAX) : PATH_MAX; 3151 + buf = kmalloc(upath_size, GFP_KERNEL); 3152 + if (!buf) 3153 + return -ENOMEM; 3154 + p = d_path(&umulti_link->path, buf, upath_size); 3155 + if (IS_ERR(p)) { 3166 3156 kfree(buf); 3167 - if (left) 3168 - return -EFAULT; 3169 - info->uprobe_multi.path_size = upath_size; 3157 + return PTR_ERR(p); 3170 3158 } 3159 + upath_size = buf + upath_size - p; 3160 + 3161 + if (upath) 3162 + left = copy_to_user(upath, p, upath_size); 3163 + kfree(buf); 3164 + if (left) 3165 + return -EFAULT; 3166 + info->uprobe_multi.path_size = upath_size; 3171 3167 3172 3168 if (!uoffsets && !ucookies && !uref_ctr_offsets) 3173 3169 return 0;
+5
lib/buildid.c
··· 5 5 #include <linux/elf.h> 6 6 #include <linux/kernel.h> 7 7 #include <linux/pagemap.h> 8 + #include <linux/secretmem.h> 8 9 9 10 #define BUILD_ID 3 10 11 ··· 64 63 return 0; 65 64 66 65 freader_put_folio(r); 66 + 67 + /* reject secretmem folios created with memfd_secret() */ 68 + if (secretmem_mapping(r->file->f_mapping)) 69 + return -EFAULT; 67 70 68 71 r->folio = filemap_get_folio(r->file->f_mapping, file_off >> PAGE_SHIFT); 69 72
+5 -3
net/core/filter.c
··· 2438 2438 2439 2439 /* Internal, non-exposed redirect flags. */ 2440 2440 enum { 2441 - BPF_F_NEIGH = (1ULL << 1), 2442 - BPF_F_PEER = (1ULL << 2), 2443 - BPF_F_NEXTHOP = (1ULL << 3), 2441 + BPF_F_NEIGH = (1ULL << 16), 2442 + BPF_F_PEER = (1ULL << 17), 2443 + BPF_F_NEXTHOP = (1ULL << 18), 2444 2444 #define BPF_F_REDIRECT_INTERNAL (BPF_F_NEIGH | BPF_F_PEER | BPF_F_NEXTHOP) 2445 2445 }; 2446 2446 ··· 2449 2449 struct net_device *dev; 2450 2450 struct sk_buff *clone; 2451 2451 int ret; 2452 + 2453 + BUILD_BUG_ON(BPF_F_REDIRECT_INTERNAL & BPF_F_REDIRECT_FLAGS); 2452 2454 2453 2455 if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL))) 2454 2456 return -EINVAL;
+8
net/core/sock_map.c
··· 647 647 sk = __sock_map_lookup_elem(map, key); 648 648 if (unlikely(!sk || !sock_map_redirect_allowed(sk))) 649 649 return SK_DROP; 650 + if ((flags & BPF_F_INGRESS) && sk_is_vsock(sk)) 651 + return SK_DROP; 650 652 651 653 skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS); 652 654 return SK_PASS; ··· 676 674 if (unlikely(!sk || !sock_map_redirect_allowed(sk))) 677 675 return SK_DROP; 678 676 if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk)) 677 + return SK_DROP; 678 + if (sk_is_vsock(sk)) 679 679 return SK_DROP; 680 680 681 681 msg->flags = flags; ··· 1253 1249 sk = __sock_hash_lookup_elem(map, key); 1254 1250 if (unlikely(!sk || !sock_map_redirect_allowed(sk))) 1255 1251 return SK_DROP; 1252 + if ((flags & BPF_F_INGRESS) && sk_is_vsock(sk)) 1253 + return SK_DROP; 1256 1254 1257 1255 skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS); 1258 1256 return SK_PASS; ··· 1282 1276 if (unlikely(!sk || !sock_map_redirect_allowed(sk))) 1283 1277 return SK_DROP; 1284 1278 if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk)) 1279 + return SK_DROP; 1280 + if (sk_is_vsock(sk)) 1285 1281 return SK_DROP; 1286 1282 1287 1283 msg->flags = flags;
+2 -1
net/netfilter/nf_bpf_link.c
··· 150 150 struct bpf_link_info *info) 151 151 { 152 152 struct bpf_nf_link *nf_link = container_of(link, struct bpf_nf_link, link); 153 + const struct nf_defrag_hook *hook = nf_link->defrag_hook; 153 154 154 155 info->netfilter.pf = nf_link->hook_ops.pf; 155 156 info->netfilter.hooknum = nf_link->hook_ops.hooknum; 156 157 info->netfilter.priority = nf_link->hook_ops.priority; 157 - info->netfilter.flags = 0; 158 + info->netfilter.flags = hook ? BPF_F_NETFILTER_IP_DEFRAG : 0; 158 159 159 160 return 0; 160 161 }
+12 -2
net/vmw_vsock/virtio_transport_common.c
··· 1707 1707 { 1708 1708 struct virtio_vsock_sock *vvs = vsk->trans; 1709 1709 struct sock *sk = sk_vsock(vsk); 1710 + struct virtio_vsock_hdr *hdr; 1710 1711 struct sk_buff *skb; 1711 1712 int off = 0; 1712 1713 int err; ··· 1717 1716 * works for types other than dgrams. 1718 1717 */ 1719 1718 skb = __skb_recv_datagram(sk, &vvs->rx_queue, MSG_DONTWAIT, &off, &err); 1719 + if (!skb) { 1720 + spin_unlock_bh(&vvs->rx_lock); 1721 + return err; 1722 + } 1723 + 1724 + hdr = virtio_vsock_hdr(skb); 1725 + if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) 1726 + vvs->msg_count--; 1727 + 1728 + virtio_transport_dec_rx_pkt(vvs, le32_to_cpu(hdr->len)); 1720 1729 spin_unlock_bh(&vvs->rx_lock); 1721 1730 1722 - if (!skb) 1723 - return err; 1731 + virtio_transport_send_credit_update(vsk); 1724 1732 1725 1733 return recv_actor(sk, skb); 1726 1734 }
-8
net/vmw_vsock/vsock_bpf.c
··· 114 114 return copied; 115 115 } 116 116 117 - /* Copy of original proto with updated sock_map methods */ 118 - static struct proto vsock_bpf_prot = { 119 - .close = sock_map_close, 120 - .recvmsg = vsock_bpf_recvmsg, 121 - .sock_is_readable = sk_msg_is_readable, 122 - .unhash = sock_map_unhash, 123 - }; 124 - 125 117 static void vsock_bpf_rebuild_protos(struct proto *prot, const struct proto *base) 126 118 { 127 119 *prot = *base;
+10 -12
tools/include/uapi/linux/bpf.h
··· 5519 5519 * **-EOPNOTSUPP** if the hash calculation failed or **-EINVAL** if 5520 5520 * invalid arguments are passed. 5521 5521 * 5522 - * void *bpf_kptr_xchg(void *map_value, void *ptr) 5522 + * void *bpf_kptr_xchg(void *dst, void *ptr) 5523 5523 * Description 5524 - * Exchange kptr at pointer *map_value* with *ptr*, and return the 5525 - * old value. *ptr* can be NULL, otherwise it must be a referenced 5526 - * pointer which will be released when this helper is called. 5524 + * Exchange kptr at pointer *dst* with *ptr*, and return the old value. 5525 + * *dst* can be map value or local kptr. *ptr* can be NULL, otherwise 5526 + * it must be a referenced pointer which will be released when this helper 5527 + * is called. 5527 5528 * Return 5528 5529 * The old value of kptr (which can be NULL). The returned pointer 5529 5530 * if not NULL, is a reference which must be released using its ··· 6047 6046 BPF_F_MARK_ENFORCE = (1ULL << 6), 6048 6047 }; 6049 6048 6050 - /* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */ 6051 - enum { 6052 - BPF_F_INGRESS = (1ULL << 0), 6053 - }; 6054 - 6055 6049 /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */ 6056 6050 enum { 6057 6051 BPF_F_TUNINFO_IPV6 = (1ULL << 0), ··· 6193 6197 BPF_F_BPRM_SECUREEXEC = (1ULL << 0), 6194 6198 }; 6195 6199 6196 - /* Flags for bpf_redirect_map helper */ 6200 + /* Flags for bpf_redirect and bpf_redirect_map helpers */ 6197 6201 enum { 6198 - BPF_F_BROADCAST = (1ULL << 3), 6199 - BPF_F_EXCLUDE_INGRESS = (1ULL << 4), 6202 + BPF_F_INGRESS = (1ULL << 0), /* used for skb path */ 6203 + BPF_F_BROADCAST = (1ULL << 3), /* used for XDP path */ 6204 + BPF_F_EXCLUDE_INGRESS = (1ULL << 4), /* used for XDP path */ 6205 + #define BPF_F_REDIRECT_FLAGS (BPF_F_INGRESS | BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS) 6200 6206 }; 6201 6207 6202 6208 #define __bpf_md_ptr(type, name) \
+20 -2
tools/testing/selftests/bpf/Makefile
··· 157 157 flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user \ 158 158 test_lirc_mode2_user xdping test_cpp runqslower bench bpf_testmod.ko \ 159 159 xskxceiver xdp_redirect_multi xdp_synproxy veristat xdp_hw_metadata \ 160 - xdp_features bpf_test_no_cfi.ko 160 + xdp_features bpf_test_no_cfi.ko bpf_test_modorder_x.ko \ 161 + bpf_test_modorder_y.ko 161 162 162 163 TEST_GEN_FILES += liburandom_read.so urandom_read sign-file uprobe_multi 163 164 ··· 264 263 ifeq ($(SRCARCH),$(filter $(SRCARCH),x86 riscv)) 265 264 LLD := lld 266 265 else 267 - LLD := ld 266 + LLD := $(shell command -v $(LD)) 268 267 endif 269 268 270 269 # Filter out -static for liburandom_read.so and its dependent targets so that static builds ··· 303 302 $(Q)$(RM) bpf_test_no_cfi/bpf_test_no_cfi.ko # force re-compilation 304 303 $(Q)$(MAKE) $(submake_extras) RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) -C bpf_test_no_cfi 305 304 $(Q)cp bpf_test_no_cfi/bpf_test_no_cfi.ko $@ 305 + 306 + $(OUTPUT)/bpf_test_modorder_x.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_test_modorder_x/Makefile bpf_test_modorder_x/*.[ch]) 307 + $(call msg,MOD,,$@) 308 + $(Q)$(RM) bpf_test_modorder_x/bpf_test_modorder_x.ko # force re-compilation 309 + $(Q)$(MAKE) $(submake_extras) RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) -C bpf_test_modorder_x 310 + $(Q)cp bpf_test_modorder_x/bpf_test_modorder_x.ko $@ 311 + 312 + $(OUTPUT)/bpf_test_modorder_y.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_test_modorder_y/Makefile bpf_test_modorder_y/*.[ch]) 313 + $(call msg,MOD,,$@) 314 + $(Q)$(RM) bpf_test_modorder_y/bpf_test_modorder_y.ko # force re-compilation 315 + $(Q)$(MAKE) $(submake_extras) RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) -C bpf_test_modorder_y 316 + $(Q)cp bpf_test_modorder_y/bpf_test_modorder_y.ko $@ 317 + 306 318 307 319 DEFAULT_BPFTOOL := $(HOST_SCRATCH_DIR)/sbin/bpftool 308 320 ifneq ($(CROSS_COMPILE),) ··· 736 722 ip_check_defrag_frags.h 737 723 TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \ 
738 724 $(OUTPUT)/bpf_test_no_cfi.ko \ 725 + $(OUTPUT)/bpf_test_modorder_x.ko \ 726 + $(OUTPUT)/bpf_test_modorder_y.ko \ 739 727 $(OUTPUT)/liburandom_read.so \ 740 728 $(OUTPUT)/xdp_synproxy \ 741 729 $(OUTPUT)/sign-file \ ··· 872 856 $(addprefix $(OUTPUT)/,*.o *.d *.skel.h *.lskel.h *.subskel.h \ 873 857 no_alu32 cpuv4 bpf_gcc bpf_testmod.ko \ 874 858 bpf_test_no_cfi.ko \ 859 + bpf_test_modorder_x.ko \ 860 + bpf_test_modorder_y.ko \ 875 861 liburandom_read.so) \ 876 862 $(OUTPUT)/FEATURE-DUMP.selftests 877 863
+19
tools/testing/selftests/bpf/bpf_test_modorder_x/Makefile
··· 1 + BPF_TESTMOD_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST))))) 2 + KDIR ?= $(abspath $(BPF_TESTMOD_DIR)/../../../../..) 3 + 4 + ifeq ($(V),1) 5 + Q = 6 + else 7 + Q = @ 8 + endif 9 + 10 + MODULES = bpf_test_modorder_x.ko 11 + 12 + obj-m += bpf_test_modorder_x.o 13 + 14 + all: 15 + +$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) modules 16 + 17 + clean: 18 + +$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) clean 19 +
+39
tools/testing/selftests/bpf/bpf_test_modorder_x/bpf_test_modorder_x.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <linux/bpf.h> 3 + #include <linux/btf.h> 4 + #include <linux/module.h> 5 + #include <linux/init.h> 6 + 7 + __bpf_kfunc_start_defs(); 8 + 9 + __bpf_kfunc int bpf_test_modorder_retx(void) 10 + { 11 + return 'x'; 12 + } 13 + 14 + __bpf_kfunc_end_defs(); 15 + 16 + BTF_KFUNCS_START(bpf_test_modorder_kfunc_x_ids) 17 + BTF_ID_FLAGS(func, bpf_test_modorder_retx); 18 + BTF_KFUNCS_END(bpf_test_modorder_kfunc_x_ids) 19 + 20 + static const struct btf_kfunc_id_set bpf_test_modorder_x_set = { 21 + .owner = THIS_MODULE, 22 + .set = &bpf_test_modorder_kfunc_x_ids, 23 + }; 24 + 25 + static int __init bpf_test_modorder_x_init(void) 26 + { 27 + return register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, 28 + &bpf_test_modorder_x_set); 29 + } 30 + 31 + static void __exit bpf_test_modorder_x_exit(void) 32 + { 33 + } 34 + 35 + module_init(bpf_test_modorder_x_init); 36 + module_exit(bpf_test_modorder_x_exit); 37 + 38 + MODULE_DESCRIPTION("BPF selftest ordertest module X"); 39 + MODULE_LICENSE("GPL");
+19
tools/testing/selftests/bpf/bpf_test_modorder_y/Makefile
··· 1 + BPF_TESTMOD_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST))))) 2 + KDIR ?= $(abspath $(BPF_TESTMOD_DIR)/../../../../..) 3 + 4 + ifeq ($(V),1) 5 + Q = 6 + else 7 + Q = @ 8 + endif 9 + 10 + MODULES = bpf_test_modorder_y.ko 11 + 12 + obj-m += bpf_test_modorder_y.o 13 + 14 + all: 15 + +$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) modules 16 + 17 + clean: 18 + +$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) clean 19 +
+39
tools/testing/selftests/bpf/bpf_test_modorder_y/bpf_test_modorder_y.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <linux/bpf.h> 3 + #include <linux/btf.h> 4 + #include <linux/module.h> 5 + #include <linux/init.h> 6 + 7 + __bpf_kfunc_start_defs(); 8 + 9 + __bpf_kfunc int bpf_test_modorder_rety(void) 10 + { 11 + return 'y'; 12 + } 13 + 14 + __bpf_kfunc_end_defs(); 15 + 16 + BTF_KFUNCS_START(bpf_test_modorder_kfunc_y_ids) 17 + BTF_ID_FLAGS(func, bpf_test_modorder_rety); 18 + BTF_KFUNCS_END(bpf_test_modorder_kfunc_y_ids) 19 + 20 + static const struct btf_kfunc_id_set bpf_test_modorder_y_set = { 21 + .owner = THIS_MODULE, 22 + .set = &bpf_test_modorder_kfunc_y_ids, 23 + }; 24 + 25 + static int __init bpf_test_modorder_y_init(void) 26 + { 27 + return register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, 28 + &bpf_test_modorder_y_set); 29 + } 30 + 31 + static void __exit bpf_test_modorder_y_exit(void) 32 + { 33 + } 34 + 35 + module_init(bpf_test_modorder_y_init); 36 + module_exit(bpf_test_modorder_y_exit); 37 + 38 + MODULE_DESCRIPTION("BPF selftest ordertest module Y"); 39 + MODULE_LICENSE("GPL");
+22 -5
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
··· 226 226 ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL), 227 227 "pthread_create"); 228 228 229 - skel->bss->tid = getpid(); 229 + skel->bss->tid = gettid(); 230 230 231 231 do_dummy_read_opts(skel->progs.dump_task, opts); 232 232 ··· 249 249 ASSERT_EQ(num_known_tid, num_known, "check_num_known_tid"); 250 250 } 251 251 252 - static void test_task_tid(void) 252 + static void *run_test_task_tid(void *arg) 253 253 { 254 254 LIBBPF_OPTS(bpf_iter_attach_opts, opts); 255 255 union bpf_iter_link_info linfo; 256 256 int num_unknown_tid, num_known_tid; 257 257 258 + ASSERT_NEQ(getpid(), gettid(), "check_new_thread_id"); 259 + 258 260 memset(&linfo, 0, sizeof(linfo)); 259 - linfo.task.tid = getpid(); 261 + linfo.task.tid = gettid(); 260 262 opts.link_info = &linfo; 261 263 opts.link_info_len = sizeof(linfo); 262 264 test_task_common(&opts, 0, 1); 263 265 264 266 linfo.task.tid = 0; 265 267 linfo.task.pid = getpid(); 266 - test_task_common(&opts, 1, 1); 268 + /* This includes the parent thread, this thread, 269 + * and the do_nothing_wait thread 270 + */ 271 + test_task_common(&opts, 2, 1); 267 272 268 273 test_task_common_nocheck(NULL, &num_unknown_tid, &num_known_tid); 269 - ASSERT_GT(num_unknown_tid, 1, "check_num_unknown_tid"); 274 + ASSERT_GT(num_unknown_tid, 2, "check_num_unknown_tid"); 270 275 ASSERT_EQ(num_known_tid, 1, "check_num_known_tid"); 276 + 277 + return NULL; 278 + } 279 + 280 + static void test_task_tid(void) 281 + { 282 + pthread_t thread_id; 283 + 284 + /* Create a new thread so pid and tid aren't the same */ 285 + ASSERT_OK(pthread_create(&thread_id, NULL, &run_test_task_tid, NULL), 286 + "pthread_create"); 287 + ASSERT_FALSE(pthread_join(thread_id, NULL), "pthread_join"); 271 288 } 272 289 273 290 static void test_task_pid(void)
+1 -1
tools/testing/selftests/bpf/prog_tests/cgroup_ancestor.c
··· 35 35 if (!ASSERT_OK_FD(sock, "create socket")) 36 36 return sock; 37 37 38 - if (!ASSERT_OK(connect(sock, &addr, sizeof(addr)), "connect")) { 38 + if (!ASSERT_OK(connect(sock, (struct sockaddr *)&addr, sizeof(addr)), "connect")) { 39 39 close(sock); 40 40 return -1; 41 41 }
+1
tools/testing/selftests/bpf/prog_tests/cpumask.c
··· 23 23 "test_global_mask_array_l2_rcu", 24 24 "test_global_mask_nested_rcu", 25 25 "test_global_mask_nested_deep_rcu", 26 + "test_global_mask_nested_deep_array_rcu", 26 27 "test_cpumask_weight", 27 28 }; 28 29
+55
tools/testing/selftests/bpf/prog_tests/kfunc_module_order.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <test_progs.h> 3 + #include <testing_helpers.h> 4 + 5 + #include "kfunc_module_order.skel.h" 6 + 7 + static int test_run_prog(const struct bpf_program *prog, 8 + struct bpf_test_run_opts *opts) 9 + { 10 + int err; 11 + 12 + err = bpf_prog_test_run_opts(bpf_program__fd(prog), opts); 13 + if (!ASSERT_OK(err, "bpf_prog_test_run_opts")) 14 + return err; 15 + 16 + if (!ASSERT_EQ((int)opts->retval, 0, bpf_program__name(prog))) 17 + return -EINVAL; 18 + 19 + return 0; 20 + } 21 + 22 + void test_kfunc_module_order(void) 23 + { 24 + struct kfunc_module_order *skel; 25 + char pkt_data[64] = {}; 26 + int err = 0; 27 + 28 + DECLARE_LIBBPF_OPTS(bpf_test_run_opts, test_opts, .data_in = pkt_data, 29 + .data_size_in = sizeof(pkt_data)); 30 + 31 + err = load_module("bpf_test_modorder_x.ko", 32 + env_verbosity > VERBOSE_NONE); 33 + if (!ASSERT_OK(err, "load bpf_test_modorder_x.ko")) 34 + return; 35 + 36 + err = load_module("bpf_test_modorder_y.ko", 37 + env_verbosity > VERBOSE_NONE); 38 + if (!ASSERT_OK(err, "load bpf_test_modorder_y.ko")) 39 + goto exit_modx; 40 + 41 + skel = kfunc_module_order__open_and_load(); 42 + if (!ASSERT_OK_PTR(skel, "kfunc_module_order__open_and_load()")) { 43 + err = -EINVAL; 44 + goto exit_mods; 45 + } 46 + 47 + test_run_prog(skel->progs.call_kfunc_xy, &test_opts); 48 + test_run_prog(skel->progs.call_kfunc_yx, &test_opts); 49 + 50 + kfunc_module_order__destroy(skel); 51 + exit_mods: 52 + unload_module("bpf_test_modorder_y", env_verbosity > VERBOSE_NONE); 53 + exit_modx: 54 + unload_module("bpf_test_modorder_x", env_verbosity > VERBOSE_NONE); 55 + }
+2
tools/testing/selftests/bpf/prog_tests/verifier.c
··· 44 44 #include "verifier_ld_ind.skel.h" 45 45 #include "verifier_ldsx.skel.h" 46 46 #include "verifier_leak_ptr.skel.h" 47 + #include "verifier_linked_scalars.skel.h" 47 48 #include "verifier_loops1.skel.h" 48 49 #include "verifier_lwt.skel.h" 49 50 #include "verifier_map_in_map.skel.h" ··· 171 170 void test_verifier_ld_ind(void) { RUN(verifier_ld_ind); } 172 171 void test_verifier_ldsx(void) { RUN(verifier_ldsx); } 173 172 void test_verifier_leak_ptr(void) { RUN(verifier_leak_ptr); } 173 + void test_verifier_linked_scalars(void) { RUN(verifier_linked_scalars); } 174 174 void test_verifier_loops1(void) { RUN(verifier_loops1); } 175 175 void test_verifier_lwt(void) { RUN(verifier_lwt); } 176 176 void test_verifier_map_in_map(void) { RUN(verifier_map_in_map); }
+118 -9
tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 + #include <arpa/inet.h> 2 3 #include <uapi/linux/bpf.h> 3 4 #include <linux/if_link.h> 5 + #include <network_helpers.h> 6 + #include <net/if.h> 4 7 #include <test_progs.h> 5 8 6 9 #include "test_xdp_devmap_helpers.skel.h" ··· 11 8 #include "test_xdp_with_devmap_helpers.skel.h" 12 9 13 10 #define IFINDEX_LO 1 11 + #define TEST_NS "devmap_attach_ns" 14 12 15 13 static void test_xdp_with_devmap_helpers(void) 16 14 { 17 - struct test_xdp_with_devmap_helpers *skel; 15 + struct test_xdp_with_devmap_helpers *skel = NULL; 18 16 struct bpf_prog_info info = {}; 19 17 struct bpf_devmap_val val = { 20 18 .ifindex = IFINDEX_LO, 21 19 }; 22 20 __u32 len = sizeof(info); 23 - int err, dm_fd, map_fd; 21 + int err, dm_fd, dm_fd_redir, map_fd; 22 + struct nstoken *nstoken = NULL; 23 + char data[10] = {}; 24 24 __u32 idx = 0; 25 25 26 + SYS(out_close, "ip netns add %s", TEST_NS); 27 + nstoken = open_netns(TEST_NS); 28 + if (!ASSERT_OK_PTR(nstoken, "open_netns")) 29 + goto out_close; 30 + SYS(out_close, "ip link set dev lo up"); 26 31 27 32 skel = test_xdp_with_devmap_helpers__open_and_load(); 28 33 if (!ASSERT_OK_PTR(skel, "test_xdp_with_devmap_helpers__open_and_load")) 29 - return; 30 - 31 - dm_fd = bpf_program__fd(skel->progs.xdp_redir_prog); 32 - err = bpf_xdp_attach(IFINDEX_LO, dm_fd, XDP_FLAGS_SKB_MODE, NULL); 33 - if (!ASSERT_OK(err, "Generic attach of program with 8-byte devmap")) 34 34 goto out_close; 35 35 36 - err = bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_SKB_MODE, NULL); 37 - ASSERT_OK(err, "XDP program detach"); 36 + dm_fd_redir = bpf_program__fd(skel->progs.xdp_redir_prog); 37 + err = bpf_xdp_attach(IFINDEX_LO, dm_fd_redir, XDP_FLAGS_SKB_MODE, NULL); 38 + if (!ASSERT_OK(err, "Generic attach of program with 8-byte devmap")) 39 + goto out_close; 38 40 39 41 dm_fd = bpf_program__fd(skel->progs.xdp_dummy_dm); 40 42 map_fd = bpf_map__fd(skel->maps.dm_ports); ··· 54 46 err = bpf_map_lookup_elem(map_fd, &idx, &val); 55 47 
ASSERT_OK(err, "Read devmap entry"); 56 48 ASSERT_EQ(info.id, val.bpf_prog.id, "Match program id to devmap entry prog_id"); 49 + 50 + /* send a packet to trigger any potential bugs in there */ 51 + DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts, 52 + .data_in = &data, 53 + .data_size_in = 10, 54 + .flags = BPF_F_TEST_XDP_LIVE_FRAMES, 55 + .repeat = 1, 56 + ); 57 + err = bpf_prog_test_run_opts(dm_fd_redir, &opts); 58 + ASSERT_OK(err, "XDP test run"); 59 + 60 + /* wait for the packets to be flushed */ 61 + kern_sync_rcu(); 62 + 63 + err = bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_SKB_MODE, NULL); 64 + ASSERT_OK(err, "XDP program detach"); 57 65 58 66 /* can not attach BPF_XDP_DEVMAP program to a device */ 59 67 err = bpf_xdp_attach(IFINDEX_LO, dm_fd, XDP_FLAGS_SKB_MODE, NULL); ··· 91 67 ASSERT_NEQ(err, 0, "Add BPF_XDP program with frags to devmap entry"); 92 68 93 69 out_close: 70 + close_netns(nstoken); 71 + SYS_NOFAIL("ip netns del %s", TEST_NS); 94 72 test_xdp_with_devmap_helpers__destroy(skel); 95 73 } 96 74 ··· 150 124 test_xdp_with_devmap_frags_helpers__destroy(skel); 151 125 } 152 126 127 + static void test_xdp_with_devmap_helpers_veth(void) 128 + { 129 + struct test_xdp_with_devmap_helpers *skel = NULL; 130 + struct bpf_prog_info info = {}; 131 + struct bpf_devmap_val val = {}; 132 + struct nstoken *nstoken = NULL; 133 + __u32 len = sizeof(info); 134 + int err, dm_fd, dm_fd_redir, map_fd, ifindex_dst; 135 + char data[10] = {}; 136 + __u32 idx = 0; 137 + 138 + SYS(out_close, "ip netns add %s", TEST_NS); 139 + nstoken = open_netns(TEST_NS); 140 + if (!ASSERT_OK_PTR(nstoken, "open_netns")) 141 + goto out_close; 142 + 143 + SYS(out_close, "ip link add veth_src type veth peer name veth_dst"); 144 + SYS(out_close, "ip link set dev veth_src up"); 145 + SYS(out_close, "ip link set dev veth_dst up"); 146 + 147 + val.ifindex = if_nametoindex("veth_src"); 148 + ifindex_dst = if_nametoindex("veth_dst"); 149 + if (!ASSERT_NEQ(val.ifindex, 0, "val.ifindex") || 150 + 
!ASSERT_NEQ(ifindex_dst, 0, "ifindex_dst")) 151 + goto out_close; 152 + 153 + skel = test_xdp_with_devmap_helpers__open_and_load(); 154 + if (!ASSERT_OK_PTR(skel, "test_xdp_with_devmap_helpers__open_and_load")) 155 + goto out_close; 156 + 157 + dm_fd_redir = bpf_program__fd(skel->progs.xdp_redir_prog); 158 + err = bpf_xdp_attach(val.ifindex, dm_fd_redir, XDP_FLAGS_DRV_MODE, NULL); 159 + if (!ASSERT_OK(err, "Attach of program with 8-byte devmap")) 160 + goto out_close; 161 + 162 + dm_fd = bpf_program__fd(skel->progs.xdp_dummy_dm); 163 + map_fd = bpf_map__fd(skel->maps.dm_ports); 164 + err = bpf_prog_get_info_by_fd(dm_fd, &info, &len); 165 + if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd")) 166 + goto out_close; 167 + 168 + val.bpf_prog.fd = dm_fd; 169 + err = bpf_map_update_elem(map_fd, &idx, &val, 0); 170 + ASSERT_OK(err, "Add program to devmap entry"); 171 + 172 + err = bpf_map_lookup_elem(map_fd, &idx, &val); 173 + ASSERT_OK(err, "Read devmap entry"); 174 + ASSERT_EQ(info.id, val.bpf_prog.id, "Match program id to devmap entry prog_id"); 175 + 176 + /* attach dummy to other side to enable reception */ 177 + dm_fd = bpf_program__fd(skel->progs.xdp_dummy_prog); 178 + err = bpf_xdp_attach(ifindex_dst, dm_fd, XDP_FLAGS_DRV_MODE, NULL); 179 + if (!ASSERT_OK(err, "Attach of dummy XDP")) 180 + goto out_close; 181 + 182 + /* send a packet to trigger any potential bugs in there */ 183 + DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts, 184 + .data_in = &data, 185 + .data_size_in = 10, 186 + .flags = BPF_F_TEST_XDP_LIVE_FRAMES, 187 + .repeat = 1, 188 + ); 189 + err = bpf_prog_test_run_opts(dm_fd_redir, &opts); 190 + ASSERT_OK(err, "XDP test run"); 191 + 192 + /* wait for the packets to be flushed */ 193 + kern_sync_rcu(); 194 + 195 + err = bpf_xdp_detach(val.ifindex, XDP_FLAGS_DRV_MODE, NULL); 196 + ASSERT_OK(err, "XDP program detach"); 197 + 198 + err = bpf_xdp_detach(ifindex_dst, XDP_FLAGS_DRV_MODE, NULL); 199 + ASSERT_OK(err, "XDP program detach"); 200 + 201 + out_close: 202 + 
close_netns(nstoken); 203 + SYS_NOFAIL("ip netns del %s", TEST_NS); 204 + test_xdp_with_devmap_helpers__destroy(skel); 205 + } 206 + 153 207 void serial_test_xdp_devmap_attach(void) 154 208 { 155 209 if (test__start_subtest("DEVMAP with programs in entries")) ··· 240 134 241 135 if (test__start_subtest("Verifier check of DEVMAP programs")) 242 136 test_neg_xdp_devmap_helpers(); 137 + 138 + if (test__start_subtest("DEVMAP with programs in entries on veth")) 139 + test_xdp_with_devmap_helpers_veth(); 243 140 }
+5
tools/testing/selftests/bpf/progs/cpumask_common.h
··· 7 7 #include "errno.h" 8 8 #include <stdbool.h> 9 9 10 + /* Should use BTF_FIELDS_MAX, but it is not always available in vmlinux.h, 11 + * so use the hard-coded number as a workaround. 12 + */ 13 + #define CPUMASK_KPTR_FIELDS_MAX 11 14 + 10 15 int err; 11 16 12 17 #define private(name) SEC(".bss." #name) __attribute__((aligned(8)))
+35
tools/testing/selftests/bpf/progs/cpumask_failure.c
··· 10 10 11 11 char _license[] SEC("license") = "GPL"; 12 12 13 + struct kptr_nested_array_2 { 14 + struct bpf_cpumask __kptr * mask; 15 + }; 16 + 17 + struct kptr_nested_array_1 { 18 + /* Make btf_parse_fields() in map_create() return -E2BIG */ 19 + struct kptr_nested_array_2 d_2[CPUMASK_KPTR_FIELDS_MAX + 1]; 20 + }; 21 + 22 + struct kptr_nested_array { 23 + struct kptr_nested_array_1 d_1; 24 + }; 25 + 26 + private(MASK_NESTED) static struct kptr_nested_array global_mask_nested_arr; 27 + 13 28 /* Prototype for all of the program trace events below: 14 29 * 15 30 * TRACE_EVENT(task_newtask, ··· 199 184 bpf_rcu_read_unlock(); 200 185 if (prev) 201 186 bpf_cpumask_release(prev); 187 + 188 + return 0; 189 + } 190 + 191 + SEC("tp_btf/task_newtask") 192 + __failure __msg("has no valid kptr") 193 + int BPF_PROG(test_invalid_nested_array, struct task_struct *task, u64 clone_flags) 194 + { 195 + struct bpf_cpumask *local, *prev; 196 + 197 + local = create_cpumask(); 198 + if (!local) 199 + return 0; 200 + 201 + prev = bpf_kptr_xchg(&global_mask_nested_arr.d_1.d_2[CPUMASK_KPTR_FIELDS_MAX].mask, local); 202 + if (prev) { 203 + bpf_cpumask_release(prev); 204 + err = 3; 205 + return 0; 206 + } 202 207 203 208 return 0; 204 209 }
+76 -2
tools/testing/selftests/bpf/progs/cpumask_success.c
··· 31 31 struct kptr_nested_pair ptr_pairs[3]; 32 32 }; 33 33 34 + struct kptr_nested_deep_array_1_2 { 35 + int dummy; 36 + struct bpf_cpumask __kptr * mask[CPUMASK_KPTR_FIELDS_MAX]; 37 + }; 38 + 39 + struct kptr_nested_deep_array_1_1 { 40 + int dummy; 41 + struct kptr_nested_deep_array_1_2 d_2; 42 + }; 43 + 44 + struct kptr_nested_deep_array_1 { 45 + long dummy; 46 + struct kptr_nested_deep_array_1_1 d_1; 47 + }; 48 + 49 + struct kptr_nested_deep_array_2_2 { 50 + long dummy[2]; 51 + struct bpf_cpumask __kptr * mask; 52 + }; 53 + 54 + struct kptr_nested_deep_array_2_1 { 55 + int dummy; 56 + struct kptr_nested_deep_array_2_2 d_2[CPUMASK_KPTR_FIELDS_MAX]; 57 + }; 58 + 59 + struct kptr_nested_deep_array_2 { 60 + long dummy; 61 + struct kptr_nested_deep_array_2_1 d_1; 62 + }; 63 + 64 + struct kptr_nested_deep_array_3_2 { 65 + long dummy[2]; 66 + struct bpf_cpumask __kptr * mask; 67 + }; 68 + 69 + struct kptr_nested_deep_array_3_1 { 70 + int dummy; 71 + struct kptr_nested_deep_array_3_2 d_2; 72 + }; 73 + 74 + struct kptr_nested_deep_array_3 { 75 + long dummy; 76 + struct kptr_nested_deep_array_3_1 d_1[CPUMASK_KPTR_FIELDS_MAX]; 77 + }; 78 + 34 79 private(MASK) static struct bpf_cpumask __kptr * global_mask_array[2]; 35 80 private(MASK) static struct bpf_cpumask __kptr * global_mask_array_l2[2][1]; 36 81 private(MASK) static struct bpf_cpumask __kptr * global_mask_array_one[1]; 37 82 private(MASK) static struct kptr_nested global_mask_nested[2]; 38 83 private(MASK_DEEP) static struct kptr_nested_deep global_mask_nested_deep; 84 + private(MASK_1) static struct kptr_nested_deep_array_1 global_mask_nested_deep_array_1; 85 + private(MASK_2) static struct kptr_nested_deep_array_2 global_mask_nested_deep_array_2; 86 + private(MASK_3) static struct kptr_nested_deep_array_3 global_mask_nested_deep_array_3; 39 87 40 88 static bool is_test_task(void) 41 89 { ··· 591 543 goto err_exit; 592 544 } 593 545 594 - /* [<mask 0>, NULL] */ 595 - if (!*mask0 || *mask1) { 546 + /* [<mask 0>, 
*] */ 547 + if (!*mask0) { 596 548 err = 2; 549 + goto err_exit; 550 + } 551 + 552 + if (!mask1) 553 + goto err_exit; 554 + 555 + /* [*, NULL] */ 556 + if (*mask1) { 557 + err = 3; 597 558 goto err_exit; 598 559 } 599 560 ··· 685 628 if (r) 686 629 return r; 687 630 } 631 + return 0; 632 + } 633 + 634 + SEC("tp_btf/task_newtask") 635 + int BPF_PROG(test_global_mask_nested_deep_array_rcu, struct task_struct *task, u64 clone_flags) 636 + { 637 + int i; 638 + 639 + for (i = 0; i < CPUMASK_KPTR_FIELDS_MAX; i++) 640 + _global_mask_array_rcu(&global_mask_nested_deep_array_1.d_1.d_2.mask[i], NULL); 641 + 642 + for (i = 0; i < CPUMASK_KPTR_FIELDS_MAX; i++) 643 + _global_mask_array_rcu(&global_mask_nested_deep_array_2.d_1.d_2[i].mask, NULL); 644 + 645 + for (i = 0; i < CPUMASK_KPTR_FIELDS_MAX; i++) 646 + _global_mask_array_rcu(&global_mask_nested_deep_array_3.d_1[i].d_2.mask, NULL); 647 + 688 648 return 0; 689 649 } 690 650
+30
tools/testing/selftests/bpf/progs/kfunc_module_order.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <linux/bpf.h> 3 + #include <bpf/bpf_helpers.h> 4 + 5 + extern int bpf_test_modorder_retx(void) __ksym; 6 + extern int bpf_test_modorder_rety(void) __ksym; 7 + 8 + SEC("classifier") 9 + int call_kfunc_xy(struct __sk_buff *skb) 10 + { 11 + int ret1, ret2; 12 + 13 + ret1 = bpf_test_modorder_retx(); 14 + ret2 = bpf_test_modorder_rety(); 15 + 16 + return ret1 == 'x' && ret2 == 'y' ? 0 : -1; 17 + } 18 + 19 + SEC("classifier") 20 + int call_kfunc_yx(struct __sk_buff *skb) 21 + { 22 + int ret1, ret2; 23 + 24 + ret1 = bpf_test_modorder_rety(); 25 + ret2 = bpf_test_modorder_retx(); 26 + 27 + return ret1 == 'y' && ret2 == 'x' ? 0 : -1; 28 + } 29 + 30 + char _license[] SEC("license") = "GPL";
+1 -1
tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c
··· 12 12 SEC("xdp") 13 13 int xdp_redir_prog(struct xdp_md *ctx) 14 14 { 15 - return bpf_redirect_map(&dm_ports, 1, 0); 15 + return bpf_redirect_map(&dm_ports, 0, 0); 16 16 } 17 17 18 18 /* invalid program on DEVMAP entry;
+34
tools/testing/selftests/bpf/progs/verifier_linked_scalars.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include <linux/bpf.h> 4 + #include <bpf/bpf_helpers.h> 5 + #include "bpf_misc.h" 6 + 7 + SEC("socket") 8 + __description("scalars: find linked scalars") 9 + __failure 10 + __msg("math between fp pointer and 2147483647 is not allowed") 11 + __naked void scalars(void) 12 + { 13 + asm volatile (" \ 14 + r0 = 0; \ 15 + r1 = 0x80000001 ll; \ 16 + r1 /= 1; \ 17 + r2 = r1; \ 18 + r4 = r1; \ 19 + w2 += 0x7FFFFFFF; \ 20 + w4 += 0; \ 21 + if r2 == 0 goto l1; \ 22 + exit; \ 23 + l1: \ 24 + r4 >>= 63; \ 25 + r3 = 1; \ 26 + r3 -= r4; \ 27 + r3 *= 0x7FFFFFFF; \ 28 + r3 += r10; \ 29 + *(u8*)(r3 - 1) = r0; \ 30 + exit; \ 31 + " ::: __clobber_all); 32 + } 33 + 34 + char _license[] SEC("license") = "GPL";
+40
tools/testing/selftests/bpf/progs/verifier_movsx.c
··· 287 287 : __clobber_all); 288 288 } 289 289 290 + SEC("socket") 291 + __description("MOV64SX, S8, unsigned range_check") 292 + __success __retval(0) 293 + __naked void mov64sx_s8_range_check(void) 294 + { 295 + asm volatile (" \ 296 + call %[bpf_get_prandom_u32]; \ 297 + r0 &= 0x1; \ 298 + r0 += 0xfe; \ 299 + r0 = (s8)r0; \ 300 + if r0 < 0xfffffffffffffffe goto label_%=; \ 301 + r0 = 0; \ 302 + exit; \ 303 + label_%=: \ 304 + exit; \ 305 + " : 306 + : __imm(bpf_get_prandom_u32) 307 + : __clobber_all); 308 + } 309 + 310 + SEC("socket") 311 + __description("MOV32SX, S8, unsigned range_check") 312 + __success __retval(0) 313 + __naked void mov32sx_s8_range_check(void) 314 + { 315 + asm volatile (" \ 316 + call %[bpf_get_prandom_u32]; \ 317 + w0 &= 0x1; \ 318 + w0 += 0xfe; \ 319 + w0 = (s8)w0; \ 320 + if w0 < 0xfffffffe goto label_%=; \ 321 + r0 = 0; \ 322 + exit; \ 323 + label_%=: \ 324 + exit; \ 325 + " : 326 + : __imm(bpf_get_prandom_u32) 327 + : __clobber_all); 328 + } 329 + 290 330 #else 291 331 292 332 SEC("socket")
+67
tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
··· 760 760 : __clobber_all); 761 761 } 762 762 763 + SEC("socket") 764 + /* Note the flag, see verifier.c:opt_subreg_zext_lo32_rnd_hi32() */ 765 + __flag(BPF_F_TEST_RND_HI32) 766 + __success 767 + /* This test was added because of a bug in verifier.c:sync_linked_regs(), 768 + * upon range propagation it destroyed subreg_def marks for registers. 769 + * The subreg_def mark is used to decide whether zero extension instructions 770 + * are needed when register is read. When BPF_F_TEST_RND_HI32 is set it 771 + * also causes generation of statements to randomize upper halves of 772 + * read registers. 773 + * 774 + * The test is written in a way to return an upper half of a register 775 + * that is affected by range propagation and must have it's subreg_def 776 + * preserved. This gives a return value of 0 and leads to undefined 777 + * return value if subreg_def mark is not preserved. 778 + */ 779 + __retval(0) 780 + /* Check that verifier believes r1/r0 are zero at exit */ 781 + __log_level(2) 782 + __msg("4: (77) r1 >>= 32 ; R1_w=0") 783 + __msg("5: (bf) r0 = r1 ; R0_w=0 R1_w=0") 784 + __msg("6: (95) exit") 785 + __msg("from 3 to 4") 786 + __msg("4: (77) r1 >>= 32 ; R1_w=0") 787 + __msg("5: (bf) r0 = r1 ; R0_w=0 R1_w=0") 788 + __msg("6: (95) exit") 789 + /* Verify that statements to randomize upper half of r1 had not been 790 + * generated. 791 + */ 792 + __xlated("call unknown") 793 + __xlated("r0 &= 2147483647") 794 + __xlated("w1 = w0") 795 + /* This is how disasm.c prints BPF_ZEXT_REG at the moment, x86 and arm 796 + * are the only CI archs that do not need zero extension for subregs. 
797 + */ 798 + #if !defined(__TARGET_ARCH_x86) && !defined(__TARGET_ARCH_arm64) 799 + __xlated("w1 = w1") 800 + #endif 801 + __xlated("if w0 < 0xa goto pc+0") 802 + __xlated("r1 >>= 32") 803 + __xlated("r0 = r1") 804 + __xlated("exit") 805 + __naked void linked_regs_and_subreg_def(void) 806 + { 807 + asm volatile ( 808 + "call %[bpf_ktime_get_ns];" 809 + /* make sure r0 is in 32-bit range, otherwise w1 = w0 won't 810 + * assign same IDs to registers. 811 + */ 812 + "r0 &= 0x7fffffff;" 813 + /* link w1 and w0 via ID */ 814 + "w1 = w0;" 815 + /* 'if' statement propagates range info from w0 to w1, 816 + * but should not affect w1->subreg_def property. 817 + */ 818 + "if w0 < 10 goto +0;" 819 + /* r1 is read here, on archs that require subreg zero 820 + * extension this would cause zext patch generation. 821 + */ 822 + "r1 >>= 32;" 823 + "r0 = r1;" 824 + "exit;" 825 + : 826 + : __imm(bpf_ktime_get_ns) 827 + : __clobber_all); 828 + } 829 + 763 830 char _license[] SEC("license") = "GPL";
+22 -12
tools/testing/selftests/bpf/testing_helpers.c
··· 367 367 return syscall(__NR_delete_module, name, flags); 368 368 } 369 369 370 - int unload_bpf_testmod(bool verbose) 370 + int unload_module(const char *name, bool verbose) 371 371 { 372 372 int ret, cnt = 0; 373 373 ··· 375 375 fprintf(stdout, "Failed to trigger kernel-side RCU sync!\n"); 376 376 377 377 for (;;) { 378 - ret = delete_module("bpf_testmod", 0); 378 + ret = delete_module(name, 0); 379 379 if (!ret || errno != EAGAIN) 380 380 break; 381 381 if (++cnt > 10000) { 382 - fprintf(stdout, "Unload of bpf_testmod timed out\n"); 382 + fprintf(stdout, "Unload of %s timed out\n", name); 383 383 break; 384 384 } 385 385 usleep(100); ··· 388 388 if (ret) { 389 389 if (errno == ENOENT) { 390 390 if (verbose) 391 - fprintf(stdout, "bpf_testmod.ko is already unloaded.\n"); 391 + fprintf(stdout, "%s.ko is already unloaded.\n", name); 392 392 return -1; 393 393 } 394 - fprintf(stdout, "Failed to unload bpf_testmod.ko from kernel: %d\n", -errno); 394 + fprintf(stdout, "Failed to unload %s.ko from kernel: %d\n", name, -errno); 395 395 return -1; 396 396 } 397 397 if (verbose) 398 - fprintf(stdout, "Successfully unloaded bpf_testmod.ko.\n"); 398 + fprintf(stdout, "Successfully unloaded %s.ko.\n", name); 399 399 return 0; 400 400 } 401 401 402 - int load_bpf_testmod(bool verbose) 402 + int load_module(const char *path, bool verbose) 403 403 { 404 404 int fd; 405 405 406 406 if (verbose) 407 - fprintf(stdout, "Loading bpf_testmod.ko...\n"); 407 + fprintf(stdout, "Loading %s...\n", path); 408 408 409 - fd = open("bpf_testmod.ko", O_RDONLY); 409 + fd = open(path, O_RDONLY); 410 410 if (fd < 0) { 411 - fprintf(stdout, "Can't find bpf_testmod.ko kernel module: %d\n", -errno); 411 + fprintf(stdout, "Can't find %s kernel module: %d\n", path, -errno); 412 412 return -ENOENT; 413 413 } 414 414 if (finit_module(fd, "", 0)) { 415 - fprintf(stdout, "Failed to load bpf_testmod.ko into the kernel: %d\n", -errno); 415 + fprintf(stdout, "Failed to load %s into the kernel: %d\n", 
path, -errno); 416 416 close(fd); 417 417 return -EINVAL; 418 418 } 419 419 close(fd); 420 420 421 421 if (verbose) 422 - fprintf(stdout, "Successfully loaded bpf_testmod.ko.\n"); 422 + fprintf(stdout, "Successfully loaded %s.\n", path); 423 423 return 0; 424 + } 425 + 426 + int unload_bpf_testmod(bool verbose) 427 + { 428 + return unload_module("bpf_testmod", verbose); 429 + } 430 + 431 + int load_bpf_testmod(bool verbose) 432 + { 433 + return load_module("bpf_testmod.ko", verbose); 424 434 } 425 435 426 436 /*
+2
tools/testing/selftests/bpf/testing_helpers.h
··· 38 38 int kern_sync_rcu(void); 39 39 int finit_module(int fd, const char *param_values, int flags); 40 40 int delete_module(const char *name, int flags); 41 + int load_module(const char *path, bool verbose); 42 + int unload_module(const char *name, bool verbose); 41 43 42 44 static inline __u64 get_time_ns(void) 43 45 {