Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Daniel Borkmann says:

====================
pull-request: bpf-next 2024-06-06

We've added 54 non-merge commits during the last 10 days which contain
a total of 50 files changed, 1887 insertions(+), 527 deletions(-).

The main changes are:

1) Add a user space notification mechanism via epoll when a struct_ops
object is being detached/unregistered, from Kui-Feng Lee.

2) Big batch of BPF selftest refactoring for sockmap and BPF congctl
tests, from Geliang Tang.

3) Add BTF field (type and string fields, right now) iterator support
to libbpf instead of using existing callback-based approaches,
from Andrii Nakryiko.

4) Extend BPF selftests for the latter with a new btf_field_iter
selftest, from Alan Maguire.

5) Add new kfuncs for a generic, open-coded bits iterator,
from Yafang Shao.

6) Fix BPF selftests' kallsyms_find() helper under kernels configured
with CONFIG_LTO_CLANG_THIN, from Yonghong Song.

7) Remove a bunch of unused structs in BPF selftests,
from David Alan Gilbert.

8) Convert test_sockmap section names into names understood by libbpf
so it can deduce program type and attach type, from Jakub Sitnicki.

9) Extend libbpf with the ability to configure log verbosity
via LIBBPF_LOG_LEVEL environment variable, from Mykyta Yatsenko.

10) Fix BPF selftests with regards to bpf_cookie and find_vma flakiness
in nested VMs, from Song Liu.

11) Extend riscv32/64 JITs to introduce shift/add helpers to generate Zba
optimization, from Xiao Wang.

12) Enable BPF programs to declare arrays and struct fields with kptr,
bpf_rb_root, and bpf_list_head, from Kui-Feng Lee.

* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (54 commits)
selftests/bpf: Drop useless arguments of do_test in bpf_tcp_ca
selftests/bpf: Use start_test in test_dctcp in bpf_tcp_ca
selftests/bpf: Use start_test in test_dctcp_fallback in bpf_tcp_ca
selftests/bpf: Add start_test helper in bpf_tcp_ca
selftests/bpf: Use connect_to_fd_opts in do_test in bpf_tcp_ca
libbpf: Auto-attach struct_ops BPF maps in BPF skeleton
selftests/bpf: Add btf_field_iter selftests
selftests/bpf: Fix send_signal test with nested CONFIG_PARAVIRT
libbpf: Remove callback-based type/string BTF field visitor helpers
bpftool: Use BTF field iterator in btfgen
libbpf: Make use of BTF field iterator in BTF handling code
libbpf: Make use of BTF field iterator in BPF linker code
libbpf: Add BTF field iterator
selftests/bpf: Ignore .llvm.<hash> suffix in kallsyms_find()
selftests/bpf: Fix bpf_cookie and find_vma in nested VM
selftests/bpf: Test global bpf_list_head arrays.
selftests/bpf: Test global bpf_rb_root arrays and fields in nested struct types.
selftests/bpf: Test kptr arrays and kptrs in nested struct fields.
bpf: limit the number of levels of a nested struct type.
bpf: look into the types of the fields of a struct type recursively.
...
====================

Link: https://lore.kernel.org/r/20240606223146.23020-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+1896 -536
+8
Documentation/bpf/libbpf/libbpf_overview.rst
··· 219 219 space part of the BPF application easier. Note that the BPF program themselves 220 220 must still be written in plain C. 221 221 222 + libbpf logging 223 + ============== 224 + 225 + By default, libbpf logs informational and warning messages to stderr. The 226 + verbosity of these messages can be controlled by setting the environment 227 + variable LIBBPF_LOG_LEVEL to either warn, info, or debug. A custom log 228 + callback can be set using ``libbpf_set_print()``. 229 + 222 230 Additional Documentation 223 231 ======================== 224 232
+33
arch/riscv/net/bpf_jit.h
··· 742 742 return rv_css_insn(0x6, imm, rs2, 0x2); 743 743 } 744 744 745 + /* RVZBA instructions. */ 746 + static inline u32 rvzba_sh2add(u8 rd, u8 rs1, u8 rs2) 747 + { 748 + return rv_r_insn(0x10, rs2, rs1, 0x4, rd, 0x33); 749 + } 750 + 751 + static inline u32 rvzba_sh3add(u8 rd, u8 rs1, u8 rs2) 752 + { 753 + return rv_r_insn(0x10, rs2, rs1, 0x6, rd, 0x33); 754 + } 755 + 745 756 /* RVZBB instructions. */ 746 757 static inline u32 rvzbb_sextb(u8 rd, u8 rs1) 747 758 { ··· 1104 1093 emitc(rvc_sw(rs1, off, rs2), ctx); 1105 1094 else 1106 1095 emit(rv_sw(rs1, off, rs2), ctx); 1096 + } 1097 + 1098 + static inline void emit_sh2add(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx) 1099 + { 1100 + if (rvzba_enabled()) { 1101 + emit(rvzba_sh2add(rd, rs1, rs2), ctx); 1102 + return; 1103 + } 1104 + 1105 + emit_slli(rd, rs1, 2, ctx); 1106 + emit_add(rd, rd, rs2, ctx); 1107 + } 1108 + 1109 + static inline void emit_sh3add(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx) 1110 + { 1111 + if (rvzba_enabled()) { 1112 + emit(rvzba_sh3add(rd, rs1, rs2), ctx); 1113 + return; 1114 + } 1115 + 1116 + emit_slli(rd, rs1, 3, ctx); 1117 + emit_add(rd, rd, rs2, ctx); 1107 1118 } 1108 1119 1109 1120 /* RV64-only helper functions. */
+1 -2
arch/riscv/net/bpf_jit_comp32.c
··· 811 811 * if (!prog) 812 812 * goto out; 813 813 */ 814 - emit(rv_slli(RV_REG_T0, lo(idx_reg), 2), ctx); 815 - emit(rv_add(RV_REG_T0, RV_REG_T0, lo(arr_reg)), ctx); 814 + emit_sh2add(RV_REG_T0, lo(idx_reg), lo(arr_reg), ctx); 816 815 off = offsetof(struct bpf_array, ptrs); 817 816 if (is_12b_check(off, insn)) 818 817 return -1;
+3 -6
arch/riscv/net/bpf_jit_comp64.c
··· 380 380 * if (!prog) 381 381 * goto out; 382 382 */ 383 - emit_slli(RV_REG_T2, RV_REG_A2, 3, ctx); 384 - emit_add(RV_REG_T2, RV_REG_T2, RV_REG_A1, ctx); 383 + emit_sh3add(RV_REG_T2, RV_REG_A2, RV_REG_A1, ctx); 385 384 off = offsetof(struct bpf_array, ptrs); 386 385 if (is_12b_check(off, insn)) 387 386 return -1; ··· 1098 1099 /* Load current CPU number in T1 */ 1099 1100 emit_ld(RV_REG_T1, offsetof(struct thread_info, cpu), 1100 1101 RV_REG_TP, ctx); 1101 - /* << 3 because offsets are 8 bytes */ 1102 - emit_slli(RV_REG_T1, RV_REG_T1, 3, ctx); 1103 1102 /* Load address of __per_cpu_offset array in T2 */ 1104 1103 emit_addr(RV_REG_T2, (u64)&__per_cpu_offset, extra_pass, ctx); 1105 - /* Add offset of current CPU to __per_cpu_offset */ 1106 - emit_add(RV_REG_T1, RV_REG_T2, RV_REG_T1, ctx); 1104 + /* Get address of __per_cpu_offset[cpu] in T1 */ 1105 + emit_sh3add(RV_REG_T1, RV_REG_T1, RV_REG_T2, ctx); 1107 1106 /* Load __per_cpu_offset[cpu] in T1 */ 1108 1107 emit_ld(RV_REG_T1, 0, RV_REG_T1, ctx); 1109 1108 /* Add the offset to Rd */
+10 -3
include/linux/bpf.h
··· 1612 1612 struct bpf_link_info *info); 1613 1613 int (*update_map)(struct bpf_link *link, struct bpf_map *new_map, 1614 1614 struct bpf_map *old_map); 1615 + __poll_t (*poll)(struct file *file, struct poll_table_struct *pts); 1615 1616 }; 1616 1617 1617 1618 struct bpf_tramp_link { ··· 1731 1730 int (*init_member)(const struct btf_type *t, 1732 1731 const struct btf_member *member, 1733 1732 void *kdata, const void *udata); 1734 - int (*reg)(void *kdata); 1735 - void (*unreg)(void *kdata); 1736 - int (*update)(void *kdata, void *old_kdata); 1733 + int (*reg)(void *kdata, struct bpf_link *link); 1734 + void (*unreg)(void *kdata, struct bpf_link *link); 1735 + int (*update)(void *kdata, void *old_kdata, struct bpf_link *link); 1737 1736 int (*validate)(void *kdata); 1738 1737 void *cfi_stubs; 1739 1738 struct module *owner; ··· 2334 2333 int bpf_link_settle(struct bpf_link_primer *primer); 2335 2334 void bpf_link_cleanup(struct bpf_link_primer *primer); 2336 2335 void bpf_link_inc(struct bpf_link *link); 2336 + struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link); 2337 2337 void bpf_link_put(struct bpf_link *link); 2338 2338 int bpf_link_new_fd(struct bpf_link *link); 2339 2339 struct bpf_link *bpf_link_get_from_fd(u32 ufd); ··· 2704 2702 2705 2703 static inline void bpf_link_inc(struct bpf_link *link) 2706 2704 { 2705 + } 2706 + 2707 + static inline struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link) 2708 + { 2709 + return NULL; 2707 2710 } 2708 2711 2709 2712 static inline void bpf_link_put(struct bpf_link *link)
+65 -10
kernel/bpf/bpf_struct_ops.c
··· 12 12 #include <linux/mutex.h> 13 13 #include <linux/btf_ids.h> 14 14 #include <linux/rcupdate_wait.h> 15 + #include <linux/poll.h> 15 16 16 17 struct bpf_struct_ops_value { 17 18 struct bpf_struct_ops_common_value common; ··· 57 56 struct bpf_struct_ops_link { 58 57 struct bpf_link link; 59 58 struct bpf_map __rcu *map; 59 + wait_queue_head_t wait_hup; 60 60 }; 61 61 62 62 static DEFINE_MUTEX(update_mutex); ··· 759 757 goto unlock; 760 758 } 761 759 762 - err = st_ops->reg(kdata); 760 + err = st_ops->reg(kdata, NULL); 763 761 if (likely(!err)) { 764 762 /* This refcnt increment on the map here after 765 763 * 'st_ops->reg()' is secure since the state of the ··· 807 805 BPF_STRUCT_OPS_STATE_TOBEFREE); 808 806 switch (prev_state) { 809 807 case BPF_STRUCT_OPS_STATE_INUSE: 810 - st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data); 808 + st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, NULL); 811 809 bpf_map_put(map); 812 810 return 0; 813 811 case BPF_STRUCT_OPS_STATE_TOBEFREE: ··· 1059 1057 st_map = (struct bpf_struct_ops_map *) 1060 1058 rcu_dereference_protected(st_link->map, true); 1061 1059 if (st_map) { 1062 - /* st_link->map can be NULL if 1063 - * bpf_struct_ops_link_create() fails to register. 
1064 - */ 1065 - st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data); 1060 + st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link); 1066 1061 bpf_map_put(&st_map->map); 1067 1062 } 1068 1063 kfree(st_link); ··· 1074 1075 st_link = container_of(link, struct bpf_struct_ops_link, link); 1075 1076 rcu_read_lock(); 1076 1077 map = rcu_dereference(st_link->map); 1077 - seq_printf(seq, "map_id:\t%d\n", map->id); 1078 + if (map) 1079 + seq_printf(seq, "map_id:\t%d\n", map->id); 1078 1080 rcu_read_unlock(); 1079 1081 } 1080 1082 ··· 1088 1088 st_link = container_of(link, struct bpf_struct_ops_link, link); 1089 1089 rcu_read_lock(); 1090 1090 map = rcu_dereference(st_link->map); 1091 - info->struct_ops.map_id = map->id; 1091 + if (map) 1092 + info->struct_ops.map_id = map->id; 1092 1093 rcu_read_unlock(); 1093 1094 return 0; 1094 1095 } ··· 1114 1113 mutex_lock(&update_mutex); 1115 1114 1116 1115 old_map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex)); 1116 + if (!old_map) { 1117 + err = -ENOLINK; 1118 + goto err_out; 1119 + } 1117 1120 if (expected_old_map && old_map != expected_old_map) { 1118 1121 err = -EPERM; 1119 1122 goto err_out; ··· 1130 1125 goto err_out; 1131 1126 } 1132 1127 1133 - err = st_map->st_ops_desc->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data); 1128 + err = st_map->st_ops_desc->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data, link); 1134 1129 if (err) 1135 1130 goto err_out; 1136 1131 ··· 1144 1139 return err; 1145 1140 } 1146 1141 1142 + static int bpf_struct_ops_map_link_detach(struct bpf_link *link) 1143 + { 1144 + struct bpf_struct_ops_link *st_link = container_of(link, struct bpf_struct_ops_link, link); 1145 + struct bpf_struct_ops_map *st_map; 1146 + struct bpf_map *map; 1147 + 1148 + mutex_lock(&update_mutex); 1149 + 1150 + map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex)); 1151 + if (!map) { 1152 + mutex_unlock(&update_mutex); 1153 + return 0; 1154 + } 
1155 + st_map = container_of(map, struct bpf_struct_ops_map, map); 1156 + 1157 + st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link); 1158 + 1159 + RCU_INIT_POINTER(st_link->map, NULL); 1160 + /* Pair with bpf_map_get() in bpf_struct_ops_link_create() or 1161 + * bpf_map_inc() in bpf_struct_ops_map_link_update(). 1162 + */ 1163 + bpf_map_put(&st_map->map); 1164 + 1165 + mutex_unlock(&update_mutex); 1166 + 1167 + wake_up_interruptible_poll(&st_link->wait_hup, EPOLLHUP); 1168 + 1169 + return 0; 1170 + } 1171 + 1172 + static __poll_t bpf_struct_ops_map_link_poll(struct file *file, 1173 + struct poll_table_struct *pts) 1174 + { 1175 + struct bpf_struct_ops_link *st_link = file->private_data; 1176 + 1177 + poll_wait(file, &st_link->wait_hup, pts); 1178 + 1179 + return rcu_access_pointer(st_link->map) ? 0 : EPOLLHUP; 1180 + } 1181 + 1147 1182 static const struct bpf_link_ops bpf_struct_ops_map_lops = { 1148 1183 .dealloc = bpf_struct_ops_map_link_dealloc, 1184 + .detach = bpf_struct_ops_map_link_detach, 1149 1185 .show_fdinfo = bpf_struct_ops_map_link_show_fdinfo, 1150 1186 .fill_link_info = bpf_struct_ops_map_link_fill_link_info, 1151 1187 .update_map = bpf_struct_ops_map_link_update, 1188 + .poll = bpf_struct_ops_map_link_poll, 1152 1189 }; 1153 1190 1154 1191 int bpf_struct_ops_link_create(union bpf_attr *attr) ··· 1223 1176 if (err) 1224 1177 goto err_out; 1225 1178 1226 - err = st_map->st_ops_desc->st_ops->reg(st_map->kvalue.data); 1179 + init_waitqueue_head(&link->wait_hup); 1180 + 1181 + /* Hold the update_mutex such that the subsystem cannot 1182 + * do link->ops->detach() before the link is fully initialized. 
1183 + */ 1184 + mutex_lock(&update_mutex); 1185 + err = st_map->st_ops_desc->st_ops->reg(st_map->kvalue.data, &link->link); 1227 1186 if (err) { 1187 + mutex_unlock(&update_mutex); 1228 1188 bpf_link_cleanup(&link_primer); 1229 1189 link = NULL; 1230 1190 goto err_out; 1231 1191 } 1232 1192 RCU_INIT_POINTER(link->map, map); 1193 + mutex_unlock(&update_mutex); 1233 1194 1234 1195 return bpf_link_settle(&link_primer); 1235 1196
+202 -108
kernel/bpf/btf.c
··· 3442 3442 goto end; \ 3443 3443 } 3444 3444 3445 - static int btf_get_field_type(const char *name, u32 field_mask, u32 *seen_mask, 3445 + static int btf_get_field_type(const struct btf *btf, const struct btf_type *var_type, 3446 + u32 field_mask, u32 *seen_mask, 3446 3447 int *align, int *sz) 3447 3448 { 3448 3449 int type = 0; 3450 + const char *name = __btf_name_by_offset(btf, var_type->name_off); 3449 3451 3450 3452 if (field_mask & BPF_SPIN_LOCK) { 3451 3453 if (!strcmp(name, "bpf_spin_lock")) { ··· 3483 3481 field_mask_test_name(BPF_REFCOUNT, "bpf_refcount"); 3484 3482 3485 3483 /* Only return BPF_KPTR when all other types with matchable names fail */ 3486 - if (field_mask & BPF_KPTR) { 3484 + if (field_mask & BPF_KPTR && !__btf_type_is_struct(var_type)) { 3487 3485 type = BPF_KPTR_REF; 3488 3486 goto end; 3489 3487 } ··· 3496 3494 3497 3495 #undef field_mask_test_name 3498 3496 3497 + /* Repeat a number of fields for a specified number of times. 3498 + * 3499 + * Copy the fields starting from the first field and repeat them for 3500 + * repeat_cnt times. The fields are repeated by adding the offset of each 3501 + * field with 3502 + * (i + 1) * elem_size 3503 + * where i is the repeat index and elem_size is the size of an element. 3504 + */ 3505 + static int btf_repeat_fields(struct btf_field_info *info, 3506 + u32 field_cnt, u32 repeat_cnt, u32 elem_size) 3507 + { 3508 + u32 i, j; 3509 + u32 cur; 3510 + 3511 + /* Ensure not repeating fields that should not be repeated. 
*/ 3512 + for (i = 0; i < field_cnt; i++) { 3513 + switch (info[i].type) { 3514 + case BPF_KPTR_UNREF: 3515 + case BPF_KPTR_REF: 3516 + case BPF_KPTR_PERCPU: 3517 + case BPF_LIST_HEAD: 3518 + case BPF_RB_ROOT: 3519 + break; 3520 + default: 3521 + return -EINVAL; 3522 + } 3523 + } 3524 + 3525 + cur = field_cnt; 3526 + for (i = 0; i < repeat_cnt; i++) { 3527 + memcpy(&info[cur], &info[0], field_cnt * sizeof(info[0])); 3528 + for (j = 0; j < field_cnt; j++) 3529 + info[cur++].off += (i + 1) * elem_size; 3530 + } 3531 + 3532 + return 0; 3533 + } 3534 + 3499 3535 static int btf_find_struct_field(const struct btf *btf, 3500 3536 const struct btf_type *t, u32 field_mask, 3501 - struct btf_field_info *info, int info_cnt) 3537 + struct btf_field_info *info, int info_cnt, 3538 + u32 level); 3539 + 3540 + /* Find special fields in the struct type of a field. 3541 + * 3542 + * This function is used to find fields of special types that is not a 3543 + * global variable or a direct field of a struct type. It also handles the 3544 + * repetition if it is the element type of an array. 3545 + */ 3546 + static int btf_find_nested_struct(const struct btf *btf, const struct btf_type *t, 3547 + u32 off, u32 nelems, 3548 + u32 field_mask, struct btf_field_info *info, 3549 + int info_cnt, u32 level) 3502 3550 { 3503 - int ret, idx = 0, align, sz, field_type; 3504 - const struct btf_member *member; 3551 + int ret, err, i; 3552 + 3553 + level++; 3554 + if (level >= MAX_RESOLVE_DEPTH) 3555 + return -E2BIG; 3556 + 3557 + ret = btf_find_struct_field(btf, t, field_mask, info, info_cnt, level); 3558 + 3559 + if (ret <= 0) 3560 + return ret; 3561 + 3562 + /* Shift the offsets of the nested struct fields to the offsets 3563 + * related to the container. 
3564 + */ 3565 + for (i = 0; i < ret; i++) 3566 + info[i].off += off; 3567 + 3568 + if (nelems > 1) { 3569 + err = btf_repeat_fields(info, ret, nelems - 1, t->size); 3570 + if (err == 0) 3571 + ret *= nelems; 3572 + else 3573 + ret = err; 3574 + } 3575 + 3576 + return ret; 3577 + } 3578 + 3579 + static int btf_find_field_one(const struct btf *btf, 3580 + const struct btf_type *var, 3581 + const struct btf_type *var_type, 3582 + int var_idx, 3583 + u32 off, u32 expected_size, 3584 + u32 field_mask, u32 *seen_mask, 3585 + struct btf_field_info *info, int info_cnt, 3586 + u32 level) 3587 + { 3588 + int ret, align, sz, field_type; 3505 3589 struct btf_field_info tmp; 3590 + const struct btf_array *array; 3591 + u32 i, nelems = 1; 3592 + 3593 + /* Walk into array types to find the element type and the number of 3594 + * elements in the (flattened) array. 3595 + */ 3596 + for (i = 0; i < MAX_RESOLVE_DEPTH && btf_type_is_array(var_type); i++) { 3597 + array = btf_array(var_type); 3598 + nelems *= array->nelems; 3599 + var_type = btf_type_by_id(btf, array->type); 3600 + } 3601 + if (i == MAX_RESOLVE_DEPTH) 3602 + return -E2BIG; 3603 + if (nelems == 0) 3604 + return 0; 3605 + 3606 + field_type = btf_get_field_type(btf, var_type, 3607 + field_mask, seen_mask, &align, &sz); 3608 + /* Look into variables of struct types */ 3609 + if (!field_type && __btf_type_is_struct(var_type)) { 3610 + sz = var_type->size; 3611 + if (expected_size && expected_size != sz * nelems) 3612 + return 0; 3613 + ret = btf_find_nested_struct(btf, var_type, off, nelems, field_mask, 3614 + &info[0], info_cnt, level); 3615 + return ret; 3616 + } 3617 + 3618 + if (field_type == 0) 3619 + return 0; 3620 + if (field_type < 0) 3621 + return field_type; 3622 + 3623 + if (expected_size && expected_size != sz * nelems) 3624 + return 0; 3625 + if (off % align) 3626 + return 0; 3627 + 3628 + switch (field_type) { 3629 + case BPF_SPIN_LOCK: 3630 + case BPF_TIMER: 3631 + case BPF_WORKQUEUE: 3632 + case 
BPF_LIST_NODE: 3633 + case BPF_RB_NODE: 3634 + case BPF_REFCOUNT: 3635 + ret = btf_find_struct(btf, var_type, off, sz, field_type, 3636 + info_cnt ? &info[0] : &tmp); 3637 + if (ret < 0) 3638 + return ret; 3639 + break; 3640 + case BPF_KPTR_UNREF: 3641 + case BPF_KPTR_REF: 3642 + case BPF_KPTR_PERCPU: 3643 + ret = btf_find_kptr(btf, var_type, off, sz, 3644 + info_cnt ? &info[0] : &tmp); 3645 + if (ret < 0) 3646 + return ret; 3647 + break; 3648 + case BPF_LIST_HEAD: 3649 + case BPF_RB_ROOT: 3650 + ret = btf_find_graph_root(btf, var, var_type, 3651 + var_idx, off, sz, 3652 + info_cnt ? &info[0] : &tmp, 3653 + field_type); 3654 + if (ret < 0) 3655 + return ret; 3656 + break; 3657 + default: 3658 + return -EFAULT; 3659 + } 3660 + 3661 + if (ret == BTF_FIELD_IGNORE) 3662 + return 0; 3663 + if (nelems > info_cnt) 3664 + return -E2BIG; 3665 + if (nelems > 1) { 3666 + ret = btf_repeat_fields(info, 1, nelems - 1, sz); 3667 + if (ret < 0) 3668 + return ret; 3669 + } 3670 + return nelems; 3671 + } 3672 + 3673 + static int btf_find_struct_field(const struct btf *btf, 3674 + const struct btf_type *t, u32 field_mask, 3675 + struct btf_field_info *info, int info_cnt, 3676 + u32 level) 3677 + { 3678 + int ret, idx = 0; 3679 + const struct btf_member *member; 3506 3680 u32 i, off, seen_mask = 0; 3507 3681 3508 3682 for_each_member(i, t, member) { 3509 3683 const struct btf_type *member_type = btf_type_by_id(btf, 3510 3684 member->type); 3511 3685 3512 - field_type = btf_get_field_type(__btf_name_by_offset(btf, member_type->name_off), 3513 - field_mask, &seen_mask, &align, &sz); 3514 - if (field_type == 0) 3515 - continue; 3516 - if (field_type < 0) 3517 - return field_type; 3518 - 3519 3686 off = __btf_member_bit_offset(t, member); 3520 3687 if (off % 8) 3521 3688 /* valid C code cannot generate such BTF */ 3522 3689 return -EINVAL; 3523 3690 off /= 8; 3524 - if (off % align) 3525 - continue; 3526 3691 3527 - switch (field_type) { 3528 - case BPF_SPIN_LOCK: 3529 - case BPF_TIMER: 
3530 - case BPF_WORKQUEUE: 3531 - case BPF_LIST_NODE: 3532 - case BPF_RB_NODE: 3533 - case BPF_REFCOUNT: 3534 - ret = btf_find_struct(btf, member_type, off, sz, field_type, 3535 - idx < info_cnt ? &info[idx] : &tmp); 3536 - if (ret < 0) 3537 - return ret; 3538 - break; 3539 - case BPF_KPTR_UNREF: 3540 - case BPF_KPTR_REF: 3541 - case BPF_KPTR_PERCPU: 3542 - ret = btf_find_kptr(btf, member_type, off, sz, 3543 - idx < info_cnt ? &info[idx] : &tmp); 3544 - if (ret < 0) 3545 - return ret; 3546 - break; 3547 - case BPF_LIST_HEAD: 3548 - case BPF_RB_ROOT: 3549 - ret = btf_find_graph_root(btf, t, member_type, 3550 - i, off, sz, 3551 - idx < info_cnt ? &info[idx] : &tmp, 3552 - field_type); 3553 - if (ret < 0) 3554 - return ret; 3555 - break; 3556 - default: 3557 - return -EFAULT; 3558 - } 3559 - 3560 - if (ret == BTF_FIELD_IGNORE) 3561 - continue; 3562 - if (idx >= info_cnt) 3563 - return -E2BIG; 3564 - ++idx; 3692 + ret = btf_find_field_one(btf, t, member_type, i, 3693 + off, 0, 3694 + field_mask, &seen_mask, 3695 + &info[idx], info_cnt - idx, level); 3696 + if (ret < 0) 3697 + return ret; 3698 + idx += ret; 3565 3699 } 3566 3700 return idx; 3567 3701 } 3568 3702 3569 3703 static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t, 3570 3704 u32 field_mask, struct btf_field_info *info, 3571 - int info_cnt) 3705 + int info_cnt, u32 level) 3572 3706 { 3573 - int ret, idx = 0, align, sz, field_type; 3707 + int ret, idx = 0; 3574 3708 const struct btf_var_secinfo *vsi; 3575 - struct btf_field_info tmp; 3576 3709 u32 i, off, seen_mask = 0; 3577 3710 3578 3711 for_each_vsi(i, t, vsi) { 3579 3712 const struct btf_type *var = btf_type_by_id(btf, vsi->type); 3580 3713 const struct btf_type *var_type = btf_type_by_id(btf, var->type); 3581 3714 3582 - field_type = btf_get_field_type(__btf_name_by_offset(btf, var_type->name_off), 3583 - field_mask, &seen_mask, &align, &sz); 3584 - if (field_type == 0) 3585 - continue; 3586 - if (field_type < 0) 3587 - return 
field_type; 3588 - 3589 3715 off = vsi->offset; 3590 - if (vsi->size != sz) 3591 - continue; 3592 - if (off % align) 3593 - continue; 3594 - 3595 - switch (field_type) { 3596 - case BPF_SPIN_LOCK: 3597 - case BPF_TIMER: 3598 - case BPF_WORKQUEUE: 3599 - case BPF_LIST_NODE: 3600 - case BPF_RB_NODE: 3601 - case BPF_REFCOUNT: 3602 - ret = btf_find_struct(btf, var_type, off, sz, field_type, 3603 - idx < info_cnt ? &info[idx] : &tmp); 3604 - if (ret < 0) 3605 - return ret; 3606 - break; 3607 - case BPF_KPTR_UNREF: 3608 - case BPF_KPTR_REF: 3609 - case BPF_KPTR_PERCPU: 3610 - ret = btf_find_kptr(btf, var_type, off, sz, 3611 - idx < info_cnt ? &info[idx] : &tmp); 3612 - if (ret < 0) 3613 - return ret; 3614 - break; 3615 - case BPF_LIST_HEAD: 3616 - case BPF_RB_ROOT: 3617 - ret = btf_find_graph_root(btf, var, var_type, 3618 - -1, off, sz, 3619 - idx < info_cnt ? &info[idx] : &tmp, 3620 - field_type); 3621 - if (ret < 0) 3622 - return ret; 3623 - break; 3624 - default: 3625 - return -EFAULT; 3626 - } 3627 - 3628 - if (ret == BTF_FIELD_IGNORE) 3629 - continue; 3630 - if (idx >= info_cnt) 3631 - return -E2BIG; 3632 - ++idx; 3716 + ret = btf_find_field_one(btf, var, var_type, -1, off, vsi->size, 3717 + field_mask, &seen_mask, 3718 + &info[idx], info_cnt - idx, 3719 + level); 3720 + if (ret < 0) 3721 + return ret; 3722 + idx += ret; 3633 3723 } 3634 3724 return idx; 3635 3725 } ··· 3731 3637 int info_cnt) 3732 3638 { 3733 3639 if (__btf_type_is_struct(t)) 3734 - return btf_find_struct_field(btf, t, field_mask, info, info_cnt); 3640 + return btf_find_struct_field(btf, t, field_mask, info, info_cnt, 0); 3735 3641 else if (btf_type_is_datasec(t)) 3736 - return btf_find_datasec_var(btf, t, field_mask, info, info_cnt); 3642 + return btf_find_datasec_var(btf, t, field_mask, info, info_cnt, 0); 3737 3643 return -EINVAL; 3738 3644 } 3739 3645 ··· 6787 6693 for (i = 0; i < rec->cnt; i++) { 6788 6694 struct btf_field *field = &rec->fields[i]; 6789 6695 u32 offset = field->offset; 6790 - 
if (off < offset + btf_field_type_size(field->type) && offset < off + size) { 6696 + if (off < offset + field->size && offset < off + size) { 6791 6697 bpf_log(log, 6792 6698 "direct access to %s is disallowed\n", 6793 6699 btf_field_type_name(field->type));
+119
kernel/bpf/helpers.c
··· 2744 2744 preempt_enable(); 2745 2745 } 2746 2746 2747 + struct bpf_iter_bits { 2748 + __u64 __opaque[2]; 2749 + } __aligned(8); 2750 + 2751 + struct bpf_iter_bits_kern { 2752 + union { 2753 + unsigned long *bits; 2754 + unsigned long bits_copy; 2755 + }; 2756 + u32 nr_bits; 2757 + int bit; 2758 + } __aligned(8); 2759 + 2760 + /** 2761 + * bpf_iter_bits_new() - Initialize a new bits iterator for a given memory area 2762 + * @it: The new bpf_iter_bits to be created 2763 + * @unsafe_ptr__ign: A pointer pointing to a memory area to be iterated over 2764 + * @nr_words: The size of the specified memory area, measured in 8-byte units. 2765 + * Due to the limitation of memalloc, it can't be greater than 512. 2766 + * 2767 + * This function initializes a new bpf_iter_bits structure for iterating over 2768 + * a memory area which is specified by the @unsafe_ptr__ign and @nr_words. It 2769 + * copies the data of the memory area to the newly created bpf_iter_bits @it for 2770 + * subsequent iteration operations. 2771 + * 2772 + * On success, 0 is returned. On failure, ERR is returned. 
2773 + */ 2774 + __bpf_kfunc int 2775 + bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign, u32 nr_words) 2776 + { 2777 + struct bpf_iter_bits_kern *kit = (void *)it; 2778 + u32 nr_bytes = nr_words * sizeof(u64); 2779 + u32 nr_bits = BYTES_TO_BITS(nr_bytes); 2780 + int err; 2781 + 2782 + BUILD_BUG_ON(sizeof(struct bpf_iter_bits_kern) != sizeof(struct bpf_iter_bits)); 2783 + BUILD_BUG_ON(__alignof__(struct bpf_iter_bits_kern) != 2784 + __alignof__(struct bpf_iter_bits)); 2785 + 2786 + kit->nr_bits = 0; 2787 + kit->bits_copy = 0; 2788 + kit->bit = -1; 2789 + 2790 + if (!unsafe_ptr__ign || !nr_words) 2791 + return -EINVAL; 2792 + 2793 + /* Optimization for u64 mask */ 2794 + if (nr_bits == 64) { 2795 + err = bpf_probe_read_kernel_common(&kit->bits_copy, nr_bytes, unsafe_ptr__ign); 2796 + if (err) 2797 + return -EFAULT; 2798 + 2799 + kit->nr_bits = nr_bits; 2800 + return 0; 2801 + } 2802 + 2803 + /* Fallback to memalloc */ 2804 + kit->bits = bpf_mem_alloc(&bpf_global_ma, nr_bytes); 2805 + if (!kit->bits) 2806 + return -ENOMEM; 2807 + 2808 + err = bpf_probe_read_kernel_common(kit->bits, nr_bytes, unsafe_ptr__ign); 2809 + if (err) { 2810 + bpf_mem_free(&bpf_global_ma, kit->bits); 2811 + return err; 2812 + } 2813 + 2814 + kit->nr_bits = nr_bits; 2815 + return 0; 2816 + } 2817 + 2818 + /** 2819 + * bpf_iter_bits_next() - Get the next bit in a bpf_iter_bits 2820 + * @it: The bpf_iter_bits to be checked 2821 + * 2822 + * This function returns a pointer to a number representing the value of the 2823 + * next bit in the bits. 2824 + * 2825 + * If there are no further bits available, it returns NULL. 2826 + */ 2827 + __bpf_kfunc int *bpf_iter_bits_next(struct bpf_iter_bits *it) 2828 + { 2829 + struct bpf_iter_bits_kern *kit = (void *)it; 2830 + u32 nr_bits = kit->nr_bits; 2831 + const unsigned long *bits; 2832 + int bit; 2833 + 2834 + if (nr_bits == 0) 2835 + return NULL; 2836 + 2837 + bits = nr_bits == 64 ? 
&kit->bits_copy : kit->bits; 2838 + bit = find_next_bit(bits, nr_bits, kit->bit + 1); 2839 + if (bit >= nr_bits) { 2840 + kit->nr_bits = 0; 2841 + return NULL; 2842 + } 2843 + 2844 + kit->bit = bit; 2845 + return &kit->bit; 2846 + } 2847 + 2848 + /** 2849 + * bpf_iter_bits_destroy() - Destroy a bpf_iter_bits 2850 + * @it: The bpf_iter_bits to be destroyed 2851 + * 2852 + * Destroy the resource associated with the bpf_iter_bits. 2853 + */ 2854 + __bpf_kfunc void bpf_iter_bits_destroy(struct bpf_iter_bits *it) 2855 + { 2856 + struct bpf_iter_bits_kern *kit = (void *)it; 2857 + 2858 + if (kit->nr_bits <= 64) 2859 + return; 2860 + bpf_mem_free(&bpf_global_ma, kit->bits); 2861 + } 2862 + 2747 2863 __bpf_kfunc_end_defs(); 2748 2864 2749 2865 BTF_KFUNCS_START(generic_btf_ids) ··· 2942 2826 BTF_ID_FLAGS(func, bpf_wq_start) 2943 2827 BTF_ID_FLAGS(func, bpf_preempt_disable) 2944 2828 BTF_ID_FLAGS(func, bpf_preempt_enable) 2829 + BTF_ID_FLAGS(func, bpf_iter_bits_new, KF_ITER_NEW) 2830 + BTF_ID_FLAGS(func, bpf_iter_bits_next, KF_ITER_NEXT | KF_RET_NULL) 2831 + BTF_ID_FLAGS(func, bpf_iter_bits_destroy, KF_ITER_DESTROY) 2945 2832 BTF_KFUNCS_END(common_btf_ids) 2946 2833 2947 2834 static const struct btf_kfunc_id_set common_kfunc_set = {
+28 -6
kernel/bpf/syscall.c
··· 3151 3151 } 3152 3152 #endif 3153 3153 3154 + static __poll_t bpf_link_poll(struct file *file, struct poll_table_struct *pts) 3155 + { 3156 + struct bpf_link *link = file->private_data; 3157 + 3158 + return link->ops->poll(file, pts); 3159 + } 3160 + 3154 3161 static const struct file_operations bpf_link_fops = { 3155 3162 #ifdef CONFIG_PROC_FS 3156 3163 .show_fdinfo = bpf_link_show_fdinfo, ··· 3165 3158 .release = bpf_link_release, 3166 3159 .read = bpf_dummy_read, 3167 3160 .write = bpf_dummy_write, 3161 + }; 3162 + 3163 + static const struct file_operations bpf_link_fops_poll = { 3164 + #ifdef CONFIG_PROC_FS 3165 + .show_fdinfo = bpf_link_show_fdinfo, 3166 + #endif 3167 + .release = bpf_link_release, 3168 + .read = bpf_dummy_read, 3169 + .write = bpf_dummy_write, 3170 + .poll = bpf_link_poll, 3168 3171 }; 3169 3172 3170 3173 static int bpf_link_alloc_id(struct bpf_link *link) ··· 3219 3202 return id; 3220 3203 } 3221 3204 3222 - file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC); 3205 + file = anon_inode_getfile("bpf_link", 3206 + link->ops->poll ? &bpf_link_fops_poll : &bpf_link_fops, 3207 + link, O_CLOEXEC); 3223 3208 if (IS_ERR(file)) { 3224 3209 bpf_link_free_id(id); 3225 3210 put_unused_fd(fd); ··· 3249 3230 3250 3231 int bpf_link_new_fd(struct bpf_link *link) 3251 3232 { 3252 - return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC); 3233 + return anon_inode_getfd("bpf-link", 3234 + link->ops->poll ? 
&bpf_link_fops_poll : &bpf_link_fops, 3235 + link, O_CLOEXEC); 3253 3236 } 3254 3237 3255 3238 struct bpf_link *bpf_link_get_from_fd(u32 ufd) ··· 3261 3240 3262 3241 if (!f.file) 3263 3242 return ERR_PTR(-EBADF); 3264 - if (f.file->f_op != &bpf_link_fops) { 3243 + if (f.file->f_op != &bpf_link_fops && f.file->f_op != &bpf_link_fops_poll) { 3265 3244 fdput(f); 3266 3245 return ERR_PTR(-EINVAL); 3267 3246 } ··· 4993 4972 uattr); 4994 4973 else if (f.file->f_op == &btf_fops) 4995 4974 err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr); 4996 - else if (f.file->f_op == &bpf_link_fops) 4975 + else if (f.file->f_op == &bpf_link_fops || f.file->f_op == &bpf_link_fops_poll) 4997 4976 err = bpf_link_get_info_by_fd(f.file, f.file->private_data, 4998 4977 attr, uattr); 4999 4978 else ··· 5128 5107 if (!file) 5129 5108 return -EBADF; 5130 5109 5131 - if (file->f_op == &bpf_link_fops) { 5110 + if (file->f_op == &bpf_link_fops || file->f_op == &bpf_link_fops_poll) { 5132 5111 struct bpf_link *link = file->private_data; 5133 5112 5134 5113 if (link->ops == &bpf_raw_tp_link_lops) { ··· 5438 5417 return ret; 5439 5418 } 5440 5419 5441 - static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link) 5420 + struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link) 5442 5421 { 5443 5422 return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT); 5444 5423 } 5424 + EXPORT_SYMBOL(bpf_link_inc_not_zero); 5445 5425 5446 5426 struct bpf_link *bpf_link_by_id(u32 id) 5447 5427 {
+2 -2
kernel/bpf/verifier.c
··· 5448 5448 * this program. To check that [x1, x2) overlaps with [y1, y2), 5449 5449 * it is sufficient to check x1 < y2 && y1 < x2. 5450 5450 */ 5451 - if (reg->smin_value + off < p + btf_field_type_size(field->type) && 5451 + if (reg->smin_value + off < p + field->size && 5452 5452 p < reg->umax_value + off + size) { 5453 5453 switch (field->type) { 5454 5454 case BPF_KPTR_UNREF: ··· 11648 11648 11649 11649 node_off = reg->off + reg->var_off.value; 11650 11650 field = reg_find_field_offset(reg, node_off, node_field_type); 11651 - if (!field || field->offset != node_off) { 11651 + if (!field) { 11652 11652 verbose(env, "%s not found at offset=%u\n", node_type_name, node_off); 11653 11653 return -EINVAL; 11654 11654 }
+1
lib/test_bpf.c
··· 15706 15706 module_init(test_bpf_init); 15707 15707 module_exit(test_bpf_exit); 15708 15708 15709 + MODULE_DESCRIPTION("Testsuite for BPF interpreter and BPF JIT compiler"); 15709 15710 MODULE_LICENSE("GPL");
+2 -2
net/bpf/bpf_dummy_struct_ops.c
··· 272 272 return -EOPNOTSUPP; 273 273 } 274 274 275 - static int bpf_dummy_reg(void *kdata) 275 + static int bpf_dummy_reg(void *kdata, struct bpf_link *link) 276 276 { 277 277 return -EOPNOTSUPP; 278 278 } 279 279 280 - static void bpf_dummy_unreg(void *kdata) 280 + static void bpf_dummy_unreg(void *kdata, struct bpf_link *link) 281 281 { 282 282 } 283 283
+3 -3
net/ipv4/bpf_tcp_ca.c
··· 260 260 return 0; 261 261 } 262 262 263 - static int bpf_tcp_ca_reg(void *kdata) 263 + static int bpf_tcp_ca_reg(void *kdata, struct bpf_link *link) 264 264 { 265 265 return tcp_register_congestion_control(kdata); 266 266 } 267 267 268 - static void bpf_tcp_ca_unreg(void *kdata) 268 + static void bpf_tcp_ca_unreg(void *kdata, struct bpf_link *link) 269 269 { 270 270 tcp_unregister_congestion_control(kdata); 271 271 } 272 272 273 - static int bpf_tcp_ca_update(void *kdata, void *old_kdata) 273 + static int bpf_tcp_ca_update(void *kdata, void *old_kdata, struct bpf_link *link) 274 274 { 275 275 return tcp_update_congestion_control(kdata, old_kdata); 276 276 }
+38 -14
tools/bpf/bpftool/gen.c
··· 848 848 } 849 849 850 850 static void 851 - codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped) 851 + codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped, bool populate_links) 852 852 { 853 853 struct bpf_map *map; 854 854 char ident[256]; ··· 886 886 /* memory-mapped internal maps */ 887 887 if (mmaped && is_mmapable_map(map, ident, sizeof(ident))) { 888 888 printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n", 889 + i, ident); 890 + } 891 + 892 + if (populate_links && bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS) { 893 + codegen("\ 894 + \n\ 895 + s->maps[%zu].link = &obj->links.%s;\n\ 896 + ", 889 897 i, ident); 890 898 } 891 899 i++; ··· 1149 1141 static int do_skeleton(int argc, char **argv) 1150 1142 { 1151 1143 char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")]; 1152 - size_t map_cnt = 0, prog_cnt = 0, file_sz, mmap_sz; 1144 + size_t map_cnt = 0, prog_cnt = 0, attach_map_cnt = 0, file_sz, mmap_sz; 1153 1145 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts); 1154 1146 char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data; 1155 1147 struct bpf_object *obj = NULL; ··· 1233 1225 bpf_map__name(map)); 1234 1226 continue; 1235 1227 } 1228 + 1229 + if (bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS) 1230 + attach_map_cnt++; 1231 + 1236 1232 map_cnt++; 1237 1233 } 1238 1234 bpf_object__for_each_program(prog, obj) { ··· 1309 1297 bpf_program__name(prog)); 1310 1298 } 1311 1299 printf("\t} progs;\n"); 1300 + } 1301 + 1302 + if (prog_cnt + attach_map_cnt) { 1312 1303 printf("\tstruct {\n"); 1313 1304 bpf_object__for_each_program(prog, obj) { 1314 1305 if (use_loader) ··· 1321 1306 printf("\t\tstruct bpf_link *%s;\n", 1322 1307 bpf_program__name(prog)); 1323 1308 } 1309 + 1310 + bpf_object__for_each_map(map, obj) { 1311 + if (!get_map_ident(map, ident, sizeof(ident))) 1312 + continue; 1313 + if (bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS) 1314 + continue; 1315 + 1316 + if (use_loader) 1317 + printf("t\tint %s_fd;\n", 
ident); 1318 + else 1319 + printf("\t\tstruct bpf_link *%s;\n", ident); 1320 + } 1321 + 1324 1322 printf("\t} links;\n"); 1325 1323 } 1326 1324 ··· 1476 1448 obj_name 1477 1449 ); 1478 1450 1479 - codegen_maps_skeleton(obj, map_cnt, true /*mmaped*/); 1451 + codegen_maps_skeleton(obj, map_cnt, true /*mmaped*/, true /*links*/); 1480 1452 codegen_progs_skeleton(obj, prog_cnt, true /*populate_links*/); 1481 1453 1482 1454 codegen("\ ··· 1814 1786 } 1815 1787 } 1816 1788 1817 - codegen_maps_skeleton(obj, map_cnt, false /*mmaped*/); 1789 + codegen_maps_skeleton(obj, map_cnt, false /*mmaped*/, false /*links*/); 1818 1790 codegen_progs_skeleton(obj, prog_cnt, false /*links*/); 1819 1791 1820 1792 codegen("\ ··· 2407 2379 return err; 2408 2380 } 2409 2381 2410 - static int btfgen_remap_id(__u32 *type_id, void *ctx) 2411 - { 2412 - unsigned int *ids = ctx; 2413 - 2414 - *type_id = ids[*type_id]; 2415 - 2416 - return 0; 2417 - } 2418 - 2419 2382 /* Generate BTF from relocation information previously recorded */ 2420 2383 static struct btf *btfgen_get_btf(struct btfgen_info *info) 2421 2384 { ··· 2486 2467 /* second pass: fix up type ids */ 2487 2468 for (i = 1; i < btf__type_cnt(btf_new); i++) { 2488 2469 struct btf_type *btf_type = (struct btf_type *) btf__type_by_id(btf_new, i); 2470 + struct btf_field_iter it; 2471 + __u32 *type_id; 2489 2472 2490 - err = btf_type_visit_type_ids(btf_type, btfgen_remap_id, ids); 2473 + err = btf_field_iter_init(&it, btf_type, BTF_FIELD_ITER_IDS); 2491 2474 if (err) 2492 2475 goto err_out; 2476 + 2477 + while ((type_id = btf_field_iter_next(&it))) 2478 + *type_id = ids[*type_id]; 2493 2479 } 2494 2480 2495 2481 free(ids);
+6 -1
tools/bpf/bpftool/skeleton/pid_iter.bpf.c
··· 29 29 }; 30 30 31 31 extern const void bpf_link_fops __ksym; 32 + extern const void bpf_link_fops_poll __ksym __weak; 32 33 extern const void bpf_map_fops __ksym; 33 34 extern const void bpf_prog_fops __ksym; 34 35 extern const void btf_fops __ksym; ··· 85 84 fops = &btf_fops; 86 85 break; 87 86 case BPF_OBJ_LINK: 88 - fops = &bpf_link_fops; 87 + if (&bpf_link_fops_poll && 88 + file->f_op == &bpf_link_fops_poll) 89 + fops = &bpf_link_fops_poll; 90 + else 91 + fops = &bpf_link_fops; 89 92 break; 90 93 default: 91 94 return 0;
+7 -7
tools/bpf/bpftool/skeleton/profiler.bpf.c
··· 40 40 41 41 const volatile __u32 num_cpu = 1; 42 42 const volatile __u32 num_metric = 1; 43 - #define MAX_NUM_MATRICS 4 43 + #define MAX_NUM_METRICS 4 44 44 45 45 SEC("fentry/XXX") 46 46 int BPF_PROG(fentry_XXX) 47 47 { 48 - struct bpf_perf_event_value___local *ptrs[MAX_NUM_MATRICS]; 48 + struct bpf_perf_event_value___local *ptrs[MAX_NUM_METRICS]; 49 49 u32 key = bpf_get_smp_processor_id(); 50 50 u32 i; 51 51 52 52 /* look up before reading, to reduce error */ 53 - for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) { 53 + for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++) { 54 54 u32 flag = i; 55 55 56 56 ptrs[i] = bpf_map_lookup_elem(&fentry_readings, &flag); ··· 58 58 return 0; 59 59 } 60 60 61 - for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) { 61 + for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++) { 62 62 struct bpf_perf_event_value___local reading; 63 63 int err; 64 64 ··· 99 99 SEC("fexit/XXX") 100 100 int BPF_PROG(fexit_XXX) 101 101 { 102 - struct bpf_perf_event_value___local readings[MAX_NUM_MATRICS]; 102 + struct bpf_perf_event_value___local readings[MAX_NUM_METRICS]; 103 103 u32 cpu = bpf_get_smp_processor_id(); 104 104 u32 i, zero = 0; 105 105 int err; 106 106 u64 *count; 107 107 108 108 /* read all events before updating the maps, to reduce error */ 109 - for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) { 109 + for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++) { 110 110 err = bpf_perf_event_read_value(&events, cpu + i * num_cpu, 111 111 (void *)(readings + i), 112 112 sizeof(*readings)); ··· 116 116 count = bpf_map_lookup_elem(&counts, &zero); 117 117 if (count) { 118 118 *count += 1; 119 - for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) 119 + for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++) 120 120 fexit_update_maps(i, &readings[i]); 121 121 } 122 122 return 0;
+204 -140
tools/lib/bpf/btf.c
··· 1739 1739 struct hashmap *str_off_map; /* map string offsets from src to dst */ 1740 1740 }; 1741 1741 1742 - static int btf_rewrite_str(__u32 *str_off, void *ctx) 1742 + static int btf_rewrite_str(struct btf_pipe *p, __u32 *str_off) 1743 1743 { 1744 - struct btf_pipe *p = ctx; 1745 1744 long mapped_off; 1746 1745 int off, err; 1747 1746 ··· 1773 1774 int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type) 1774 1775 { 1775 1776 struct btf_pipe p = { .src = src_btf, .dst = btf }; 1777 + struct btf_field_iter it; 1776 1778 struct btf_type *t; 1779 + __u32 *str_off; 1777 1780 int sz, err; 1778 1781 1779 1782 sz = btf_type_size(src_type); ··· 1792 1791 1793 1792 memcpy(t, src_type, sz); 1794 1793 1795 - err = btf_type_visit_str_offs(t, btf_rewrite_str, &p); 1794 + err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS); 1796 1795 if (err) 1797 1796 return libbpf_err(err); 1798 1797 1798 + while ((str_off = btf_field_iter_next(&it))) { 1799 + err = btf_rewrite_str(&p, str_off); 1800 + if (err) 1801 + return libbpf_err(err); 1802 + } 1803 + 1799 1804 return btf_commit_type(btf, sz); 1800 - } 1801 - 1802 - static int btf_rewrite_type_ids(__u32 *type_id, void *ctx) 1803 - { 1804 - struct btf *btf = ctx; 1805 - 1806 - if (!*type_id) /* nothing to do for VOID references */ 1807 - return 0; 1808 - 1809 - /* we haven't updated btf's type count yet, so 1810 - * btf->start_id + btf->nr_types - 1 is the type ID offset we should 1811 - * add to all newly added BTF types 1812 - */ 1813 - *type_id += btf->start_id + btf->nr_types - 1; 1814 - return 0; 1815 1805 } 1816 1806 1817 1807 static size_t btf_dedup_identity_hash_fn(long key, void *ctx); ··· 1850 1858 memcpy(t, src_btf->types_data, data_sz); 1851 1859 1852 1860 for (i = 0; i < cnt; i++) { 1861 + struct btf_field_iter it; 1862 + __u32 *type_id, *str_off; 1863 + 1853 1864 sz = btf_type_size(t); 1854 1865 if (sz < 0) { 1855 1866 /* unlikely, has to be corrupted src_btf */ ··· 1864 1869 
*off = t - btf->types_data; 1865 1870 1866 1871 /* add, dedup, and remap strings referenced by this BTF type */ 1867 - err = btf_type_visit_str_offs(t, btf_rewrite_str, &p); 1872 + err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS); 1873 + if (err) 1874 + goto err_out; 1875 + while ((str_off = btf_field_iter_next(&it))) { 1876 + err = btf_rewrite_str(&p, str_off); 1877 + if (err) 1878 + goto err_out; 1879 + } 1880 + 1881 + /* remap all type IDs referenced from this BTF type */ 1882 + err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS); 1868 1883 if (err) 1869 1884 goto err_out; 1870 1885 1871 - /* remap all type IDs referenced from this BTF type */ 1872 - err = btf_type_visit_type_ids(t, btf_rewrite_type_ids, btf); 1873 - if (err) 1874 - goto err_out; 1886 + while ((type_id = btf_field_iter_next(&it))) { 1887 + if (!*type_id) /* nothing to do for VOID references */ 1888 + continue; 1889 + 1890 + /* we haven't updated btf's type count yet, so 1891 + * btf->start_id + btf->nr_types - 1 is the type ID offset we should 1892 + * add to all newly added BTF types 1893 + */ 1894 + *type_id += btf->start_id + btf->nr_types - 1; 1895 + } 1875 1896 1876 1897 /* go to next type data and type offset index entry */ 1877 1898 t += sz; ··· 3464 3453 int i, r; 3465 3454 3466 3455 for (i = 0; i < d->btf->nr_types; i++) { 3456 + struct btf_field_iter it; 3467 3457 struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i); 3458 + __u32 *str_off; 3468 3459 3469 - r = btf_type_visit_str_offs(t, fn, ctx); 3460 + r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS); 3470 3461 if (r) 3471 3462 return r; 3463 + 3464 + while ((str_off = btf_field_iter_next(&it))) { 3465 + r = fn(str_off, ctx); 3466 + if (r) 3467 + return r; 3468 + } 3472 3469 } 3473 3470 3474 3471 if (!d->btf_ext) ··· 4938 4919 4939 4920 for (i = 0; i < d->btf->nr_types; i++) { 4940 4921 struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i); 4922 + struct btf_field_iter it; 4923 + __u32 *type_id; 
4941 4924 4942 - r = btf_type_visit_type_ids(t, btf_dedup_remap_type_id, d); 4925 + r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS); 4943 4926 if (r) 4944 4927 return r; 4928 + 4929 + while ((type_id = btf_field_iter_next(&it))) { 4930 + __u32 resolved_id, new_id; 4931 + 4932 + resolved_id = resolve_type_id(d, *type_id); 4933 + new_id = d->hypot_map[resolved_id]; 4934 + if (new_id > BTF_MAX_NR_TYPES) 4935 + return -EINVAL; 4936 + 4937 + *type_id = new_id; 4938 + } 4945 4939 } 4946 4940 4947 4941 if (!d->btf_ext) ··· 5035 5003 return btf__parse_split(path, vmlinux_btf); 5036 5004 } 5037 5005 5038 - int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx) 5006 + int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t, enum btf_field_iter_kind iter_kind) 5039 5007 { 5040 - int i, n, err; 5008 + it->p = NULL; 5009 + it->m_idx = -1; 5010 + it->off_idx = 0; 5011 + it->vlen = 0; 5041 5012 5042 - switch (btf_kind(t)) { 5043 - case BTF_KIND_INT: 5044 - case BTF_KIND_FLOAT: 5045 - case BTF_KIND_ENUM: 5046 - case BTF_KIND_ENUM64: 5047 - return 0; 5048 - 5049 - case BTF_KIND_FWD: 5050 - case BTF_KIND_CONST: 5051 - case BTF_KIND_VOLATILE: 5052 - case BTF_KIND_RESTRICT: 5053 - case BTF_KIND_PTR: 5054 - case BTF_KIND_TYPEDEF: 5055 - case BTF_KIND_FUNC: 5056 - case BTF_KIND_VAR: 5057 - case BTF_KIND_DECL_TAG: 5058 - case BTF_KIND_TYPE_TAG: 5059 - return visit(&t->type, ctx); 5060 - 5061 - case BTF_KIND_ARRAY: { 5062 - struct btf_array *a = btf_array(t); 5063 - 5064 - err = visit(&a->type, ctx); 5065 - err = err ?: visit(&a->index_type, ctx); 5066 - return err; 5067 - } 5068 - 5069 - case BTF_KIND_STRUCT: 5070 - case BTF_KIND_UNION: { 5071 - struct btf_member *m = btf_members(t); 5072 - 5073 - for (i = 0, n = btf_vlen(t); i < n; i++, m++) { 5074 - err = visit(&m->type, ctx); 5075 - if (err) 5076 - return err; 5013 + switch (iter_kind) { 5014 + case BTF_FIELD_ITER_IDS: 5015 + switch (btf_kind(t)) { 5016 + case BTF_KIND_UNKN: 5017 + case 
BTF_KIND_INT: 5018 + case BTF_KIND_FLOAT: 5019 + case BTF_KIND_ENUM: 5020 + case BTF_KIND_ENUM64: 5021 + it->desc = (struct btf_field_desc) {}; 5022 + break; 5023 + case BTF_KIND_FWD: 5024 + case BTF_KIND_CONST: 5025 + case BTF_KIND_VOLATILE: 5026 + case BTF_KIND_RESTRICT: 5027 + case BTF_KIND_PTR: 5028 + case BTF_KIND_TYPEDEF: 5029 + case BTF_KIND_FUNC: 5030 + case BTF_KIND_VAR: 5031 + case BTF_KIND_DECL_TAG: 5032 + case BTF_KIND_TYPE_TAG: 5033 + it->desc = (struct btf_field_desc) { 1, {offsetof(struct btf_type, type)} }; 5034 + break; 5035 + case BTF_KIND_ARRAY: 5036 + it->desc = (struct btf_field_desc) { 5037 + 2, {sizeof(struct btf_type) + offsetof(struct btf_array, type), 5038 + sizeof(struct btf_type) + offsetof(struct btf_array, index_type)} 5039 + }; 5040 + break; 5041 + case BTF_KIND_STRUCT: 5042 + case BTF_KIND_UNION: 5043 + it->desc = (struct btf_field_desc) { 5044 + 0, {}, 5045 + sizeof(struct btf_member), 5046 + 1, {offsetof(struct btf_member, type)} 5047 + }; 5048 + break; 5049 + case BTF_KIND_FUNC_PROTO: 5050 + it->desc = (struct btf_field_desc) { 5051 + 1, {offsetof(struct btf_type, type)}, 5052 + sizeof(struct btf_param), 5053 + 1, {offsetof(struct btf_param, type)} 5054 + }; 5055 + break; 5056 + case BTF_KIND_DATASEC: 5057 + it->desc = (struct btf_field_desc) { 5058 + 0, {}, 5059 + sizeof(struct btf_var_secinfo), 5060 + 1, {offsetof(struct btf_var_secinfo, type)} 5061 + }; 5062 + break; 5063 + default: 5064 + return -EINVAL; 5077 5065 } 5078 - return 0; 5079 - } 5080 - 5081 - case BTF_KIND_FUNC_PROTO: { 5082 - struct btf_param *m = btf_params(t); 5083 - 5084 - err = visit(&t->type, ctx); 5085 - if (err) 5086 - return err; 5087 - for (i = 0, n = btf_vlen(t); i < n; i++, m++) { 5088 - err = visit(&m->type, ctx); 5089 - if (err) 5090 - return err; 5066 + break; 5067 + case BTF_FIELD_ITER_STRS: 5068 + switch (btf_kind(t)) { 5069 + case BTF_KIND_UNKN: 5070 + it->desc = (struct btf_field_desc) {}; 5071 + break; 5072 + case BTF_KIND_INT: 5073 + case 
BTF_KIND_FLOAT: 5074 + case BTF_KIND_FWD: 5075 + case BTF_KIND_ARRAY: 5076 + case BTF_KIND_CONST: 5077 + case BTF_KIND_VOLATILE: 5078 + case BTF_KIND_RESTRICT: 5079 + case BTF_KIND_PTR: 5080 + case BTF_KIND_TYPEDEF: 5081 + case BTF_KIND_FUNC: 5082 + case BTF_KIND_VAR: 5083 + case BTF_KIND_DECL_TAG: 5084 + case BTF_KIND_TYPE_TAG: 5085 + case BTF_KIND_DATASEC: 5086 + it->desc = (struct btf_field_desc) { 5087 + 1, {offsetof(struct btf_type, name_off)} 5088 + }; 5089 + break; 5090 + case BTF_KIND_ENUM: 5091 + it->desc = (struct btf_field_desc) { 5092 + 1, {offsetof(struct btf_type, name_off)}, 5093 + sizeof(struct btf_enum), 5094 + 1, {offsetof(struct btf_enum, name_off)} 5095 + }; 5096 + break; 5097 + case BTF_KIND_ENUM64: 5098 + it->desc = (struct btf_field_desc) { 5099 + 1, {offsetof(struct btf_type, name_off)}, 5100 + sizeof(struct btf_enum64), 5101 + 1, {offsetof(struct btf_enum64, name_off)} 5102 + }; 5103 + break; 5104 + case BTF_KIND_STRUCT: 5105 + case BTF_KIND_UNION: 5106 + it->desc = (struct btf_field_desc) { 5107 + 1, {offsetof(struct btf_type, name_off)}, 5108 + sizeof(struct btf_member), 5109 + 1, {offsetof(struct btf_member, name_off)} 5110 + }; 5111 + break; 5112 + case BTF_KIND_FUNC_PROTO: 5113 + it->desc = (struct btf_field_desc) { 5114 + 1, {offsetof(struct btf_type, name_off)}, 5115 + sizeof(struct btf_param), 5116 + 1, {offsetof(struct btf_param, name_off)} 5117 + }; 5118 + break; 5119 + default: 5120 + return -EINVAL; 5091 5121 } 5092 - return 0; 5093 - } 5094 - 5095 - case BTF_KIND_DATASEC: { 5096 - struct btf_var_secinfo *m = btf_var_secinfos(t); 5097 - 5098 - for (i = 0, n = btf_vlen(t); i < n; i++, m++) { 5099 - err = visit(&m->type, ctx); 5100 - if (err) 5101 - return err; 5102 - } 5103 - return 0; 5104 - } 5105 - 5122 + break; 5106 5123 default: 5107 5124 return -EINVAL; 5108 5125 } 5126 + 5127 + if (it->desc.m_sz) 5128 + it->vlen = btf_vlen(t); 5129 + 5130 + it->p = t; 5131 + return 0; 5109 5132 } 5110 5133 5111 - int 
btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx) 5134 + __u32 *btf_field_iter_next(struct btf_field_iter *it) 5112 5135 { 5113 - int i, n, err; 5136 + if (!it->p) 5137 + return NULL; 5114 5138 5115 - err = visit(&t->name_off, ctx); 5116 - if (err) 5117 - return err; 5118 - 5119 - switch (btf_kind(t)) { 5120 - case BTF_KIND_STRUCT: 5121 - case BTF_KIND_UNION: { 5122 - struct btf_member *m = btf_members(t); 5123 - 5124 - for (i = 0, n = btf_vlen(t); i < n; i++, m++) { 5125 - err = visit(&m->name_off, ctx); 5126 - if (err) 5127 - return err; 5128 - } 5129 - break; 5130 - } 5131 - case BTF_KIND_ENUM: { 5132 - struct btf_enum *m = btf_enum(t); 5133 - 5134 - for (i = 0, n = btf_vlen(t); i < n; i++, m++) { 5135 - err = visit(&m->name_off, ctx); 5136 - if (err) 5137 - return err; 5138 - } 5139 - break; 5140 - } 5141 - case BTF_KIND_ENUM64: { 5142 - struct btf_enum64 *m = btf_enum64(t); 5143 - 5144 - for (i = 0, n = btf_vlen(t); i < n; i++, m++) { 5145 - err = visit(&m->name_off, ctx); 5146 - if (err) 5147 - return err; 5148 - } 5149 - break; 5150 - } 5151 - case BTF_KIND_FUNC_PROTO: { 5152 - struct btf_param *m = btf_params(t); 5153 - 5154 - for (i = 0, n = btf_vlen(t); i < n; i++, m++) { 5155 - err = visit(&m->name_off, ctx); 5156 - if (err) 5157 - return err; 5158 - } 5159 - break; 5160 - } 5161 - default: 5162 - break; 5139 + if (it->m_idx < 0) { 5140 + if (it->off_idx < it->desc.t_off_cnt) 5141 + return it->p + it->desc.t_offs[it->off_idx++]; 5142 + /* move to per-member iteration */ 5143 + it->m_idx = 0; 5144 + it->p += sizeof(struct btf_type); 5145 + it->off_idx = 0; 5163 5146 } 5164 5147 5165 - return 0; 5148 + /* if type doesn't have members, stop */ 5149 + if (it->desc.m_sz == 0) { 5150 + it->p = NULL; 5151 + return NULL; 5152 + } 5153 + 5154 + if (it->off_idx >= it->desc.m_off_cnt) { 5155 + /* exhausted this member's fields, go to the next member */ 5156 + it->m_idx++; 5157 + it->p += it->desc.m_sz; 5158 + it->off_idx = 0; 5159 + } 
5160 + 5161 + if (it->m_idx < it->vlen) 5162 + return it->p + it->desc.m_offs[it->off_idx++]; 5163 + 5164 + it->p = NULL; 5165 + return NULL; 5166 5166 } 5167 5167 5168 5168 int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx)
+85 -4
tools/lib/bpf/libbpf.c
··· 229 229 static int __base_pr(enum libbpf_print_level level, const char *format, 230 230 va_list args) 231 231 { 232 - if (level == LIBBPF_DEBUG) 232 + const char *env_var = "LIBBPF_LOG_LEVEL"; 233 + static enum libbpf_print_level min_level = LIBBPF_INFO; 234 + static bool initialized; 235 + 236 + if (!initialized) { 237 + char *verbosity; 238 + 239 + initialized = true; 240 + verbosity = getenv(env_var); 241 + if (verbosity) { 242 + if (strcasecmp(verbosity, "warn") == 0) 243 + min_level = LIBBPF_WARN; 244 + else if (strcasecmp(verbosity, "debug") == 0) 245 + min_level = LIBBPF_DEBUG; 246 + else if (strcasecmp(verbosity, "info") == 0) 247 + min_level = LIBBPF_INFO; 248 + else 249 + fprintf(stderr, "libbpf: unrecognized '%s' envvar value: '%s', should be one of 'warn', 'debug', or 'info'.\n", 250 + env_var, verbosity); 251 + } 252 + } 253 + 254 + /* if too verbose, skip logging */ 255 + if (level > min_level) 233 256 return 0; 234 257 235 258 return vfprintf(stderr, format, args); ··· 572 549 bool pinned; 573 550 bool reused; 574 551 bool autocreate; 552 + bool autoattach; 575 553 __u64 map_extra; 576 554 }; 577 555 ··· 1401 1377 map->def.value_size = type->size; 1402 1378 map->def.max_entries = 1; 1403 1379 map->def.map_flags = strcmp(sec_name, STRUCT_OPS_LINK_SEC) == 0 ? 
BPF_F_LINK : 0; 1380 + map->autoattach = true; 1404 1381 1405 1382 map->st_ops = calloc(1, sizeof(*map->st_ops)); 1406 1383 if (!map->st_ops) ··· 4819 4794 4820 4795 map->autocreate = autocreate; 4821 4796 return 0; 4797 + } 4798 + 4799 + int bpf_map__set_autoattach(struct bpf_map *map, bool autoattach) 4800 + { 4801 + if (!bpf_map__is_struct_ops(map)) 4802 + return libbpf_err(-EINVAL); 4803 + 4804 + map->autoattach = autoattach; 4805 + return 0; 4806 + } 4807 + 4808 + bool bpf_map__autoattach(const struct bpf_map *map) 4809 + { 4810 + return map->autoattach; 4822 4811 } 4823 4812 4824 4813 int bpf_map__reuse_fd(struct bpf_map *map, int fd) ··· 12916 12877 __u32 zero = 0; 12917 12878 int err, fd; 12918 12879 12919 - if (!bpf_map__is_struct_ops(map)) 12880 + if (!bpf_map__is_struct_ops(map)) { 12881 + pr_warn("map '%s': can't attach non-struct_ops map\n", map->name); 12920 12882 return libbpf_err_ptr(-EINVAL); 12883 + } 12921 12884 12922 12885 if (map->fd < 0) { 12923 12886 pr_warn("map '%s': can't attach BPF map without FD (was it created?)\n", map->name); ··· 13963 13922 */ 13964 13923 } 13965 13924 13925 + /* Skeleton is created with earlier version of bpftool 13926 + * which does not support auto-attachment 13927 + */ 13928 + if (s->map_skel_sz < sizeof(struct bpf_map_skeleton)) 13929 + return 0; 13930 + 13931 + for (i = 0; i < s->map_cnt; i++) { 13932 + struct bpf_map *map = *s->maps[i].map; 13933 + struct bpf_link **link = s->maps[i].link; 13934 + 13935 + if (!map->autocreate || !map->autoattach) 13936 + continue; 13937 + 13938 + if (*link) 13939 + continue; 13940 + 13941 + /* only struct_ops maps can be attached */ 13942 + if (!bpf_map__is_struct_ops(map)) 13943 + continue; 13944 + *link = bpf_map__attach_struct_ops(map); 13945 + 13946 + if (!*link) { 13947 + err = -errno; 13948 + pr_warn("map '%s': failed to auto-attach: %d\n", 13949 + bpf_map__name(map), err); 13950 + return libbpf_err(err); 13951 + } 13952 + } 13953 + 13966 13954 return 0; 13967 13955 } 
13968 13956 ··· 14005 13935 bpf_link__destroy(*link); 14006 13936 *link = NULL; 14007 13937 } 13938 + 13939 + if (s->map_skel_sz < sizeof(struct bpf_map_skeleton)) 13940 + return; 13941 + 13942 + for (i = 0; i < s->map_cnt; i++) { 13943 + struct bpf_link **link = s->maps[i].link; 13944 + 13945 + if (link) { 13946 + bpf_link__destroy(*link); 13947 + *link = NULL; 13948 + } 13949 + } 14008 13950 } 14009 13951 14010 13952 void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s) ··· 14024 13942 if (!s) 14025 13943 return; 14026 13944 14027 - if (s->progs) 14028 - bpf_object__detach_skeleton(s); 13945 + bpf_object__detach_skeleton(s); 14029 13946 if (s->obj) 14030 13947 bpf_object__close(*s->obj); 14031 13948 free(s->maps);
+22 -1
tools/lib/bpf/libbpf.h
··· 98 98 99 99 /** 100 100 * @brief **libbpf_set_print()** sets user-provided log callback function to 101 - * be used for libbpf warnings and informational messages. 101 + * be used for libbpf warnings and informational messages. If the user callback 102 + * is not set, messages are logged to stderr by default. The verbosity of these 103 + * messages can be controlled by setting the environment variable 104 + * LIBBPF_LOG_LEVEL to either warn, info, or debug. 102 105 * @param fn The log print function. If NULL, libbpf won't print anything. 103 106 * @return Pointer to old print function. 104 107 * ··· 979 976 LIBBPF_API bool bpf_map__autocreate(const struct bpf_map *map); 980 977 981 978 /** 979 + * @brief **bpf_map__set_autoattach()** sets whether libbpf has to auto-attach 980 + * map during BPF skeleton attach phase. 981 + * @param map the BPF map instance 982 + * @param autoattach whether to attach map during BPF skeleton attach phase 983 + * @return 0 on success; negative error code, otherwise 984 + */ 985 + LIBBPF_API int bpf_map__set_autoattach(struct bpf_map *map, bool autoattach); 986 + 987 + /** 988 + * @brief **bpf_map__autoattach()** returns whether BPF map is configured to 989 + * auto-attach during BPF skeleton attach phase. 990 + * @param map the BPF map instance 991 + * @return true if map is set to auto-attach during skeleton attach phase; false, otherwise 992 + */ 993 + LIBBPF_API bool bpf_map__autoattach(const struct bpf_map *map); 994 + 995 + /** 982 996 * @brief **bpf_map__fd()** gets the file descriptor of the passed 983 997 * BPF map 984 998 * @param map the BPF map instance ··· 1689 1669 const char *name; 1690 1670 struct bpf_map **map; 1691 1671 void **mmaped; 1672 + struct bpf_link **link; 1692 1673 }; 1693 1674 1694 1675 struct bpf_prog_skeleton {
+2
tools/lib/bpf/libbpf.map
··· 419 419 420 420 LIBBPF_1.5.0 { 421 421 global: 422 + bpf_map__autoattach; 423 + bpf_map__set_autoattach; 422 424 bpf_program__attach_sockmap; 423 425 ring__consume_n; 424 426 ring_buffer__consume_n;
+27 -9
tools/lib/bpf/libbpf_internal.h
··· 508 508 __u32 line_col; 509 509 }; 510 510 511 + enum btf_field_iter_kind { 512 + BTF_FIELD_ITER_IDS, 513 + BTF_FIELD_ITER_STRS, 514 + }; 515 + 516 + struct btf_field_desc { 517 + /* once-per-type offsets */ 518 + int t_off_cnt, t_offs[2]; 519 + /* member struct size, or zero, if no members */ 520 + int m_sz; 521 + /* repeated per-member offsets */ 522 + int m_off_cnt, m_offs[1]; 523 + }; 524 + 525 + struct btf_field_iter { 526 + struct btf_field_desc desc; 527 + void *p; 528 + int m_idx; 529 + int off_idx; 530 + int vlen; 531 + }; 532 + 533 + int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t, enum btf_field_iter_kind iter_kind); 534 + __u32 *btf_field_iter_next(struct btf_field_iter *it); 511 535 512 536 typedef int (*type_id_visit_fn)(__u32 *type_id, void *ctx); 513 537 typedef int (*str_off_visit_fn)(__u32 *str_off, void *ctx); 514 - int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx); 515 - int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx); 516 538 int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx); 517 539 int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx); 518 540 __s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name, ··· 619 597 return fd; 620 598 } 621 599 622 - static inline int sys_dup2(int oldfd, int newfd) 600 + static inline int sys_dup3(int oldfd, int newfd, int flags) 623 601 { 624 - #ifdef __NR_dup2 625 - return syscall(__NR_dup2, oldfd, newfd); 626 - #else 627 - return syscall(__NR_dup3, oldfd, newfd, 0); 628 - #endif 602 + return syscall(__NR_dup3, oldfd, newfd, flags); 629 603 } 630 604 631 605 /* Point *fixed_fd* to the same file that *tmp_fd* points to. ··· 632 614 { 633 615 int err; 634 616 635 - err = sys_dup2(tmp_fd, fixed_fd); 617 + err = sys_dup3(tmp_fd, fixed_fd, O_CLOEXEC); 636 618 err = err < 0 ? 
-errno : 0; 637 619 close(tmp_fd); /* clean up temporary FD */ 638 620 return err;
+36 -22
tools/lib/bpf/linker.c
··· 957 957 static int linker_sanity_check_btf(struct src_obj *obj) 958 958 { 959 959 struct btf_type *t; 960 - int i, n, err = 0; 960 + int i, n, err; 961 961 962 962 if (!obj->btf) 963 963 return 0; 964 964 965 965 n = btf__type_cnt(obj->btf); 966 966 for (i = 1; i < n; i++) { 967 + struct btf_field_iter it; 968 + __u32 *type_id, *str_off; 969 + 967 970 t = btf_type_by_id(obj->btf, i); 968 971 969 - err = err ?: btf_type_visit_type_ids(t, check_btf_type_id, obj->btf); 970 - err = err ?: btf_type_visit_str_offs(t, check_btf_str_off, obj->btf); 972 + err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS); 971 973 if (err) 972 974 return err; 975 + while ((type_id = btf_field_iter_next(&it))) { 976 + if (*type_id >= n) 977 + return -EINVAL; 978 + } 979 + 980 + err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS); 981 + if (err) 982 + return err; 983 + while ((str_off = btf_field_iter_next(&it))) { 984 + if (!btf__str_by_offset(obj->btf, *str_off)) 985 + return -EINVAL; 986 + } 973 987 } 974 988 975 989 return 0; ··· 2248 2234 return 0; 2249 2235 } 2250 2236 2251 - static int remap_type_id(__u32 *type_id, void *ctx) 2252 - { 2253 - int *id_map = ctx; 2254 - int new_id = id_map[*type_id]; 2255 - 2256 - /* Error out if the type wasn't remapped. Ignore VOID which stays VOID. 
*/ 2257 - if (new_id == 0 && *type_id != 0) { 2258 - pr_warn("failed to find new ID mapping for original BTF type ID %u\n", *type_id); 2259 - return -EINVAL; 2260 - } 2261 - 2262 - *type_id = id_map[*type_id]; 2263 - 2264 - return 0; 2265 - } 2266 - 2267 2237 static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj) 2268 2238 { 2269 2239 const struct btf_type *t; 2270 - int i, j, n, start_id, id; 2240 + int i, j, n, start_id, id, err; 2271 2241 const char *name; 2272 2242 2273 2243 if (!obj->btf) ··· 2322 2324 n = btf__type_cnt(linker->btf); 2323 2325 for (i = start_id; i < n; i++) { 2324 2326 struct btf_type *dst_t = btf_type_by_id(linker->btf, i); 2327 + struct btf_field_iter it; 2328 + __u32 *type_id; 2325 2329 2326 - if (btf_type_visit_type_ids(dst_t, remap_type_id, obj->btf_type_map)) 2327 - return -EINVAL; 2330 + err = btf_field_iter_init(&it, dst_t, BTF_FIELD_ITER_IDS); 2331 + if (err) 2332 + return err; 2333 + 2334 + while ((type_id = btf_field_iter_next(&it))) { 2335 + int new_id = obj->btf_type_map[*type_id]; 2336 + 2337 + /* Error out if the type wasn't remapped. Ignore VOID which stays VOID. */ 2338 + if (new_id == 0 && *type_id != 0) { 2339 + pr_warn("failed to find new ID mapping for original BTF type ID %u\n", 2340 + *type_id); 2341 + return -EINVAL; 2342 + } 2343 + 2344 + *type_id = obj->btf_type_map[*type_id]; 2345 + } 2328 2346 } 2329 2347 2330 2348 /* Rewrite VAR/FUNC underlying types (i.e., FUNC's FUNC_PROTO and VAR's
+2 -2
tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c
··· 22 22 return 0; 23 23 } 24 24 25 - static int dummy_reg(void *kdata) 25 + static int dummy_reg(void *kdata, struct bpf_link *link) 26 26 { 27 27 return 0; 28 28 } 29 29 30 - static void dummy_unreg(void *kdata) 30 + static void dummy_unreg(void *kdata, struct bpf_link *link) 31 31 { 32 32 } 33 33
+3 -3
tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
··· 820 820 .is_valid_access = bpf_testmod_ops_is_valid_access, 821 821 }; 822 822 823 - static int bpf_dummy_reg(void *kdata) 823 + static int bpf_dummy_reg(void *kdata, struct bpf_link *link) 824 824 { 825 825 struct bpf_testmod_ops *ops = kdata; 826 826 ··· 835 835 return 0; 836 836 } 837 837 838 - static void bpf_dummy_unreg(void *kdata) 838 + static void bpf_dummy_unreg(void *kdata, struct bpf_link *link) 839 839 { 840 840 } 841 841 ··· 871 871 .owner = THIS_MODULE, 872 872 }; 873 873 874 - static int bpf_dummy_reg2(void *kdata) 874 + static int bpf_dummy_reg2(void *kdata, struct bpf_link *link) 875 875 { 876 876 struct bpf_testmod_ops2 *ops = kdata; 877 877
+21 -11
tools/testing/selftests/bpf/network_helpers.c
··· 94 94 if (settimeo(fd, opts->timeout_ms)) 95 95 goto error_close; 96 96 97 - if (opts->post_socket_cb && opts->post_socket_cb(fd, NULL)) { 97 + if (opts->post_socket_cb && 98 + opts->post_socket_cb(fd, opts->cb_opts)) { 98 99 log_err("Failed to call post_socket_cb"); 99 100 goto error_close; 100 101 } ··· 119 118 return -1; 120 119 } 121 120 121 + int start_server_str(int family, int type, const char *addr_str, __u16 port, 122 + const struct network_helper_opts *opts) 123 + { 124 + struct sockaddr_storage addr; 125 + socklen_t addrlen; 126 + 127 + if (!opts) 128 + opts = &default_opts; 129 + 130 + if (make_sockaddr(family, addr_str, port, &addr, &addrlen)) 131 + return -1; 132 + 133 + return __start_server(type, (struct sockaddr *)&addr, addrlen, opts); 134 + } 135 + 122 136 int start_server(int family, int type, const char *addr_str, __u16 port, 123 137 int timeout_ms) 124 138 { 125 139 struct network_helper_opts opts = { 126 140 .timeout_ms = timeout_ms, 127 141 }; 128 - struct sockaddr_storage addr; 129 - socklen_t addrlen; 130 142 131 - if (make_sockaddr(family, addr_str, port, &addr, &addrlen)) 132 - return -1; 133 - 134 - return __start_server(type, (struct sockaddr *)&addr, addrlen, &opts); 143 + return start_server_str(family, type, addr_str, port, &opts); 135 144 } 136 145 137 - static int reuseport_cb(int fd, const struct post_socket_opts *opts) 146 + static int reuseport_cb(int fd, void *opts) 138 147 { 139 148 int on = 1; 140 149 ··· 349 338 if (settimeo(fd, opts->timeout_ms)) 350 339 goto error_close; 351 340 352 - if (opts->cc && opts->cc[0] && 353 - setsockopt(fd, SOL_TCP, TCP_CONGESTION, opts->cc, 354 - strlen(opts->cc) + 1)) 341 + if (opts->post_socket_cb && 342 + opts->post_socket_cb(fd, opts->cb_opts)) 355 343 goto error_close; 356 344 357 345 if (!opts->noconnect)
+4 -4
tools/testing/selftests/bpf/network_helpers.h
··· 21 21 #define VIP_NUM 5 22 22 #define MAGIC_BYTES 123 23 23 24 - struct post_socket_opts {}; 25 - 26 24 struct network_helper_opts { 27 - const char *cc; 28 25 int timeout_ms; 29 26 bool must_fail; 30 27 bool noconnect; 31 28 int type; 32 29 int proto; 33 - int (*post_socket_cb)(int fd, const struct post_socket_opts *opts); 30 + int (*post_socket_cb)(int fd, void *opts); 31 + void *cb_opts; 34 32 }; 35 33 36 34 /* ipv4 test vector */ ··· 48 50 extern struct ipv6_packet pkt_v6; 49 51 50 52 int settimeo(int fd, int timeout_ms); 53 + int start_server_str(int family, int type, const char *addr_str, __u16 port, 54 + const struct network_helper_opts *opts); 51 55 int start_server(int family, int type, const char *addr, __u16 port, 52 56 int timeout_ms); 53 57 int *start_reuseport_server(int family, int type, const char *addr_str,
+1 -1
tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
··· 451 451 attr.type = PERF_TYPE_SOFTWARE; 452 452 attr.config = PERF_COUNT_SW_CPU_CLOCK; 453 453 attr.freq = 1; 454 - attr.sample_freq = 1000; 454 + attr.sample_freq = 10000; 455 455 pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC); 456 456 if (!ASSERT_GE(pfd, 0, "perf_fd")) 457 457 goto cleanup;
+145 -53
tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
··· 23 23 static const unsigned int total_bytes = 10 * 1024 * 1024; 24 24 static int expected_stg = 0xeB9F; 25 25 26 + struct cb_opts { 27 + const char *cc; 28 + int map_fd; 29 + }; 30 + 26 31 static int settcpca(int fd, const char *tcp_ca) 27 32 { 28 33 int err; ··· 39 34 return 0; 40 35 } 41 36 42 - static void do_test(const char *tcp_ca, const struct bpf_map *sk_stg_map) 37 + static bool start_test(char *addr_str, 38 + const struct network_helper_opts *srv_opts, 39 + const struct network_helper_opts *cli_opts, 40 + int *srv_fd, int *cli_fd) 43 41 { 44 - int lfd = -1, fd = -1; 45 - int err; 46 - 47 - lfd = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0); 48 - if (!ASSERT_NEQ(lfd, -1, "socket")) 49 - return; 50 - 51 - fd = socket(AF_INET6, SOCK_STREAM, 0); 52 - if (!ASSERT_NEQ(fd, -1, "socket")) { 53 - close(lfd); 54 - return; 55 - } 56 - 57 - if (settcpca(lfd, tcp_ca) || settcpca(fd, tcp_ca)) 58 - goto done; 59 - 60 - if (sk_stg_map) { 61 - err = bpf_map_update_elem(bpf_map__fd(sk_stg_map), &fd, 62 - &expected_stg, BPF_NOEXIST); 63 - if (!ASSERT_OK(err, "bpf_map_update_elem(sk_stg_map)")) 64 - goto done; 65 - } 42 + *srv_fd = start_server_str(AF_INET6, SOCK_STREAM, addr_str, 0, srv_opts); 43 + if (!ASSERT_NEQ(*srv_fd, -1, "start_server_str")) 44 + goto err; 66 45 67 46 /* connect to server */ 68 - err = connect_fd_to_fd(fd, lfd, 0); 69 - if (!ASSERT_NEQ(err, -1, "connect")) 70 - goto done; 47 + *cli_fd = connect_to_fd_opts(*srv_fd, cli_opts); 48 + if (!ASSERT_NEQ(*cli_fd, -1, "connect_to_fd_opts")) 49 + goto err; 71 50 72 - if (sk_stg_map) { 73 - int tmp_stg; 51 + return true; 74 52 75 - err = bpf_map_lookup_elem(bpf_map__fd(sk_stg_map), &fd, 76 - &tmp_stg); 77 - if (!ASSERT_ERR(err, "bpf_map_lookup_elem(sk_stg_map)") || 78 - !ASSERT_EQ(errno, ENOENT, "bpf_map_lookup_elem(sk_stg_map)")) 79 - goto done; 53 + err: 54 + if (*srv_fd != -1) { 55 + close(*srv_fd); 56 + *srv_fd = -1; 80 57 } 58 + if (*cli_fd != -1) { 59 + close(*cli_fd); 60 + *cli_fd = -1; 61 + } 62 + 
return false; 63 + } 64 + 65 + static void do_test(const struct network_helper_opts *opts) 66 + { 67 + int lfd = -1, fd = -1; 68 + 69 + if (!start_test(NULL, opts, opts, &lfd, &fd)) 70 + goto done; 81 71 82 72 ASSERT_OK(send_recv_data(lfd, fd, total_bytes), "send_recv_data"); 83 73 84 74 done: 85 - close(lfd); 86 - close(fd); 75 + if (lfd != -1) 76 + close(lfd); 77 + if (fd != -1) 78 + close(fd); 79 + } 80 + 81 + static int cc_cb(int fd, void *opts) 82 + { 83 + struct cb_opts *cb_opts = (struct cb_opts *)opts; 84 + 85 + return settcpca(fd, cb_opts->cc); 87 86 } 88 87 89 88 static void test_cubic(void) 90 89 { 90 + struct cb_opts cb_opts = { 91 + .cc = "bpf_cubic", 92 + }; 93 + struct network_helper_opts opts = { 94 + .post_socket_cb = cc_cb, 95 + .cb_opts = &cb_opts, 96 + }; 91 97 struct bpf_cubic *cubic_skel; 92 98 struct bpf_link *link; 93 99 ··· 112 96 return; 113 97 } 114 98 115 - do_test("bpf_cubic", NULL); 99 + do_test(&opts); 116 100 117 101 ASSERT_EQ(cubic_skel->bss->bpf_cubic_acked_called, 1, "pkts_acked called"); 118 102 ··· 120 104 bpf_cubic__destroy(cubic_skel); 121 105 } 122 106 107 + static int stg_post_socket_cb(int fd, void *opts) 108 + { 109 + struct cb_opts *cb_opts = (struct cb_opts *)opts; 110 + int err; 111 + 112 + err = settcpca(fd, cb_opts->cc); 113 + if (err) 114 + return err; 115 + 116 + err = bpf_map_update_elem(cb_opts->map_fd, &fd, 117 + &expected_stg, BPF_NOEXIST); 118 + if (!ASSERT_OK(err, "bpf_map_update_elem(sk_stg_map)")) 119 + return err; 120 + 121 + return 0; 122 + } 123 + 123 124 static void test_dctcp(void) 124 125 { 126 + struct cb_opts cb_opts = { 127 + .cc = "bpf_dctcp", 128 + }; 129 + struct network_helper_opts opts = { 130 + .post_socket_cb = cc_cb, 131 + .cb_opts = &cb_opts, 132 + }; 133 + struct network_helper_opts cli_opts = { 134 + .post_socket_cb = stg_post_socket_cb, 135 + .cb_opts = &cb_opts, 136 + }; 137 + int lfd = -1, fd = -1, tmp_stg, err; 125 138 struct bpf_dctcp *dctcp_skel; 126 139 struct bpf_link *link; 127 
140 ··· 164 119 return; 165 120 } 166 121 167 - do_test("bpf_dctcp", dctcp_skel->maps.sk_stg_map); 122 + cb_opts.map_fd = bpf_map__fd(dctcp_skel->maps.sk_stg_map); 123 + if (!start_test(NULL, &opts, &cli_opts, &lfd, &fd)) 124 + goto done; 125 + 126 + err = bpf_map_lookup_elem(cb_opts.map_fd, &fd, &tmp_stg); 127 + if (!ASSERT_ERR(err, "bpf_map_lookup_elem(sk_stg_map)") || 128 + !ASSERT_EQ(errno, ENOENT, "bpf_map_lookup_elem(sk_stg_map)")) 129 + goto done; 130 + 131 + ASSERT_OK(send_recv_data(lfd, fd, total_bytes), "send_recv_data"); 168 132 ASSERT_EQ(dctcp_skel->bss->stg_result, expected_stg, "stg_result"); 169 133 134 + done: 170 135 bpf_link__destroy(link); 171 136 bpf_dctcp__destroy(dctcp_skel); 137 + if (lfd != -1) 138 + close(lfd); 139 + if (fd != -1) 140 + close(fd); 172 141 } 173 142 174 143 static char *err_str; ··· 230 171 static void test_dctcp_fallback(void) 231 172 { 232 173 int err, lfd = -1, cli_fd = -1, srv_fd = -1; 233 - struct network_helper_opts opts = { 234 - .cc = "cubic", 235 - }; 236 174 struct bpf_dctcp *dctcp_skel; 237 175 struct bpf_link *link = NULL; 176 + struct cb_opts dctcp = { 177 + .cc = "bpf_dctcp", 178 + }; 179 + struct network_helper_opts srv_opts = { 180 + .post_socket_cb = cc_cb, 181 + .cb_opts = &dctcp, 182 + }; 183 + struct cb_opts cubic = { 184 + .cc = "cubic", 185 + }; 186 + struct network_helper_opts cli_opts = { 187 + .post_socket_cb = cc_cb, 188 + .cb_opts = &cubic, 189 + }; 238 190 char srv_cc[16]; 239 191 socklen_t cc_len = sizeof(srv_cc); 240 192 ··· 260 190 if (!ASSERT_OK_PTR(link, "dctcp link")) 261 191 goto done; 262 192 263 - lfd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0); 264 - if (!ASSERT_GE(lfd, 0, "lfd") || 265 - !ASSERT_OK(settcpca(lfd, "bpf_dctcp"), "lfd=>bpf_dctcp")) 266 - goto done; 267 - 268 - cli_fd = connect_to_fd_opts(lfd, &opts); 269 - if (!ASSERT_GE(cli_fd, 0, "cli_fd")) 193 + if (!start_test("::1", &srv_opts, &cli_opts, &lfd, &cli_fd)) 270 194 goto done; 271 195 272 196 srv_fd = accept(lfd, 
NULL, 0); ··· 361 297 362 298 static void test_update_ca(void) 363 299 { 300 + struct cb_opts cb_opts = { 301 + .cc = "tcp_ca_update", 302 + }; 303 + struct network_helper_opts opts = { 304 + .post_socket_cb = cc_cb, 305 + .cb_opts = &cb_opts, 306 + }; 364 307 struct tcp_ca_update *skel; 365 308 struct bpf_link *link; 366 309 int saved_ca1_cnt; ··· 380 309 link = bpf_map__attach_struct_ops(skel->maps.ca_update_1); 381 310 ASSERT_OK_PTR(link, "attach_struct_ops"); 382 311 383 - do_test("tcp_ca_update", NULL); 312 + do_test(&opts); 384 313 saved_ca1_cnt = skel->bss->ca1_cnt; 385 314 ASSERT_GT(saved_ca1_cnt, 0, "ca1_ca1_cnt"); 386 315 387 316 err = bpf_link__update_map(link, skel->maps.ca_update_2); 388 317 ASSERT_OK(err, "update_map"); 389 318 390 - do_test("tcp_ca_update", NULL); 319 + do_test(&opts); 391 320 ASSERT_EQ(skel->bss->ca1_cnt, saved_ca1_cnt, "ca2_ca1_cnt"); 392 321 ASSERT_GT(skel->bss->ca2_cnt, 0, "ca2_ca2_cnt"); 393 322 ··· 397 326 398 327 static void test_update_wrong(void) 399 328 { 329 + struct cb_opts cb_opts = { 330 + .cc = "tcp_ca_update", 331 + }; 332 + struct network_helper_opts opts = { 333 + .post_socket_cb = cc_cb, 334 + .cb_opts = &cb_opts, 335 + }; 400 336 struct tcp_ca_update *skel; 401 337 struct bpf_link *link; 402 338 int saved_ca1_cnt; ··· 416 338 link = bpf_map__attach_struct_ops(skel->maps.ca_update_1); 417 339 ASSERT_OK_PTR(link, "attach_struct_ops"); 418 340 419 - do_test("tcp_ca_update", NULL); 341 + do_test(&opts); 420 342 saved_ca1_cnt = skel->bss->ca1_cnt; 421 343 ASSERT_GT(saved_ca1_cnt, 0, "ca1_ca1_cnt"); 422 344 423 345 err = bpf_link__update_map(link, skel->maps.ca_wrong); 424 346 ASSERT_ERR(err, "update_map"); 425 347 426 - do_test("tcp_ca_update", NULL); 348 + do_test(&opts); 427 349 ASSERT_GT(skel->bss->ca1_cnt, saved_ca1_cnt, "ca2_ca1_cnt"); 428 350 429 351 bpf_link__destroy(link); ··· 432 354 433 355 static void test_mixed_links(void) 434 356 { 357 + struct cb_opts cb_opts = { 358 + .cc = "tcp_ca_update", 359 + }; 360 
+ struct network_helper_opts opts = { 361 + .post_socket_cb = cc_cb, 362 + .cb_opts = &cb_opts, 363 + }; 435 364 struct tcp_ca_update *skel; 436 365 struct bpf_link *link, *link_nl; 437 366 int err; ··· 453 368 link = bpf_map__attach_struct_ops(skel->maps.ca_update_1); 454 369 ASSERT_OK_PTR(link, "attach_struct_ops"); 455 370 456 - do_test("tcp_ca_update", NULL); 371 + do_test(&opts); 457 372 ASSERT_GT(skel->bss->ca1_cnt, 0, "ca1_ca1_cnt"); 458 373 459 374 err = bpf_link__update_map(link, skel->maps.ca_no_link); ··· 540 455 541 456 static void test_cc_cubic(void) 542 457 { 458 + struct cb_opts cb_opts = { 459 + .cc = "bpf_cc_cubic", 460 + }; 461 + struct network_helper_opts opts = { 462 + .post_socket_cb = cc_cb, 463 + .cb_opts = &cb_opts, 464 + }; 543 465 struct bpf_cc_cubic *cc_cubic_skel; 544 466 struct bpf_link *link; 545 467 ··· 560 468 return; 561 469 } 562 470 563 - do_test("bpf_cc_cubic", NULL); 471 + do_test(&opts); 564 472 565 473 bpf_link__destroy(link); 566 474 bpf_cc_cubic__destroy(cc_cubic_skel);
-6
tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
··· 45 45 return err; 46 46 } 47 47 48 - struct scale_test_def { 49 - const char *file; 50 - enum bpf_prog_type attach_type; 51 - bool fails; 52 - }; 53 - 54 48 static void scale_test(const char *file, 55 49 enum bpf_prog_type attach_type, 56 50 bool should_fail)
+161
tools/testing/selftests/bpf/prog_tests/btf_field_iter.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2024, Oracle and/or its affiliates. */ 3 + 4 + #include <test_progs.h> 5 + #include <bpf/btf.h> 6 + #include "btf_helpers.h" 7 + #include "bpf/libbpf_internal.h" 8 + 9 + struct field_data { 10 + __u32 ids[5]; 11 + const char *strs[5]; 12 + } fields[] = { 13 + { .ids = {}, .strs = {} }, 14 + { .ids = {}, .strs = { "int" } }, 15 + { .ids = {}, .strs = { "int64" } }, 16 + { .ids = { 1 }, .strs = { "" } }, 17 + { .ids = { 2, 1 }, .strs = { "" } }, 18 + { .ids = { 3, 1 }, .strs = { "s1", "f1", "f2" } }, 19 + { .ids = { 1, 5 }, .strs = { "u1", "f1", "f2" } }, 20 + { .ids = {}, .strs = { "e1", "v1", "v2" } }, 21 + { .ids = {}, .strs = { "fw1" } }, 22 + { .ids = { 1 }, .strs = { "t" } }, 23 + { .ids = { 2 }, .strs = { "" } }, 24 + { .ids = { 1 }, .strs = { "" } }, 25 + { .ids = { 3 }, .strs = { "" } }, 26 + { .ids = { 1, 1, 3 }, .strs = { "", "p1", "p2" } }, 27 + { .ids = { 13 }, .strs = { "func" } }, 28 + { .ids = { 1 }, .strs = { "var1" } }, 29 + { .ids = { 3 }, .strs = { "var2" } }, 30 + { .ids = {}, .strs = { "float" } }, 31 + { .ids = { 11 }, .strs = { "decltag" } }, 32 + { .ids = { 6 }, .strs = { "typetag" } }, 33 + { .ids = {}, .strs = { "e64", "eval1", "eval2", "eval3" } }, 34 + { .ids = { 15, 16 }, .strs = { "datasec1" } } 35 + 36 + }; 37 + 38 + /* Fabricate BTF with various types and check BTF field iteration finds types, 39 + * strings expected. 
40 + */ 41 + void test_btf_field_iter(void) 42 + { 43 + struct btf *btf = NULL; 44 + int id; 45 + 46 + btf = btf__new_empty(); 47 + if (!ASSERT_OK_PTR(btf, "empty_btf")) 48 + return; 49 + 50 + btf__add_int(btf, "int", 4, BTF_INT_SIGNED); /* [1] int */ 51 + btf__add_int(btf, "int64", 8, BTF_INT_SIGNED); /* [2] int64 */ 52 + btf__add_ptr(btf, 1); /* [3] int * */ 53 + btf__add_array(btf, 1, 2, 3); /* [4] int64[3] */ 54 + btf__add_struct(btf, "s1", 12); /* [5] struct s1 { */ 55 + btf__add_field(btf, "f1", 3, 0, 0); /* int *f1; */ 56 + btf__add_field(btf, "f2", 1, 0, 0); /* int f2; */ 57 + /* } */ 58 + btf__add_union(btf, "u1", 12); /* [6] union u1 { */ 59 + btf__add_field(btf, "f1", 1, 0, 0); /* int f1; */ 60 + btf__add_field(btf, "f2", 5, 0, 0); /* struct s1 f2; */ 61 + /* } */ 62 + btf__add_enum(btf, "e1", 4); /* [7] enum e1 { */ 63 + btf__add_enum_value(btf, "v1", 1); /* v1 = 1; */ 64 + btf__add_enum_value(btf, "v2", 2); /* v2 = 2; */ 65 + /* } */ 66 + 67 + btf__add_fwd(btf, "fw1", BTF_FWD_STRUCT); /* [8] struct fw1; */ 68 + btf__add_typedef(btf, "t", 1); /* [9] typedef int t; */ 69 + btf__add_volatile(btf, 2); /* [10] volatile int64; */ 70 + btf__add_const(btf, 1); /* [11] const int; */ 71 + btf__add_restrict(btf, 3); /* [12] restrict int *; */ 72 + btf__add_func_proto(btf, 1); /* [13] int (*)(int p1, int *p2); */ 73 + btf__add_func_param(btf, "p1", 1); 74 + btf__add_func_param(btf, "p2", 3); 75 + 76 + btf__add_func(btf, "func", BTF_FUNC_GLOBAL, 13);/* [14] int func(int p1, int *p2); */ 77 + btf__add_var(btf, "var1", BTF_VAR_STATIC, 1); /* [15] static int var1; */ 78 + btf__add_var(btf, "var2", BTF_VAR_STATIC, 3); /* [16] static int *var2; */ 79 + btf__add_float(btf, "float", 4); /* [17] float; */ 80 + btf__add_decl_tag(btf, "decltag", 11, -1); /* [18] decltag const int; */ 81 + btf__add_type_tag(btf, "typetag", 6); /* [19] typetag union u1; */ 82 + btf__add_enum64(btf, "e64", 8, true); /* [20] enum { */ 83 + btf__add_enum64_value(btf, "eval1", 1000); /* eval1 = 
1000, */ 84 + btf__add_enum64_value(btf, "eval2", 2000); /* eval2 = 2000, */ 85 + btf__add_enum64_value(btf, "eval3", 3000); /* eval3 = 3000 */ 86 + /* } */ 87 + btf__add_datasec(btf, "datasec1", 12); /* [21] datasec datasec1 */ 88 + btf__add_datasec_var_info(btf, 15, 0, 4); 89 + btf__add_datasec_var_info(btf, 16, 4, 8); 90 + 91 + VALIDATE_RAW_BTF( 92 + btf, 93 + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", 94 + "[2] INT 'int64' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED", 95 + "[3] PTR '(anon)' type_id=1", 96 + "[4] ARRAY '(anon)' type_id=2 index_type_id=1 nr_elems=3", 97 + "[5] STRUCT 's1' size=12 vlen=2\n" 98 + "\t'f1' type_id=3 bits_offset=0\n" 99 + "\t'f2' type_id=1 bits_offset=0", 100 + "[6] UNION 'u1' size=12 vlen=2\n" 101 + "\t'f1' type_id=1 bits_offset=0\n" 102 + "\t'f2' type_id=5 bits_offset=0", 103 + "[7] ENUM 'e1' encoding=UNSIGNED size=4 vlen=2\n" 104 + "\t'v1' val=1\n" 105 + "\t'v2' val=2", 106 + "[8] FWD 'fw1' fwd_kind=struct", 107 + "[9] TYPEDEF 't' type_id=1", 108 + "[10] VOLATILE '(anon)' type_id=2", 109 + "[11] CONST '(anon)' type_id=1", 110 + "[12] RESTRICT '(anon)' type_id=3", 111 + "[13] FUNC_PROTO '(anon)' ret_type_id=1 vlen=2\n" 112 + "\t'p1' type_id=1\n" 113 + "\t'p2' type_id=3", 114 + "[14] FUNC 'func' type_id=13 linkage=global", 115 + "[15] VAR 'var1' type_id=1, linkage=static", 116 + "[16] VAR 'var2' type_id=3, linkage=static", 117 + "[17] FLOAT 'float' size=4", 118 + "[18] DECL_TAG 'decltag' type_id=11 component_idx=-1", 119 + "[19] TYPE_TAG 'typetag' type_id=6", 120 + "[20] ENUM64 'e64' encoding=SIGNED size=8 vlen=3\n" 121 + "\t'eval1' val=1000\n" 122 + "\t'eval2' val=2000\n" 123 + "\t'eval3' val=3000", 124 + "[21] DATASEC 'datasec1' size=12 vlen=2\n" 125 + "\ttype_id=15 offset=0 size=4\n" 126 + "\ttype_id=16 offset=4 size=8"); 127 + 128 + for (id = 1; id < btf__type_cnt(btf); id++) { 129 + struct btf_type *t = btf_type_by_id(btf, id); 130 + struct btf_field_iter it_strs, it_ids; 131 + int str_idx = 0, id_idx = 
0; 132 + __u32 *next_str, *next_id; 133 + 134 + if (!ASSERT_OK_PTR(t, "btf_type_by_id")) 135 + break; 136 + if (!ASSERT_OK(btf_field_iter_init(&it_strs, t, BTF_FIELD_ITER_STRS), 137 + "iter_init_strs")) 138 + break; 139 + if (!ASSERT_OK(btf_field_iter_init(&it_ids, t, BTF_FIELD_ITER_IDS), 140 + "iter_init_ids")) 141 + break; 142 + while ((next_str = btf_field_iter_next(&it_strs))) { 143 + const char *str = btf__str_by_offset(btf, *next_str); 144 + 145 + if (!ASSERT_OK(strcmp(fields[id].strs[str_idx], str), "field_str_match")) 146 + break; 147 + str_idx++; 148 + } 149 + /* ensure no more strings are expected */ 150 + ASSERT_EQ(fields[id].strs[str_idx], NULL, "field_str_cnt"); 151 + 152 + while ((next_id = btf_field_iter_next(&it_ids))) { 153 + if (!ASSERT_EQ(*next_id, fields[id].ids[id_idx], "field_id_match")) 154 + break; 155 + id_idx++; 156 + } 157 + /* ensure no more ids are expected */ 158 + ASSERT_EQ(fields[id].ids[id_idx], 0, "field_id_cnt"); 159 + } 160 + btf__free(btf); 161 + }
+5
tools/testing/selftests/bpf/prog_tests/cpumask.c
··· 18 18 "test_insert_leave", 19 19 "test_insert_remove_release", 20 20 "test_global_mask_rcu", 21 + "test_global_mask_array_one_rcu", 22 + "test_global_mask_array_rcu", 23 + "test_global_mask_array_l2_rcu", 24 + "test_global_mask_nested_rcu", 25 + "test_global_mask_nested_deep_rcu", 21 26 "test_cpumask_weight", 22 27 }; 23 28
+2 -2
tools/testing/selftests/bpf/prog_tests/find_vma.c
··· 29 29 30 30 /* create perf event */ 31 31 attr.size = sizeof(attr); 32 - attr.type = PERF_TYPE_HARDWARE; 33 - attr.config = PERF_COUNT_HW_CPU_CYCLES; 32 + attr.type = PERF_TYPE_SOFTWARE; 33 + attr.config = PERF_COUNT_SW_CPU_CLOCK; 34 34 attr.freq = 1; 35 35 attr.sample_freq = 1000; 36 36 pfd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC);
+12
tools/testing/selftests/bpf/prog_tests/linked_list.c
··· 183 183 if (!leave_in_map) 184 184 clear_fields(skel->maps.bss_A); 185 185 186 + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_push_pop_nested), &opts); 187 + ASSERT_OK(ret, "global_list_push_pop_nested"); 188 + ASSERT_OK(opts.retval, "global_list_push_pop_nested retval"); 189 + if (!leave_in_map) 190 + clear_fields(skel->maps.bss_A); 191 + 192 + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_array_push_pop), &opts); 193 + ASSERT_OK(ret, "global_list_array_push_pop"); 194 + ASSERT_OK(opts.retval, "global_list_array_push_pop retval"); 195 + if (!leave_in_map) 196 + clear_fields(skel->maps.bss_A); 197 + 186 198 if (mode == PUSH_POP) 187 199 goto end; 188 200
+47
tools/testing/selftests/bpf/prog_tests/rbtree.c
··· 31 31 rbtree__destroy(skel); 32 32 } 33 33 34 + static void test_rbtree_add_nodes_nested(void) 35 + { 36 + LIBBPF_OPTS(bpf_test_run_opts, opts, 37 + .data_in = &pkt_v4, 38 + .data_size_in = sizeof(pkt_v4), 39 + .repeat = 1, 40 + ); 41 + struct rbtree *skel; 42 + int ret; 43 + 44 + skel = rbtree__open_and_load(); 45 + if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load")) 46 + return; 47 + 48 + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_nodes_nested), &opts); 49 + ASSERT_OK(ret, "rbtree_add_nodes_nested run"); 50 + ASSERT_OK(opts.retval, "rbtree_add_nodes_nested retval"); 51 + ASSERT_EQ(skel->data->less_callback_ran, 1, "rbtree_add_nodes_nested less_callback_ran"); 52 + 53 + rbtree__destroy(skel); 54 + } 55 + 34 56 static void test_rbtree_add_and_remove(void) 35 57 { 36 58 LIBBPF_OPTS(bpf_test_run_opts, opts, ··· 71 49 ASSERT_OK(ret, "rbtree_add_and_remove"); 72 50 ASSERT_OK(opts.retval, "rbtree_add_and_remove retval"); 73 51 ASSERT_EQ(skel->data->removed_key, 5, "rbtree_add_and_remove first removed key"); 52 + 53 + rbtree__destroy(skel); 54 + } 55 + 56 + static void test_rbtree_add_and_remove_array(void) 57 + { 58 + LIBBPF_OPTS(bpf_test_run_opts, opts, 59 + .data_in = &pkt_v4, 60 + .data_size_in = sizeof(pkt_v4), 61 + .repeat = 1, 62 + ); 63 + struct rbtree *skel; 64 + int ret; 65 + 66 + skel = rbtree__open_and_load(); 67 + if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load")) 68 + return; 69 + 70 + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_and_remove_array), &opts); 71 + ASSERT_OK(ret, "rbtree_add_and_remove_array"); 72 + ASSERT_OK(opts.retval, "rbtree_add_and_remove_array retval"); 74 73 75 74 rbtree__destroy(skel); 76 75 } ··· 147 104 { 148 105 if (test__start_subtest("rbtree_add_nodes")) 149 106 test_rbtree_add_nodes(); 107 + if (test__start_subtest("rbtree_add_nodes_nested")) 108 + test_rbtree_add_nodes_nested(); 150 109 if (test__start_subtest("rbtree_add_and_remove")) 151 110 test_rbtree_add_and_remove(); 111 + 
if (test__start_subtest("rbtree_add_and_remove_array")) 112 + test_rbtree_add_and_remove_array(); 152 113 if (test__start_subtest("rbtree_first_and_remove")) 153 114 test_rbtree_first_and_remove(); 154 115 if (test__start_subtest("rbtree_api_release_aliasing"))
+2 -1
tools/testing/selftests/bpf/prog_tests/send_signal.c
··· 156 156 static void test_send_signal_perf(bool signal_thread) 157 157 { 158 158 struct perf_event_attr attr = { 159 - .sample_period = 1, 159 + .freq = 1, 160 + .sample_freq = 1000, 160 161 .type = PERF_TYPE_SOFTWARE, 161 162 .config = PERF_COUNT_SW_CPU_CLOCK, 162 163 };
+1 -1
tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c
··· 70 70 return (void *)(long)err; 71 71 } 72 72 73 - static int custom_cb(int fd, const struct post_socket_opts *opts) 73 + static int custom_cb(int fd, void *opts) 74 74 { 75 75 char buf; 76 76 int err;
+57
tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
··· 3 3 #include <test_progs.h> 4 4 #include <time.h> 5 5 6 + #include <sys/epoll.h> 7 + 6 8 #include "struct_ops_module.skel.h" 7 9 #include "struct_ops_nulled_out_cb.skel.h" 8 10 #include "struct_ops_forgotten_cb.skel.h" 11 + #include "struct_ops_detach.skel.h" 9 12 10 13 static void check_map_info(struct bpf_map_info *info) 11 14 { ··· 245 242 struct_ops_forgotten_cb__destroy(skel); 246 243 } 247 244 245 + /* Detach a link from a user space program */ 246 + static void test_detach_link(void) 247 + { 248 + struct epoll_event ev, events[2]; 249 + struct struct_ops_detach *skel; 250 + struct bpf_link *link = NULL; 251 + int fd, epollfd = -1, nfds; 252 + int err; 253 + 254 + skel = struct_ops_detach__open_and_load(); 255 + if (!ASSERT_OK_PTR(skel, "struct_ops_detach__open_and_load")) 256 + return; 257 + 258 + link = bpf_map__attach_struct_ops(skel->maps.testmod_do_detach); 259 + if (!ASSERT_OK_PTR(link, "attach_struct_ops")) 260 + goto cleanup; 261 + 262 + fd = bpf_link__fd(link); 263 + if (!ASSERT_GE(fd, 0, "link_fd")) 264 + goto cleanup; 265 + 266 + epollfd = epoll_create1(0); 267 + if (!ASSERT_GE(epollfd, 0, "epoll_create1")) 268 + goto cleanup; 269 + 270 + ev.events = EPOLLHUP; 271 + ev.data.fd = fd; 272 + err = epoll_ctl(epollfd, EPOLL_CTL_ADD, fd, &ev); 273 + if (!ASSERT_OK(err, "epoll_ctl")) 274 + goto cleanup; 275 + 276 + err = bpf_link__detach(link); 277 + if (!ASSERT_OK(err, "detach_link")) 278 + goto cleanup; 279 + 280 + /* Wait for EPOLLHUP */ 281 + nfds = epoll_wait(epollfd, events, 2, 500); 282 + if (!ASSERT_EQ(nfds, 1, "epoll_wait")) 283 + goto cleanup; 284 + 285 + if (!ASSERT_EQ(events[0].data.fd, fd, "epoll_wait_fd")) 286 + goto cleanup; 287 + if (!ASSERT_TRUE(events[0].events & EPOLLHUP, "events[0].events")) 288 + goto cleanup; 289 + 290 + cleanup: 291 + if (epollfd >= 0) 292 + close(epollfd); 293 + bpf_link__destroy(link); 294 + struct_ops_detach__destroy(skel); 295 + } 296 + 248 297 void serial_test_struct_ops_module(void) 249 298 { 250 299 if 
(test__start_subtest("struct_ops_load")) ··· 309 254 test_struct_ops_nulled_out_cb(); 310 255 if (test__start_subtest("struct_ops_forgotten_cb")) 311 256 test_struct_ops_forgotten_cb(); 257 + if (test__start_subtest("test_detach_link")) 258 + test_detach_link(); 312 259 } 313 260
+2
tools/testing/selftests/bpf/prog_tests/verifier.c
··· 86 86 #include "verifier_xadd.skel.h" 87 87 #include "verifier_xdp.skel.h" 88 88 #include "verifier_xdp_direct_packet_access.skel.h" 89 + #include "verifier_bits_iter.skel.h" 89 90 90 91 #define MAX_ENTRIES 11 91 92 ··· 203 202 void test_verifier_xadd(void) { RUN(verifier_xadd); } 204 203 void test_verifier_xdp(void) { RUN(verifier_xdp); } 205 204 void test_verifier_xdp_direct_packet_access(void) { RUN(verifier_xdp_direct_packet_access); } 205 + void test_verifier_bits_iter(void) { RUN(verifier_bits_iter); } 206 206 207 207 static int init_test_val_map(struct bpf_object *obj, char *map_name) 208 208 {
-6
tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c
··· 6 6 7 7 char _license[] SEC("license") = "GPL"; 8 8 9 - struct key_t { 10 - int a; 11 - int b; 12 - int c; 13 - }; 14 - 15 9 struct { 16 10 __uint(type, BPF_MAP_TYPE_ARRAY); 17 11 __uint(max_entries, 3);
-6
tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c
··· 6 6 7 7 char _license[] SEC("license") = "GPL"; 8 8 9 - struct key_t { 10 - int a; 11 - int b; 12 - int c; 13 - }; 14 - 15 9 struct { 16 10 __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); 17 11 __uint(max_entries, 3);
+171
tools/testing/selftests/bpf/progs/cpumask_success.c
··· 12 12 13 13 int pid, nr_cpus; 14 14 15 + struct kptr_nested { 16 + struct bpf_cpumask __kptr * mask; 17 + }; 18 + 19 + struct kptr_nested_pair { 20 + struct bpf_cpumask __kptr * mask_1; 21 + struct bpf_cpumask __kptr * mask_2; 22 + }; 23 + 24 + struct kptr_nested_mid { 25 + int dummy; 26 + struct kptr_nested m; 27 + }; 28 + 29 + struct kptr_nested_deep { 30 + struct kptr_nested_mid ptrs[2]; 31 + struct kptr_nested_pair ptr_pairs[3]; 32 + }; 33 + 34 + private(MASK) static struct bpf_cpumask __kptr * global_mask_array[2]; 35 + private(MASK) static struct bpf_cpumask __kptr * global_mask_array_l2[2][1]; 36 + private(MASK) static struct bpf_cpumask __kptr * global_mask_array_one[1]; 37 + private(MASK) static struct kptr_nested global_mask_nested[2]; 38 + private(MASK_DEEP) static struct kptr_nested_deep global_mask_nested_deep; 39 + 15 40 static bool is_test_task(void) 16 41 { 17 42 int cur_pid = bpf_get_current_pid_tgid() >> 32; ··· 482 457 bpf_cpumask_test_cpu(0, (const struct cpumask *)local); 483 458 bpf_rcu_read_unlock(); 484 459 460 + return 0; 461 + } 462 + 463 + SEC("tp_btf/task_newtask") 464 + int BPF_PROG(test_global_mask_array_one_rcu, struct task_struct *task, u64 clone_flags) 465 + { 466 + struct bpf_cpumask *local, *prev; 467 + 468 + if (!is_test_task()) 469 + return 0; 470 + 471 + /* Kptr arrays with one element are special cased, being treated 472 + * just like a single pointer. 
473 + */ 474 + 475 + local = create_cpumask(); 476 + if (!local) 477 + return 0; 478 + 479 + prev = bpf_kptr_xchg(&global_mask_array_one[0], local); 480 + if (prev) { 481 + bpf_cpumask_release(prev); 482 + err = 3; 483 + return 0; 484 + } 485 + 486 + bpf_rcu_read_lock(); 487 + local = global_mask_array_one[0]; 488 + if (!local) { 489 + err = 4; 490 + bpf_rcu_read_unlock(); 491 + return 0; 492 + } 493 + 494 + bpf_rcu_read_unlock(); 495 + 496 + return 0; 497 + } 498 + 499 + static int _global_mask_array_rcu(struct bpf_cpumask **mask0, 500 + struct bpf_cpumask **mask1) 501 + { 502 + struct bpf_cpumask *local; 503 + 504 + if (!is_test_task()) 505 + return 0; 506 + 507 + /* Check if two kptrs in the array work and independently */ 508 + 509 + local = create_cpumask(); 510 + if (!local) 511 + return 0; 512 + 513 + bpf_rcu_read_lock(); 514 + 515 + local = bpf_kptr_xchg(mask0, local); 516 + if (local) { 517 + err = 1; 518 + goto err_exit; 519 + } 520 + 521 + /* [<mask 0>, NULL] */ 522 + if (!*mask0 || *mask1) { 523 + err = 2; 524 + goto err_exit; 525 + } 526 + 527 + local = create_cpumask(); 528 + if (!local) { 529 + err = 9; 530 + goto err_exit; 531 + } 532 + 533 + local = bpf_kptr_xchg(mask1, local); 534 + if (local) { 535 + err = 10; 536 + goto err_exit; 537 + } 538 + 539 + /* [<mask 0>, <mask 1>] */ 540 + if (!*mask0 || !*mask1 || *mask0 == *mask1) { 541 + err = 11; 542 + goto err_exit; 543 + } 544 + 545 + err_exit: 546 + if (local) 547 + bpf_cpumask_release(local); 548 + bpf_rcu_read_unlock(); 549 + return 0; 550 + } 551 + 552 + SEC("tp_btf/task_newtask") 553 + int BPF_PROG(test_global_mask_array_rcu, struct task_struct *task, u64 clone_flags) 554 + { 555 + return _global_mask_array_rcu(&global_mask_array[0], &global_mask_array[1]); 556 + } 557 + 558 + SEC("tp_btf/task_newtask") 559 + int BPF_PROG(test_global_mask_array_l2_rcu, struct task_struct *task, u64 clone_flags) 560 + { 561 + return _global_mask_array_rcu(&global_mask_array_l2[0][0], 
&global_mask_array_l2[1][0]); 562 + } 563 + 564 + SEC("tp_btf/task_newtask") 565 + int BPF_PROG(test_global_mask_nested_rcu, struct task_struct *task, u64 clone_flags) 566 + { 567 + return _global_mask_array_rcu(&global_mask_nested[0].mask, &global_mask_nested[1].mask); 568 + } 569 + 570 + /* Ensure that the field->offset has been correctly advanced from one 571 + * nested struct or array sub-tree to another. In the case of 572 + * kptr_nested_deep, it comprises two sub-trees: ktpr_1 and kptr_2. By 573 + * calling bpf_kptr_xchg() on every single kptr in both nested sub-trees, 574 + * the verifier should reject the program if the field->offset of any kptr 575 + * is incorrect. 576 + * 577 + * For instance, if we have 10 kptrs in a nested struct and a program that 578 + * accesses each kptr individually with bpf_kptr_xchg(), the compiler 579 + * should emit instructions to access 10 different offsets if it works 580 + * correctly. If the field->offset values of any pair of them are 581 + * incorrectly the same, the number of unique offsets in btf_record for 582 + * this nested struct should be less than 10. The verifier should fail to 583 + * discover some of the offsets emitted by the compiler. 584 + * 585 + * Even if the field->offset values of kptrs are not duplicated, the 586 + * verifier should fail to find a btf_field for the instruction accessing a 587 + * kptr if the corresponding field->offset is pointing to a random 588 + * incorrect offset. 
589 + */ 590 + SEC("tp_btf/task_newtask") 591 + int BPF_PROG(test_global_mask_nested_deep_rcu, struct task_struct *task, u64 clone_flags) 592 + { 593 + int r, i; 594 + 595 + r = _global_mask_array_rcu(&global_mask_nested_deep.ptrs[0].m.mask, 596 + &global_mask_nested_deep.ptrs[1].m.mask); 597 + if (r) 598 + return r; 599 + 600 + for (i = 0; i < 3; i++) { 601 + r = _global_mask_array_rcu(&global_mask_nested_deep.ptr_pairs[i].mask_1, 602 + &global_mask_nested_deep.ptr_pairs[i].mask_2); 603 + if (r) 604 + return r; 605 + } 485 606 return 0; 486 607 } 487 608
+42
tools/testing/selftests/bpf/progs/linked_list.c
··· 11 11 12 12 #include "linked_list.h" 13 13 14 + struct head_nested_inner { 15 + struct bpf_spin_lock lock; 16 + struct bpf_list_head head __contains(foo, node2); 17 + }; 18 + 19 + struct head_nested { 20 + int dummy; 21 + struct head_nested_inner inner; 22 + }; 23 + 24 + private(C) struct bpf_spin_lock glock_c; 25 + private(C) struct bpf_list_head ghead_array[2] __contains(foo, node2); 26 + private(C) struct bpf_list_head ghead_array_one[1] __contains(foo, node2); 27 + 28 + private(D) struct head_nested ghead_nested; 29 + 14 30 static __always_inline 15 31 int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map) 16 32 { ··· 323 307 int global_list_push_pop(void *ctx) 324 308 { 325 309 return test_list_push_pop(&glock, &ghead); 310 + } 311 + 312 + SEC("tc") 313 + int global_list_push_pop_nested(void *ctx) 314 + { 315 + return test_list_push_pop(&ghead_nested.inner.lock, &ghead_nested.inner.head); 316 + } 317 + 318 + SEC("tc") 319 + int global_list_array_push_pop(void *ctx) 320 + { 321 + int r; 322 + 323 + r = test_list_push_pop(&glock_c, &ghead_array[0]); 324 + if (r) 325 + return r; 326 + 327 + r = test_list_push_pop(&glock_c, &ghead_array[1]); 328 + if (r) 329 + return r; 330 + 331 + /* Arrays with only one element is a special case, being treated 332 + * just like a bpf_list_head variable by the verifier, not an 333 + * array. 334 + */ 335 + return test_list_push_pop(&glock_c, &ghead_array_one[0]); 326 336 } 327 337 328 338 SEC("tc")
+77
tools/testing/selftests/bpf/progs/rbtree.c
··· 13 13 struct bpf_rb_node node; 14 14 }; 15 15 16 + struct root_nested_inner { 17 + struct bpf_spin_lock glock; 18 + struct bpf_rb_root root __contains(node_data, node); 19 + }; 20 + 21 + struct root_nested { 22 + struct root_nested_inner inner; 23 + }; 24 + 16 25 long less_callback_ran = -1; 17 26 long removed_key = -1; 18 27 long first_data[2] = {-1, -1}; ··· 29 20 #define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8))) 30 21 private(A) struct bpf_spin_lock glock; 31 22 private(A) struct bpf_rb_root groot __contains(node_data, node); 23 + private(A) struct bpf_rb_root groot_array[2] __contains(node_data, node); 24 + private(A) struct bpf_rb_root groot_array_one[1] __contains(node_data, node); 25 + private(B) struct root_nested groot_nested; 32 26 33 27 static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b) 34 28 { ··· 84 72 } 85 73 86 74 SEC("tc") 75 + long rbtree_add_nodes_nested(void *ctx) 76 + { 77 + return __add_three(&groot_nested.inner.root, &groot_nested.inner.glock); 78 + } 79 + 80 + SEC("tc") 87 81 long rbtree_add_and_remove(void *ctx) 88 82 { 89 83 struct bpf_rb_node *res = NULL; ··· 124 106 bpf_obj_drop(n); 125 107 if (m) 126 108 bpf_obj_drop(m); 109 + return 1; 110 + } 111 + 112 + SEC("tc") 113 + long rbtree_add_and_remove_array(void *ctx) 114 + { 115 + struct bpf_rb_node *res1 = NULL, *res2 = NULL, *res3 = NULL; 116 + struct node_data *nodes[3][2] = {{NULL, NULL}, {NULL, NULL}, {NULL, NULL}}; 117 + struct node_data *n; 118 + long k1 = -1, k2 = -1, k3 = -1; 119 + int i, j; 120 + 121 + for (i = 0; i < 3; i++) { 122 + for (j = 0; j < 2; j++) { 123 + nodes[i][j] = bpf_obj_new(typeof(*nodes[i][j])); 124 + if (!nodes[i][j]) 125 + goto err_out; 126 + nodes[i][j]->key = i * 2 + j; 127 + } 128 + } 129 + 130 + bpf_spin_lock(&glock); 131 + for (i = 0; i < 2; i++) 132 + for (j = 0; j < 2; j++) 133 + bpf_rbtree_add(&groot_array[i], &nodes[i][j]->node, less); 134 + for (j = 0; j < 2; j++) 135 + 
bpf_rbtree_add(&groot_array_one[0], &nodes[2][j]->node, less); 136 + res1 = bpf_rbtree_remove(&groot_array[0], &nodes[0][0]->node); 137 + res2 = bpf_rbtree_remove(&groot_array[1], &nodes[1][0]->node); 138 + res3 = bpf_rbtree_remove(&groot_array_one[0], &nodes[2][0]->node); 139 + bpf_spin_unlock(&glock); 140 + 141 + if (res1) { 142 + n = container_of(res1, struct node_data, node); 143 + k1 = n->key; 144 + bpf_obj_drop(n); 145 + } 146 + if (res2) { 147 + n = container_of(res2, struct node_data, node); 148 + k2 = n->key; 149 + bpf_obj_drop(n); 150 + } 151 + if (res3) { 152 + n = container_of(res3, struct node_data, node); 153 + k3 = n->key; 154 + bpf_obj_drop(n); 155 + } 156 + if (k1 != 0 || k2 != 2 || k3 != 4) 157 + return 2; 158 + 159 + return 0; 160 + 161 + err_out: 162 + for (i = 0; i < 3; i++) { 163 + for (j = 0; j < 2; j++) { 164 + if (nodes[i][j]) 165 + bpf_obj_drop(nodes[i][j]); 166 + } 167 + } 127 168 return 1; 128 169 } 129 170
+10
tools/testing/selftests/bpf/progs/struct_ops_detach.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ 3 + #include <vmlinux.h> 4 + #include <bpf/bpf_helpers.h> 5 + #include "../bpf_testmod/bpf_testmod.h" 6 + 7 + char _license[] SEC("license") = "GPL"; 8 + 9 + SEC(".struct_ops.link") 10 + struct bpf_testmod_ops testmod_do_detach;
+9 -11
tools/testing/selftests/bpf/progs/test_sockmap_kern.h
··· 92 92 __uint(value_size, sizeof(int)); 93 93 } tls_sock_map SEC(".maps"); 94 94 95 - SEC("sk_skb1") 95 + SEC("sk_skb/stream_parser") 96 96 int bpf_prog1(struct __sk_buff *skb) 97 97 { 98 98 int *f, two = 2; ··· 104 104 return skb->len; 105 105 } 106 106 107 - SEC("sk_skb2") 107 + SEC("sk_skb/stream_verdict") 108 108 int bpf_prog2(struct __sk_buff *skb) 109 109 { 110 110 __u32 lport = skb->local_port; ··· 151 151 memcpy(c + offset, "PASS", 4); 152 152 } 153 153 154 - SEC("sk_skb3") 154 + SEC("sk_skb/stream_verdict") 155 155 int bpf_prog3(struct __sk_buff *skb) 156 156 { 157 157 int err, *f, ret = SK_PASS; ··· 177 177 return bpf_sk_redirect_hash(skb, &tls_sock_map, &ret, flags); 178 178 #endif 179 179 } 180 - f = bpf_map_lookup_elem(&sock_skb_opts, &one); 181 - if (f && *f) 182 - ret = SK_DROP; 183 180 err = bpf_skb_adjust_room(skb, 4, 0, 0); 184 181 if (err) 185 182 return SK_DROP; ··· 230 233 return 0; 231 234 } 232 235 233 - SEC("sk_msg1") 236 + SEC("sk_msg") 234 237 int bpf_prog4(struct sk_msg_md *msg) 235 238 { 236 239 int *bytes, zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5; ··· 260 263 return SK_PASS; 261 264 } 262 265 263 - SEC("sk_msg2") 266 + SEC("sk_msg") 264 267 int bpf_prog6(struct sk_msg_md *msg) 265 268 { 266 269 int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, key = 0; ··· 305 308 #endif 306 309 } 307 310 308 - SEC("sk_msg3") 311 + SEC("sk_msg") 309 312 int bpf_prog8(struct sk_msg_md *msg) 310 313 { 311 314 void *data_end = (void *)(long) msg->data_end; ··· 326 329 327 330 return SK_PASS; 328 331 } 329 - SEC("sk_msg4") 332 + 333 + SEC("sk_msg") 330 334 int bpf_prog9(struct sk_msg_md *msg) 331 335 { 332 336 void *data_end = (void *)(long) msg->data_end; ··· 345 347 return SK_PASS; 346 348 } 347 349 348 - SEC("sk_msg5") 350 + SEC("sk_msg") 349 351 int bpf_prog10(struct sk_msg_md *msg) 350 352 { 351 353 int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop;
+153
tools/testing/selftests/bpf/progs/verifier_bits_iter.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* Copyright (c) 2024 Yafang Shao <laoar.shao@gmail.com> */ 3 + 4 + #include "vmlinux.h" 5 + #include <bpf/bpf_helpers.h> 6 + #include <bpf/bpf_tracing.h> 7 + 8 + #include "bpf_misc.h" 9 + #include "task_kfunc_common.h" 10 + 11 + char _license[] SEC("license") = "GPL"; 12 + 13 + int bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign, 14 + u32 nr_bits) __ksym __weak; 15 + int *bpf_iter_bits_next(struct bpf_iter_bits *it) __ksym __weak; 16 + void bpf_iter_bits_destroy(struct bpf_iter_bits *it) __ksym __weak; 17 + 18 + SEC("iter.s/cgroup") 19 + __description("bits iter without destroy") 20 + __failure __msg("Unreleased reference") 21 + int BPF_PROG(no_destroy, struct bpf_iter_meta *meta, struct cgroup *cgrp) 22 + { 23 + struct bpf_iter_bits it; 24 + u64 data = 1; 25 + 26 + bpf_iter_bits_new(&it, &data, 1); 27 + bpf_iter_bits_next(&it); 28 + return 0; 29 + } 30 + 31 + SEC("iter/cgroup") 32 + __description("uninitialized iter in ->next()") 33 + __failure __msg("expected an initialized iter_bits as arg #1") 34 + int BPF_PROG(next_uninit, struct bpf_iter_meta *meta, struct cgroup *cgrp) 35 + { 36 + struct bpf_iter_bits *it = NULL; 37 + 38 + bpf_iter_bits_next(it); 39 + return 0; 40 + } 41 + 42 + SEC("iter/cgroup") 43 + __description("uninitialized iter in ->destroy()") 44 + __failure __msg("expected an initialized iter_bits as arg #1") 45 + int BPF_PROG(destroy_uninit, struct bpf_iter_meta *meta, struct cgroup *cgrp) 46 + { 47 + struct bpf_iter_bits it = {}; 48 + 49 + bpf_iter_bits_destroy(&it); 50 + return 0; 51 + } 52 + 53 + SEC("syscall") 54 + __description("null pointer") 55 + __success __retval(0) 56 + int null_pointer(void) 57 + { 58 + int nr = 0; 59 + int *bit; 60 + 61 + bpf_for_each(bits, bit, NULL, 1) 62 + nr++; 63 + return nr; 64 + } 65 + 66 + SEC("syscall") 67 + __description("bits copy") 68 + __success __retval(10) 69 + int bits_copy(void) 70 + { 71 + u64 data = 0xf7310UL; /* 4 + 3 + 2 + 
1 + 0*/ 72 + int nr = 0; 73 + int *bit; 74 + 75 + bpf_for_each(bits, bit, &data, 1) 76 + nr++; 77 + return nr; 78 + } 79 + 80 + SEC("syscall") 81 + __description("bits memalloc") 82 + __success __retval(64) 83 + int bits_memalloc(void) 84 + { 85 + u64 data[2]; 86 + int nr = 0; 87 + int *bit; 88 + 89 + __builtin_memset(&data, 0xf0, sizeof(data)); /* 4 * 16 */ 90 + bpf_for_each(bits, bit, &data[0], sizeof(data) / sizeof(u64)) 91 + nr++; 92 + return nr; 93 + } 94 + 95 + SEC("syscall") 96 + __description("bit index") 97 + __success __retval(8) 98 + int bit_index(void) 99 + { 100 + u64 data = 0x100; 101 + int bit_idx = 0; 102 + int *bit; 103 + 104 + bpf_for_each(bits, bit, &data, 1) { 105 + if (*bit == 0) 106 + continue; 107 + bit_idx = *bit; 108 + } 109 + return bit_idx; 110 + } 111 + 112 + SEC("syscall") 113 + __description("bits nomem") 114 + __success __retval(0) 115 + int bits_nomem(void) 116 + { 117 + u64 data[4]; 118 + int nr = 0; 119 + int *bit; 120 + 121 + __builtin_memset(&data, 0xff, sizeof(data)); 122 + bpf_for_each(bits, bit, &data[0], 513) /* Be greater than 512 */ 123 + nr++; 124 + return nr; 125 + } 126 + 127 + SEC("syscall") 128 + __description("fewer words") 129 + __success __retval(1) 130 + int fewer_words(void) 131 + { 132 + u64 data[2] = {0x1, 0xff}; 133 + int nr = 0; 134 + int *bit; 135 + 136 + bpf_for_each(bits, bit, &data[0], 1) 137 + nr++; 138 + return nr; 139 + } 140 + 141 + SEC("syscall") 142 + __description("zero words") 143 + __success __retval(0) 144 + int zero_words(void) 145 + { 146 + u64 data[2] = {0x1, 0xff}; 147 + int nr = 0; 148 + int *bit; 149 + 150 + bpf_for_each(bits, bit, &data[0], 0) 151 + nr++; 152 + return nr; 153 + }
+51 -81
tools/testing/selftests/bpf/test_sockmap.c
··· 63 63 int failed; 64 64 int map_fd[9]; 65 65 struct bpf_map *maps[9]; 66 - int prog_fd[9]; 66 + struct bpf_program *progs[9]; 67 + struct bpf_link *links[9]; 67 68 68 69 int txmsg_pass; 69 70 int txmsg_redir; ··· 681 680 } 682 681 } 683 682 684 - s->bytes_recvd += recv; 683 + if (recv > 0) 684 + s->bytes_recvd += recv; 685 685 686 686 if (opt->check_recved_len && s->bytes_recvd > total_bytes) { 687 687 errno = EMSGSIZE; ··· 954 952 955 953 static int run_options(struct sockmap_options *options, int cg_fd, int test) 956 954 { 957 - int i, key, next_key, err, tx_prog_fd = -1, zero = 0; 955 + int i, key, next_key, err, zero = 0; 956 + struct bpf_program *tx_prog; 958 957 959 958 /* If base test skip BPF setup */ 960 959 if (test == BASE || test == BASE_SENDPAGE) ··· 963 960 964 961 /* Attach programs to sockmap */ 965 962 if (!txmsg_omit_skb_parser) { 966 - err = bpf_prog_attach(prog_fd[0], map_fd[0], 967 - BPF_SK_SKB_STREAM_PARSER, 0); 968 - if (err) { 963 + links[0] = bpf_program__attach_sockmap(progs[0], map_fd[0]); 964 + if (!links[0]) { 969 965 fprintf(stderr, 970 - "ERROR: bpf_prog_attach (sockmap %i->%i): %d (%s)\n", 971 - prog_fd[0], map_fd[0], err, strerror(errno)); 972 - return err; 966 + "ERROR: bpf_program__attach_sockmap (sockmap %i->%i): (%s)\n", 967 + bpf_program__fd(progs[0]), map_fd[0], strerror(errno)); 968 + return -1; 973 969 } 974 970 } 975 971 976 - err = bpf_prog_attach(prog_fd[1], map_fd[0], 977 - BPF_SK_SKB_STREAM_VERDICT, 0); 978 - if (err) { 979 - fprintf(stderr, "ERROR: bpf_prog_attach (sockmap): %d (%s)\n", 980 - err, strerror(errno)); 981 - return err; 972 + links[1] = bpf_program__attach_sockmap(progs[1], map_fd[0]); 973 + if (!links[1]) { 974 + fprintf(stderr, "ERROR: bpf_program__attach_sockmap (sockmap): (%s)\n", 975 + strerror(errno)); 976 + return -1; 982 977 } 983 978 984 979 /* Attach programs to TLS sockmap */ 985 980 if (txmsg_ktls_skb) { 986 981 if (!txmsg_omit_skb_parser) { 987 - err = bpf_prog_attach(prog_fd[0], 
map_fd[8], 988 - BPF_SK_SKB_STREAM_PARSER, 0); 989 - if (err) { 982 + links[2] = bpf_program__attach_sockmap(progs[0], map_fd[8]); 983 + if (!links[2]) { 990 984 fprintf(stderr, 991 - "ERROR: bpf_prog_attach (TLS sockmap %i->%i): %d (%s)\n", 992 - prog_fd[0], map_fd[8], err, strerror(errno)); 993 - return err; 985 + "ERROR: bpf_program__attach_sockmap (TLS sockmap %i->%i): (%s)\n", 986 + bpf_program__fd(progs[0]), map_fd[8], strerror(errno)); 987 + return -1; 994 988 } 995 989 } 996 990 997 - err = bpf_prog_attach(prog_fd[2], map_fd[8], 998 - BPF_SK_SKB_STREAM_VERDICT, 0); 999 - if (err) { 1000 - fprintf(stderr, "ERROR: bpf_prog_attach (TLS sockmap): %d (%s)\n", 1001 - err, strerror(errno)); 1002 - return err; 991 + links[3] = bpf_program__attach_sockmap(progs[2], map_fd[8]); 992 + if (!links[3]) { 993 + fprintf(stderr, "ERROR: bpf_program__attach_sockmap (TLS sockmap): (%s)\n", 994 + strerror(errno)); 995 + return -1; 1003 996 } 1004 997 } 1005 998 1006 999 /* Attach to cgroups */ 1007 - err = bpf_prog_attach(prog_fd[3], cg_fd, BPF_CGROUP_SOCK_OPS, 0); 1000 + err = bpf_prog_attach(bpf_program__fd(progs[3]), cg_fd, BPF_CGROUP_SOCK_OPS, 0); 1008 1001 if (err) { 1009 1002 fprintf(stderr, "ERROR: bpf_prog_attach (groups): %d (%s)\n", 1010 1003 err, strerror(errno)); ··· 1016 1017 1017 1018 /* Attach txmsg program to sockmap */ 1018 1019 if (txmsg_pass) 1019 - tx_prog_fd = prog_fd[4]; 1020 + tx_prog = progs[4]; 1020 1021 else if (txmsg_redir) 1021 - tx_prog_fd = prog_fd[5]; 1022 + tx_prog = progs[5]; 1022 1023 else if (txmsg_apply) 1023 - tx_prog_fd = prog_fd[6]; 1024 + tx_prog = progs[6]; 1024 1025 else if (txmsg_cork) 1025 - tx_prog_fd = prog_fd[7]; 1026 + tx_prog = progs[7]; 1026 1027 else if (txmsg_drop) 1027 - tx_prog_fd = prog_fd[8]; 1028 + tx_prog = progs[8]; 1028 1029 else 1029 - tx_prog_fd = 0; 1030 + tx_prog = NULL; 1030 1031 1031 - if (tx_prog_fd) { 1032 - int redir_fd, i = 0; 1032 + if (tx_prog) { 1033 + int redir_fd; 1033 1034 1034 - err = 
bpf_prog_attach(tx_prog_fd, 1035 - map_fd[1], BPF_SK_MSG_VERDICT, 0); 1036 - if (err) { 1035 + links[4] = bpf_program__attach_sockmap(tx_prog, map_fd[1]); 1036 + if (!links[4]) { 1037 1037 fprintf(stderr, 1038 - "ERROR: bpf_prog_attach (txmsg): %d (%s)\n", 1039 - err, strerror(errno)); 1038 + "ERROR: bpf_program__attach_sockmap (txmsg): (%s)\n", 1039 + strerror(errno)); 1040 + err = -1; 1040 1041 goto out; 1041 1042 } 1042 1043 1044 + i = 0; 1043 1045 err = bpf_map_update_elem(map_fd[1], &i, &c1, BPF_ANY); 1044 1046 if (err) { 1045 1047 fprintf(stderr, ··· 1279 1279 fprintf(stderr, "unknown test\n"); 1280 1280 out: 1281 1281 /* Detatch and zero all the maps */ 1282 - bpf_prog_detach2(prog_fd[3], cg_fd, BPF_CGROUP_SOCK_OPS); 1283 - bpf_prog_detach2(prog_fd[0], map_fd[0], BPF_SK_SKB_STREAM_PARSER); 1284 - bpf_prog_detach2(prog_fd[1], map_fd[0], BPF_SK_SKB_STREAM_VERDICT); 1285 - bpf_prog_detach2(prog_fd[0], map_fd[8], BPF_SK_SKB_STREAM_PARSER); 1286 - bpf_prog_detach2(prog_fd[2], map_fd[8], BPF_SK_SKB_STREAM_VERDICT); 1282 + bpf_prog_detach2(bpf_program__fd(progs[3]), cg_fd, BPF_CGROUP_SOCK_OPS); 1287 1283 1288 - if (tx_prog_fd >= 0) 1289 - bpf_prog_detach2(tx_prog_fd, map_fd[1], BPF_SK_MSG_VERDICT); 1284 + for (i = 0; i < ARRAY_SIZE(links); i++) { 1285 + if (links[i]) 1286 + bpf_link__detach(links[i]); 1287 + } 1290 1288 1291 - for (i = 0; i < 8; i++) { 1289 + for (i = 0; i < ARRAY_SIZE(map_fd); i++) { 1292 1290 key = next_key = 0; 1293 1291 bpf_map_update_elem(map_fd[i], &key, &zero, BPF_ANY); 1294 1292 while (bpf_map_get_next_key(map_fd[i], &key, &next_key) == 0) { ··· 1781 1783 "tls_sock_map", 1782 1784 }; 1783 1785 1784 - int prog_attach_type[] = { 1785 - BPF_SK_SKB_STREAM_PARSER, 1786 - BPF_SK_SKB_STREAM_VERDICT, 1787 - BPF_SK_SKB_STREAM_VERDICT, 1788 - BPF_CGROUP_SOCK_OPS, 1789 - BPF_SK_MSG_VERDICT, 1790 - BPF_SK_MSG_VERDICT, 1791 - BPF_SK_MSG_VERDICT, 1792 - BPF_SK_MSG_VERDICT, 1793 - BPF_SK_MSG_VERDICT, 1794 - }; 1795 - 1796 - int prog_type[] = { 1797 - 
BPF_PROG_TYPE_SK_SKB, 1798 - BPF_PROG_TYPE_SK_SKB, 1799 - BPF_PROG_TYPE_SK_SKB, 1800 - BPF_PROG_TYPE_SOCK_OPS, 1801 - BPF_PROG_TYPE_SK_MSG, 1802 - BPF_PROG_TYPE_SK_MSG, 1803 - BPF_PROG_TYPE_SK_MSG, 1804 - BPF_PROG_TYPE_SK_MSG, 1805 - BPF_PROG_TYPE_SK_MSG, 1806 - }; 1807 - 1808 1786 static int populate_progs(char *bpf_file) 1809 1787 { 1810 1788 struct bpf_program *prog; ··· 1799 1825 return -1; 1800 1826 } 1801 1827 1802 - bpf_object__for_each_program(prog, obj) { 1803 - bpf_program__set_type(prog, prog_type[i]); 1804 - bpf_program__set_expected_attach_type(prog, 1805 - prog_attach_type[i]); 1806 - i++; 1807 - } 1808 - 1809 1828 i = bpf_object__load(obj); 1810 1829 i = 0; 1811 1830 bpf_object__for_each_program(prog, obj) { 1812 - prog_fd[i] = bpf_program__fd(prog); 1831 + progs[i] = prog; 1813 1832 i++; 1814 1833 } 1815 1834 ··· 1815 1848 return -1; 1816 1849 } 1817 1850 } 1851 + 1852 + for (i = 0; i < ARRAY_SIZE(links); i++) 1853 + links[i] = NULL; 1818 1854 1819 1855 return 0; 1820 1856 }
+2 -2
tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c
··· 139 139 return ret; 140 140 } 141 141 142 - static int v6only_true(int fd, const struct post_socket_opts *opts) 142 + static int v6only_true(int fd, void *opts) 143 143 { 144 144 int mode = true; 145 145 146 146 return setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &mode, sizeof(mode)); 147 147 } 148 148 149 - static int v6only_false(int fd, const struct post_socket_opts *opts) 149 + static int v6only_false(int fd, void *opts) 150 150 { 151 151 int mode = false; 152 152
-5
tools/testing/selftests/bpf/test_verifier.c
··· 1237 1237 fixup_prog_kfuncs(prog, fd_array, test->fixup_kfunc_btf_id); 1238 1238 } 1239 1239 1240 - struct libcap { 1241 - struct __user_cap_header_struct hdr; 1242 - struct __user_cap_data_struct data[2]; 1243 - }; 1244 - 1245 1240 static int set_admin(bool admin) 1246 1241 { 1247 1242 int err;
+12 -1
tools/testing/selftests/bpf/trace_helpers.c
··· 211 211 */ 212 212 int kallsyms_find(const char *sym, unsigned long long *addr) 213 213 { 214 - char type, name[500]; 214 + char type, name[500], *match; 215 215 unsigned long long value; 216 216 int err = 0; 217 217 FILE *f; ··· 221 221 return -EINVAL; 222 222 223 223 while (fscanf(f, "%llx %c %499s%*[^\n]\n", &value, &type, name) > 0) { 224 + /* If CONFIG_LTO_CLANG_THIN is enabled, static variable/function 225 + * symbols could be promoted to global due to cross-file inlining. 226 + * For such cases, clang compiler will add .llvm.<hash> suffix 227 + * to those symbols to avoid potential naming conflict. 228 + * Let us ignore .llvm.<hash> suffix during symbol comparison. 229 + */ 230 + if (type == 'd') { 231 + match = strstr(name, ".llvm."); 232 + if (match) 233 + *match = '\0'; 234 + } 224 235 if (strcmp(name, sym) == 0) { 225 236 *addr = value; 226 237 goto out;