Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

bpf: Switch to new kfunc flags infrastructure

Instead of populating multiple sets to indicate some attribute and then
searching for the same BTF ID in each of them, prepare a single unified BTF
set which indicates whether a kfunc is allowed to be called, and also its
attributes if any at the same time. Now, only one call is needed to
perform the lookup for both kfunc availability and its attributes.

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20220721134245.2450-4-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

authored by

Kumar Kartikeya Dwivedi and committed by
Alexei Starovoitov
a4703e31 ef2c6f37

+144 -221
+2 -1
include/linux/bpf.h
··· 1924 1924 struct bpf_reg_state *regs); 1925 1925 int btf_check_kfunc_arg_match(struct bpf_verifier_env *env, 1926 1926 const struct btf *btf, u32 func_id, 1927 - struct bpf_reg_state *regs); 1927 + struct bpf_reg_state *regs, 1928 + u32 kfunc_flags); 1928 1929 int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog, 1929 1930 struct bpf_reg_state *reg); 1930 1931 int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
+10 -23
include/linux/btf.h
··· 12 12 #define BTF_TYPE_EMIT(type) ((void)(type *)0) 13 13 #define BTF_TYPE_EMIT_ENUM(enum_val) ((void)enum_val) 14 14 15 - enum btf_kfunc_type { 16 - BTF_KFUNC_TYPE_CHECK, 17 - BTF_KFUNC_TYPE_ACQUIRE, 18 - BTF_KFUNC_TYPE_RELEASE, 19 - BTF_KFUNC_TYPE_RET_NULL, 20 - BTF_KFUNC_TYPE_KPTR_ACQUIRE, 21 - BTF_KFUNC_TYPE_MAX, 22 - }; 15 + /* These need to be macros, as the expressions are used in assembler input */ 16 + #define KF_ACQUIRE (1 << 0) /* kfunc is an acquire function */ 17 + #define KF_RELEASE (1 << 1) /* kfunc is a release function */ 18 + #define KF_RET_NULL (1 << 2) /* kfunc returns a pointer that may be NULL */ 19 + #define KF_KPTR_GET (1 << 3) /* kfunc returns reference to a kptr */ 23 20 24 21 struct btf; 25 22 struct btf_member; ··· 27 30 28 31 struct btf_kfunc_id_set { 29 32 struct module *owner; 30 - union { 31 - struct { 32 - struct btf_id_set *check_set; 33 - struct btf_id_set *acquire_set; 34 - struct btf_id_set *release_set; 35 - struct btf_id_set *ret_null_set; 36 - struct btf_id_set *kptr_acquire_set; 37 - }; 38 - struct btf_id_set *sets[BTF_KFUNC_TYPE_MAX]; 39 - }; 33 + struct btf_id_set8 *set; 40 34 }; 41 35 42 36 struct btf_id_dtor_kfunc { ··· 366 378 const char *btf_name_by_offset(const struct btf *btf, u32 offset); 367 379 struct btf *btf_parse_vmlinux(void); 368 380 struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog); 369 - bool btf_kfunc_id_set_contains(const struct btf *btf, 381 + u32 *btf_kfunc_id_set_contains(const struct btf *btf, 370 382 enum bpf_prog_type prog_type, 371 - enum btf_kfunc_type type, u32 kfunc_btf_id); 383 + u32 kfunc_btf_id); 372 384 int register_btf_kfunc_id_set(enum bpf_prog_type prog_type, 373 385 const struct btf_kfunc_id_set *s); 374 386 s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id); ··· 385 397 { 386 398 return NULL; 387 399 } 388 - static inline bool btf_kfunc_id_set_contains(const struct btf *btf, 400 + static inline u32 *btf_kfunc_id_set_contains(const struct btf *btf, 389 401 enum 
bpf_prog_type prog_type, 390 - enum btf_kfunc_type type, 391 402 u32 kfunc_btf_id) 392 403 { 393 - return false; 404 + return NULL; 394 405 } 395 406 static inline int register_btf_kfunc_id_set(enum bpf_prog_type prog_type, 396 407 const struct btf_kfunc_id_set *s)
+48 -58
kernel/bpf/btf.c
··· 213 213 }; 214 214 215 215 struct btf_kfunc_set_tab { 216 - struct btf_id_set *sets[BTF_KFUNC_HOOK_MAX][BTF_KFUNC_TYPE_MAX]; 216 + struct btf_id_set8 *sets[BTF_KFUNC_HOOK_MAX]; 217 217 }; 218 218 219 219 struct btf_id_dtor_kfunc_tab { ··· 1616 1616 static void btf_free_kfunc_set_tab(struct btf *btf) 1617 1617 { 1618 1618 struct btf_kfunc_set_tab *tab = btf->kfunc_set_tab; 1619 - int hook, type; 1619 + int hook; 1620 1620 1621 1621 if (!tab) 1622 1622 return; ··· 1625 1625 */ 1626 1626 if (btf_is_module(btf)) 1627 1627 goto free_tab; 1628 - for (hook = 0; hook < ARRAY_SIZE(tab->sets); hook++) { 1629 - for (type = 0; type < ARRAY_SIZE(tab->sets[0]); type++) 1630 - kfree(tab->sets[hook][type]); 1631 - } 1628 + for (hook = 0; hook < ARRAY_SIZE(tab->sets); hook++) 1629 + kfree(tab->sets[hook]); 1632 1630 free_tab: 1633 1631 kfree(tab); 1634 1632 btf->kfunc_set_tab = NULL; ··· 6170 6172 static int btf_check_func_arg_match(struct bpf_verifier_env *env, 6171 6173 const struct btf *btf, u32 func_id, 6172 6174 struct bpf_reg_state *regs, 6173 - bool ptr_to_mem_ok) 6175 + bool ptr_to_mem_ok, 6176 + u32 kfunc_flags) 6174 6177 { 6175 6178 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); 6176 6179 struct bpf_verifier_log *log = &env->log; ··· 6209 6210 6210 6211 if (is_kfunc) { 6211 6212 /* Only kfunc can be release func */ 6212 - rel = btf_kfunc_id_set_contains(btf, resolve_prog_type(env->prog), 6213 - BTF_KFUNC_TYPE_RELEASE, func_id); 6214 - kptr_get = btf_kfunc_id_set_contains(btf, resolve_prog_type(env->prog), 6215 - BTF_KFUNC_TYPE_KPTR_ACQUIRE, func_id); 6213 + rel = kfunc_flags & KF_RELEASE; 6214 + kptr_get = kfunc_flags & KF_KPTR_GET; 6216 6215 } 6217 6216 6218 6217 /* check that BTF function arguments match actual types that the ··· 6439 6442 return -EINVAL; 6440 6443 6441 6444 is_global = prog->aux->func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL; 6442 - err = btf_check_func_arg_match(env, btf, btf_id, regs, is_global); 6445 + err = 
btf_check_func_arg_match(env, btf, btf_id, regs, is_global, 0); 6443 6446 6444 6447 /* Compiler optimizations can remove arguments from static functions 6445 6448 * or mismatched type can be passed into a global function. ··· 6452 6455 6453 6456 int btf_check_kfunc_arg_match(struct bpf_verifier_env *env, 6454 6457 const struct btf *btf, u32 func_id, 6455 - struct bpf_reg_state *regs) 6458 + struct bpf_reg_state *regs, 6459 + u32 kfunc_flags) 6456 6460 { 6457 - return btf_check_func_arg_match(env, btf, func_id, regs, true); 6461 + return btf_check_func_arg_match(env, btf, func_id, regs, true, kfunc_flags); 6458 6462 } 6459 6463 6460 6464 /* Convert BTF of a function into bpf_reg_state if possible ··· 6852 6854 return bsearch(&id, set->ids, set->cnt, sizeof(u32), btf_id_cmp_func) != NULL; 6853 6855 } 6854 6856 6857 + static void *btf_id_set8_contains(const struct btf_id_set8 *set, u32 id) 6858 + { 6859 + return bsearch(&id, set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func); 6860 + } 6861 + 6855 6862 enum { 6856 6863 BTF_MODULE_F_LIVE = (1 << 0), 6857 6864 }; ··· 7105 7102 7106 7103 /* Kernel Function (kfunc) BTF ID set registration API */ 7107 7104 7108 - static int __btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook, 7109 - enum btf_kfunc_type type, 7110 - struct btf_id_set *add_set, bool vmlinux_set) 7105 + static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook, 7106 + struct btf_id_set8 *add_set) 7111 7107 { 7108 + bool vmlinux_set = !btf_is_module(btf); 7112 7109 struct btf_kfunc_set_tab *tab; 7113 - struct btf_id_set *set; 7110 + struct btf_id_set8 *set; 7114 7111 u32 set_cnt; 7115 7112 int ret; 7116 7113 7117 - if (hook >= BTF_KFUNC_HOOK_MAX || type >= BTF_KFUNC_TYPE_MAX) { 7114 + if (hook >= BTF_KFUNC_HOOK_MAX) { 7118 7115 ret = -EINVAL; 7119 7116 goto end; 7120 7117 } ··· 7130 7127 btf->kfunc_set_tab = tab; 7131 7128 } 7132 7129 7133 - set = tab->sets[hook][type]; 7130 + set = tab->sets[hook]; 7134 7131 /* Warn 
when register_btf_kfunc_id_set is called twice for the same hook 7135 7132 * for module sets. 7136 7133 */ ··· 7144 7141 * pointer and return. 7145 7142 */ 7146 7143 if (!vmlinux_set) { 7147 - tab->sets[hook][type] = add_set; 7144 + tab->sets[hook] = add_set; 7148 7145 return 0; 7149 7146 } 7150 7147 ··· 7153 7150 * and concatenate all individual sets being registered. While each set 7154 7151 * is individually sorted, they may become unsorted when concatenated, 7155 7152 * hence re-sorting the final set again is required to make binary 7156 - * searching the set using btf_id_set_contains function work. 7153 + * searching the set using btf_id_set8_contains function work. 7157 7154 */ 7158 7155 set_cnt = set ? set->cnt : 0; 7159 7156 ··· 7168 7165 } 7169 7166 7170 7167 /* Grow set */ 7171 - set = krealloc(tab->sets[hook][type], 7172 - offsetof(struct btf_id_set, ids[set_cnt + add_set->cnt]), 7168 + set = krealloc(tab->sets[hook], 7169 + offsetof(struct btf_id_set8, pairs[set_cnt + add_set->cnt]), 7173 7170 GFP_KERNEL | __GFP_NOWARN); 7174 7171 if (!set) { 7175 7172 ret = -ENOMEM; ··· 7177 7174 } 7178 7175 7179 7176 /* For newly allocated set, initialize set->cnt to 0 */ 7180 - if (!tab->sets[hook][type]) 7177 + if (!tab->sets[hook]) 7181 7178 set->cnt = 0; 7182 - tab->sets[hook][type] = set; 7179 + tab->sets[hook] = set; 7183 7180 7184 7181 /* Concatenate the two sets */ 7185 - memcpy(set->ids + set->cnt, add_set->ids, add_set->cnt * sizeof(set->ids[0])); 7182 + memcpy(set->pairs + set->cnt, add_set->pairs, add_set->cnt * sizeof(set->pairs[0])); 7186 7183 set->cnt += add_set->cnt; 7187 7184 7188 - sort(set->ids, set->cnt, sizeof(set->ids[0]), btf_id_cmp_func, NULL); 7185 + sort(set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func, NULL); 7189 7186 7190 7187 return 0; 7191 7188 end: ··· 7193 7190 return ret; 7194 7191 } 7195 7192 7196 - static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook, 7197 - const struct btf_kfunc_id_set *kset) 
7198 - { 7199 - bool vmlinux_set = !btf_is_module(btf); 7200 - int type, ret = 0; 7201 - 7202 - for (type = 0; type < ARRAY_SIZE(kset->sets); type++) { 7203 - if (!kset->sets[type]) 7204 - continue; 7205 - 7206 - ret = __btf_populate_kfunc_set(btf, hook, type, kset->sets[type], vmlinux_set); 7207 - if (ret) 7208 - break; 7209 - } 7210 - return ret; 7211 - } 7212 - 7213 - static bool __btf_kfunc_id_set_contains(const struct btf *btf, 7193 + static u32 *__btf_kfunc_id_set_contains(const struct btf *btf, 7214 7194 enum btf_kfunc_hook hook, 7215 - enum btf_kfunc_type type, 7216 7195 u32 kfunc_btf_id) 7217 7196 { 7218 - struct btf_id_set *set; 7197 + struct btf_id_set8 *set; 7198 + u32 *id; 7219 7199 7220 - if (hook >= BTF_KFUNC_HOOK_MAX || type >= BTF_KFUNC_TYPE_MAX) 7221 - return false; 7200 + if (hook >= BTF_KFUNC_HOOK_MAX) 7201 + return NULL; 7222 7202 if (!btf->kfunc_set_tab) 7223 - return false; 7224 - set = btf->kfunc_set_tab->sets[hook][type]; 7203 + return NULL; 7204 + set = btf->kfunc_set_tab->sets[hook]; 7225 7205 if (!set) 7226 - return false; 7227 - return btf_id_set_contains(set, kfunc_btf_id); 7206 + return NULL; 7207 + id = btf_id_set8_contains(set, kfunc_btf_id); 7208 + if (!id) 7209 + return NULL; 7210 + /* The flags for BTF ID are located next to it */ 7211 + return id + 1; 7228 7212 } 7229 7213 7230 7214 static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type) ··· 7239 7249 * keeping the reference for the duration of the call provides the necessary 7240 7250 * protection for looking up a well-formed btf->kfunc_set_tab. 
7241 7251 */ 7242 - bool btf_kfunc_id_set_contains(const struct btf *btf, 7252 + u32 *btf_kfunc_id_set_contains(const struct btf *btf, 7243 7253 enum bpf_prog_type prog_type, 7244 - enum btf_kfunc_type type, u32 kfunc_btf_id) 7254 + u32 kfunc_btf_id) 7245 7255 { 7246 7256 enum btf_kfunc_hook hook; 7247 7257 7248 7258 hook = bpf_prog_type_to_kfunc_hook(prog_type); 7249 - return __btf_kfunc_id_set_contains(btf, hook, type, kfunc_btf_id); 7259 + return __btf_kfunc_id_set_contains(btf, hook, kfunc_btf_id); 7250 7260 } 7251 7261 7252 7262 /* This function must be invoked only from initcalls/module init functions */ ··· 7273 7283 return PTR_ERR(btf); 7274 7284 7275 7285 hook = bpf_prog_type_to_kfunc_hook(prog_type); 7276 - ret = btf_populate_kfunc_set(btf, hook, kset); 7286 + ret = btf_populate_kfunc_set(btf, hook, kset->set); 7277 7287 btf_put(btf); 7278 7288 return ret; 7279 7289 }
+6 -8
kernel/bpf/verifier.c
··· 7562 7562 int err, insn_idx = *insn_idx_p; 7563 7563 const struct btf_param *args; 7564 7564 struct btf *desc_btf; 7565 + u32 *kfunc_flags; 7565 7566 bool acq; 7566 7567 7567 7568 /* skip for now, but return error when we find this in fixup_kfunc_call */ ··· 7578 7577 func_name = btf_name_by_offset(desc_btf, func->name_off); 7579 7578 func_proto = btf_type_by_id(desc_btf, func->type); 7580 7579 7581 - if (!btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog), 7582 - BTF_KFUNC_TYPE_CHECK, func_id)) { 7580 + kfunc_flags = btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog), func_id); 7581 + if (!kfunc_flags) { 7583 7582 verbose(env, "calling kernel function %s is not allowed\n", 7584 7583 func_name); 7585 7584 return -EACCES; 7586 7585 } 7587 - 7588 - acq = btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog), 7589 - BTF_KFUNC_TYPE_ACQUIRE, func_id); 7586 + acq = *kfunc_flags & KF_ACQUIRE; 7590 7587 7591 7588 /* Check the arguments */ 7592 - err = btf_check_kfunc_arg_match(env, desc_btf, func_id, regs); 7589 + err = btf_check_kfunc_arg_match(env, desc_btf, func_id, regs, *kfunc_flags); 7593 7590 if (err < 0) 7594 7591 return err; 7595 7592 /* In case of release function, we get register number of refcounted ··· 7631 7632 regs[BPF_REG_0].btf = desc_btf; 7632 7633 regs[BPF_REG_0].type = PTR_TO_BTF_ID; 7633 7634 regs[BPF_REG_0].btf_id = ptr_type_id; 7634 - if (btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog), 7635 - BTF_KFUNC_TYPE_RET_NULL, func_id)) { 7635 + if (*kfunc_flags & KF_RET_NULL) { 7636 7636 regs[BPF_REG_0].type |= PTR_MAYBE_NULL; 7637 7637 /* For mark_ptr_or_null_reg, see 93c230e3f5bd6 */ 7638 7638 regs[BPF_REG_0].id = ++env->id_gen;
+22 -48
net/bpf/test_run.c
··· 695 695 696 696 ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO); 697 697 698 - BTF_SET_START(test_sk_check_kfunc_ids) 699 - BTF_ID(func, bpf_kfunc_call_test1) 700 - BTF_ID(func, bpf_kfunc_call_test2) 701 - BTF_ID(func, bpf_kfunc_call_test3) 702 - BTF_ID(func, bpf_kfunc_call_test_acquire) 703 - BTF_ID(func, bpf_kfunc_call_memb_acquire) 704 - BTF_ID(func, bpf_kfunc_call_test_release) 705 - BTF_ID(func, bpf_kfunc_call_memb_release) 706 - BTF_ID(func, bpf_kfunc_call_memb1_release) 707 - BTF_ID(func, bpf_kfunc_call_test_kptr_get) 708 - BTF_ID(func, bpf_kfunc_call_test_pass_ctx) 709 - BTF_ID(func, bpf_kfunc_call_test_pass1) 710 - BTF_ID(func, bpf_kfunc_call_test_pass2) 711 - BTF_ID(func, bpf_kfunc_call_test_fail1) 712 - BTF_ID(func, bpf_kfunc_call_test_fail2) 713 - BTF_ID(func, bpf_kfunc_call_test_fail3) 714 - BTF_ID(func, bpf_kfunc_call_test_mem_len_pass1) 715 - BTF_ID(func, bpf_kfunc_call_test_mem_len_fail1) 716 - BTF_ID(func, bpf_kfunc_call_test_mem_len_fail2) 717 - BTF_SET_END(test_sk_check_kfunc_ids) 718 - 719 - BTF_SET_START(test_sk_acquire_kfunc_ids) 720 - BTF_ID(func, bpf_kfunc_call_test_acquire) 721 - BTF_ID(func, bpf_kfunc_call_memb_acquire) 722 - BTF_ID(func, bpf_kfunc_call_test_kptr_get) 723 - BTF_SET_END(test_sk_acquire_kfunc_ids) 724 - 725 - BTF_SET_START(test_sk_release_kfunc_ids) 726 - BTF_ID(func, bpf_kfunc_call_test_release) 727 - BTF_ID(func, bpf_kfunc_call_memb_release) 728 - BTF_ID(func, bpf_kfunc_call_memb1_release) 729 - BTF_SET_END(test_sk_release_kfunc_ids) 730 - 731 - BTF_SET_START(test_sk_ret_null_kfunc_ids) 732 - BTF_ID(func, bpf_kfunc_call_test_acquire) 733 - BTF_ID(func, bpf_kfunc_call_memb_acquire) 734 - BTF_ID(func, bpf_kfunc_call_test_kptr_get) 735 - BTF_SET_END(test_sk_ret_null_kfunc_ids) 736 - 737 - BTF_SET_START(test_sk_kptr_acquire_kfunc_ids) 738 - BTF_ID(func, bpf_kfunc_call_test_kptr_get) 739 - BTF_SET_END(test_sk_kptr_acquire_kfunc_ids) 698 + BTF_SET8_START(test_sk_check_kfunc_ids) 699 + BTF_ID_FLAGS(func, 
bpf_kfunc_call_test1) 700 + BTF_ID_FLAGS(func, bpf_kfunc_call_test2) 701 + BTF_ID_FLAGS(func, bpf_kfunc_call_test3) 702 + BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL) 703 + BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL) 704 + BTF_ID_FLAGS(func, bpf_kfunc_call_test_release, KF_RELEASE) 705 + BTF_ID_FLAGS(func, bpf_kfunc_call_memb_release, KF_RELEASE) 706 + BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE) 707 + BTF_ID_FLAGS(func, bpf_kfunc_call_test_kptr_get, KF_ACQUIRE | KF_RET_NULL | KF_KPTR_GET) 708 + BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx) 709 + BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1) 710 + BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2) 711 + BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1) 712 + BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2) 713 + BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3) 714 + BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1) 715 + BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1) 716 + BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2) 717 + BTF_SET8_END(test_sk_check_kfunc_ids) 740 718 741 719 static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size, 742 720 u32 size, u32 headroom, u32 tailroom) ··· 1598 1620 } 1599 1621 1600 1622 static const struct btf_kfunc_id_set bpf_prog_test_kfunc_set = { 1601 - .owner = THIS_MODULE, 1602 - .check_set = &test_sk_check_kfunc_ids, 1603 - .acquire_set = &test_sk_acquire_kfunc_ids, 1604 - .release_set = &test_sk_release_kfunc_ids, 1605 - .ret_null_set = &test_sk_ret_null_kfunc_ids, 1606 - .kptr_acquire_set = &test_sk_kptr_acquire_kfunc_ids 1623 + .owner = THIS_MODULE, 1624 + .set = &test_sk_check_kfunc_ids, 1607 1625 }; 1608 1626 1609 1627 BTF_ID_LIST(bpf_prog_test_dtor_kfunc_ids)
+9 -9
net/ipv4/bpf_tcp_ca.c
··· 197 197 } 198 198 } 199 199 200 - BTF_SET_START(bpf_tcp_ca_check_kfunc_ids) 201 - BTF_ID(func, tcp_reno_ssthresh) 202 - BTF_ID(func, tcp_reno_cong_avoid) 203 - BTF_ID(func, tcp_reno_undo_cwnd) 204 - BTF_ID(func, tcp_slow_start) 205 - BTF_ID(func, tcp_cong_avoid_ai) 206 - BTF_SET_END(bpf_tcp_ca_check_kfunc_ids) 200 + BTF_SET8_START(bpf_tcp_ca_check_kfunc_ids) 201 + BTF_ID_FLAGS(func, tcp_reno_ssthresh) 202 + BTF_ID_FLAGS(func, tcp_reno_cong_avoid) 203 + BTF_ID_FLAGS(func, tcp_reno_undo_cwnd) 204 + BTF_ID_FLAGS(func, tcp_slow_start) 205 + BTF_ID_FLAGS(func, tcp_cong_avoid_ai) 206 + BTF_SET8_END(bpf_tcp_ca_check_kfunc_ids) 207 207 208 208 static const struct btf_kfunc_id_set bpf_tcp_ca_kfunc_set = { 209 - .owner = THIS_MODULE, 210 - .check_set = &bpf_tcp_ca_check_kfunc_ids, 209 + .owner = THIS_MODULE, 210 + .set = &bpf_tcp_ca_check_kfunc_ids, 211 211 }; 212 212 213 213 static const struct bpf_verifier_ops bpf_tcp_ca_verifier_ops = {
+12 -12
net/ipv4/tcp_bbr.c
··· 1154 1154 .set_state = bbr_set_state, 1155 1155 }; 1156 1156 1157 - BTF_SET_START(tcp_bbr_check_kfunc_ids) 1157 + BTF_SET8_START(tcp_bbr_check_kfunc_ids) 1158 1158 #ifdef CONFIG_X86 1159 1159 #ifdef CONFIG_DYNAMIC_FTRACE 1160 - BTF_ID(func, bbr_init) 1161 - BTF_ID(func, bbr_main) 1162 - BTF_ID(func, bbr_sndbuf_expand) 1163 - BTF_ID(func, bbr_undo_cwnd) 1164 - BTF_ID(func, bbr_cwnd_event) 1165 - BTF_ID(func, bbr_ssthresh) 1166 - BTF_ID(func, bbr_min_tso_segs) 1167 - BTF_ID(func, bbr_set_state) 1160 + BTF_ID_FLAGS(func, bbr_init) 1161 + BTF_ID_FLAGS(func, bbr_main) 1162 + BTF_ID_FLAGS(func, bbr_sndbuf_expand) 1163 + BTF_ID_FLAGS(func, bbr_undo_cwnd) 1164 + BTF_ID_FLAGS(func, bbr_cwnd_event) 1165 + BTF_ID_FLAGS(func, bbr_ssthresh) 1166 + BTF_ID_FLAGS(func, bbr_min_tso_segs) 1167 + BTF_ID_FLAGS(func, bbr_set_state) 1168 1168 #endif 1169 1169 #endif 1170 - BTF_SET_END(tcp_bbr_check_kfunc_ids) 1170 + BTF_SET8_END(tcp_bbr_check_kfunc_ids) 1171 1171 1172 1172 static const struct btf_kfunc_id_set tcp_bbr_kfunc_set = { 1173 - .owner = THIS_MODULE, 1174 - .check_set = &tcp_bbr_check_kfunc_ids, 1173 + .owner = THIS_MODULE, 1174 + .set = &tcp_bbr_check_kfunc_ids, 1175 1175 }; 1176 1176 1177 1177 static int __init bbr_register(void)
+10 -10
net/ipv4/tcp_cubic.c
··· 485 485 .name = "cubic", 486 486 }; 487 487 488 - BTF_SET_START(tcp_cubic_check_kfunc_ids) 488 + BTF_SET8_START(tcp_cubic_check_kfunc_ids) 489 489 #ifdef CONFIG_X86 490 490 #ifdef CONFIG_DYNAMIC_FTRACE 491 - BTF_ID(func, cubictcp_init) 492 - BTF_ID(func, cubictcp_recalc_ssthresh) 493 - BTF_ID(func, cubictcp_cong_avoid) 494 - BTF_ID(func, cubictcp_state) 495 - BTF_ID(func, cubictcp_cwnd_event) 496 - BTF_ID(func, cubictcp_acked) 491 + BTF_ID_FLAGS(func, cubictcp_init) 492 + BTF_ID_FLAGS(func, cubictcp_recalc_ssthresh) 493 + BTF_ID_FLAGS(func, cubictcp_cong_avoid) 494 + BTF_ID_FLAGS(func, cubictcp_state) 495 + BTF_ID_FLAGS(func, cubictcp_cwnd_event) 496 + BTF_ID_FLAGS(func, cubictcp_acked) 497 497 #endif 498 498 #endif 499 - BTF_SET_END(tcp_cubic_check_kfunc_ids) 499 + BTF_SET8_END(tcp_cubic_check_kfunc_ids) 500 500 501 501 static const struct btf_kfunc_id_set tcp_cubic_kfunc_set = { 502 - .owner = THIS_MODULE, 503 - .check_set = &tcp_cubic_check_kfunc_ids, 502 + .owner = THIS_MODULE, 503 + .set = &tcp_cubic_check_kfunc_ids, 504 504 }; 505 505 506 506 static int __init cubictcp_register(void)
+10 -10
net/ipv4/tcp_dctcp.c
··· 239 239 .name = "dctcp-reno", 240 240 }; 241 241 242 - BTF_SET_START(tcp_dctcp_check_kfunc_ids) 242 + BTF_SET8_START(tcp_dctcp_check_kfunc_ids) 243 243 #ifdef CONFIG_X86 244 244 #ifdef CONFIG_DYNAMIC_FTRACE 245 - BTF_ID(func, dctcp_init) 246 - BTF_ID(func, dctcp_update_alpha) 247 - BTF_ID(func, dctcp_cwnd_event) 248 - BTF_ID(func, dctcp_ssthresh) 249 - BTF_ID(func, dctcp_cwnd_undo) 250 - BTF_ID(func, dctcp_state) 245 + BTF_ID_FLAGS(func, dctcp_init) 246 + BTF_ID_FLAGS(func, dctcp_update_alpha) 247 + BTF_ID_FLAGS(func, dctcp_cwnd_event) 248 + BTF_ID_FLAGS(func, dctcp_ssthresh) 249 + BTF_ID_FLAGS(func, dctcp_cwnd_undo) 250 + BTF_ID_FLAGS(func, dctcp_state) 251 251 #endif 252 252 #endif 253 - BTF_SET_END(tcp_dctcp_check_kfunc_ids) 253 + BTF_SET8_END(tcp_dctcp_check_kfunc_ids) 254 254 255 255 static const struct btf_kfunc_id_set tcp_dctcp_kfunc_set = { 256 - .owner = THIS_MODULE, 257 - .check_set = &tcp_dctcp_check_kfunc_ids, 256 + .owner = THIS_MODULE, 257 + .set = &tcp_dctcp_check_kfunc_ids, 258 258 }; 259 259 260 260 static int __init dctcp_register(void)
+10 -37
net/netfilter/nf_conntrack_bpf.c
··· 219 219 220 220 __diag_pop() 221 221 222 - BTF_SET_START(nf_ct_xdp_check_kfunc_ids) 223 - BTF_ID(func, bpf_xdp_ct_lookup) 224 - BTF_ID(func, bpf_ct_release) 225 - BTF_SET_END(nf_ct_xdp_check_kfunc_ids) 222 + BTF_SET8_START(nf_ct_kfunc_set) 223 + BTF_ID_FLAGS(func, bpf_xdp_ct_lookup, KF_ACQUIRE | KF_RET_NULL) 224 + BTF_ID_FLAGS(func, bpf_skb_ct_lookup, KF_ACQUIRE | KF_RET_NULL) 225 + BTF_ID_FLAGS(func, bpf_ct_release, KF_RELEASE) 226 + BTF_SET8_END(nf_ct_kfunc_set) 226 227 227 - BTF_SET_START(nf_ct_tc_check_kfunc_ids) 228 - BTF_ID(func, bpf_skb_ct_lookup) 229 - BTF_ID(func, bpf_ct_release) 230 - BTF_SET_END(nf_ct_tc_check_kfunc_ids) 231 - 232 - BTF_SET_START(nf_ct_acquire_kfunc_ids) 233 - BTF_ID(func, bpf_xdp_ct_lookup) 234 - BTF_ID(func, bpf_skb_ct_lookup) 235 - BTF_SET_END(nf_ct_acquire_kfunc_ids) 236 - 237 - BTF_SET_START(nf_ct_release_kfunc_ids) 238 - BTF_ID(func, bpf_ct_release) 239 - BTF_SET_END(nf_ct_release_kfunc_ids) 240 - 241 - /* Both sets are identical */ 242 - #define nf_ct_ret_null_kfunc_ids nf_ct_acquire_kfunc_ids 243 - 244 - static const struct btf_kfunc_id_set nf_conntrack_xdp_kfunc_set = { 245 - .owner = THIS_MODULE, 246 - .check_set = &nf_ct_xdp_check_kfunc_ids, 247 - .acquire_set = &nf_ct_acquire_kfunc_ids, 248 - .release_set = &nf_ct_release_kfunc_ids, 249 - .ret_null_set = &nf_ct_ret_null_kfunc_ids, 250 - }; 251 - 252 - static const struct btf_kfunc_id_set nf_conntrack_tc_kfunc_set = { 253 - .owner = THIS_MODULE, 254 - .check_set = &nf_ct_tc_check_kfunc_ids, 255 - .acquire_set = &nf_ct_acquire_kfunc_ids, 256 - .release_set = &nf_ct_release_kfunc_ids, 257 - .ret_null_set = &nf_ct_ret_null_kfunc_ids, 228 + static const struct btf_kfunc_id_set nf_conntrack_kfunc_set = { 229 + .owner = THIS_MODULE, 230 + .set = &nf_ct_kfunc_set, 258 231 }; 259 232 260 233 int register_nf_conntrack_bpf(void) 261 234 { 262 235 int ret; 263 236 264 - ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &nf_conntrack_xdp_kfunc_set); 265 - return ret ?: 
register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &nf_conntrack_tc_kfunc_set); 237 + ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &nf_conntrack_kfunc_set); 238 + return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &nf_conntrack_kfunc_set); 266 239 }
+5 -5
tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
··· 148 148 .write = bpf_testmod_test_write, 149 149 }; 150 150 151 - BTF_SET_START(bpf_testmod_check_kfunc_ids) 152 - BTF_ID(func, bpf_testmod_test_mod_kfunc) 153 - BTF_SET_END(bpf_testmod_check_kfunc_ids) 151 + BTF_SET8_START(bpf_testmod_check_kfunc_ids) 152 + BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc) 153 + BTF_SET8_END(bpf_testmod_check_kfunc_ids) 154 154 155 155 static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = { 156 - .owner = THIS_MODULE, 157 - .check_set = &bpf_testmod_check_kfunc_ids, 156 + .owner = THIS_MODULE, 157 + .set = &bpf_testmod_check_kfunc_ids, 158 158 }; 159 159 160 160 extern int bpf_fentry_test1(int a);