Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'move-attach_type-into-bpf_link'

Tao Chen says:

====================
Move attach_type into bpf_link

Andrii suggested moving the attach_type into bpf_link; the previous discussion
is here:
https://lore.kernel.org/bpf/CAEf4BzY7TZRjxpCJM-+LYgEqe23YFj5Uv3isb7gat2-HU4OSng@mail.gmail.com

Patch 1 adds attach_type to bpf_link and passes it to bpf_link_init, which
initializes the attach_type field.

Patches 2-7 remove the attach_type field from the various struct bpf_xx_link
types and update the link info using bpf_link's attach_type instead.

Some callers that ultimately reach bpf_link_init either have no bpf_attr from
user space or do not need to initialize attach_type from user input (e.g.
bpf_raw_tracepoint_open); these now use prog->expected_attach_type to
initialize attach_type.

bpf_struct_ops_map_update_elem
bpf_raw_tracepoint_open
bpf_struct_ops_test_run

Feedback of any kind is welcome, thanks.
====================

Link: https://patch.msgid.link/20250710032038.888700-1-chen.dylane@linux.dev
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>

+90 -78
+4 -6
drivers/net/netkit.c
··· 32 32 struct netkit_link { 33 33 struct bpf_link link; 34 34 struct net_device *dev; 35 - u32 location; 36 35 }; 37 36 38 37 static __always_inline int ··· 732 733 733 734 seq_printf(seq, "ifindex:\t%u\n", ifindex); 734 735 seq_printf(seq, "attach_type:\t%u (%s)\n", 735 - nkl->location, 736 - nkl->location == BPF_NETKIT_PRIMARY ? "primary" : "peer"); 736 + link->attach_type, 737 + link->attach_type == BPF_NETKIT_PRIMARY ? "primary" : "peer"); 737 738 } 738 739 739 740 static int netkit_link_fill_info(const struct bpf_link *link, ··· 748 749 rtnl_unlock(); 749 750 750 751 info->netkit.ifindex = ifindex; 751 - info->netkit.attach_type = nkl->location; 752 + info->netkit.attach_type = link->attach_type; 752 753 return 0; 753 754 } 754 755 ··· 774 775 struct bpf_prog *prog) 775 776 { 776 777 bpf_link_init(&nkl->link, BPF_LINK_TYPE_NETKIT, 777 - &netkit_link_lops, prog); 778 - nkl->location = attr->link_create.attach_type; 778 + &netkit_link_lops, prog, attr->link_create.attach_type); 779 779 nkl->dev = dev; 780 780 return bpf_link_prime(&nkl->link, link_primer); 781 781 }
-1
include/linux/bpf-cgroup.h
··· 103 103 struct bpf_cgroup_link { 104 104 struct bpf_link link; 105 105 struct cgroup *cgroup; 106 - enum bpf_attach_type type; 107 106 }; 108 107 109 108 struct bpf_prog_list {
+17 -12
include/linux/bpf.h
··· 1729 1729 enum bpf_link_type type; 1730 1730 const struct bpf_link_ops *ops; 1731 1731 struct bpf_prog *prog; 1732 - /* whether BPF link itself has "sleepable" semantics, which can differ 1733 - * from underlying BPF program having a "sleepable" semantics, as BPF 1734 - * link's semantics is determined by target attach hook 1735 - */ 1736 - bool sleepable; 1732 + 1737 1733 u32 flags; 1734 + enum bpf_attach_type attach_type; 1735 + 1738 1736 /* rcu is used before freeing, work can be used to schedule that 1739 1737 * RCU-based freeing before that, so they never overlap 1740 1738 */ ··· 1740 1742 struct rcu_head rcu; 1741 1743 struct work_struct work; 1742 1744 }; 1745 + /* whether BPF link itself has "sleepable" semantics, which can differ 1746 + * from underlying BPF program having a "sleepable" semantics, as BPF 1747 + * link's semantics is determined by target attach hook 1748 + */ 1749 + bool sleepable; 1743 1750 }; 1744 1751 1745 1752 struct bpf_link_ops { ··· 1784 1781 1785 1782 struct bpf_tracing_link { 1786 1783 struct bpf_tramp_link link; 1787 - enum bpf_attach_type attach_type; 1788 1784 struct bpf_trampoline *trampoline; 1789 1785 struct bpf_prog *tgt_prog; 1790 1786 }; ··· 2036 2034 2037 2035 #if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM) 2038 2036 int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog, 2039 - int cgroup_atype); 2037 + int cgroup_atype, 2038 + enum bpf_attach_type attach_type); 2040 2039 void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog); 2041 2040 #else 2042 2041 static inline int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog, 2043 - int cgroup_atype) 2042 + int cgroup_atype, 2043 + enum bpf_attach_type attach_type) 2044 2044 { 2045 2045 return -EOPNOTSUPP; 2046 2046 } ··· 2532 2528 int bpf_prog_new_fd(struct bpf_prog *prog); 2533 2529 2534 2530 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, 2535 - const struct bpf_link_ops *ops, struct bpf_prog *prog); 2531 + const struct 
bpf_link_ops *ops, struct bpf_prog *prog, 2532 + enum bpf_attach_type attach_type); 2536 2533 void bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type, 2537 2534 const struct bpf_link_ops *ops, struct bpf_prog *prog, 2538 - bool sleepable); 2535 + enum bpf_attach_type attach_type, bool sleepable); 2539 2536 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer); 2540 2537 int bpf_link_settle(struct bpf_link_primer *primer); 2541 2538 void bpf_link_cleanup(struct bpf_link_primer *primer); ··· 2888 2883 2889 2884 static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, 2890 2885 const struct bpf_link_ops *ops, 2891 - struct bpf_prog *prog) 2886 + struct bpf_prog *prog, enum bpf_attach_type attach_type) 2892 2887 { 2893 2888 } 2894 2889 2895 2890 static inline void bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type, 2896 2891 const struct bpf_link_ops *ops, struct bpf_prog *prog, 2897 - bool sleepable) 2892 + enum bpf_attach_type attach_type, bool sleepable) 2898 2893 { 2899 2894 } 2900 2895
-1
include/net/tcx.h
··· 20 20 struct tcx_link { 21 21 struct bpf_link link; 22 22 struct net_device *dev; 23 - u32 location; 24 23 }; 25 24 26 25 static inline void tcx_set_ingress(struct sk_buff *skb, bool ingress)
+2 -1
kernel/bpf/bpf_iter.c
··· 552 552 if (!link) 553 553 return -ENOMEM; 554 554 555 - bpf_link_init(&link->link, BPF_LINK_TYPE_ITER, &bpf_iter_link_lops, prog); 555 + bpf_link_init(&link->link, BPF_LINK_TYPE_ITER, &bpf_iter_link_lops, prog, 556 + attr->link_create.attach_type); 556 557 link->tinfo = tinfo; 557 558 558 559 err = bpf_link_prime(&link->link, &link_primer);
+3 -2
kernel/bpf/bpf_struct_ops.c
··· 808 808 goto reset_unlock; 809 809 } 810 810 bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, 811 - &bpf_struct_ops_link_lops, prog); 811 + &bpf_struct_ops_link_lops, prog, prog->expected_attach_type); 812 812 *plink++ = &link->link; 813 813 814 814 ksym = kzalloc(sizeof(*ksym), GFP_USER); ··· 1351 1351 err = -ENOMEM; 1352 1352 goto err_out; 1353 1353 } 1354 - bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_map_lops, NULL); 1354 + bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_map_lops, NULL, 1355 + attr->link_create.attach_type); 1355 1356 1356 1357 err = bpf_link_prime(&link->link, &link_primer); 1357 1358 if (err)
+8 -9
kernel/bpf/cgroup.c
··· 867 867 cgrp->bpf.flags[atype] = saved_flags; 868 868 869 869 if (type == BPF_LSM_CGROUP) { 870 - err = bpf_trampoline_link_cgroup_shim(new_prog, atype); 870 + err = bpf_trampoline_link_cgroup_shim(new_prog, atype, type); 871 871 if (err) 872 872 goto cleanup; 873 873 } ··· 984 984 struct hlist_head *progs; 985 985 bool found = false; 986 986 987 - atype = bpf_cgroup_atype_find(link->type, new_prog->aux->attach_btf_id); 987 + atype = bpf_cgroup_atype_find(link->link.attach_type, new_prog->aux->attach_btf_id); 988 988 if (atype < 0) 989 989 return -EINVAL; 990 990 ··· 1396 1396 } 1397 1397 1398 1398 WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link, 1399 - cg_link->type, 0)); 1400 - if (cg_link->type == BPF_LSM_CGROUP) 1399 + link->attach_type, 0)); 1400 + if (link->attach_type == BPF_LSM_CGROUP) 1401 1401 bpf_trampoline_unlink_cgroup_shim(cg_link->link.prog); 1402 1402 1403 1403 cg = cg_link->cgroup; ··· 1439 1439 "cgroup_id:\t%llu\n" 1440 1440 "attach_type:\t%d\n", 1441 1441 cg_id, 1442 - cg_link->type); 1442 + link->attach_type); 1443 1443 } 1444 1444 1445 1445 static int bpf_cgroup_link_fill_link_info(const struct bpf_link *link, ··· 1455 1455 cgroup_unlock(); 1456 1456 1457 1457 info->cgroup.cgroup_id = cg_id; 1458 - info->cgroup.attach_type = cg_link->type; 1458 + info->cgroup.attach_type = link->attach_type; 1459 1459 return 0; 1460 1460 } 1461 1461 ··· 1495 1495 goto out_put_cgroup; 1496 1496 } 1497 1497 bpf_link_init(&link->link, BPF_LINK_TYPE_CGROUP, &bpf_cgroup_link_lops, 1498 - prog); 1498 + prog, attr->link_create.attach_type); 1499 1499 link->cgroup = cgrp; 1500 - link->type = attr->link_create.attach_type; 1501 1500 1502 1501 err = bpf_link_prime(&link->link, &link_primer); 1503 1502 if (err) { ··· 1505 1506 } 1506 1507 1507 1508 err = cgroup_bpf_attach(cgrp, NULL, NULL, link, 1508 - link->type, BPF_F_ALLOW_MULTI | attr->link_create.flags, 1509 + link->link.attach_type, BPF_F_ALLOW_MULTI | attr->link_create.flags, 1509 1510 
attr->link_create.cgroup.relative_fd, 1510 1511 attr->link_create.cgroup.expected_revision); 1511 1512 if (err) {
+4 -6
kernel/bpf/net_namespace.c
··· 11 11 12 12 struct bpf_netns_link { 13 13 struct bpf_link link; 14 - enum bpf_attach_type type; 15 - enum netns_bpf_attach_type netns_type; 16 14 17 15 /* We don't hold a ref to net in order to auto-detach the link 18 16 * when netns is going away. Instead we rely on pernet ··· 19 21 */ 20 22 struct net *net; 21 23 struct list_head node; /* node in list of links attached to net */ 24 + enum netns_bpf_attach_type netns_type; 22 25 }; 23 26 24 27 /* Protects updates to netns_bpf */ ··· 215 216 mutex_unlock(&netns_bpf_mutex); 216 217 217 218 info->netns.netns_ino = inum; 218 - info->netns.attach_type = net_link->type; 219 + info->netns.attach_type = link->attach_type; 219 220 return 0; 220 221 } 221 222 ··· 229 230 "netns_ino:\t%u\n" 230 231 "attach_type:\t%u\n", 231 232 info.netns.netns_ino, 232 - info.netns.attach_type); 233 + link->attach_type); 233 234 } 234 235 235 236 static const struct bpf_link_ops bpf_netns_link_ops = { ··· 500 501 goto out_put_net; 501 502 } 502 503 bpf_link_init(&net_link->link, BPF_LINK_TYPE_NETNS, 503 - &bpf_netns_link_ops, prog); 504 + &bpf_netns_link_ops, prog, type); 504 505 net_link->net = net; 505 - net_link->type = type; 506 506 net_link->netns_type = netns_type; 507 507 508 508 err = bpf_link_prime(&net_link->link, &link_primer);
+24 -16
kernel/bpf/syscall.c
··· 3069 3069 */ 3070 3070 void bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type, 3071 3071 const struct bpf_link_ops *ops, struct bpf_prog *prog, 3072 - bool sleepable) 3072 + enum bpf_attach_type attach_type, bool sleepable) 3073 3073 { 3074 3074 WARN_ON(ops->dealloc && ops->dealloc_deferred); 3075 3075 atomic64_set(&link->refcnt, 1); ··· 3078 3078 link->id = 0; 3079 3079 link->ops = ops; 3080 3080 link->prog = prog; 3081 + link->attach_type = attach_type; 3081 3082 } 3082 3083 3083 3084 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, 3084 - const struct bpf_link_ops *ops, struct bpf_prog *prog) 3085 + const struct bpf_link_ops *ops, struct bpf_prog *prog, 3086 + enum bpf_attach_type attach_type) 3085 3087 { 3086 - bpf_link_init_sleepable(link, type, ops, prog, false); 3088 + bpf_link_init_sleepable(link, type, ops, prog, attach_type, false); 3087 3089 } 3088 3090 3089 3091 static void bpf_link_free_id(int id) ··· 3414 3412 "target_obj_id:\t%u\n" 3415 3413 "target_btf_id:\t%u\n" 3416 3414 "cookie:\t%llu\n", 3417 - tr_link->attach_type, 3415 + link->attach_type, 3418 3416 target_obj_id, 3419 3417 target_btf_id, 3420 3418 tr_link->link.cookie); ··· 3426 3424 struct bpf_tracing_link *tr_link = 3427 3425 container_of(link, struct bpf_tracing_link, link.link); 3428 3426 3429 - info->tracing.attach_type = tr_link->attach_type; 3427 + info->tracing.attach_type = link->attach_type; 3430 3428 info->tracing.cookie = tr_link->link.cookie; 3431 3429 bpf_trampoline_unpack_key(tr_link->trampoline->key, 3432 3430 &info->tracing.target_obj_id, ··· 3445 3443 static int bpf_tracing_prog_attach(struct bpf_prog *prog, 3446 3444 int tgt_prog_fd, 3447 3445 u32 btf_id, 3448 - u64 bpf_cookie) 3446 + u64 bpf_cookie, 3447 + enum bpf_attach_type attach_type) 3449 3448 { 3450 3449 struct bpf_link_primer link_primer; 3451 3450 struct bpf_prog *tgt_prog = NULL; ··· 3514 3511 goto out_put_prog; 3515 3512 } 3516 3513 bpf_link_init(&link->link.link, 
BPF_LINK_TYPE_TRACING, 3517 - &bpf_tracing_link_lops, prog); 3518 - link->attach_type = prog->expected_attach_type; 3514 + &bpf_tracing_link_lops, prog, attach_type); 3515 + 3519 3516 link->link.cookie = bpf_cookie; 3520 3517 3521 3518 mutex_lock(&prog->aux->dst_mutex); ··· 4052 4049 err = -ENOMEM; 4053 4050 goto out_put_file; 4054 4051 } 4055 - bpf_link_init(&link->link, BPF_LINK_TYPE_PERF_EVENT, &bpf_perf_link_lops, prog); 4052 + bpf_link_init(&link->link, BPF_LINK_TYPE_PERF_EVENT, &bpf_perf_link_lops, prog, 4053 + attr->link_create.attach_type); 4056 4054 link->perf_file = perf_file; 4057 4055 4058 4056 err = bpf_link_prime(&link->link, &link_primer); ··· 4085 4081 #endif /* CONFIG_PERF_EVENTS */ 4086 4082 4087 4083 static int bpf_raw_tp_link_attach(struct bpf_prog *prog, 4088 - const char __user *user_tp_name, u64 cookie) 4084 + const char __user *user_tp_name, u64 cookie, 4085 + enum bpf_attach_type attach_type) 4089 4086 { 4090 4087 struct bpf_link_primer link_primer; 4091 4088 struct bpf_raw_tp_link *link; ··· 4109 4104 tp_name = prog->aux->attach_func_name; 4110 4105 break; 4111 4106 } 4112 - return bpf_tracing_prog_attach(prog, 0, 0, 0); 4107 + return bpf_tracing_prog_attach(prog, 0, 0, 0, attach_type); 4113 4108 case BPF_PROG_TYPE_RAW_TRACEPOINT: 4114 4109 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: 4115 4110 if (strncpy_from_user(buf, user_tp_name, sizeof(buf) - 1) < 0) ··· 4131 4126 goto out_put_btp; 4132 4127 } 4133 4128 bpf_link_init_sleepable(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT, 4134 - &bpf_raw_tp_link_lops, prog, 4129 + &bpf_raw_tp_link_lops, prog, attach_type, 4135 4130 tracepoint_is_faultable(btp->tp)); 4136 4131 link->btp = btp; 4137 4132 link->cookie = cookie; ··· 4173 4168 4174 4169 tp_name = u64_to_user_ptr(attr->raw_tracepoint.name); 4175 4170 cookie = attr->raw_tracepoint.cookie; 4176 - fd = bpf_raw_tp_link_attach(prog, tp_name, cookie); 4171 + fd = bpf_raw_tp_link_attach(prog, tp_name, cookie, prog->expected_attach_type); 4177 4172 if 
(fd < 0) 4178 4173 bpf_prog_put(prog); 4179 4174 return fd; ··· 5530 5525 ret = bpf_tracing_prog_attach(prog, 5531 5526 attr->link_create.target_fd, 5532 5527 attr->link_create.target_btf_id, 5533 - attr->link_create.tracing.cookie); 5528 + attr->link_create.tracing.cookie, 5529 + attr->link_create.attach_type); 5534 5530 break; 5535 5531 case BPF_PROG_TYPE_LSM: 5536 5532 case BPF_PROG_TYPE_TRACING: ··· 5540 5534 goto out; 5541 5535 } 5542 5536 if (prog->expected_attach_type == BPF_TRACE_RAW_TP) 5543 - ret = bpf_raw_tp_link_attach(prog, NULL, attr->link_create.tracing.cookie); 5537 + ret = bpf_raw_tp_link_attach(prog, NULL, attr->link_create.tracing.cookie, 5538 + attr->link_create.attach_type); 5544 5539 else if (prog->expected_attach_type == BPF_TRACE_ITER) 5545 5540 ret = bpf_iter_link_attach(attr, uattr, prog); 5546 5541 else if (prog->expected_attach_type == BPF_LSM_CGROUP) ··· 5550 5543 ret = bpf_tracing_prog_attach(prog, 5551 5544 attr->link_create.target_fd, 5552 5545 attr->link_create.target_btf_id, 5553 - attr->link_create.tracing.cookie); 5546 + attr->link_create.tracing.cookie, 5547 + attr->link_create.attach_type); 5554 5548 break; 5555 5549 case BPF_PROG_TYPE_FLOW_DISSECTOR: 5556 5550 case BPF_PROG_TYPE_SK_LOOKUP:
+8 -8
kernel/bpf/tcx.c
··· 142 142 u64 revision) 143 143 { 144 144 struct tcx_link *tcx = tcx_link(link); 145 - bool created, ingress = tcx->location == BPF_TCX_INGRESS; 145 + bool created, ingress = link->attach_type == BPF_TCX_INGRESS; 146 146 struct bpf_mprog_entry *entry, *entry_new; 147 147 struct net_device *dev = tcx->dev; 148 148 int ret; ··· 169 169 static void tcx_link_release(struct bpf_link *link) 170 170 { 171 171 struct tcx_link *tcx = tcx_link(link); 172 - bool ingress = tcx->location == BPF_TCX_INGRESS; 172 + bool ingress = link->attach_type == BPF_TCX_INGRESS; 173 173 struct bpf_mprog_entry *entry, *entry_new; 174 174 struct net_device *dev; 175 175 int ret = 0; ··· 204 204 struct bpf_prog *oprog) 205 205 { 206 206 struct tcx_link *tcx = tcx_link(link); 207 - bool ingress = tcx->location == BPF_TCX_INGRESS; 207 + bool ingress = link->attach_type == BPF_TCX_INGRESS; 208 208 struct bpf_mprog_entry *entry, *entry_new; 209 209 struct net_device *dev; 210 210 int ret = 0; ··· 260 260 261 261 seq_printf(seq, "ifindex:\t%u\n", ifindex); 262 262 seq_printf(seq, "attach_type:\t%u (%s)\n", 263 - tcx->location, 264 - tcx->location == BPF_TCX_INGRESS ? "ingress" : "egress"); 263 + link->attach_type, 264 + link->attach_type == BPF_TCX_INGRESS ? "ingress" : "egress"); 265 265 } 266 266 267 267 static int tcx_link_fill_info(const struct bpf_link *link, ··· 276 276 rtnl_unlock(); 277 277 278 278 info->tcx.ifindex = ifindex; 279 - info->tcx.attach_type = tcx->location; 279 + info->tcx.attach_type = link->attach_type; 280 280 return 0; 281 281 } 282 282 ··· 301 301 struct net_device *dev, 302 302 struct bpf_prog *prog) 303 303 { 304 - bpf_link_init(&tcx->link, BPF_LINK_TYPE_TCX, &tcx_link_lops, prog); 305 - tcx->location = attr->link_create.attach_type; 304 + bpf_link_init(&tcx->link, BPF_LINK_TYPE_TCX, &tcx_link_lops, prog, 305 + attr->link_create.attach_type); 306 306 tcx->dev = dev; 307 307 return bpf_link_prime(&tcx->link, link_primer); 308 308 }
+6 -4
kernel/bpf/trampoline.c
··· 674 674 675 675 static struct bpf_shim_tramp_link *cgroup_shim_alloc(const struct bpf_prog *prog, 676 676 bpf_func_t bpf_func, 677 - int cgroup_atype) 677 + int cgroup_atype, 678 + enum bpf_attach_type attach_type) 678 679 { 679 680 struct bpf_shim_tramp_link *shim_link = NULL; 680 681 struct bpf_prog *p; ··· 702 701 p->expected_attach_type = BPF_LSM_MAC; 703 702 bpf_prog_inc(p); 704 703 bpf_link_init(&shim_link->link.link, BPF_LINK_TYPE_UNSPEC, 705 - &bpf_shim_tramp_link_lops, p); 704 + &bpf_shim_tramp_link_lops, p, attach_type); 706 705 bpf_cgroup_atype_get(p->aux->attach_btf_id, cgroup_atype); 707 706 708 707 return shim_link; ··· 727 726 } 728 727 729 728 int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog, 730 - int cgroup_atype) 729 + int cgroup_atype, 730 + enum bpf_attach_type attach_type) 731 731 { 732 732 struct bpf_shim_tramp_link *shim_link = NULL; 733 733 struct bpf_attach_target_info tgt_info = {}; ··· 765 763 766 764 /* Allocate and install new shim. */ 767 765 768 - shim_link = cgroup_shim_alloc(prog, bpf_func, cgroup_atype); 766 + shim_link = cgroup_shim_alloc(prog, bpf_func, cgroup_atype, attach_type); 769 767 if (!shim_link) { 770 768 err = -ENOMEM; 771 769 goto err;
+2 -2
kernel/trace/bpf_trace.c
··· 2986 2986 } 2987 2987 2988 2988 bpf_link_init(&link->link, BPF_LINK_TYPE_KPROBE_MULTI, 2989 - &bpf_kprobe_multi_link_lops, prog); 2989 + &bpf_kprobe_multi_link_lops, prog, attr->link_create.attach_type); 2990 2990 2991 2991 err = bpf_link_prime(&link->link, &link_primer); 2992 2992 if (err) ··· 3441 3441 link->link.flags = flags; 3442 3442 3443 3443 bpf_link_init(&link->link, BPF_LINK_TYPE_UPROBE_MULTI, 3444 - &bpf_uprobe_multi_link_lops, prog); 3444 + &bpf_uprobe_multi_link_lops, prog, attr->link_create.attach_type); 3445 3445 3446 3446 for (i = 0; i < cnt; i++) { 3447 3447 uprobes[i].uprobe = uprobe_register(d_real_inode(link->path.dentry),
+2 -1
net/bpf/bpf_dummy_struct_ops.c
··· 171 171 } 172 172 /* prog doesn't take the ownership of the reference from caller */ 173 173 bpf_prog_inc(prog); 174 - bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_link_lops, prog); 174 + bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_link_lops, prog, 175 + prog->expected_attach_type); 175 176 176 177 op_idx = prog->expected_attach_type; 177 178 err = bpf_struct_ops_prepare_trampoline(tlinks, link,
+2 -1
net/core/dev.c
··· 10364 10364 goto unlock; 10365 10365 } 10366 10366 10367 - bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog); 10367 + bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog, 10368 + attr->link_create.attach_type); 10368 10369 link->dev = dev; 10369 10370 link->flags = attr->link_create.flags; 10370 10371
+6 -7
net/core/sock_map.c
··· 1709 1709 struct sockmap_link { 1710 1710 struct bpf_link link; 1711 1711 struct bpf_map *map; 1712 - enum bpf_attach_type attach_type; 1713 1712 }; 1714 1713 1715 1714 static void sock_map_link_release(struct bpf_link *link) ··· 1720 1721 goto out; 1721 1722 1722 1723 WARN_ON_ONCE(sock_map_prog_update(sockmap_link->map, NULL, link->prog, link, 1723 - sockmap_link->attach_type)); 1724 + link->attach_type)); 1724 1725 1725 1726 bpf_map_put_with_uref(sockmap_link->map); 1726 1727 sockmap_link->map = NULL; ··· 1771 1772 } 1772 1773 1773 1774 ret = sock_map_prog_link_lookup(sockmap_link->map, &pprog, &plink, 1774 - sockmap_link->attach_type); 1775 + link->attach_type); 1775 1776 if (ret) 1776 1777 goto out; 1777 1778 ··· 1816 1817 u32 map_id = sock_map_link_get_map_id(sockmap_link); 1817 1818 1818 1819 info->sockmap.map_id = map_id; 1819 - info->sockmap.attach_type = sockmap_link->attach_type; 1820 + info->sockmap.attach_type = link->attach_type; 1820 1821 return 0; 1821 1822 } 1822 1823 ··· 1827 1828 u32 map_id = sock_map_link_get_map_id(sockmap_link); 1828 1829 1829 1830 seq_printf(seq, "map_id:\t%u\n", map_id); 1830 - seq_printf(seq, "attach_type:\t%u\n", sockmap_link->attach_type); 1831 + seq_printf(seq, "attach_type:\t%u\n", link->attach_type); 1831 1832 } 1832 1833 1833 1834 static const struct bpf_link_ops sock_map_link_ops = { ··· 1865 1866 } 1866 1867 1867 1868 attach_type = attr->link_create.attach_type; 1868 - bpf_link_init(&sockmap_link->link, BPF_LINK_TYPE_SOCKMAP, &sock_map_link_ops, prog); 1869 + bpf_link_init(&sockmap_link->link, BPF_LINK_TYPE_SOCKMAP, &sock_map_link_ops, prog, 1870 + attach_type); 1869 1871 sockmap_link->map = map; 1870 - sockmap_link->attach_type = attach_type; 1871 1872 1872 1873 ret = bpf_link_prime(&sockmap_link->link, &link_primer); 1873 1874 if (ret) {
+2 -1
net/netfilter/nf_bpf_link.c
··· 225 225 if (!link) 226 226 return -ENOMEM; 227 227 228 - bpf_link_init(&link->link, BPF_LINK_TYPE_NETFILTER, &bpf_nf_link_lops, prog); 228 + bpf_link_init(&link->link, BPF_LINK_TYPE_NETFILTER, &bpf_nf_link_lops, prog, 229 + attr->link_create.attach_type); 229 230 230 231 link->hook_ops.hook = nf_hook_run_bpf; 231 232 link->hook_ops.hook_ops_type = NF_HOOK_OP_BPF;