Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bpf, netkit: Add indirect call wrapper for fetching peer dev

ndo_get_peer_dev is used in the tcx BPF fast path, therefore make use of
the indirect call wrapper to optimize the bpf_redirect_peer()
internal handling a bit. Add a small skb_get_peer_dev() wrapper which
utilizes the INDIRECT_CALL_1() macro instead of open coding.

Future work could potentially add a peer pointer directly into struct
net_device and convert veth and netkit over to use it, so
that eventually ndo_get_peer_dev can be removed.

Co-developed-by: Nikolay Aleksandrov <razor@blackwall.org>
Signed-off-by: Nikolay Aleksandrov <razor@blackwall.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Stanislav Fomichev <sdf@google.com>
Link: https://lore.kernel.org/r/20231114004220.6495-7-daniel@iogearbox.net
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>

authored by

Daniel Borkmann and committed by
Martin KaFai Lau
2c225425 024ee930

+21 -6
+2 -1
drivers/net/netkit.c
··· 7 7 #include <linux/filter.h> 8 8 #include <linux/netfilter_netdev.h> 9 9 #include <linux/bpf_mprog.h> 10 + #include <linux/indirect_call_wrapper.h> 10 11 11 12 #include <net/netkit.h> 12 13 #include <net/dst.h> ··· 178 177 rcu_read_unlock(); 179 178 } 180 179 181 - static struct net_device *netkit_peer_dev(struct net_device *dev) 180 + INDIRECT_CALLABLE_SCOPE struct net_device *netkit_peer_dev(struct net_device *dev) 182 181 { 183 182 return rcu_dereference(netkit_priv(dev)->peer); 184 183 }
+6
include/net/netkit.h
··· 10 10 int netkit_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); 11 11 int netkit_prog_detach(const union bpf_attr *attr, struct bpf_prog *prog); 12 12 int netkit_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr); 13 + INDIRECT_CALLABLE_DECLARE(struct net_device *netkit_peer_dev(struct net_device *dev)); 13 14 #else 14 15 static inline int netkit_prog_attach(const union bpf_attr *attr, 15 16 struct bpf_prog *prog) ··· 34 33 union bpf_attr __user *uattr) 35 34 { 36 35 return -EINVAL; 36 + } 37 + 38 + static inline struct net_device *netkit_peer_dev(struct net_device *dev) 39 + { 40 + return NULL; 37 41 } 38 42 #endif /* CONFIG_NETKIT */ 39 43 #endif /* __NET_NETKIT_H */
+13 -5
net/core/filter.c
··· 81 81 #include <net/xdp.h> 82 82 #include <net/mptcp.h> 83 83 #include <net/netfilter/nf_conntrack_bpf.h> 84 + #include <net/netkit.h> 84 85 #include <linux/un.h> 85 86 86 87 #include "dev.h" ··· 2469 2468 DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info); 2470 2469 EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info); 2471 2470 2471 + static struct net_device *skb_get_peer_dev(struct net_device *dev) 2472 + { 2473 + const struct net_device_ops *ops = dev->netdev_ops; 2474 + 2475 + if (likely(ops->ndo_get_peer_dev)) 2476 + return INDIRECT_CALL_1(ops->ndo_get_peer_dev, 2477 + netkit_peer_dev, dev); 2478 + return NULL; 2479 + } 2480 + 2472 2481 int skb_do_redirect(struct sk_buff *skb) 2473 2482 { 2474 2483 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); ··· 2492 2481 if (unlikely(!dev)) 2493 2482 goto out_drop; 2494 2483 if (flags & BPF_F_PEER) { 2495 - const struct net_device_ops *ops = dev->netdev_ops; 2496 - 2497 - if (unlikely(!ops->ndo_get_peer_dev || 2498 - !skb_at_tc_ingress(skb))) 2484 + if (unlikely(!skb_at_tc_ingress(skb))) 2499 2485 goto out_drop; 2500 - dev = ops->ndo_get_peer_dev(dev); 2486 + dev = skb_get_peer_dev(dev); 2501 2487 if (unlikely(!dev || 2502 2488 !(dev->flags & IFF_UP) || 2503 2489 net_eq(net, dev_net(dev))))