Merge https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2020-11-28

1) Do not reference the skb for xsk's generic TX side since when looped
back into RX it might crash in generic XDP, from Björn Töpel.

2) Fix umem cleanup on a partially set up xsk socket when being destroyed,
from Magnus Karlsson.

3) Fix an incorrect netdev reference count when failing xsk_bind() operation,
from Marek Majtyka.

4) Fix bpftool to set an error code on failed calloc() in build_btf_type_table(),
from Zhen Lei.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
bpf: Add MAINTAINERS entry for BPF LSM
bpftool: Fix error return value in build_btf_type_table
net, xsk: Avoid taking multiple skbuff references
xsk: Fix incorrect netdev reference count
xsk: Fix umem cleanup bug at socket destruct
MAINTAINERS: Update XDP and AF_XDP entries
====================

Link: https://lore.kernel.org/r/20201128005104.1205-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

---
 MAINTAINERS               | 23 +++++++++++++++++++++++--
 include/linux/netdevice.h | 14 +++++++++++++-
 include/net/xdp_sock.h    |  1 +
 net/core/dev.c            |  8 ++------
 net/xdp/xdp_umem.c        | 19 ++++++++++++++++---
 net/xdp/xdp_umem.h        |  2 +-
 net/xdp/xsk.c             | 10 ++--------
 net/xdp/xsk_buff_pool.c   |  6 ++++--
 tools/bpf/bpftool/btf.c   |  1 +
 9 files changed, 61 insertions(+), 23 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3355,6 +3355,17 @@
 F:	arch/x86/net/
 X:	arch/x86/net/bpf_jit_comp32.c
 
+BPF LSM (Security Audit and Enforcement using BPF)
+M:	KP Singh <kpsingh@chromium.org>
+R:	Florent Revest <revest@chromium.org>
+R:	Brendan Jackman <jackmanb@chromium.org>
+L:	bpf@vger.kernel.org
+S:	Maintained
+F:	Documentation/bpf/bpf_lsm.rst
+F:	include/linux/bpf_lsm.h
+F:	kernel/bpf/bpf_lsm.c
+F:	security/bpf/
+
 BROADCOM B44 10/100 ETHERNET DRIVER
 M:	Michael Chan <michael.chan@broadcom.com>
 L:	netdev@vger.kernel.org
@@ -19112,12 +19123,17 @@
 L:	bpf@vger.kernel.org
 S:	Supported
 F:	include/net/xdp.h
+F:	include/net/xdp_priv.h
 F:	include/trace/events/xdp.h
 F:	kernel/bpf/cpumap.c
 F:	kernel/bpf/devmap.c
 F:	net/core/xdp.c
-N:	xdp
-K:	xdp
+F:	samples/bpf/xdp*
+F:	tools/testing/selftests/bpf/*xdp*
+F:	tools/testing/selftests/bpf/*/*xdp*
+F:	drivers/net/ethernet/*/*/*/*/*xdp*
+F:	drivers/net/ethernet/*/*/*xdp*
+K:	(?:\b|_)xdp(?:\b|_)
 
 XDP SOCKETS (AF_XDP)
 M:	Björn Töpel <bjorn.topel@intel.com>
@@ -19126,9 +19142,12 @@
 L:	netdev@vger.kernel.org
 L:	bpf@vger.kernel.org
 S:	Maintained
+F:	Documentation/networking/af_xdp.rst
 F:	include/net/xdp_sock*
 F:	include/net/xsk_buff_pool.h
 F:	include/uapi/linux/if_xdp.h
+F:	include/uapi/linux/xdp_diag.h
+F:	include/net/netns/xdp.h
 F:	net/xdp/
 F:	samples/bpf/xdpsock*
 F:	tools/lib/bpf/xsk*
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2813,9 +2813,21 @@
 			       struct net_device *sb_dev);
 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
 		       struct net_device *sb_dev);
+
 int dev_queue_xmit(struct sk_buff *skb);
 int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
-int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
+int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
+
+static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
+{
+	int ret;
+
+	ret = __dev_direct_xmit(skb, queue_id);
+	if (!dev_xmit_complete(ret))
+		kfree_skb(skb);
+	return ret;
+}
+
 int register_netdevice(struct net_device *dev);
 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
 void unregister_netdevice_many(struct list_head *head);
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -31,6 +31,7 @@
 	struct page **pgs;
 	int id;
 	struct list_head xsk_dma_list;
+	struct work_struct work;
 };
 
 struct xsk_map {
diff --git a/net/core/dev.c b/net/core/dev.c
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4180,7 +4180,7 @@
 }
 EXPORT_SYMBOL(dev_queue_xmit_accel);
 
-int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
+int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
 {
 	struct net_device *dev = skb->dev;
 	struct sk_buff *orig_skb = skb;
@@ -4210,17 +4210,13 @@
 	dev_xmit_recursion_dec();
 
 	local_bh_enable();
-
-	if (!dev_xmit_complete(ret))
-		kfree_skb(skb);
-
 	return ret;
 drop:
 	atomic_long_inc(&dev->tx_dropped);
 	kfree_skb_list(skb);
 	return NET_XMIT_DROP;
 }
-EXPORT_SYMBOL(dev_direct_xmit);
+EXPORT_SYMBOL(__dev_direct_xmit);
 
 /*************************************************************************
  *			Receiver routines
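Together with the netdevice.h hunk above, this establishes a clean ownership rule: __dev_direct_xmit() reports a status but never consumes the skb, while the new static inline dev_direct_xmit() wrapper keeps the old free-on-failure behaviour for existing callers. Below is a minimal standalone model of that split (plain C, invented names, not kernel code) showing why a retry-capable caller such as the xsk TX path no longer needs the extra refcount_inc(&skb->users):

#include <stdio.h>
#include <stdlib.h>

#define TX_OK   0
#define TX_BUSY 1			/* stands in for NETDEV_TX_BUSY */

struct pkt { int id; };

/* Core transmit: returns a status but never frees the packet,
 * mirroring __dev_direct_xmit() after this patch. */
static int xmit_core(struct pkt *p, int busy)
{
	(void)p;
	return busy ? TX_BUSY : TX_OK;
}

/* Compatibility wrapper: frees on failure, mirroring the new
 * static inline dev_direct_xmit(). */
static int xmit(struct pkt *p, int busy)
{
	int ret = xmit_core(p, busy);

	if (ret != TX_OK)
		free(p);
	return ret;
}

/* A retry-capable caller: because xmit_core() never consumes the
 * packet, no extra reference is needed for it to survive a busy
 * return -- the caller stays the single owner throughout. */
static void try_send(struct pkt *p)
{
	if (xmit_core(p, 1) == TX_BUSY) {
		printf("pkt %d busy, retrying\n", p->id);
		xmit_core(p, 0);
	}
	free(p);			/* exactly one owner, one free */
}

int main(void)
{
	struct pkt *a = calloc(1, sizeof(*a));	/* allocation assumed to succeed in this demo */
	struct pkt *b = calloc(1, sizeof(*b));

	a->id = 1;
	b->id = 2;
	xmit(a, 1);			/* wrapper frees the failed packet */
	try_send(b);			/* caller keeps ownership throughout */
	return 0;
}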
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -66,18 +66,31 @@
 	kfree(umem);
 }
 
+static void xdp_umem_release_deferred(struct work_struct *work)
+{
+	struct xdp_umem *umem = container_of(work, struct xdp_umem, work);
+
+	xdp_umem_release(umem);
+}
+
 void xdp_get_umem(struct xdp_umem *umem)
 {
 	refcount_inc(&umem->users);
 }
 
-void xdp_put_umem(struct xdp_umem *umem)
+void xdp_put_umem(struct xdp_umem *umem, bool defer_cleanup)
 {
 	if (!umem)
 		return;
 
-	if (refcount_dec_and_test(&umem->users))
-		xdp_umem_release(umem);
+	if (refcount_dec_and_test(&umem->users)) {
+		if (defer_cleanup) {
+			INIT_WORK(&umem->work, xdp_umem_release_deferred);
+			schedule_work(&umem->work);
+		} else {
+			xdp_umem_release(umem);
+		}
+	}
 }
 
 static int xdp_umem_pin_pages(struct xdp_umem *umem, unsigned long address)
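This hunk adds the defer_cleanup path: the final put either releases the umem inline or punts the release to the system workqueue via the work item added to struct xdp_umem. The usual motivation for this shape, and plausibly the one here, is that the release path may sleep while the last put can arrive from a context that must not. A userspace sketch of the same last-put-defers pattern, with a pthread standing in for schedule_work() and all names invented:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int users;
};

static void obj_release(struct obj *o)
{
	printf("released %p\n", (void *)o);
	free(o);
}

/* Worker body, playing the role of xdp_umem_release_deferred(). */
static void *obj_release_deferred(void *arg)
{
	obj_release(arg);
	return NULL;
}

/* Mirrors xdp_put_umem(): only the last put releases the object,
 * and the caller chooses whether that happens inline or deferred. */
static void obj_put(struct obj *o, bool defer_cleanup)
{
	if (atomic_fetch_sub(&o->users, 1) != 1)
		return;				/* not the last reference */

	if (defer_cleanup) {
		pthread_t worker;		/* plays the role of schedule_work() */

		pthread_create(&worker, NULL, obj_release_deferred, o);
		pthread_join(worker, NULL);	/* joined only to keep the demo deterministic */
	} else {
		obj_release(o);
	}
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	atomic_init(&o->users, 2);
	obj_put(o, false);	/* drops 2 -> 1, nothing freed */
	obj_put(o, true);	/* last put: released off the caller's path */
	return 0;
}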
diff --git a/net/xdp/xdp_umem.h b/net/xdp/xdp_umem.h
--- a/net/xdp/xdp_umem.h
+++ b/net/xdp/xdp_umem.h
@@ -9,7 +9,7 @@
 #include <net/xdp_sock_drv.h>
 
 void xdp_get_umem(struct xdp_umem *umem);
-void xdp_put_umem(struct xdp_umem *umem);
+void xdp_put_umem(struct xdp_umem *umem, bool defer_cleanup);
 struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr);
 
 #endif /* XDP_UMEM_H_ */
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -411,11 +411,7 @@
 		skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
 		skb->destructor = xsk_destruct_skb;
 
-		/* Hinder dev_direct_xmit from freeing the packet and
-		 * therefore completing it in the destructor
-		 */
-		refcount_inc(&skb->users);
-		err = dev_direct_xmit(skb, xs->queue_id);
+		err = __dev_direct_xmit(skb, xs->queue_id);
 		if (err == NETDEV_TX_BUSY) {
 			/* Tell user-space to retry the send */
 			skb->destructor = sock_wfree;
@@ -429,11 +425,9 @@
 		/* Ignore NET_XMIT_CN as packet might have been sent */
 		if (err == NET_XMIT_DROP) {
 			/* SKB completed but not sent */
-			kfree_skb(skb);
 			err = -EBUSY;
 			goto out;
 		}
 
-		consume_skb(skb);
 		sent_frame = true;
 
@@ -1147,7 +1141,7 @@
 		return;
 
 	if (!xp_put_pool(xs->pool))
-		xdp_put_umem(xs->umem);
+		xdp_put_umem(xs->umem, !xs->pool);
 
 	sk_refcnt_debug_dec(sk);
 
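The last hunk is the destruct-side half of the umem cleanup fix: when the socket never acquired a buffer pool, nothing else will drop the umem reference, so the destructor puts it directly and requests deferred cleanup (!xs->pool is true exactly in that partially-set-up case). A simplified model of the two teardown paths, with invented helpers and xp_put_pool() reduced to its NULL check:

#include <stdbool.h>
#include <stdio.h>

struct umem { int users; };
struct pool { struct umem *umem; };

static void put_umem(struct umem *u, bool defer_cleanup)
{
	if (--u->users == 0)
		printf("umem freed%s\n", defer_cleanup ? " (deferred)" : "");
}

/* Reduced model of xp_put_pool(): reports whether a pool existed
 * to take care of the umem reference. */
static bool put_pool(struct pool *p)
{
	if (!p)
		return false;
	put_umem(p->umem, false);	/* pool teardown path, as in pool release above */
	return true;
}

/* Mirrors the fixed xsk_destruct():
 *   if (!xp_put_pool(xs->pool))
 *           xdp_put_umem(xs->umem, !xs->pool);
 */
static void socket_destruct(struct pool *pool, struct umem *umem)
{
	if (!put_pool(pool))
		put_umem(umem, !pool);
}

int main(void)
{
	struct umem u1 = { .users = 1 };
	struct umem u2 = { .users = 1 };
	struct pool p = { .umem = &u2 };

	socket_destruct(NULL, &u1);	/* partial setup: deferred put */
	socket_destruct(&p, &u2);	/* full setup: pool path */
	return 0;
}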
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -185,7 +185,9 @@
 err_unreg_pool:
 	if (!force_zc)
 		err = 0; /* fallback to copy mode */
-	if (err)
+	if (err) {
 		xsk_clear_pool_at_qid(netdev, queue_id);
+		dev_put(netdev);
+	}
 	return err;
 }
@@ -242,6 +244,6 @@
 		pool->cq = NULL;
 	}
 
-	xdp_put_umem(pool->umem);
+	xdp_put_umem(pool->umem, false);
 	xp_destroy(pool);
 
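The first hunk plugs the netdev reference leak on a failed bind: the device reference taken earlier in the function must be dropped on the error exit alongside the queue-ID cleanup. The general rule, unwinding every acquired resource in reverse order on failure, in a standalone sketch (invented names, not kernel code):

#include <stdio.h>

static int dev_refs;

static void dev_hold_model(void) { dev_refs++; }	/* models the dev ref taken at bind */
static void dev_put_model(void)  { dev_refs--; }	/* models dev_put() */

static int bind_model(int fail_late)
{
	int err = 0;

	dev_hold_model();		/* resource taken before any failure below */

	if (fail_late) {
		err = -1;
		goto err_unreg_pool;
	}
	return 0;

err_unreg_pool:
	/* the patch adds the dev_put() here so the error path
	 * releases everything the success path would keep */
	dev_put_model();
	return err;
}

int main(void)
{
	bind_model(1);
	printf("dev refs after failed bind: %d (leak if nonzero)\n", dev_refs);
	return 0;
}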
diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c
--- a/tools/bpf/bpftool/btf.c
+++ b/tools/bpf/bpftool/btf.c
@@ -693,6 +693,7 @@
 		obj_node = calloc(1, sizeof(*obj_node));
 		if (!obj_node) {
 			p_err("failed to allocate memory: %s", strerror(errno));
+			err = -ENOMEM;
 			goto err_free;
 		}
 
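This closes a classic shared-error-label bug: jumping to err_free while err still holds 0 turns an allocation failure into a silent success for the caller. A userspace reduction of the pattern (not the bpftool code itself):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int build_table(size_t entries, int simulate_oom)
{
	void *obj_node = NULL;
	int err = 0;

	obj_node = simulate_oom ? NULL : calloc(entries, sizeof(void *));
	if (!obj_node) {
		fprintf(stderr, "failed to allocate memory: %s\n",
			strerror(ENOMEM));
		err = -ENOMEM;	/* the added line: without it we return 0 */
		goto err_free;
	}

	/* ... fill in the table ... */

err_free:
	free(obj_node);		/* free(NULL) is a harmless no-op */
	return err;
}

int main(void)
{
	printf("ok path:  %d\n", build_table(16, 0));
	printf("oom path: %d\n", build_table(16, 1));
	return 0;
}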