Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xsk: Move xsk_tx_list and its lock to buffer pool

Move the xsk_tx_list and the xsk_tx_list_lock from the umem to
the buffer pool. This is so that, in a later commit, we can share the
umem between multiple HW queues. There is one xsk_tx_list per
device and queue id, so it should be located in the buffer pool.

Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Björn Töpel <bjorn.topel@intel.com>
Link: https://lore.kernel.org/bpf/1598603189-32145-7-git-send-email-magnus.karlsson@intel.com

Authored by Magnus Karlsson and
committed by Daniel Borkmann
a5aa8e52 c2d3d6a4

+38 -40
+1 -3
include/net/xdp_sock.h
··· 29 29 u8 flags; 30 30 int id; 31 31 bool zc; 32 - spinlock_t xsk_tx_list_lock; 33 - struct list_head xsk_tx_list; 34 32 }; 35 33 36 34 struct xsk_map { ··· 55 57 /* Protects multiple processes in the control path */ 56 58 struct mutex mutex; 57 59 struct xsk_queue *tx ____cacheline_aligned_in_smp; 58 - struct list_head list; 60 + struct list_head tx_list; 59 61 /* Mutual exclusion of NAPI TX thread and sendmsg error paths 60 62 * in the SKB destructor callback. 61 63 */
+5
include/net/xsk_buff_pool.h
··· 52 52 void *addrs; 53 53 struct device *dev; 54 54 struct net_device *netdev; 55 + struct list_head xsk_tx_list; 56 + /* Protects modifications to the xsk_tx_list */ 57 + spinlock_t xsk_tx_list_lock; 55 58 refcount_t users; 56 59 struct work_struct work; 57 60 struct xdp_buff_xsk *free_heads[]; ··· 70 67 void xp_get_pool(struct xsk_buff_pool *pool); 71 68 void xp_put_pool(struct xsk_buff_pool *pool); 72 69 void xp_clear_dev(struct xsk_buff_pool *pool); 70 + void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs); 71 + void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs); 73 72 74 73 /* AF_XDP, and XDP core. */ 75 74 void xp_free(struct xdp_buff_xsk *xskb);
-26
net/xdp/xdp_umem.c
··· 23 23 24 24 static DEFINE_IDA(umem_ida); 25 25 26 - void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs) 27 - { 28 - unsigned long flags; 29 - 30 - if (!xs->tx) 31 - return; 32 - 33 - spin_lock_irqsave(&umem->xsk_tx_list_lock, flags); 34 - list_add_rcu(&xs->list, &umem->xsk_tx_list); 35 - spin_unlock_irqrestore(&umem->xsk_tx_list_lock, flags); 36 - } 37 - 38 - void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs) 39 - { 40 - unsigned long flags; 41 - 42 - if (!xs->tx) 43 - return; 44 - 45 - spin_lock_irqsave(&umem->xsk_tx_list_lock, flags); 46 - list_del_rcu(&xs->list); 47 - spin_unlock_irqrestore(&umem->xsk_tx_list_lock, flags); 48 - } 49 - 50 26 static void xdp_umem_unpin_pages(struct xdp_umem *umem) 51 27 { 52 28 unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true); ··· 181 205 umem->pgs = NULL; 182 206 umem->user = NULL; 183 207 umem->flags = mr->flags; 184 - INIT_LIST_HEAD(&umem->xsk_tx_list); 185 - spin_lock_init(&umem->xsk_tx_list_lock); 186 208 187 209 refcount_set(&umem->users, 1); 188 210
-2
net/xdp/xdp_umem.h
··· 10 10 11 11 void xdp_get_umem(struct xdp_umem *umem); 12 12 void xdp_put_umem(struct xdp_umem *umem); 13 - void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs); 14 - void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs); 15 13 struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr); 16 14 17 15 #endif /* XDP_UMEM_H_ */
+6 -9
net/xdp/xsk.c
··· 51 51 52 52 void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool) 53 53 { 54 - struct xdp_umem *umem = pool->umem; 55 54 struct xdp_sock *xs; 56 55 57 56 if (pool->cached_need_wakeup & XDP_WAKEUP_TX) 58 57 return; 59 58 60 59 rcu_read_lock(); 61 - list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) { 60 + list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) { 62 61 xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP; 63 62 } 64 63 rcu_read_unlock(); ··· 78 79 79 80 void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool) 80 81 { 81 - struct xdp_umem *umem = pool->umem; 82 82 struct xdp_sock *xs; 83 83 84 84 if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX)) 85 85 return; 86 86 87 87 rcu_read_lock(); 88 - list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) { 88 + list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) { 89 89 xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP; 90 90 } 91 91 rcu_read_unlock(); ··· 300 302 struct xdp_sock *xs; 301 303 302 304 rcu_read_lock(); 303 - list_for_each_entry_rcu(xs, &pool->umem->xsk_tx_list, list) { 305 + list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) { 304 306 __xskq_cons_release(xs->tx); 305 307 xs->sk.sk_write_space(&xs->sk); 306 308 } ··· 310 312 311 313 bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc) 312 314 { 313 - struct xdp_umem *umem = pool->umem; 314 315 struct xdp_sock *xs; 315 316 316 317 rcu_read_lock(); 317 - list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) { 318 + list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) { 318 319 if (!xskq_cons_peek_desc(xs->tx, desc, pool)) { 319 320 xs->tx->queue_empty_descs++; 320 321 continue; ··· 521 524 WRITE_ONCE(xs->state, XSK_UNBOUND); 522 525 523 526 /* Wait for driver to stop using the xdp socket. */ 524 - xdp_del_sk_umem(xs->umem, xs); 527 + xp_del_xsk(xs->pool, xs); 525 528 xs->dev = NULL; 526 529 synchronize_net(); 527 530 dev_put(dev);
+26
net/xdp/xsk_buff_pool.c
··· 11 11 #include "xdp_umem.h" 12 12 #include "xsk.h" 13 13 14 + void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs) 15 + { 16 + unsigned long flags; 17 + 18 + if (!xs->tx) 19 + return; 20 + 21 + spin_lock_irqsave(&pool->xsk_tx_list_lock, flags); 22 + list_add_rcu(&xs->tx_list, &pool->xsk_tx_list); 23 + spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags); 24 + } 25 + 26 + void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs) 27 + { 28 + unsigned long flags; 29 + 30 + if (!xs->tx) 31 + return; 32 + 33 + spin_lock_irqsave(&pool->xsk_tx_list_lock, flags); 34 + list_del_rcu(&xs->tx_list); 35 + spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags); 36 + } 37 + 14 38 static void xp_addr_unmap(struct xsk_buff_pool *pool) 15 39 { 16 40 vunmap(pool->addrs); ··· 87 63 XDP_PACKET_HEADROOM; 88 64 pool->umem = umem; 89 65 INIT_LIST_HEAD(&pool->free_list); 66 + INIT_LIST_HEAD(&pool->xsk_tx_list); 67 + spin_lock_init(&pool->xsk_tx_list_lock); 90 68 refcount_set(&pool->users, 1); 91 69 92 70 pool->fq = xs->fq_tmp;