Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'xsk-minor-optimizations-around-locks'

Jason Xing says:

====================
xsk: minor optimizations around locks

Two optimizations regarding xsk_tx_list_lock and cq_lock can yield a
performance increase by avoiding frequently disabling and enabling
interrupts.
====================

Link: https://patch.msgid.link/20251030000646.18859-1-kerneljasonxing@gmail.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

+21 -22
+9 -4
include/net/xsk_buff_pool.h
··· 85 85 bool unaligned; 86 86 bool tx_sw_csum; 87 87 void *addrs; 88 - /* Mutual exclusion of the completion ring in the SKB mode. Two cases to protect: 89 - * NAPI TX thread and sendmsg error paths in the SKB destructor callback and when 90 - * sockets share a single cq when the same netdev and queue id is shared. 88 + /* Mutual exclusion of the completion ring in the SKB mode. 89 + * Protect: NAPI TX thread and sendmsg error paths in the SKB 90 + * destructor callback. 91 91 */ 92 - spinlock_t cq_lock; 92 + spinlock_t cq_prod_lock; 93 + /* Mutual exclusion of the completion ring in the SKB mode. 94 + * Protect: when sockets share a single cq when the same netdev 95 + * and queue id is shared. 96 + */ 97 + spinlock_t cq_cached_prod_lock; 93 98 struct xdp_buff_xsk *free_heads[]; 94 99 }; 95 100
+6 -9
net/xdp/xsk.c
··· 548 548 549 549 static int xsk_cq_reserve_locked(struct xsk_buff_pool *pool) 550 550 { 551 - unsigned long flags; 552 551 int ret; 553 552 554 - spin_lock_irqsave(&pool->cq_lock, flags); 553 + spin_lock(&pool->cq_cached_prod_lock); 555 554 ret = xskq_prod_reserve(pool->cq); 556 - spin_unlock_irqrestore(&pool->cq_lock, flags); 555 + spin_unlock(&pool->cq_cached_prod_lock); 557 556 558 557 return ret; 559 558 } ··· 565 566 unsigned long flags; 566 567 u32 idx; 567 568 568 - spin_lock_irqsave(&pool->cq_lock, flags); 569 + spin_lock_irqsave(&pool->cq_prod_lock, flags); 569 570 idx = xskq_get_prod(pool->cq); 570 571 571 572 xskq_prod_write_addr(pool->cq, idx, ··· 582 583 } 583 584 } 584 585 xskq_prod_submit_n(pool->cq, descs_processed); 585 - spin_unlock_irqrestore(&pool->cq_lock, flags); 586 + spin_unlock_irqrestore(&pool->cq_prod_lock, flags); 586 587 } 587 588 588 589 static void xsk_cq_cancel_locked(struct xsk_buff_pool *pool, u32 n) 589 590 { 590 - unsigned long flags; 591 - 592 - spin_lock_irqsave(&pool->cq_lock, flags); 591 + spin_lock(&pool->cq_cached_prod_lock); 593 592 xskq_prod_cancel_n(pool->cq, n); 594 - spin_unlock_irqrestore(&pool->cq_lock, flags); 593 + spin_unlock(&pool->cq_cached_prod_lock); 595 594 } 596 595 597 596 static void xsk_inc_num_desc(struct sk_buff *skb)
+6 -9
net/xdp/xsk_buff_pool.c
··· 12 12 13 13 void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs) 14 14 { 15 - unsigned long flags; 16 - 17 15 if (!xs->tx) 18 16 return; 19 17 20 - spin_lock_irqsave(&pool->xsk_tx_list_lock, flags); 18 + spin_lock(&pool->xsk_tx_list_lock); 21 19 list_add_rcu(&xs->tx_list, &pool->xsk_tx_list); 22 - spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags); 20 + spin_unlock(&pool->xsk_tx_list_lock); 23 21 } 24 22 25 23 void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs) 26 24 { 27 - unsigned long flags; 28 - 29 25 if (!xs->tx) 30 26 return; 31 27 32 - spin_lock_irqsave(&pool->xsk_tx_list_lock, flags); 28 + spin_lock(&pool->xsk_tx_list_lock); 33 29 list_del_rcu(&xs->tx_list); 34 - spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags); 30 + spin_unlock(&pool->xsk_tx_list_lock); 35 31 } 36 32 37 33 void xp_destroy(struct xsk_buff_pool *pool) ··· 90 94 INIT_LIST_HEAD(&pool->xskb_list); 91 95 INIT_LIST_HEAD(&pool->xsk_tx_list); 92 96 spin_lock_init(&pool->xsk_tx_list_lock); 93 - spin_lock_init(&pool->cq_lock); 97 + spin_lock_init(&pool->cq_prod_lock); 98 + spin_lock_init(&pool->cq_cached_prod_lock); 94 99 refcount_set(&pool->users, 1); 95 100 96 101 pool->fq = xs->fq_tmp;