Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

virtio_net: enhance wake/stop tx queue statistics accounting

This patch refines and strengthens the statistics collection of TX queue
wake/stop events introduced by commit c39add9b2423 ("virtio_net: Add TX
stopped and wake counters").

Previously, the driver only recorded partial wake/stop statistics
for TX queues. Some wake events triggered by 'skb_xmit_done()' or resume
operations were not counted, which made the per-queue metrics incomplete.

Signed-off-by: Liming Wu <liming.wu@jaguarmicro.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Link: https://patch.msgid.link/20251120015320.1418-1-liming.wu@jaguarmicro.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

Authored by Liming Wu, committed by Jakub Kicinski
cfeb7cd8 8ccd1160

+26 -18
drivers/net/virtio_net.c
··· 775 775 return false; 776 776 } 777 777 778 + static void virtnet_tx_wake_queue(struct virtnet_info *vi, 779 + struct send_queue *sq) 780 + { 781 + unsigned int index = vq2txq(sq->vq); 782 + struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); 783 + 784 + if (netif_tx_queue_stopped(txq)) { 785 + u64_stats_update_begin(&sq->stats.syncp); 786 + u64_stats_inc(&sq->stats.wake); 787 + u64_stats_update_end(&sq->stats.syncp); 788 + netif_tx_wake_queue(txq); 789 + } 790 + } 791 + 778 792 static void skb_xmit_done(struct virtqueue *vq) 779 793 { 780 794 struct virtnet_info *vi = vq->vdev->priv; 781 - struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi; 795 + unsigned int index = vq2txq(vq); 796 + struct send_queue *sq = &vi->sq[index]; 797 + struct napi_struct *napi = &sq->napi; 782 798 783 799 /* Suppress further interrupts. */ 784 800 virtqueue_disable_cb(vq); ··· 802 786 if (napi->weight) 803 787 virtqueue_napi_schedule(napi, vq); 804 788 else 805 - /* We were probably waiting for more output buffers. 
*/ 806 - netif_wake_subqueue(vi->dev, vq2txq(vq)); 789 + virtnet_tx_wake_queue(vi, sq); 807 790 } 808 791 809 792 #define MRG_CTX_HEADER_SHIFT 22 ··· 3095 3080 free_old_xmit(sq, txq, !!budget); 3096 3081 } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq))); 3097 3082 3098 - if (sq->vq->num_free >= MAX_SKB_FRAGS + 2 && 3099 - netif_tx_queue_stopped(txq)) { 3100 - u64_stats_update_begin(&sq->stats.syncp); 3101 - u64_stats_inc(&sq->stats.wake); 3102 - u64_stats_update_end(&sq->stats.syncp); 3103 - netif_tx_wake_queue(txq); 3104 - } 3083 + if (sq->vq->num_free >= MAX_SKB_FRAGS + 2) 3084 + virtnet_tx_wake_queue(vi, sq); 3105 3085 3106 3086 __netif_tx_unlock(txq); 3107 3087 } ··· 3286 3276 else 3287 3277 free_old_xmit(sq, txq, !!budget); 3288 3278 3289 - if (sq->vq->num_free >= MAX_SKB_FRAGS + 2 && 3290 - netif_tx_queue_stopped(txq)) { 3291 - u64_stats_update_begin(&sq->stats.syncp); 3292 - u64_stats_inc(&sq->stats.wake); 3293 - u64_stats_update_end(&sq->stats.syncp); 3294 - netif_tx_wake_queue(txq); 3295 - } 3279 + if (sq->vq->num_free >= MAX_SKB_FRAGS + 2) 3280 + virtnet_tx_wake_queue(vi, sq); 3296 3281 3297 3282 if (xsk_done >= budget) { 3298 3283 __netif_tx_unlock(txq); ··· 3542 3537 3543 3538 /* Prevent the upper layer from trying to send packets. */ 3544 3539 netif_stop_subqueue(vi->dev, qindex); 3540 + u64_stats_update_begin(&sq->stats.syncp); 3541 + u64_stats_inc(&sq->stats.stop); 3542 + u64_stats_update_end(&sq->stats.syncp); 3545 3543 3546 3544 __netif_tx_unlock_bh(txq); 3547 3545 } ··· 3561 3553 3562 3554 __netif_tx_lock_bh(txq); 3563 3555 sq->reset = false; 3564 - netif_tx_wake_queue(txq); 3556 + virtnet_tx_wake_queue(vi, sq); 3565 3557 __netif_tx_unlock_bh(txq); 3566 3558 3567 3559 if (running)