 drivers/net/virtio_net.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1330,7 +1330,7 @@
 	return stats.packets;
 }
 
-static void free_old_xmit_skbs(struct send_queue *sq)
+static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
 {
 	struct sk_buff *skb;
 	unsigned int len;
@@ -1343,7 +1343,7 @@
 		bytes += skb->len;
 		packets++;
 
-		dev_consume_skb_any(skb);
+		napi_consume_skb(skb, in_napi);
 	}
 
 	/* Avoid overhead when no packets have been processed
@@ -1369,7 +1369,7 @@
 		return;
 
 	if (__netif_tx_trylock(txq)) {
-		free_old_xmit_skbs(sq);
+		free_old_xmit_skbs(sq, true);
 		__netif_tx_unlock(txq);
 	}
 
@@ -1445,7 +1445,7 @@
 	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
 
 	__netif_tx_lock(txq, raw_smp_processor_id());
-	free_old_xmit_skbs(sq);
+	free_old_xmit_skbs(sq, true);
 	__netif_tx_unlock(txq);
 
 	virtqueue_napi_complete(napi, sq->vq, 0);
@@ -1514,7 +1514,7 @@
 	bool use_napi = sq->napi.weight;
 
 	/* Free up any pending old buffers before queueing new ones. */
-	free_old_xmit_skbs(sq);
+	free_old_xmit_skbs(sq, false);
 
 	if (use_napi && kick)
 		virtqueue_enable_cb_delayed(sq->vq);
@@ -1557,7 +1557,7 @@
 	if (!use_napi &&
 	    unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
 		/* More just got used, free them then recheck. */
-		free_old_xmit_skbs(sq);
+		free_old_xmit_skbs(sq, false);
 		if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
 			netif_start_subqueue(dev, qnum);
 			virtqueue_disable_cb(sq->vq);
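
For context (not part of the patch): the new in_napi flag is passed straight through as napi_consume_skb()'s budget argument. The sketch below is a simplified paraphrase of that helper's contract, not a quote of net/core/skbuff.c; it only illustrates why the call sites above pass true where they run under the tx NAPI poll with the tx lock held, and false from the transmit path.

	/*
	 * Simplified paraphrase of the napi_consume_skb() contract
	 * (not verbatim kernel code):
	 */
	void napi_consume_skb(struct sk_buff *skb, int budget)
	{
		/*
		 * Zero budget means the caller is not in NAPI polling
		 * context, so fall back to the context-safe free, exactly
		 * what the old dev_consume_skb_any() call did.
		 */
		if (!budget) {
			dev_consume_skb_any(skb);
			return;
		}

		/*
		 * Non-zero budget: the skb may be queued on the per-CPU
		 * bulk free list and released in batches, which is what
		 * makes completion cheaper on the NAPI tx path.
		 * (bulk-free path elided in this sketch)
		 */
	}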