Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: cadence: macb: Implement BQL

Implement byte queue limits to allow queuing disciplines to account for
packets enqueued in the ring buffer but not yet transmitted. There is a
separate set of transmit functions for AT91 that I haven't touched since
I don't have hardware to test on.

Signed-off-by: Sean Anderson <sean.anderson@linux.dev>
Link: https://patch.msgid.link/20250220164257.96859-1-sean.anderson@linux.dev
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

Authored by Sean Anderson and committed by Jakub Kicinski.
e6a53218 3e401818

+18 -2
drivers/net/ethernet/cadence/macb_main.c
··· (in macb_tx_error_task)
 					      tx_error_task);
 	bool halt_timeout = false;
 	struct macb *bp = queue->bp;
+	u32 queue_index;
+	u32 packets = 0;
+	u32 bytes = 0;
 	struct macb_tx_skb *tx_skb;
 	struct macb_dma_desc *desc;
 	struct sk_buff *skb;
 	unsigned int tail;
 	unsigned long flags;

+	queue_index = queue - bp->queues;
 	netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
-		    (unsigned int)(queue - bp->queues),
-		    queue->tx_tail, queue->tx_head);

 	/* Prevent the queue NAPI TX poll from running, as it calls
 	 * macb_tx_complete(), which in turn may call netif_wake_subqueue().
+		    queue_index, queue->tx_tail, queue->tx_head);
··· (in macb_tx_error_task)
 						   skb->data);
 				bp->dev->stats.tx_packets++;
 				queue->stats.tx_packets++;
+				packets++;
 				bp->dev->stats.tx_bytes += skb->len;
 				queue->stats.tx_bytes += skb->len;
+				bytes += skb->len;
 			}
 		} else {
 			/* "Buffers exhausted mid-frame" errors may only happen
··· (in macb_tx_error_task)

 		macb_tx_unmap(bp, tx_skb, 0);
 	}
+
+	netdev_tx_completed_queue(netdev_get_tx_queue(bp->dev, queue_index),
+				  packets, bytes);

 	/* Set end of TX queue */
 	desc = macb_tx_desc(queue, 0);
··· (in macb_tx_complete)
 	unsigned int tail;
 	unsigned int head;
 	int packets = 0;
+	u32 bytes = 0;

 	spin_lock(&queue->tx_ptr_lock);
 	head = queue->tx_head;
··· (in macb_tx_complete)
 			bp->dev->stats.tx_bytes += skb->len;
 			queue->stats.tx_bytes += skb->len;
 			packets++;
+			bytes += skb->len;
 		}

 		/* Now we can safely release resources */
··· (in macb_tx_complete)
 			break;
 		}
 	}
+
+	netdev_tx_completed_queue(netdev_get_tx_queue(bp->dev, queue_index),
+				  packets, bytes);

 	queue->tx_tail = tail;
 	if (__netif_subqueue_stopped(bp->dev, queue_index) &&
··· (in macb_start_xmit)
 	/* Make newly initialized descriptor visible to hardware */
 	wmb();
 	skb_tx_timestamp(skb);
+	netdev_tx_sent_queue(netdev_get_tx_queue(bp->dev, queue_index),
+			     skb->len);

 	spin_lock_irq(&bp->lock);
 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
··· (in macb_close)
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
 		napi_disable(&queue->napi_rx);
 		napi_disable(&queue->napi_tx);
+		netdev_tx_reset_queue(netdev_get_tx_queue(dev, q));
 	}

 	phylink_stop(bp->phylink);