Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

gianfar: Support NAPI for TX Frames

Poll the completed TX frames in gfar_poll(). This prevents the tx
completion interrupt from interfering with processing of received
frames.

We also disable hardware rx coalescing when NAPI is enabled.

Signed-off-by: Dai Haruki <dai.haruki@freescale.com>
Signed-off-by: Andy Fleming <afleming@freescale.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>

Authored by Dai Haruki; committed by Jeff Garzik.
d080cd63 0b50d753

+53 -16
+42 -14
drivers/net/gianfar.c
··· 1250 1250 } 1251 1251 1252 1252 /* Interrupt Handler for Transmit complete */ 1253 - static irqreturn_t gfar_transmit(int irq, void *dev_id) 1253 + int gfar_clean_tx_ring(struct net_device *dev) 1254 1254 { 1255 - struct net_device *dev = (struct net_device *) dev_id; 1256 - struct gfar_private *priv = netdev_priv(dev); 1257 1255 struct txbd8 *bdp; 1256 + struct gfar_private *priv = netdev_priv(dev); 1257 + int howmany = 0; 1258 1258 1259 - /* Clear IEVENT */ 1260 - gfar_write(&priv->regs->ievent, IEVENT_TX_MASK); 1261 - 1262 - /* Lock priv */ 1263 - spin_lock(&priv->txlock); 1264 1259 bdp = priv->dirty_tx; 1265 1260 while ((bdp->status & TXBD_READY) == 0) { 1266 1261 /* If dirty_tx and cur_tx are the same, then either the */ ··· 1264 1269 if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0)) 1265 1270 break; 1266 1271 1267 - dev->stats.tx_packets++; 1272 + howmany++; 1268 1273 1269 1274 /* Deferred means some collisions occurred during transmit, */ 1270 1275 /* but we eventually sent the packet. 
*/ ··· 1273 1278 1274 1279 /* Free the sk buffer associated with this TxBD */ 1275 1280 dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]); 1281 + 1276 1282 priv->tx_skbuff[priv->skb_dirtytx] = NULL; 1277 1283 priv->skb_dirtytx = 1278 1284 (priv->skb_dirtytx + 1279 1285 1) & TX_RING_MOD_MASK(priv->tx_ring_size); 1286 + 1287 + /* Clean BD length for empty detection */ 1288 + bdp->length = 0; 1280 1289 1281 1290 /* update bdp to point at next bd in the ring (wrapping if necessary) */ 1282 1291 if (bdp->status & TXBD_WRAP) ··· 1295 1296 if (netif_queue_stopped(dev)) 1296 1297 netif_wake_queue(dev); 1297 1298 } /* while ((bdp->status & TXBD_READY) == 0) */ 1299 + 1300 + dev->stats.tx_packets += howmany; 1301 + 1302 + return howmany; 1303 + } 1304 + 1305 + /* Interrupt Handler for Transmit complete */ 1306 + static irqreturn_t gfar_transmit(int irq, void *dev_id) 1307 + { 1308 + struct net_device *dev = (struct net_device *) dev_id; 1309 + struct gfar_private *priv = netdev_priv(dev); 1310 + 1311 + /* Clear IEVENT */ 1312 + gfar_write(&priv->regs->ievent, IEVENT_TX_MASK); 1313 + 1314 + /* Lock priv */ 1315 + spin_lock(&priv->txlock); 1316 + 1317 + gfar_clean_tx_ring(dev); 1298 1318 1299 1319 /* If we are coalescing the interrupts, reset the timer */ 1300 1320 /* Otherwise, clear it */ ··· 1410 1392 unsigned long flags; 1411 1393 #endif 1412 1394 1413 - /* Clear IEVENT, so rx interrupt isn't called again 1414 - * because of this interrupt */ 1415 - gfar_write(&priv->regs->ievent, IEVENT_RX_MASK); 1416 - 1417 1395 /* support NAPI */ 1418 1396 #ifdef CONFIG_GFAR_NAPI 1397 + /* Clear IEVENT, so interrupts aren't called again 1398 + * because of the packets that have already arrived */ 1399 + gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK); 1400 + 1419 1401 if (netif_rx_schedule_prep(dev, &priv->napi)) { 1420 1402 tempval = gfar_read(&priv->regs->imask); 1421 - tempval &= IMASK_RX_DISABLED; 1403 + tempval &= IMASK_RTX_DISABLED; 1422 1404 gfar_write(&priv->regs->imask, 
tempval); 1423 1405 1424 1406 __netif_rx_schedule(dev, &priv->napi); ··· 1429 1411 gfar_read(&priv->regs->imask)); 1430 1412 } 1431 1413 #else 1414 + /* Clear IEVENT, so rx interrupt isn't called again 1415 + * because of this interrupt */ 1416 + gfar_write(&priv->regs->ievent, IEVENT_RX_MASK); 1432 1417 1433 1418 spin_lock_irqsave(&priv->rxlock, flags); 1434 1419 gfar_clean_rx_ring(dev, priv->rx_ring_size); ··· 1601 1580 struct gfar_private *priv = container_of(napi, struct gfar_private, napi); 1602 1581 struct net_device *dev = priv->dev; 1603 1582 int howmany; 1583 + unsigned long flags; 1584 + 1585 + /* If we fail to get the lock, don't bother with the TX BDs */ 1586 + if (spin_trylock_irqsave(&priv->txlock, flags)) { 1587 + gfar_clean_tx_ring(dev); 1588 + spin_unlock_irqrestore(&priv->txlock, flags); 1589 + } 1604 1590 1605 1591 howmany = gfar_clean_rx_ring(dev, budget); 1606 1592
+11 -2
drivers/net/gianfar.h
··· 126 126 #define DEFAULT_TXCOUNT 16 127 127 #define DEFAULT_TXTIME 21 128 128 129 + #define DEFAULT_RXTIME 21 130 + 131 + /* Non NAPI Case */ 132 + #ifndef CONFIG_GFAR_NAPI 129 133 #define DEFAULT_RX_COALESCE 1 130 134 #define DEFAULT_RXCOUNT 16 131 - #define DEFAULT_RXTIME 21 135 + #else 136 + #define DEFAULT_RX_COALESCE 0 137 + #define DEFAULT_RXCOUNT 0 138 + #endif /* CONFIG_GFAR_NAPI */ 132 139 133 140 #define TBIPA_VALUE 0x1f 134 141 #define MIIMCFG_INIT_VALUE 0x00000007 ··· 249 242 #define IEVENT_PERR 0x00000001 250 243 #define IEVENT_RX_MASK (IEVENT_RXB0 | IEVENT_RXF0) 251 244 #define IEVENT_TX_MASK (IEVENT_TXB | IEVENT_TXF) 245 + #define IEVENT_RTX_MASK (IEVENT_RX_MASK | IEVENT_TX_MASK) 252 246 #define IEVENT_ERR_MASK \ 253 247 (IEVENT_RXC | IEVENT_BSY | IEVENT_EBERR | IEVENT_MSRO | \ 254 248 IEVENT_BABT | IEVENT_TXC | IEVENT_TXE | IEVENT_LC \ ··· 277 269 #define IMASK_FIQ 0x00000004 278 270 #define IMASK_DPE 0x00000002 279 271 #define IMASK_PERR 0x00000001 280 - #define IMASK_RX_DISABLED ~(IMASK_RXFEN0 | IMASK_BSY) 281 272 #define IMASK_DEFAULT (IMASK_TXEEN | IMASK_TXFEN | IMASK_TXBEN | \ 282 273 IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \ 283 274 IMASK_XFUN | IMASK_RXC | IMASK_BABT | IMASK_DPE \ 284 275 | IMASK_PERR) 276 + #define IMASK_RTX_DISABLED ((~(IMASK_RXFEN0 | IMASK_TXFEN | IMASK_BSY)) \ 277 + & IMASK_DEFAULT) 285 278 286 279 /* Fifo management */ 287 280 #define FIFO_TX_THR_MASK 0x01ff