Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'sxgbe-stmmac-remove-private-tx-lock'

Lino Sanfilippo says:

====================
Remove private tx queue locks

This patch series removes unnecessary private locks in the sxgbe and
stmmac drivers.

v2:
- adjust commit message
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+12 -45
-1
drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
··· 384 384 dma_addr_t *tx_skbuff_dma; 385 385 struct sk_buff **tx_skbuff; 386 386 struct timer_list txtimer; 387 - spinlock_t tx_lock; /* lock for tx queues */ 388 387 unsigned int cur_tx; 389 388 unsigned int dirty_tx; 390 389 u32 tx_count_frames;
+6 -21
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
··· 426 426 tx_ring->dirty_tx = 0; 427 427 tx_ring->cur_tx = 0; 428 428 429 - /* initialise TX queue lock */ 430 - spin_lock_init(&tx_ring->tx_lock); 431 - 432 429 return 0; 433 430 434 431 dmamem_err: ··· 740 743 741 744 dev_txq = netdev_get_tx_queue(priv->dev, queue_no); 742 745 743 - spin_lock(&tqueue->tx_lock); 746 + __netif_tx_lock(dev_txq, smp_processor_id()); 744 747 745 748 priv->xstats.tx_clean++; 746 749 while (tqueue->dirty_tx != tqueue->cur_tx) { ··· 778 781 779 782 /* wake up queue */ 780 783 if (unlikely(netif_tx_queue_stopped(dev_txq) && 781 - sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv))) { 782 - netif_tx_lock(priv->dev); 783 - if (netif_tx_queue_stopped(dev_txq) && 784 - sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv)) { 785 - if (netif_msg_tx_done(priv)) 786 - pr_debug("%s: restart transmit\n", __func__); 787 - netif_tx_wake_queue(dev_txq); 788 - } 789 - netif_tx_unlock(priv->dev); 784 + sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv))) { 785 + if (netif_msg_tx_done(priv)) 786 + pr_debug("%s: restart transmit\n", __func__); 787 + netif_tx_wake_queue(dev_txq); 790 788 } 791 789 792 - spin_unlock(&tqueue->tx_lock); 790 + __netif_tx_unlock(dev_txq); 793 791 } 794 792 795 793 /** ··· 1296 1304 tqueue->hwts_tx_en))) 1297 1305 ctxt_desc_req = 1; 1298 1306 1299 - /* get the spinlock */ 1300 - spin_lock(&tqueue->tx_lock); 1301 - 1302 1307 if (priv->tx_path_in_lpi_mode) 1303 1308 sxgbe_disable_eee_mode(priv); 1304 1309 ··· 1305 1316 netdev_err(dev, "%s: Tx Ring is full when %d queue is awake\n", 1306 1317 __func__, txq_index); 1307 1318 } 1308 - /* release the spin lock in case of BUSY */ 1309 - spin_unlock(&tqueue->tx_lock); 1310 1319 return NETDEV_TX_BUSY; 1311 1320 } 1312 1321 ··· 1422 1435 skb_tx_timestamp(skb); 1423 1436 1424 1437 priv->hw->dma->enable_dma_transmission(priv->ioaddr, txq_index); 1425 - 1426 - spin_unlock(&tqueue->tx_lock); 1427 1438 1428 1439 return NETDEV_TX_OK; 1429 1440 }
-1
drivers/net/ethernet/stmicro/stmmac/stmmac.h
··· 64 64 dma_addr_t dma_tx_phy; 65 65 int tx_coalesce; 66 66 int hwts_tx_en; 67 - spinlock_t tx_lock; 68 67 bool tx_path_in_lpi_mode; 69 68 struct timer_list txtimer; 70 69 bool tso;
+6 -22
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 1308 1308 unsigned int bytes_compl = 0, pkts_compl = 0; 1309 1309 unsigned int entry = priv->dirty_tx; 1310 1310 1311 - spin_lock(&priv->tx_lock); 1311 + netif_tx_lock(priv->dev); 1312 1312 1313 1313 priv->xstats.tx_clean++; 1314 1314 ··· 1379 1379 netdev_completed_queue(priv->dev, pkts_compl, bytes_compl); 1380 1380 1381 1381 if (unlikely(netif_queue_stopped(priv->dev) && 1382 - stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) { 1383 - netif_tx_lock(priv->dev); 1384 - if (netif_queue_stopped(priv->dev) && 1385 - stmmac_tx_avail(priv) > STMMAC_TX_THRESH) { 1386 - netif_dbg(priv, tx_done, priv->dev, 1387 - "%s: restart transmit\n", __func__); 1388 - netif_wake_queue(priv->dev); 1389 - } 1390 - netif_tx_unlock(priv->dev); 1382 + stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) { 1383 + netif_dbg(priv, tx_done, priv->dev, 1384 + "%s: restart transmit\n", __func__); 1385 + netif_wake_queue(priv->dev); 1391 1386 } 1392 1387 1393 1388 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) { 1394 1389 stmmac_enable_eee_mode(priv); 1395 1390 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); 1396 1391 } 1397 - spin_unlock(&priv->tx_lock); 1392 + netif_tx_unlock(priv->dev); 1398 1393 } 1399 1394 1400 1395 static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv) ··· 1997 2002 u8 proto_hdr_len; 1998 2003 int i; 1999 2004 2000 - spin_lock(&priv->tx_lock); 2001 - 2002 2005 /* Compute header lengths */ 2003 2006 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 2004 2007 ··· 2010 2017 "%s: Tx Ring full when queue awake\n", 2011 2018 __func__); 2012 2019 } 2013 - spin_unlock(&priv->tx_lock); 2014 2020 return NETDEV_TX_BUSY; 2015 2021 } 2016 2022 ··· 2144 2152 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr, 2145 2153 STMMAC_CHAN0); 2146 2154 2147 - spin_unlock(&priv->tx_lock); 2148 2155 return NETDEV_TX_OK; 2149 2156 2150 2157 dma_map_err: 2151 - spin_unlock(&priv->tx_lock); 2152 2158 dev_err(priv->device, "Tx dma map failed\n"); 2153 2159 
dev_kfree_skb(skb); 2154 2160 priv->dev->stats.tx_dropped++; ··· 2178 2188 return stmmac_tso_xmit(skb, dev); 2179 2189 } 2180 2190 2181 - spin_lock(&priv->tx_lock); 2182 - 2183 2191 if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) { 2184 - spin_unlock(&priv->tx_lock); 2185 2192 if (!netif_queue_stopped(dev)) { 2186 2193 netif_stop_queue(dev); 2187 2194 /* This is a hard error, log it. */ ··· 2349 2362 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr, 2350 2363 STMMAC_CHAN0); 2351 2364 2352 - spin_unlock(&priv->tx_lock); 2353 2365 return NETDEV_TX_OK; 2354 2366 2355 2367 dma_map_err: 2356 - spin_unlock(&priv->tx_lock); 2357 2368 netdev_err(priv->dev, "Tx DMA map failed\n"); 2358 2369 dev_kfree_skb(skb); 2359 2370 priv->dev->stats.tx_dropped++; ··· 3338 3353 netif_napi_add(ndev, &priv->napi, stmmac_poll, 64); 3339 3354 3340 3355 spin_lock_init(&priv->lock); 3341 - spin_lock_init(&priv->tx_lock); 3342 3356 3343 3357 ret = register_netdev(ndev); 3344 3358 if (ret) {