Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

wan: convert drivers to netdev_tx_t

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Stephen Hemminger and committed by David S. Miller.
d71a6749 61a84108

+57 -46
+4 -3
drivers/net/wan/cosa.c
··· 279 279 static int cosa_net_open(struct net_device *d); 280 280 static int cosa_net_close(struct net_device *d); 281 281 static void cosa_net_timeout(struct net_device *d); 282 - static int cosa_net_tx(struct sk_buff *skb, struct net_device *d); 282 + static netdev_tx_t cosa_net_tx(struct sk_buff *skb, struct net_device *d); 283 283 static char *cosa_net_setup_rx(struct channel_data *channel, int size); 284 284 static int cosa_net_rx_done(struct channel_data *channel); 285 285 static int cosa_net_tx_done(struct channel_data *channel, int size); ··· 672 672 return 0; 673 673 } 674 674 675 - static int cosa_net_tx(struct sk_buff *skb, struct net_device *dev) 675 + static netdev_tx_t cosa_net_tx(struct sk_buff *skb, 676 + struct net_device *dev) 676 677 { 677 678 struct channel_data *chan = dev_to_chan(dev); 678 679 ··· 681 680 682 681 chan->tx_skb = skb; 683 682 cosa_start_tx(chan, skb->data, skb->len); 684 - return 0; 683 + return NETDEV_TX_OK; 685 684 } 686 685 687 686 static void cosa_net_timeout(struct net_device *dev)
+4 -4
drivers/net/wan/cycx_x25.c
··· 139 139 const void *daddr, const void *saddr, 140 140 unsigned len); 141 141 static int cycx_netdevice_rebuild_header(struct sk_buff *skb); 142 - static int cycx_netdevice_hard_start_xmit(struct sk_buff *skb, 143 - struct net_device *dev); 142 + static netdev_tx_t cycx_netdevice_hard_start_xmit(struct sk_buff *skb, 143 + struct net_device *dev); 144 144 145 145 static struct net_device_stats * 146 146 cycx_netdevice_get_stats(struct net_device *dev); ··· 593 593 * bottom half" (with interrupts enabled). 594 594 * 2. Setting tbusy flag will inhibit further transmit requests from the 595 595 * protocol stack and can be used for flow control with protocol layer. */ 596 - static int cycx_netdevice_hard_start_xmit(struct sk_buff *skb, 597 - struct net_device *dev) 596 + static netdev_tx_t cycx_netdevice_hard_start_xmit(struct sk_buff *skb, 597 + struct net_device *dev) 598 598 { 599 599 struct cycx_x25_channel *chan = netdev_priv(dev); 600 600 struct cycx_device *card = chan->card;
+7 -6
drivers/net/wan/dlci.c
··· 186 186 dev_kfree_skb(skb); 187 187 } 188 188 189 - static int dlci_transmit(struct sk_buff *skb, struct net_device *dev) 189 + static netdev_tx_t dlci_transmit(struct sk_buff *skb, 190 + struct net_device *dev) 190 191 { 191 192 struct dlci_local *dlp; 192 - int ret; 193 - 194 - ret = 0; 193 + netdev_tx_t ret; 195 194 196 195 if (!skb || !dev) 197 196 return NETDEV_TX_OK; ··· 199 200 200 201 netif_stop_queue(dev); 201 202 203 + /* This is hackish, overloads driver specific return values 204 + on top of normal transmit return! */ 202 205 ret = dlp->slave->netdev_ops->ndo_start_xmit(skb, dlp->slave); 203 206 switch (ret) 204 207 { ··· 208 207 dev->stats.tx_packets++; 209 208 ret = NETDEV_TX_OK; 210 209 break; 211 - case DLCI_RET_ERR: 210 + case DLCI_RET_ERR: 212 211 dev->stats.tx_errors++; 213 212 ret = NETDEV_TX_OK; 214 213 break; 215 - case DLCI_RET_DROP: 214 + case DLCI_RET_DROP: 216 215 dev->stats.tx_dropped++; 217 216 ret = NETDEV_TX_BUSY; 218 217 break;
+4 -2
drivers/net/wan/dscc4.c
··· 359 359 static int dscc4_found1(struct pci_dev *, void __iomem *ioaddr); 360 360 static int dscc4_init_one(struct pci_dev *, const struct pci_device_id *ent); 361 361 static int dscc4_open(struct net_device *); 362 - static int dscc4_start_xmit(struct sk_buff *, struct net_device *); 362 + static netdev_tx_t dscc4_start_xmit(struct sk_buff *, 363 + struct net_device *); 363 364 static int dscc4_close(struct net_device *); 364 365 static int dscc4_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 365 366 static int dscc4_init_ring(struct net_device *); ··· 1149 1148 } 1150 1149 #endif /* DSCC4_POLLING */ 1151 1150 1152 - static int dscc4_start_xmit(struct sk_buff *skb, struct net_device *dev) 1151 + static netdev_tx_t dscc4_start_xmit(struct sk_buff *skb, 1152 + struct net_device *dev) 1153 1153 { 1154 1154 struct dscc4_dev_priv *dpriv = dscc4_priv(dev); 1155 1155 struct dscc4_pci_priv *ppriv = dpriv->pci_priv;
+1 -1
drivers/net/wan/farsync.c
··· 2274 2274 port->start = 0; 2275 2275 } 2276 2276 2277 - static int 2277 + static netdev_tx_t 2278 2278 fst_start_xmit(struct sk_buff *skb, struct net_device *dev) 2279 2279 { 2280 2280 struct fst_card_info *card;
+2 -2
drivers/net/wan/hd64570.c
··· 620 620 #endif /* DEBUG_RINGS */ 621 621 622 622 623 - static int sca_xmit(struct sk_buff *skb, struct net_device *dev) 623 + static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev) 624 624 { 625 625 port_t *port = dev_to_port(dev); 626 626 card_t *card = port_to_card(port); ··· 674 674 spin_unlock_irq(&port->lock); 675 675 676 676 dev_kfree_skb(skb); 677 - return 0; 677 + return NETDEV_TX_OK; 678 678 } 679 679 680 680
+2 -2
drivers/net/wan/hd64572.c
··· 562 562 #endif /* DEBUG_RINGS */ 563 563 564 564 565 - static int sca_xmit(struct sk_buff *skb, struct net_device *dev) 565 + static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev) 566 566 { 567 567 port_t *port = dev_to_port(dev); 568 568 card_t *card = port->card; ··· 601 601 spin_unlock_irq(&port->lock); 602 602 603 603 dev_kfree_skb(skb); 604 - return 0; 604 + return NETDEV_TX_OK; 605 605 } 606 606 607 607
+1 -1
drivers/net/wan/hdlc.c
··· 66 66 return hdlc->proto->netif_rx(skb); 67 67 } 68 68 69 - int hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev) 69 + netdev_tx_t hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev) 70 70 { 71 71 hdlc_device *hdlc = dev_to_hdlc(dev); 72 72
+1 -1
drivers/net/wan/hdlc_fr.c
··· 407 407 return -EINVAL; 408 408 } 409 409 410 - static int pvc_xmit(struct sk_buff *skb, struct net_device *dev) 410 + static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev) 411 411 { 412 412 pvc_device *pvc = dev->ml_priv; 413 413
+1 -1
drivers/net/wan/hdlc_raw_eth.c
··· 25 25 26 26 static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr); 27 27 28 - static int eth_tx(struct sk_buff *skb, struct net_device *dev) 28 + static netdev_tx_t eth_tx(struct sk_buff *skb, struct net_device *dev) 29 29 { 30 30 int pad = ETH_ZLEN - skb->len; 31 31 if (pad > 0) { /* Pad the frame with zeros */
+3 -3
drivers/net/wan/hdlc_x25.c
··· 87 87 88 88 89 89 90 - static int x25_xmit(struct sk_buff *skb, struct net_device *dev) 90 + static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev) 91 91 { 92 92 int result; 93 93 ··· 98 98 skb_pull(skb, 1); 99 99 if ((result = lapb_data_request(dev, skb)) != LAPB_OK) 100 100 dev_kfree_skb(skb); 101 - return 0; 101 + return NETDEV_TX_OK; 102 102 103 103 case 1: 104 104 if ((result = lapb_connect_request(dev))!= LAPB_OK) { ··· 129 129 } 130 130 131 131 dev_kfree_skb(skb); 132 - return 0; 132 + return NETDEV_TX_OK; 133 133 } 134 134 135 135
+2 -1
drivers/net/wan/hostess_sv11.c
··· 156 156 * Passed network frames, fire them downwind. 157 157 */ 158 158 159 - static int hostess_queue_xmit(struct sk_buff *skb, struct net_device *d) 159 + static netdev_tx_t hostess_queue_xmit(struct sk_buff *skb, 160 + struct net_device *d) 160 161 { 161 162 return z8530_queue_xmit(&dev_to_sv(d)->chanA, skb); 162 163 }
+2 -1
drivers/net/wan/lapbether.c
··· 147 147 /* 148 148 * Send a LAPB frame via an ethernet interface 149 149 */ 150 - static int lapbeth_xmit(struct sk_buff *skb, struct net_device *dev) 150 + static netdev_tx_t lapbeth_xmit(struct sk_buff *skb, 151 + struct net_device *dev) 151 152 { 152 153 int err; 153 154
+5 -4
drivers/net/wan/lmc/lmc_main.c
··· 89 89 MODULE_LICENSE("GPL v2"); 90 90 91 91 92 - static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev); 92 + static netdev_tx_t lmc_start_xmit(struct sk_buff *skb, 93 + struct net_device *dev); 93 94 static int lmc_rx (struct net_device *dev); 94 95 static int lmc_open(struct net_device *dev); 95 96 static int lmc_close(struct net_device *dev); ··· 1424 1423 return IRQ_RETVAL(handled); 1425 1424 } 1426 1425 1427 - static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev) 1426 + static netdev_tx_t lmc_start_xmit(struct sk_buff *skb, 1427 + struct net_device *dev) 1428 1428 { 1429 1429 lmc_softc_t *sc = dev_to_sc(dev); 1430 1430 u32 flag; 1431 1431 int entry; 1432 - int ret = NETDEV_TX_OK; 1433 1432 unsigned long flags; 1434 1433 1435 1434 lmc_trace(dev, "lmc_start_xmit in"); ··· 1511 1510 spin_unlock_irqrestore(&sc->lmc_lock, flags); 1512 1511 1513 1512 lmc_trace(dev, "lmc_start_xmit_out"); 1514 - return ret; 1513 + return NETDEV_TX_OK; 1515 1514 } 1516 1515 1517 1516
+4 -3
drivers/net/wan/sbni.c
··· 114 114 static struct net_device *sbni_probe1(struct net_device *, unsigned long, int); 115 115 static int sbni_open( struct net_device * ); 116 116 static int sbni_close( struct net_device * ); 117 - static int sbni_start_xmit( struct sk_buff *, struct net_device * ); 117 + static netdev_tx_t sbni_start_xmit(struct sk_buff *, 118 + struct net_device * ); 118 119 static int sbni_ioctl( struct net_device *, struct ifreq *, int ); 119 120 static void set_multicast_list( struct net_device * ); 120 121 ··· 445 444 446 445 #ifdef CONFIG_SBNI_MULTILINE 447 446 448 - static int 447 + static netdev_tx_t 449 448 sbni_start_xmit( struct sk_buff *skb, struct net_device *dev ) 450 449 { 451 450 struct net_device *p; ··· 473 472 474 473 #else /* CONFIG_SBNI_MULTILINE */ 475 474 476 - static int 475 + static netdev_tx_t 477 476 sbni_start_xmit( struct sk_buff *skb, struct net_device *dev ) 478 477 { 479 478 struct net_local *nl = netdev_priv(dev);
+3 -2
drivers/net/wan/sdla.c
··· 651 651 **************************/ 652 652 653 653 /* NOTE: the DLCI driver deals with freeing the SKB!! */ 654 - static int sdla_transmit(struct sk_buff *skb, struct net_device *dev) 654 + static netdev_tx_t sdla_transmit(struct sk_buff *skb, 655 + struct net_device *dev) 655 656 { 656 657 struct frad_local *flp; 657 658 int ret, addr, accept, i; ··· 738 737 if(flp->master[i]!=NULL) 739 738 netif_wake_queue(flp->master[i]); 740 739 } 741 - return(ret); 740 + return NETDEV_TX_OK; 742 741 } 743 742 744 743 static void sdla_receive(struct net_device *dev)
+2 -1
drivers/net/wan/sealevel.c
··· 156 156 * Passed network frames, fire them downwind. 157 157 */ 158 158 159 - static int sealevel_queue_xmit(struct sk_buff *skb, struct net_device *d) 159 + static netdev_tx_t sealevel_queue_xmit(struct sk_buff *skb, 160 + struct net_device *d) 160 161 { 161 162 return z8530_queue_xmit(dev_to_chan(d)->chan, skb); 162 163 }
+1 -1
drivers/net/wan/wanxl.c
··· 268 268 269 269 270 270 271 - static int wanxl_xmit(struct sk_buff *skb, struct net_device *dev) 271 + static netdev_tx_t wanxl_xmit(struct sk_buff *skb, struct net_device *dev) 272 272 { 273 273 port_t *port = dev_to_port(dev); 274 274 desc_t *desc;
+2 -1
drivers/net/wan/x25_asy.c
··· 299 299 300 300 /* Encapsulate an IP datagram and kick it into a TTY queue. */ 301 301 302 - static int x25_asy_xmit(struct sk_buff *skb, struct net_device *dev) 302 + static netdev_tx_t x25_asy_xmit(struct sk_buff *skb, 303 + struct net_device *dev) 303 304 { 304 305 struct x25_asy *sl = netdev_priv(dev); 305 306 int err;
+4 -5
drivers/net/wan/z85230.c
··· 1727 1727 * point. 1728 1728 */ 1729 1729 1730 - int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb) 1730 + netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb) 1731 1731 { 1732 1732 unsigned long flags; 1733 1733 1734 1734 netif_stop_queue(c->netdevice); 1735 1735 if(c->tx_next_skb) 1736 - { 1737 - return 1; 1738 - } 1736 + return NETDEV_TX_BUSY; 1737 + 1739 1738 1740 1739 /* PC SPECIFIC - DMA limits */ 1741 1740 ··· 1766 1767 z8530_tx_begin(c); 1767 1768 spin_unlock_irqrestore(c->lock, flags); 1768 1769 1769 - return 0; 1770 + return NETDEV_TX_OK; 1770 1771 } 1771 1772 1772 1773 EXPORT_SYMBOL(z8530_queue_xmit);
+2 -1
drivers/net/wan/z85230.h
··· 406 406 extern int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *); 407 407 extern int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *); 408 408 extern int z8530_channel_load(struct z8530_channel *, u8 *); 409 - extern int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb); 409 + extern netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, 410 + struct sk_buff *skb); 410 411 extern void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb); 411 412 412 413