Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

netdev: convert bulk of drivers to netdev_tx_t

In a couple of cases, collapse some extra code like:
int retval = NETDEV_TX_OK;
...
return retval;
into
return NETDEV_TX_OK;

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Stephen Hemminger and committed by
David S. Miller
61357325 d0cf9c0d

+253 -173
+2 -1
drivers/ieee802154/fakehard.c
··· 257 257 return 0; 258 258 } 259 259 260 - static int ieee802154_fake_xmit(struct sk_buff *skb, struct net_device *dev) 260 + static netdev_tx_t ieee802154_fake_xmit(struct sk_buff *skb, 261 + struct net_device *dev) 261 262 { 262 263 skb->iif = dev->ifindex; 263 264 skb->dev = dev;
+2 -1
drivers/net/8139cp.c
··· 736 736 netif_wake_queue(cp->dev); 737 737 } 738 738 739 - static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev) 739 + static netdev_tx_t cp_start_xmit (struct sk_buff *skb, 740 + struct net_device *dev) 740 741 { 741 742 struct cp_private *cp = netdev_priv(dev); 742 743 unsigned entry;
+4 -3
drivers/net/8139too.c
··· 628 628 static void rtl8139_start_thread(struct rtl8139_private *tp); 629 629 static void rtl8139_tx_timeout (struct net_device *dev); 630 630 static void rtl8139_init_ring (struct net_device *dev); 631 - static int rtl8139_start_xmit (struct sk_buff *skb, 632 - struct net_device *dev); 631 + static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb, 632 + struct net_device *dev); 633 633 #ifdef CONFIG_NET_POLL_CONTROLLER 634 634 static void rtl8139_poll_controller(struct net_device *dev); 635 635 #endif ··· 1687 1687 } 1688 1688 } 1689 1689 1690 - static int rtl8139_start_xmit (struct sk_buff *skb, struct net_device *dev) 1690 + static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb, 1691 + struct net_device *dev) 1691 1692 { 1692 1693 struct rtl8139_private *tp = netdev_priv(dev); 1693 1694 void __iomem *ioaddr = tp->mmio_addr;
+2 -3
drivers/net/82596.c
··· 356 356 0x7f /* *multi IA */ }; 357 357 358 358 static int i596_open(struct net_device *dev); 359 - static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev); 359 + static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev); 360 360 static irqreturn_t i596_interrupt(int irq, void *dev_id); 361 361 static int i596_close(struct net_device *dev); 362 362 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd); ··· 1054 1054 netif_wake_queue (dev); 1055 1055 } 1056 1056 1057 - 1058 - static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev) 1057 + static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev) 1059 1058 { 1060 1059 struct i596_private *lp = dev->ml_priv; 1061 1060 struct tx_cmd *tx_cmd;
+1 -1
drivers/net/8390.c
··· 17 17 } 18 18 EXPORT_SYMBOL(ei_close); 19 19 20 - int ei_start_xmit(struct sk_buff *skb, struct net_device *dev) 20 + netdev_tx_t ei_start_xmit(struct sk_buff *skb, struct net_device *dev) 21 21 { 22 22 return __ei_start_xmit(skb, dev); 23 23 }
+2 -2
drivers/net/8390.h
··· 40 40 extern int ei_close(struct net_device *dev); 41 41 extern irqreturn_t ei_interrupt(int irq, void *dev_id); 42 42 extern void ei_tx_timeout(struct net_device *dev); 43 - extern int ei_start_xmit(struct sk_buff *skb, struct net_device *dev); 43 + extern netdev_tx_t ei_start_xmit(struct sk_buff *skb, struct net_device *dev); 44 44 extern void ei_set_multicast_list(struct net_device *dev); 45 45 extern struct net_device_stats *ei_get_stats(struct net_device *dev); 46 46 ··· 58 58 extern int eip_close(struct net_device *dev); 59 59 extern irqreturn_t eip_interrupt(int irq, void *dev_id); 60 60 extern void eip_tx_timeout(struct net_device *dev); 61 - extern int eip_start_xmit(struct sk_buff *skb, struct net_device *dev); 61 + extern netdev_tx_t eip_start_xmit(struct sk_buff *skb, struct net_device *dev); 62 62 extern void eip_set_multicast_list(struct net_device *dev); 63 63 extern struct net_device_stats *eip_get_stats(struct net_device *dev); 64 64
+1 -1
drivers/net/8390p.c
··· 22 22 } 23 23 EXPORT_SYMBOL(eip_close); 24 24 25 - int eip_start_xmit(struct sk_buff *skb, struct net_device *dev) 25 + netdev_tx_t eip_start_xmit(struct sk_buff *skb, struct net_device *dev) 26 26 { 27 27 return __ei_start_xmit(skb, dev); 28 28 }
+2 -1
drivers/net/a2065.c
··· 547 547 netif_wake_queue(dev); 548 548 } 549 549 550 - static int lance_start_xmit (struct sk_buff *skb, struct net_device *dev) 550 + static netdev_tx_t lance_start_xmit (struct sk_buff *skb, 551 + struct net_device *dev) 551 552 { 552 553 struct lance_private *lp = netdev_priv(dev); 553 554 volatile struct lance_regs *ll = lp->ll;
+2 -1
drivers/net/acenic.c
··· 2464 2464 } 2465 2465 2466 2466 2467 - static int ace_start_xmit(struct sk_buff *skb, struct net_device *dev) 2467 + static netdev_tx_t ace_start_xmit(struct sk_buff *skb, 2468 + struct net_device *dev) 2468 2469 { 2469 2470 struct ace_private *ap = netdev_priv(dev); 2470 2471 struct ace_regs __iomem *regs = ap->regs;
+2 -1
drivers/net/acenic.h
··· 775 775 static irqreturn_t ace_interrupt(int irq, void *dev_id); 776 776 static int ace_load_firmware(struct net_device *dev); 777 777 static int ace_open(struct net_device *dev); 778 - static int ace_start_xmit(struct sk_buff *skb, struct net_device *dev); 778 + static netdev_tx_t ace_start_xmit(struct sk_buff *skb, 779 + struct net_device *dev); 779 780 static int ace_close(struct net_device *dev); 780 781 static void ace_tasklet(unsigned long dev); 781 782 static void ace_dump_trace(struct ace_private *ap);
+2 -1
drivers/net/amd8111e.c
··· 1300 1300 This function will queue the transmit packets to the descriptors and will trigger the send operation. It also initializes the transmit descriptors with buffer physical address, byte count, ownership to hardware etc. 1301 1301 */ 1302 1302 1303 - static int amd8111e_start_xmit(struct sk_buff *skb, struct net_device * dev) 1303 + static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb, 1304 + struct net_device * dev) 1304 1305 { 1305 1306 struct amd8111e_priv *lp = netdev_priv(dev); 1306 1307 int tx_index;
+2 -1
drivers/net/arcnet/arcnet.c
··· 591 591 592 592 593 593 /* Called by the kernel in order to transmit a packet. */ 594 - int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev) 594 + netdev_tx_t arcnet_send_packet(struct sk_buff *skb, 595 + struct net_device *dev) 595 596 { 596 597 struct arcnet_local *lp = netdev_priv(dev); 597 598 struct archdr *pkt;
+4 -2
drivers/net/ariadne.c
··· 115 115 116 116 static int ariadne_open(struct net_device *dev); 117 117 static void ariadne_init_ring(struct net_device *dev); 118 - static int ariadne_start_xmit(struct sk_buff *skb, struct net_device *dev); 118 + static netdev_tx_t ariadne_start_xmit(struct sk_buff *skb, 119 + struct net_device *dev); 119 120 static void ariadne_tx_timeout(struct net_device *dev); 120 121 static int ariadne_rx(struct net_device *dev); 121 122 static void ariadne_reset(struct net_device *dev); ··· 590 589 } 591 590 592 591 593 - static int ariadne_start_xmit(struct sk_buff *skb, struct net_device *dev) 592 + static netdev_tx_t ariadne_start_xmit(struct sk_buff *skb, 593 + struct net_device *dev) 594 594 { 595 595 struct ariadne_private *priv = netdev_priv(dev); 596 596 volatile struct Am79C960 *lance = (struct Am79C960*)dev->base_addr;
+4 -2
drivers/net/at1700.c
··· 159 159 static int at1700_probe1(struct net_device *dev, int ioaddr); 160 160 static int read_eeprom(long ioaddr, int location); 161 161 static int net_open(struct net_device *dev); 162 - static int net_send_packet(struct sk_buff *skb, struct net_device *dev); 162 + static netdev_tx_t net_send_packet(struct sk_buff *skb, 163 + struct net_device *dev); 163 164 static irqreturn_t net_interrupt(int irq, void *dev_id); 164 165 static void net_rx(struct net_device *dev); 165 166 static int net_close(struct net_device *dev); ··· 596 595 } 597 596 598 597 599 - static int net_send_packet (struct sk_buff *skb, struct net_device *dev) 598 + static netdev_tx_t net_send_packet (struct sk_buff *skb, 599 + struct net_device *dev) 600 600 { 601 601 struct net_local *lp = netdev_priv(dev); 602 602 int ioaddr = dev->base_addr;
+2 -1
drivers/net/atl1c/atl1c_main.c
··· 2055 2055 AT_WRITE_REG(&adapter->hw, REG_MB_PRIO_PROD_IDX, prod_data); 2056 2056 } 2057 2057 2058 - static int atl1c_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 2058 + static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb, 2059 + struct net_device *netdev) 2059 2060 { 2060 2061 struct atl1c_adapter *adapter = netdev_priv(netdev); 2061 2062 unsigned long flags;
+2 -1
drivers/net/atl1e/atl1e_main.c
··· 1839 1839 AT_WRITE_REG(&adapter->hw, REG_MB_TPD_PROD_IDX, tx_ring->next_to_use); 1840 1840 } 1841 1841 1842 - static int atl1e_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 1842 + static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb, 1843 + struct net_device *netdev) 1843 1844 { 1844 1845 struct atl1e_adapter *adapter = netdev_priv(netdev); 1845 1846 unsigned long flags;
+2 -1
drivers/net/atlx/atl1.c
··· 2349 2349 atomic_set(&tpd_ring->next_to_use, next_to_use); 2350 2350 } 2351 2351 2352 - static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 2352 + static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb, 2353 + struct net_device *netdev) 2353 2354 { 2354 2355 struct atl1_adapter *adapter = netdev_priv(netdev); 2355 2356 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
+2 -1
drivers/net/atlx/atl2.c
··· 821 821 (int) (txd_read_ptr - adapter->txd_write_ptr - 1); 822 822 } 823 823 824 - static int atl2_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 824 + static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb, 825 + struct net_device *netdev) 825 826 { 826 827 struct atl2_adapter *adapter = netdev_priv(netdev); 827 828 struct tx_pkt_header *txph;
+4 -2
drivers/net/atp.c
··· 199 199 static void hardware_init(struct net_device *dev); 200 200 static void write_packet(long ioaddr, int length, unsigned char *packet, int pad, int mode); 201 201 static void trigger_send(long ioaddr, int length); 202 - static int atp_send_packet(struct sk_buff *skb, struct net_device *dev); 202 + static netdev_tx_t atp_send_packet(struct sk_buff *skb, 203 + struct net_device *dev); 203 204 static irqreturn_t atp_interrupt(int irq, void *dev_id); 204 205 static void net_rx(struct net_device *dev); 205 206 static void read_block(long ioaddr, int length, unsigned char *buffer, int data_mode); ··· 553 552 dev->stats.tx_errors++; 554 553 } 555 554 556 - static int atp_send_packet(struct sk_buff *skb, struct net_device *dev) 555 + static netdev_tx_t atp_send_packet(struct sk_buff *skb, 556 + struct net_device *dev) 557 557 { 558 558 struct net_local *lp = netdev_priv(dev); 559 559 long ioaddr = dev->base_addr;
+1 -1
drivers/net/au1000_eth.c
··· 937 937 /* 938 938 * Au1000 transmit routine. 939 939 */ 940 - static int au1000_tx(struct sk_buff *skb, struct net_device *dev) 940 + static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev) 941 941 { 942 942 struct au1000_private *aup = netdev_priv(dev); 943 943 struct net_device_stats *ps = &dev->stats;
+1 -1
drivers/net/b44.c
··· 946 946 netif_wake_queue(dev); 947 947 } 948 948 949 - static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev) 949 + static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev) 950 950 { 951 951 struct b44 *bp = netdev_priv(dev); 952 952 int rc = NETDEV_TX_OK;
+2 -1
drivers/net/benet/be_main.c
··· 427 427 return copied; 428 428 } 429 429 430 - static int be_xmit(struct sk_buff *skb, struct net_device *netdev) 430 + static netdev_tx_t be_xmit(struct sk_buff *skb, 431 + struct net_device *netdev) 431 432 { 432 433 struct be_adapter *adapter = netdev_priv(netdev); 433 434 struct be_tx_obj *tx_obj = &adapter->tx_obj;
+1 -1
drivers/net/bnx2.c
··· 6283 6283 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call 6284 6284 * netif_wake_queue(). 6285 6285 */ 6286 - static int 6286 + static netdev_tx_t 6287 6287 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) 6288 6288 { 6289 6289 struct bnx2 *bp = netdev_priv(dev);
+1 -1
drivers/net/bnx2x_main.c
··· 10936 10936 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call 10937 10937 * netif_wake_queue() 10938 10938 */ 10939 - static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) 10939 + static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) 10940 10940 { 10941 10941 struct bnx2x *bp = netdev_priv(dev); 10942 10942 struct bnx2x_fastpath *fp, *fp_stat;
+2 -1
drivers/net/can/sja1000/sja1000.c
··· 238 238 * xx xx xx xx ff ll 00 11 22 33 44 55 66 77 239 239 * [ can-id ] [flags] [len] [can data (up to 8 bytes] 240 240 */ 241 - static int sja1000_start_xmit(struct sk_buff *skb, struct net_device *dev) 241 + static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb, 242 + struct net_device *dev) 242 243 { 243 244 struct sja1000_priv *priv = netdev_priv(dev); 244 245 struct net_device_stats *stats = &dev->stats;
+1 -1
drivers/net/cassini.c
··· 2918 2918 return 0; 2919 2919 } 2920 2920 2921 - static int cas_start_xmit(struct sk_buff *skb, struct net_device *dev) 2921 + static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev) 2922 2922 { 2923 2923 struct cas *cp = netdev_priv(dev); 2924 2924
+1 -1
drivers/net/chelsio/sge.c
··· 1776 1776 /* 1777 1777 * Adds the CPL header to the sk_buff and passes it to t1_sge_tx. 1778 1778 */ 1779 - int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) 1779 + netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev) 1780 1780 { 1781 1781 struct adapter *adapter = dev->ml_priv; 1782 1782 struct sge *sge = adapter->sge;
+1 -1
drivers/net/chelsio/sge.h
··· 78 78 irqreturn_t t1_interrupt(int irq, void *cookie); 79 79 int t1_poll(struct napi_struct *, int); 80 80 81 - int t1_start_xmit(struct sk_buff *skb, struct net_device *dev); 81 + netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev); 82 82 void t1_set_vlan_accel(struct adapter *adapter, int on_off); 83 83 void t1_sge_start(struct sge *); 84 84 void t1_sge_stop(struct sge *);
+2 -2
drivers/net/cs89x0.c
··· 246 246 247 247 static int cs89x0_probe1(struct net_device *dev, int ioaddr, int modular); 248 248 static int net_open(struct net_device *dev); 249 - static int net_send_packet(struct sk_buff *skb, struct net_device *dev); 249 + static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev); 250 250 static irqreturn_t net_interrupt(int irq, void *dev_id); 251 251 static void set_multicast_list(struct net_device *dev); 252 252 static void net_timeout(struct net_device *dev); ··· 1518 1518 netif_wake_queue(dev); 1519 1519 } 1520 1520 1521 - static int net_send_packet(struct sk_buff *skb, struct net_device *dev) 1521 + static netdev_tx_t net_send_packet(struct sk_buff *skb,struct net_device *dev) 1522 1522 { 1523 1523 struct net_local *lp = netdev_priv(dev); 1524 1524 unsigned long flags;
+1 -1
drivers/net/cxgb3/adapter.h
··· 309 309 void t3_free_sge_resources(struct adapter *adap); 310 310 void t3_sge_err_intr_handler(struct adapter *adapter); 311 311 irq_handler_t t3_intr_handler(struct adapter *adap, int polling); 312 - int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev); 312 + netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev); 313 313 int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb); 314 314 void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p); 315 315 int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
+1 -1
drivers/net/cxgb3/sge.c
··· 1216 1216 * 1217 1217 * Add a packet to an SGE Tx queue. Runs with softirqs disabled. 1218 1218 */ 1219 - int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) 1219 + netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) 1220 1220 { 1221 1221 int qidx; 1222 1222 unsigned int ndesc, pidx, credits, gen, compl;
+4 -6
drivers/net/defxx.c
··· 300 300 static void dfx_rcv_queue_process(DFX_board_t *bp); 301 301 static void dfx_rcv_flush(DFX_board_t *bp); 302 302 303 - static int dfx_xmt_queue_pkt(struct sk_buff *skb, struct net_device *dev); 303 + static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb, 304 + struct net_device *dev); 304 305 static int dfx_xmt_done(DFX_board_t *bp); 305 306 static void dfx_xmt_flush(DFX_board_t *bp); 306 307 ··· 3189 3188 * None 3190 3189 */ 3191 3190 3192 - static int dfx_xmt_queue_pkt( 3193 - struct sk_buff *skb, 3194 - struct net_device *dev 3195 - ) 3196 - 3191 + static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb, 3192 + struct net_device *dev) 3197 3193 { 3198 3194 DFX_board_t *bp = netdev_priv(dev); 3199 3195 u8 prod; /* local transmit producer index */
+4 -2
drivers/net/depca.c
··· 516 516 ** Public Functions 517 517 */ 518 518 static int depca_open(struct net_device *dev); 519 - static int depca_start_xmit(struct sk_buff *skb, struct net_device *dev); 519 + static netdev_tx_t depca_start_xmit(struct sk_buff *skb, 520 + struct net_device *dev); 520 521 static irqreturn_t depca_interrupt(int irq, void *dev_id); 521 522 static int depca_close(struct net_device *dev); 522 523 static int depca_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); ··· 929 928 /* 930 929 ** Writes a socket buffer to TX descriptor ring and starts transmission 931 930 */ 932 - static int depca_start_xmit(struct sk_buff *skb, struct net_device *dev) 931 + static netdev_tx_t depca_start_xmit(struct sk_buff *skb, 932 + struct net_device *dev) 933 933 { 934 934 struct depca_private *lp = netdev_priv(dev); 935 935 u_long ioaddr = dev->base_addr;
+2 -2
drivers/net/dl2k.c
··· 59 59 static void rio_timer (unsigned long data); 60 60 static void rio_tx_timeout (struct net_device *dev); 61 61 static void alloc_list (struct net_device *dev); 62 - static int start_xmit (struct sk_buff *skb, struct net_device *dev); 62 + static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev); 63 63 static irqreturn_t rio_interrupt (int irq, void *dev_instance); 64 64 static void rio_free_tx (struct net_device *dev, int irq); 65 65 static void tx_error (struct net_device *dev, int tx_status); ··· 600 600 return; 601 601 } 602 602 603 - static int 603 + static netdev_tx_t 604 604 start_xmit (struct sk_buff *skb, struct net_device *dev) 605 605 { 606 606 struct netdev_private *np = netdev_priv(dev);
+1 -1
drivers/net/dnet.c
··· 541 541 #define dnet_print_skb(skb) do {} while (0) 542 542 #endif 543 543 544 - static int dnet_start_xmit(struct sk_buff *skb, struct net_device *dev) 544 + static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev) 545 545 { 546 546 547 547 struct dnet *bp = netdev_priv(dev);
+4 -2
drivers/net/eepro.c
··· 309 309 310 310 static int eepro_probe1(struct net_device *dev, int autoprobe); 311 311 static int eepro_open(struct net_device *dev); 312 - static int eepro_send_packet(struct sk_buff *skb, struct net_device *dev); 312 + static netdev_tx_t eepro_send_packet(struct sk_buff *skb, 313 + struct net_device *dev); 313 314 static irqreturn_t eepro_interrupt(int irq, void *dev_id); 314 315 static void eepro_rx(struct net_device *dev); 315 316 static void eepro_transmit_interrupt(struct net_device *dev); ··· 1134 1133 } 1135 1134 1136 1135 1137 - static int eepro_send_packet(struct sk_buff *skb, struct net_device *dev) 1136 + static netdev_tx_t eepro_send_packet(struct sk_buff *skb, 1137 + struct net_device *dev) 1138 1138 { 1139 1139 struct eepro_local *lp = netdev_priv(dev); 1140 1140 unsigned long flags;
+3 -2
drivers/net/eexpress.c
··· 246 246 static int eexp_open(struct net_device *dev); 247 247 static int eexp_close(struct net_device *dev); 248 248 static void eexp_timeout(struct net_device *dev); 249 - static int eexp_xmit(struct sk_buff *buf, struct net_device *dev); 249 + static netdev_tx_t eexp_xmit(struct sk_buff *buf, 250 + struct net_device *dev); 250 251 251 252 static irqreturn_t eexp_irq(int irq, void *dev_addr); 252 253 static void eexp_set_multicast(struct net_device *dev); ··· 651 650 * Called to transmit a packet, or to allow us to right ourselves 652 651 * if the kernel thinks we've died. 653 652 */ 654 - static int eexp_xmit(struct sk_buff *buf, struct net_device *dev) 653 + static netdev_tx_t eexp_xmit(struct sk_buff *buf, struct net_device *dev) 655 654 { 656 655 short length = buf->len; 657 656 #ifdef CONFIG_SMP
+2 -1
drivers/net/enc28j60.c
··· 1276 1276 locked_reg_bfset(priv, ECON1, ECON1_TXRTS); 1277 1277 } 1278 1278 1279 - static int enc28j60_send_packet(struct sk_buff *skb, struct net_device *dev) 1279 + static netdev_tx_t enc28j60_send_packet(struct sk_buff *skb, 1280 + struct net_device *dev) 1280 1281 { 1281 1282 struct enc28j60_net *priv = netdev_priv(dev); 1282 1283
+2 -1
drivers/net/enic/enic_main.c
··· 622 622 } 623 623 624 624 /* netif_tx_lock held, process context with BHs disabled, or BH */ 625 - static int enic_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev) 625 + static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, 626 + struct net_device *netdev) 626 627 { 627 628 struct enic *enic = netdev_priv(netdev); 628 629 struct vnic_wq *wq = &enic->wq[0];
+3 -2
drivers/net/epic100.c
··· 298 298 static void epic_timer(unsigned long data); 299 299 static void epic_tx_timeout(struct net_device *dev); 300 300 static void epic_init_ring(struct net_device *dev); 301 - static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev); 301 + static netdev_tx_t epic_start_xmit(struct sk_buff *skb, 302 + struct net_device *dev); 302 303 static int epic_rx(struct net_device *dev, int budget); 303 304 static int epic_poll(struct napi_struct *napi, int budget); 304 305 static irqreturn_t epic_interrupt(int irq, void *dev_instance); ··· 962 961 return; 963 962 } 964 963 965 - static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev) 964 + static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev) 966 965 { 967 966 struct epic_private *ep = netdev_priv(dev); 968 967 int entry, free_count;
+2 -2
drivers/net/eth16i.c
··· 405 405 static void eth16i_eeprom_cmd(int ioaddr, unsigned char command); 406 406 static int eth16i_open(struct net_device *dev); 407 407 static int eth16i_close(struct net_device *dev); 408 - static int eth16i_tx(struct sk_buff *skb, struct net_device *dev); 408 + static netdev_tx_t eth16i_tx(struct sk_buff *skb, struct net_device *dev); 409 409 static void eth16i_rx(struct net_device *dev); 410 410 static void eth16i_timeout(struct net_device *dev); 411 411 static irqreturn_t eth16i_interrupt(int irq, void *dev_id); ··· 1053 1053 netif_wake_queue(dev); 1054 1054 } 1055 1055 1056 - static int eth16i_tx(struct sk_buff *skb, struct net_device *dev) 1056 + static netdev_tx_t eth16i_tx(struct sk_buff *skb, struct net_device *dev) 1057 1057 { 1058 1058 struct eth16i_local *lp = netdev_priv(dev); 1059 1059 int ioaddr = dev->base_addr;
+1 -1
drivers/net/ethoc.c
··· 802 802 return &priv->stats; 803 803 } 804 804 805 - static int ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev) 805 + static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev) 806 806 { 807 807 struct ethoc *priv = netdev_priv(dev); 808 808 struct ethoc_bd bd;
+2 -2
drivers/net/ewrk3.c
··· 298 298 ** Public Functions 299 299 */ 300 300 static int ewrk3_open(struct net_device *dev); 301 - static int ewrk3_queue_pkt(struct sk_buff *skb, struct net_device *dev); 301 + static netdev_tx_t ewrk3_queue_pkt(struct sk_buff *skb, struct net_device *dev); 302 302 static irqreturn_t ewrk3_interrupt(int irq, void *dev_id); 303 303 static int ewrk3_close(struct net_device *dev); 304 304 static void set_multicast_list(struct net_device *dev); ··· 764 764 /* 765 765 ** Writes a socket buffer to the free page queue 766 766 */ 767 - static int ewrk3_queue_pkt (struct sk_buff *skb, struct net_device *dev) 767 + static netdev_tx_t ewrk3_queue_pkt(struct sk_buff *skb, struct net_device *dev) 768 768 { 769 769 struct ewrk3_private *lp = netdev_priv(dev); 770 770 u_long iobase = dev->base_addr;
+2 -2
drivers/net/fealnx.c
··· 433 433 static void reset_timer(unsigned long data); 434 434 static void fealnx_tx_timeout(struct net_device *dev); 435 435 static void init_ring(struct net_device *dev); 436 - static int start_tx(struct sk_buff *skb, struct net_device *dev); 436 + static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev); 437 437 static irqreturn_t intr_handler(int irq, void *dev_instance); 438 438 static int netdev_rx(struct net_device *dev); 439 439 static void set_rx_mode(struct net_device *dev); ··· 1305 1305 } 1306 1306 1307 1307 1308 - static int start_tx(struct sk_buff *skb, struct net_device *dev) 1308 + static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) 1309 1309 { 1310 1310 struct netdev_private *np = netdev_priv(dev); 1311 1311 unsigned long flags;
+3 -2
drivers/net/forcedeth.c
··· 2137 2137 * nv_start_xmit: dev->hard_start_xmit function 2138 2138 * Called with netif_tx_lock held. 2139 2139 */ 2140 - static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) 2140 + static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) 2141 2141 { 2142 2142 struct fe_priv *np = netdev_priv(dev); 2143 2143 u32 tx_flags = 0; ··· 2257 2257 return NETDEV_TX_OK; 2258 2258 } 2259 2259 2260 - static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev) 2260 + static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, 2261 + struct net_device *dev) 2261 2262 { 2262 2263 struct fe_priv *np = netdev_priv(dev); 2263 2264 u32 tx_flags = 0;
+4 -2
drivers/net/hamachi.c
··· 557 557 static void hamachi_timer(unsigned long data); 558 558 static void hamachi_tx_timeout(struct net_device *dev); 559 559 static void hamachi_init_ring(struct net_device *dev); 560 - static int hamachi_start_xmit(struct sk_buff *skb, struct net_device *dev); 560 + static netdev_tx_t hamachi_start_xmit(struct sk_buff *skb, 561 + struct net_device *dev); 561 562 static irqreturn_t hamachi_interrupt(int irq, void *dev_instance); 562 563 static int hamachi_rx(struct net_device *dev); 563 564 static inline int hamachi_tx(struct net_device *dev); ··· 1264 1263 } while (0) 1265 1264 #endif 1266 1265 1267 - static int hamachi_start_xmit(struct sk_buff *skb, struct net_device *dev) 1266 + static netdev_tx_t hamachi_start_xmit(struct sk_buff *skb, 1267 + struct net_device *dev) 1268 1268 { 1269 1269 struct hamachi_private *hmp = netdev_priv(dev); 1270 1270 unsigned entry;
+8 -5
drivers/net/hp100.c
··· 240 240 241 241 static int hp100_open(struct net_device *dev); 242 242 static int hp100_close(struct net_device *dev); 243 - static int hp100_start_xmit(struct sk_buff *skb, struct net_device *dev); 244 - static int hp100_start_xmit_bm(struct sk_buff *skb, 245 - struct net_device *dev); 243 + static netdev_tx_t hp100_start_xmit(struct sk_buff *skb, 244 + struct net_device *dev); 245 + static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb, 246 + struct net_device *dev); 246 247 static void hp100_rx(struct net_device *dev); 247 248 static struct net_device_stats *hp100_get_stats(struct net_device *dev); 248 249 static void hp100_misc_interrupt(struct net_device *dev); ··· 1484 1483 */ 1485 1484 1486 1485 /* tx function for busmaster mode */ 1487 - static int hp100_start_xmit_bm(struct sk_buff *skb, struct net_device *dev) 1486 + static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb, 1487 + struct net_device *dev) 1488 1488 { 1489 1489 unsigned long flags; 1490 1490 int i, ok_flag; ··· 1637 1635 } 1638 1636 1639 1637 /* tx function for slave modes */ 1640 - static int hp100_start_xmit(struct sk_buff *skb, struct net_device *dev) 1638 + static netdev_tx_t hp100_start_xmit(struct sk_buff *skb, 1639 + struct net_device *dev) 1641 1640 { 1642 1641 unsigned long flags; 1643 1642 int i, ok_flag;
+1 -1
drivers/net/ibmlana.c
··· 812 812 813 813 /* transmit a block. */ 814 814 815 - static int ibmlana_tx(struct sk_buff *skb, struct net_device *dev) 815 + static netdev_tx_t ibmlana_tx(struct sk_buff *skb, struct net_device *dev) 816 816 { 817 817 ibmlana_priv *priv = netdev_priv(dev); 818 818 int tmplen, addr;
+2 -1
drivers/net/ibmveth.c
··· 887 887 888 888 #define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1)) 889 889 890 - static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev) 890 + static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb, 891 + struct net_device *netdev) 891 892 { 892 893 struct ibmveth_adapter *adapter = netdev_priv(netdev); 893 894 union ibmveth_buf_desc desc;
+2 -1
drivers/net/ipg.c
··· 1858 1858 return 0; 1859 1859 } 1860 1860 1861 - static int ipg_nic_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) 1861 + static netdev_tx_t ipg_nic_hard_start_xmit(struct sk_buff *skb, 1862 + struct net_device *dev) 1862 1863 { 1863 1864 struct ipg_nic_private *sp = netdev_priv(dev); 1864 1865 void __iomem *ioaddr = sp->ioaddr;
+1 -1
drivers/net/jme.c
··· 1931 1931 * This function is already protected by netif_tx_lock() 1932 1932 */ 1933 1933 1934 - static int 1934 + static netdev_tx_t 1935 1935 jme_start_xmit(struct sk_buff *skb, struct net_device *netdev) 1936 1936 { 1937 1937 struct jme_adapter *jme = netdev_priv(netdev);
+2 -1
drivers/net/ks8842.c
··· 551 551 return 0; 552 552 } 553 553 554 - static int ks8842_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 554 + static netdev_tx_t ks8842_xmit_frame(struct sk_buff *skb, 555 + struct net_device *netdev) 555 556 { 556 557 int ret; 557 558 struct ks8842_adapter *adapter = netdev_priv(netdev);
+3 -2
drivers/net/ks8851.c
··· 868 868 * and secondly so we can round up more than one packet to transmit which 869 869 * means we can try and avoid generating too many transmit done interrupts. 870 870 */ 871 - static int ks8851_start_xmit(struct sk_buff *skb, struct net_device *dev) 871 + static netdev_tx_t ks8851_start_xmit(struct sk_buff *skb, 872 + struct net_device *dev) 872 873 { 873 874 struct ks8851_net *ks = netdev_priv(dev); 874 875 unsigned needed = calc_txlen(skb->len); 875 - int ret = NETDEV_TX_OK; 876 + netdev_tx_t ret = NETDEV_TX_OK; 876 877 877 878 if (netif_msg_tx_queued(ks)) 878 879 ks_dbg(ks, "%s: skb %p, %d@%p\n", __func__,
+4 -2
drivers/net/lance.c
··· 300 300 301 301 static int lance_open(struct net_device *dev); 302 302 static void lance_init_ring(struct net_device *dev, gfp_t mode); 303 - static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev); 303 + static netdev_tx_t lance_start_xmit(struct sk_buff *skb, 304 + struct net_device *dev); 304 305 static int lance_rx(struct net_device *dev); 305 306 static irqreturn_t lance_interrupt(int irq, void *dev_id); 306 307 static int lance_close(struct net_device *dev); ··· 950 949 } 951 950 952 951 953 - static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) 952 + static netdev_tx_t lance_start_xmit(struct sk_buff *skb, 953 + struct net_device *dev) 954 954 { 955 955 struct lance_private *lp = dev->ml_priv; 956 956 int ioaddr = dev->base_addr;
+2 -1
drivers/net/lib8390.c
··· 299 299 * Sends a packet to an 8390 network device. 300 300 */ 301 301 302 - static int __ei_start_xmit(struct sk_buff *skb, struct net_device *dev) 302 + static netdev_tx_t __ei_start_xmit(struct sk_buff *skb, 303 + struct net_device *dev) 303 304 { 304 305 unsigned long e8390_base = dev->base_addr; 305 306 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+2 -1
drivers/net/loopback.c
··· 69 69 * The higher levels take care of making this non-reentrant (it's 70 70 * called with bh's disabled). 71 71 */ 72 - static int loopback_xmit(struct sk_buff *skb, struct net_device *dev) 72 + static netdev_tx_t loopback_xmit(struct sk_buff *skb, 73 + struct net_device *dev) 73 74 { 74 75 struct pcpu_lstats *pcpu_lstats, *lb_stats; 75 76 int len;
+2 -2
drivers/net/lp486e.c
··· 377 377 }; 378 378 379 379 static int i596_open(struct net_device *dev); 380 - static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev); 380 + static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev); 381 381 static irqreturn_t i596_interrupt(int irq, void *dev_id); 382 382 static int i596_close(struct net_device *dev); 383 383 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd); ··· 863 863 return 0; /* Always succeed */ 864 864 } 865 865 866 - static int i596_start_xmit (struct sk_buff *skb, struct net_device *dev) { 866 + static netdev_tx_t i596_start_xmit (struct sk_buff *skb, struct net_device *dev) { 867 867 struct tx_cmd *tx_cmd; 868 868 short length; 869 869
+1 -1
drivers/net/mlx4/en_tx.c
··· 588 588 return skb_tx_hash(dev, skb); 589 589 } 590 590 591 - int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) 591 + netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) 592 592 { 593 593 struct mlx4_en_priv *priv = netdev_priv(dev); 594 594 struct mlx4_en_dev *mdev = priv->mdev;
+1 -1
drivers/net/mlx4/mlx4_en.h
··· 518 518 void mlx4_en_poll_tx_cq(unsigned long data); 519 519 void mlx4_en_tx_irq(struct mlx4_cq *mcq); 520 520 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb); 521 - int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); 521 + netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); 522 522 523 523 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, 524 524 u32 size, u16 stride);
+7 -4
drivers/net/myri10ge/myri10ge.c
··· 360 360 #define myri10ge_pio_copy(to,from,size) __iowrite64_copy(to,from,size/8) 361 361 362 362 static void myri10ge_set_multicast_list(struct net_device *dev); 363 - static int myri10ge_sw_tso(struct sk_buff *skb, struct net_device *dev); 363 + static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb, 364 + struct net_device *dev); 364 365 365 366 static inline void put_be32(__be32 val, __be32 __iomem * p) 366 367 { ··· 2657 2656 * it and try again. 2658 2657 */ 2659 2658 2660 - static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev) 2659 + static netdev_tx_t myri10ge_xmit(struct sk_buff *skb, 2660 + struct net_device *dev) 2661 2661 { 2662 2662 struct myri10ge_priv *mgp = netdev_priv(dev); 2663 2663 struct myri10ge_slice_state *ss; ··· 2949 2947 2950 2948 } 2951 2949 2952 - static int myri10ge_sw_tso(struct sk_buff *skb, struct net_device *dev) 2950 + static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb, 2951 + struct net_device *dev) 2953 2952 { 2954 2953 struct sk_buff *segs, *curr; 2955 2954 struct myri10ge_priv *mgp = netdev_priv(dev); 2956 2955 struct myri10ge_slice_state *ss; 2957 - int status; 2956 + netdev_tx_t status; 2958 2957 2959 2958 segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6); 2960 2959 if (IS_ERR(segs))
+2 -2
drivers/net/natsemi.c
··· 621 621 static void free_ring(struct net_device *dev); 622 622 static void reinit_ring(struct net_device *dev); 623 623 static void init_registers(struct net_device *dev); 624 - static int start_tx(struct sk_buff *skb, struct net_device *dev); 624 + static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev); 625 625 static irqreturn_t intr_handler(int irq, void *dev_instance); 626 626 static void netdev_error(struct net_device *dev, int intr_status); 627 627 static int natsemi_poll(struct napi_struct *napi, int budget); ··· 2079 2079 reinit_rx(dev); 2080 2080 } 2081 2081 2082 - static int start_tx(struct sk_buff *skb, struct net_device *dev) 2082 + static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) 2083 2083 { 2084 2084 struct netdev_private *np = netdev_priv(dev); 2085 2085 void __iomem * ioaddr = ns_ioaddr(dev);
+3 -2
drivers/net/netxen/netxen_nic_main.c
··· 63 63 static void __devexit netxen_nic_remove(struct pci_dev *pdev); 64 64 static int netxen_nic_open(struct net_device *netdev); 65 65 static int netxen_nic_close(struct net_device *netdev); 66 - static int netxen_nic_xmit_frame(struct sk_buff *, struct net_device *); 66 + static netdev_tx_t netxen_nic_xmit_frame(struct sk_buff *, 67 + struct net_device *); 67 68 static void netxen_tx_timeout(struct net_device *netdev); 68 69 static void netxen_reset_task(struct work_struct *work); 69 70 static void netxen_watchdog(unsigned long); ··· 1600 1599 desc[2] = 0ULL; 1601 1600 } 1602 1601 1603 - static int 1602 + static netdev_tx_t 1604 1603 netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 1605 1604 { 1606 1605 struct netxen_adapter *adapter = netdev_priv(netdev);
+3 -2
drivers/net/ni52.c
··· 170 170 static irqreturn_t ni52_interrupt(int irq, void *dev_id); 171 171 static int ni52_open(struct net_device *dev); 172 172 static int ni52_close(struct net_device *dev); 173 - static int ni52_send_packet(struct sk_buff *, struct net_device *); 173 + static netdev_tx_t ni52_send_packet(struct sk_buff *, struct net_device *); 174 174 static struct net_device_stats *ni52_get_stats(struct net_device *dev); 175 175 static void set_multicast_list(struct net_device *dev); 176 176 static void ni52_timeout(struct net_device *dev); ··· 1173 1173 * send frame 1174 1174 */ 1175 1175 1176 - static int ni52_send_packet(struct sk_buff *skb, struct net_device *dev) 1176 + static netdev_tx_t ni52_send_packet(struct sk_buff *skb, 1177 + struct net_device *dev) 1177 1178 { 1178 1179 int len, i; 1179 1180 #ifndef NO_NOPCOMMANDS
+4 -2
drivers/net/ni65.c
··· 252 252 static int ni65_open(struct net_device *dev); 253 253 static int ni65_lance_reinit(struct net_device *dev); 254 254 static void ni65_init_lance(struct priv *p,unsigned char*,int,int); 255 - static int ni65_send_packet(struct sk_buff *skb, struct net_device *dev); 255 + static netdev_tx_t ni65_send_packet(struct sk_buff *skb, 256 + struct net_device *dev); 256 257 static void ni65_timeout(struct net_device *dev); 257 258 static int ni65_close(struct net_device *dev); 258 259 static int ni65_alloc_buffer(struct net_device *dev); ··· 1158 1157 * Send a packet 1159 1158 */ 1160 1159 1161 - static int ni65_send_packet(struct sk_buff *skb, struct net_device *dev) 1160 + static netdev_tx_t ni65_send_packet(struct sk_buff *skb, 1161 + struct net_device *dev) 1162 1162 { 1163 1163 struct priv *p = dev->ml_priv; 1164 1164
+2 -1
drivers/net/niu.c
··· 6657 6657 return ret; 6658 6658 } 6659 6659 6660 - static int niu_start_xmit(struct sk_buff *skb, struct net_device *dev) 6660 + static netdev_tx_t niu_start_xmit(struct sk_buff *skb, 6661 + struct net_device *dev) 6661 6662 { 6662 6663 struct niu *np = netdev_priv(dev); 6663 6664 unsigned long align, headroom;
+2 -1
drivers/net/ns83820.c
··· 1077 1077 * while trying to track down a bug in either the zero copy code or 1078 1078 * the tx fifo (hence the MAX_FRAG_LEN). 1079 1079 */ 1080 - static int ns83820_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev) 1080 + static netdev_tx_t ns83820_hard_start_xmit(struct sk_buff *skb, 1081 + struct net_device *ndev) 1081 1082 { 1082 1083 struct ns83820 *dev = PRIV(ndev); 1083 1084 u32 free_idx, cmdsts, extsts;
+4 -2
drivers/net/pcnet32.c
··· 303 303 static int pcnet32_probe1(unsigned long, int, struct pci_dev *); 304 304 static int pcnet32_open(struct net_device *); 305 305 static int pcnet32_init_ring(struct net_device *); 306 - static int pcnet32_start_xmit(struct sk_buff *, struct net_device *); 306 + static netdev_tx_t pcnet32_start_xmit(struct sk_buff *, 307 + struct net_device *); 307 308 static void pcnet32_tx_timeout(struct net_device *dev); 308 309 static irqreturn_t pcnet32_interrupt(int, void *); 309 310 static int pcnet32_close(struct net_device *); ··· 2482 2481 spin_unlock_irqrestore(&lp->lock, flags); 2483 2482 } 2484 2483 2485 - static int pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev) 2484 + static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb, 2485 + struct net_device *dev) 2486 2486 { 2487 2487 struct pcnet32_private *lp = netdev_priv(dev); 2488 2488 unsigned long ioaddr = dev->base_addr;
+2 -1
drivers/net/qla3xxx.c
··· 2572 2572 * The IOCB is always the top of the chain followed by one or more 2573 2573 * OALs (when necessary). 2574 2574 */ 2575 - static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev) 2575 + static netdev_tx_t ql3xxx_send(struct sk_buff *skb, 2576 + struct net_device *ndev) 2576 2577 { 2577 2578 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 2578 2579 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+1 -1
drivers/net/qlge/qlge_main.c
··· 2103 2103 iph->daddr, len, iph->protocol, 0); 2104 2104 } 2105 2105 2106 - static int qlge_send(struct sk_buff *skb, struct net_device *ndev) 2106 + static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev) 2107 2107 { 2108 2108 struct tx_ring_desc *tx_ring_desc; 2109 2109 struct ob_mac_iocb_req *mac_iocb_ptr;
+5 -5
drivers/net/r6040.c
··· 883 883 return 0; 884 884 } 885 885 886 - static int r6040_start_xmit(struct sk_buff *skb, struct net_device *dev) 886 + static netdev_tx_t r6040_start_xmit(struct sk_buff *skb, 887 + struct net_device *dev) 887 888 { 888 889 struct r6040_private *lp = netdev_priv(dev); 889 890 struct r6040_descriptor *descptr; 890 891 void __iomem *ioaddr = lp->base; 891 892 unsigned long flags; 892 - int ret = NETDEV_TX_OK; 893 893 894 894 /* Critical Section */ 895 895 spin_lock_irqsave(&lp->lock, flags); ··· 899 899 spin_unlock_irqrestore(&lp->lock, flags); 900 900 netif_stop_queue(dev); 901 901 printk(KERN_ERR DRV_NAME ": no tx descriptor\n"); 902 - ret = NETDEV_TX_BUSY; 903 - return ret; 902 + return NETDEV_TX_BUSY; 904 903 } 905 904 906 905 /* Statistic Counter */ ··· 927 928 928 929 dev->trans_start = jiffies; 929 930 spin_unlock_irqrestore(&lp->lock, flags); 930 - return ret; 931 + 932 + return NETDEV_TX_OK; 931 933 } 932 934 933 935 static void r6040_multicast_list(struct net_device *dev)
+6 -6
drivers/net/r8169.c
··· 507 507 MODULE_VERSION(RTL8169_VERSION); 508 508 509 509 static int rtl8169_open(struct net_device *dev); 510 - static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev); 510 + static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, 511 + struct net_device *dev); 511 512 static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance); 512 513 static int rtl8169_init_ring(struct net_device *dev); 513 514 static void rtl_hw_start(struct net_device *dev); ··· 3358 3357 return 0; 3359 3358 } 3360 3359 3361 - static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev) 3360 + static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, 3361 + struct net_device *dev) 3362 3362 { 3363 3363 struct rtl8169_private *tp = netdev_priv(dev); 3364 3364 unsigned int frags, entry = tp->cur_tx % NUM_TX_DESC; ··· 3368 3366 dma_addr_t mapping; 3369 3367 u32 status, len; 3370 3368 u32 opts1; 3371 - int ret = NETDEV_TX_OK; 3372 3369 3373 3370 if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) { 3374 3371 if (netif_msg_drv(tp)) { ··· 3419 3418 } 3420 3419 3421 3420 out: 3422 - return ret; 3421 + return NETDEV_TX_OK; 3423 3422 3424 3423 err_stop: 3425 3424 netif_stop_queue(dev); 3426 - ret = NETDEV_TX_BUSY; 3427 3425 dev->stats.tx_dropped++; 3428 - goto out; 3426 + return NETDEV_TX_BUSY; 3429 3427 } 3430 3428 3431 3429 static void rtl8169_pcierr_interrupt(struct net_device *dev)
+2 -1
drivers/net/rrunner.c
··· 1401 1401 } 1402 1402 1403 1403 1404 - static int rr_start_xmit(struct sk_buff *skb, struct net_device *dev) 1404 + static netdev_tx_t rr_start_xmit(struct sk_buff *skb, 1405 + struct net_device *dev) 1405 1406 { 1406 1407 struct rr_private *rrpriv = netdev_priv(dev); 1407 1408 struct rr_regs __iomem *regs = rrpriv->regs;
+2 -1
drivers/net/rrunner.h
··· 831 831 static irqreturn_t rr_interrupt(int irq, void *dev_id); 832 832 833 833 static int rr_open(struct net_device *dev); 834 - static int rr_start_xmit(struct sk_buff *skb, struct net_device *dev); 834 + static netdev_tx_t rr_start_xmit(struct sk_buff *skb, 835 + struct net_device *dev); 835 836 static int rr_close(struct net_device *dev); 836 837 static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 837 838 static unsigned int rr_read_eeprom(struct rr_private *rrpriv,
+1 -1
drivers/net/s2io.c
··· 4072 4072 * 0 on success & 1 on failure. 4073 4073 */ 4074 4074 4075 - static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) 4075 + static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev) 4076 4076 { 4077 4077 struct s2io_nic *sp = netdev_priv(dev); 4078 4078 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
+3 -2
drivers/net/sb1000.c
··· 82 82 extern int sb1000_probe(struct net_device *dev); 83 83 static int sb1000_open(struct net_device *dev); 84 84 static int sb1000_dev_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd); 85 - static int sb1000_start_xmit(struct sk_buff *skb, struct net_device *dev); 85 + static netdev_tx_t sb1000_start_xmit(struct sk_buff *skb, 86 + struct net_device *dev); 86 87 static irqreturn_t sb1000_interrupt(int irq, void *dev_id); 87 88 static int sb1000_close(struct net_device *dev); 88 89 ··· 1081 1080 } 1082 1081 1083 1082 /* transmit function: do nothing since SB1000 can't send anything out */ 1084 - static int 1083 + static netdev_tx_t 1085 1084 sb1000_start_xmit(struct sk_buff *skb, struct net_device *dev) 1086 1085 { 1087 1086 printk(KERN_WARNING "%s: trying to transmit!!!\n", dev->name);
+2 -1
drivers/net/sc92031.c
··· 941 941 return &dev->stats; 942 942 } 943 943 944 - static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev) 944 + static netdev_tx_t sc92031_start_xmit(struct sk_buff *skb, 945 + struct net_device *dev) 945 946 { 946 947 struct sc92031_priv *priv = netdev_priv(dev); 947 948 void __iomem *port_base = priv->port_base;
+4 -2
drivers/net/seeq8005.c
··· 81 81 static int seeq8005_probe1(struct net_device *dev, int ioaddr); 82 82 static int seeq8005_open(struct net_device *dev); 83 83 static void seeq8005_timeout(struct net_device *dev); 84 - static int seeq8005_send_packet(struct sk_buff *skb, struct net_device *dev); 84 + static netdev_tx_t seeq8005_send_packet(struct sk_buff *skb, 85 + struct net_device *dev); 85 86 static irqreturn_t seeq8005_interrupt(int irq, void *dev_id); 86 87 static void seeq8005_rx(struct net_device *dev); 87 88 static int seeq8005_close(struct net_device *dev); ··· 395 394 netif_wake_queue(dev); 396 395 } 397 396 398 - static int seeq8005_send_packet(struct sk_buff *skb, struct net_device *dev) 397 + static netdev_tx_t seeq8005_send_packet(struct sk_buff *skb, 398 + struct net_device *dev) 399 399 { 400 400 short length = skb->len; 401 401 unsigned char *buf;
+3 -2
drivers/net/sfc/efx.h
··· 20 20 #define FALCON_B_P_DEVID 0x0710 21 21 22 22 /* TX */ 23 - extern int efx_xmit(struct efx_nic *efx, 24 - struct efx_tx_queue *tx_queue, struct sk_buff *skb); 23 + extern netdev_tx_t efx_xmit(struct efx_nic *efx, 24 + struct efx_tx_queue *tx_queue, 25 + struct sk_buff *skb); 25 26 extern void efx_stop_queue(struct efx_nic *efx); 26 27 extern void efx_wake_queue(struct efx_nic *efx); 27 28
+2 -1
drivers/net/sfc/selftest.c
··· 400 400 struct efx_loopback_state *state = efx->loopback_selftest; 401 401 struct efx_loopback_payload *payload; 402 402 struct sk_buff *skb; 403 - int i, rc; 403 + int i; 404 + netdev_tx_t rc; 404 405 405 406 /* Transmit N copies of buffer */ 406 407 for (i = 0; i < state->packet_count; i++) {
+8 -10
drivers/net/sfc/tx.c
··· 138 138 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY 139 139 * You must hold netif_tx_lock() to call this function. 140 140 */ 141 - static int efx_enqueue_skb(struct efx_tx_queue *tx_queue, 142 - struct sk_buff *skb) 141 + static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, 142 + struct sk_buff *skb) 143 143 { 144 144 struct efx_nic *efx = tx_queue->efx; 145 145 struct pci_dev *pci_dev = efx->pci_dev; ··· 152 152 unsigned int dma_len; 153 153 bool unmap_single; 154 154 int q_space, i = 0; 155 - int rc = NETDEV_TX_OK; 155 + netdev_tx_t rc = NETDEV_TX_OK; 156 156 157 157 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); 158 158 ··· 353 353 * 354 354 * Context: netif_tx_lock held 355 355 */ 356 - inline int efx_xmit(struct efx_nic *efx, 357 - struct efx_tx_queue *tx_queue, struct sk_buff *skb) 356 + inline netdev_tx_t efx_xmit(struct efx_nic *efx, 357 + struct efx_tx_queue *tx_queue, struct sk_buff *skb) 358 358 { 359 - int rc; 360 - 361 359 /* Map fragments for DMA and add to TX queue */ 362 - rc = efx_enqueue_skb(tx_queue, skb); 363 - return rc; 360 + return efx_enqueue_skb(tx_queue, skb); 364 361 } 365 362 366 363 /* Initiate a packet transmission. We use one channel per CPU ··· 369 372 * Note that returning anything other than NETDEV_TX_OK will cause the 370 373 * OS to free the skb. 371 374 */ 372 - int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev) 375 + netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, 376 + struct net_device *net_dev) 373 377 { 374 378 struct efx_nic *efx = netdev_priv(net_dev); 375 379 struct efx_tx_queue *tx_queue;
+2 -1
drivers/net/sfc/tx.h
··· 18 18 void efx_init_tx_queue(struct efx_tx_queue *tx_queue); 19 19 void efx_fini_tx_queue(struct efx_tx_queue *tx_queue); 20 20 21 - int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev); 21 + netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, 22 + struct net_device *net_dev); 22 23 void efx_release_tx_buffers(struct efx_tx_queue *tx_queue); 23 24 24 25 #endif /* EFX_TX_H */
+2 -1
drivers/net/sis190.c
··· 1168 1168 return 0; 1169 1169 } 1170 1170 1171 - static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev) 1171 + static netdev_tx_t sis190_start_xmit(struct sk_buff *skb, 1172 + struct net_device *dev) 1172 1173 { 1173 1174 struct sis190_private *tp = netdev_priv(dev); 1174 1175 void __iomem *ioaddr = tp->mmio_addr;
+3 -2
drivers/net/sis900.c
··· 214 214 static void sis900_tx_timeout(struct net_device *net_dev); 215 215 static void sis900_init_tx_ring(struct net_device *net_dev); 216 216 static void sis900_init_rx_ring(struct net_device *net_dev); 217 - static int sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev); 217 + static netdev_tx_t sis900_start_xmit(struct sk_buff *skb, 218 + struct net_device *net_dev); 218 219 static int sis900_rx(struct net_device *net_dev); 219 220 static void sis900_finish_xmit (struct net_device *net_dev); 220 221 static irqreturn_t sis900_interrupt(int irq, void *dev_instance); ··· 1572 1571 * tell upper layer if the buffer is full 1573 1572 */ 1574 1573 1575 - static int 1574 + static netdev_tx_t 1576 1575 sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev) 1577 1576 { 1578 1577 struct sis900_private *sis_priv = netdev_priv(net_dev);
+4 -2
drivers/net/skfp/skfddi.c
··· 107 107 static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev); 108 108 static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr); 109 109 static int skfp_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 110 - static int skfp_send_pkt(struct sk_buff *skb, struct net_device *dev); 110 + static netdev_tx_t skfp_send_pkt(struct sk_buff *skb, 111 + struct net_device *dev); 111 112 static void send_queued_packets(struct s_smc *smc); 112 113 static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr); 113 114 static void ResetAdapter(struct s_smc *smc); ··· 1057 1056 * Side Effects: 1058 1057 * None 1059 1058 */ 1060 - static int skfp_send_pkt(struct sk_buff *skb, struct net_device *dev) 1059 + static netdev_tx_t skfp_send_pkt(struct sk_buff *skb, 1060 + struct net_device *dev) 1061 1061 { 1062 1062 struct s_smc *smc = netdev_priv(dev); 1063 1063 skfddi_priv *bp = &smc->os;
+2 -1
drivers/net/skge.c
··· 2746 2746 + (ring->to_clean - ring->to_use) - 1; 2747 2747 } 2748 2748 2749 - static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev) 2749 + static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, 2750 + struct net_device *dev) 2750 2751 { 2751 2752 struct skge_port *skge = netdev_priv(dev); 2752 2753 struct skge_hw *hw = skge->hw;
+2 -1
drivers/net/sky2.c
··· 1574 1574 * the number of ring elements will probably be less than the number 1575 1575 * of list elements used. 1576 1576 */ 1577 - static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) 1577 + static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb, 1578 + struct net_device *dev) 1578 1579 { 1579 1580 struct sky2_port *sky2 = netdev_priv(dev); 1580 1581 struct sky2_hw *hw = sky2->hw;
+4 -2
drivers/net/smc9194.c
··· 299 299 . to store the packet, I call this routine, which either sends it 300 300 . now, or generates an interrupt when the card is ready for the 301 301 . packet */ 302 - static int smc_wait_to_send_packet( struct sk_buff * skb, struct net_device *dev ); 302 + static netdev_tx_t smc_wait_to_send_packet( struct sk_buff * skb, 303 + struct net_device *dev ); 303 304 304 305 /* this does a soft reset on the device */ 305 306 static void smc_reset( int ioaddr ); ··· 488 487 . o (NO): Enable interrupts and let the interrupt handler deal with it. 489 488 . o (YES):Send it now. 490 489 */ 491 - static int smc_wait_to_send_packet( struct sk_buff * skb, struct net_device * dev ) 490 + static netdev_tx_t smc_wait_to_send_packet(struct sk_buff *skb, 491 + struct net_device *dev) 492 492 { 493 493 struct smc_local *lp = netdev_priv(dev); 494 494 unsigned int ioaddr = dev->base_addr;
+2 -1
drivers/net/smsc9420.c
··· 968 968 } 969 969 } 970 970 971 - static int smsc9420_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) 971 + static netdev_tx_t smsc9420_hard_start_xmit(struct sk_buff *skb, 972 + struct net_device *dev) 972 973 { 973 974 struct smsc9420_pdata *pd = netdev_priv(dev); 974 975 dma_addr_t mapping;
+2 -2
drivers/net/starfire.c
··· 595 595 static void check_duplex(struct net_device *dev); 596 596 static void tx_timeout(struct net_device *dev); 597 597 static void init_ring(struct net_device *dev); 598 - static int start_tx(struct sk_buff *skb, struct net_device *dev); 598 + static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev); 599 599 static irqreturn_t intr_handler(int irq, void *dev_instance); 600 600 static void netdev_error(struct net_device *dev, int intr_status); 601 601 static int __netdev_rx(struct net_device *dev, int *quota); ··· 1223 1223 } 1224 1224 1225 1225 1226 - static int start_tx(struct sk_buff *skb, struct net_device *dev) 1226 + static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) 1227 1227 { 1228 1228 struct netdev_private *np = netdev_priv(dev); 1229 1229 unsigned int entry;
+2 -2
drivers/net/sundance.c
··· 415 415 static void netdev_timer(unsigned long data); 416 416 static void tx_timeout(struct net_device *dev); 417 417 static void init_ring(struct net_device *dev); 418 - static int start_tx(struct sk_buff *skb, struct net_device *dev); 418 + static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev); 419 419 static int reset_tx (struct net_device *dev); 420 420 static irqreturn_t intr_handler(int irq, void *dev_instance); 421 421 static void rx_poll(unsigned long data); ··· 1053 1053 return; 1054 1054 } 1055 1055 1056 - static int 1056 + static netdev_tx_t 1057 1057 start_tx (struct sk_buff *skb, struct net_device *dev) 1058 1058 { 1059 1059 struct netdev_private *np = netdev_priv(dev);
+2 -1
drivers/net/sungem.c
··· 1015 1015 return 0; 1016 1016 } 1017 1017 1018 - static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev) 1018 + static netdev_tx_t gem_start_xmit(struct sk_buff *skb, 1019 + struct net_device *dev) 1019 1020 { 1020 1021 struct gem *gp = netdev_priv(dev); 1021 1022 int entry;
+2 -1
drivers/net/sunhme.c
··· 2252 2252 netif_wake_queue(dev); 2253 2253 } 2254 2254 2255 - static int happy_meal_start_xmit(struct sk_buff *skb, struct net_device *dev) 2255 + static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb, 2256 + struct net_device *dev) 2256 2257 { 2257 2258 struct happy_meal *hp = netdev_priv(dev); 2258 2259 int entry;
+2 -1
drivers/net/tehuti.c
··· 1622 1622 * the driver. Note: the driver must NOT put the skb in its DMA ring. 1623 1623 * o NETDEV_TX_LOCKED Locking failed, please retry quickly. 1624 1624 */ 1625 - static int bdx_tx_transmit(struct sk_buff *skb, struct net_device *ndev) 1625 + static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb, 1626 + struct net_device *ndev) 1626 1627 { 1627 1628 struct bdx_priv *priv = netdev_priv(ndev); 1628 1629 struct txd_fifo *f = &priv->txd_fifo0;
+6 -3
drivers/net/tg3.c
··· 5135 5135 /* hard_start_xmit for devices that don't have any bugs and 5136 5136 * support TG3_FLG2_HW_TSO_2 only. 5137 5137 */ 5138 - static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) 5138 + static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, 5139 + struct net_device *dev) 5139 5140 { 5140 5141 struct tg3 *tp = netdev_priv(dev); 5141 5142 u32 len, entry, base_flags, mss; ··· 5252 5251 return NETDEV_TX_OK; 5253 5252 } 5254 5253 5255 - static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *); 5254 + static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *, 5255 + struct net_device *); 5256 5256 5257 5257 /* Use GSO to workaround a rare TSO bug that may be triggered when the 5258 5258 * TSO header is greater than 80 bytes. ··· 5292 5290 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and 5293 5291 * support TG3_FLG2_HW_TSO_1 or firmware TSO only. 5294 5292 */ 5295 - static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) 5293 + static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb, 5294 + struct net_device *dev) 5296 5295 { 5297 5296 struct tg3 *tp = netdev_priv(dev); 5298 5297 u32 len, entry, base_flags, mss;
+2 -2
drivers/net/tlan.c
··· 289 289 static void TLan_Eisa_Cleanup( void ); 290 290 static int TLan_Init( struct net_device * ); 291 291 static int TLan_Open( struct net_device *dev ); 292 - static int TLan_StartTx( struct sk_buff *, struct net_device *); 292 + static netdev_tx_t TLan_StartTx( struct sk_buff *, struct net_device *); 293 293 static irqreturn_t TLan_HandleInterrupt( int, void *); 294 294 static int TLan_Close( struct net_device *); 295 295 static struct net_device_stats *TLan_GetStats( struct net_device *); ··· 1083 1083 * 1084 1084 **************************************************************/ 1085 1085 1086 - static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev ) 1086 + static netdev_tx_t TLan_StartTx( struct sk_buff *skb, struct net_device *dev ) 1087 1087 { 1088 1088 TLanPrivateInfo *priv = netdev_priv(dev); 1089 1089 dma_addr_t tail_list_phys;
+1 -1
drivers/net/typhoon.c
··· 762 762 tcpd->status = 0; 763 763 } 764 764 765 - static int 765 + static netdev_tx_t 766 766 typhoon_start_tx(struct sk_buff *skb, struct net_device *dev) 767 767 { 768 768 struct typhoon *tp = netdev_priv(dev);
+4 -2
drivers/net/via-rhine.c
··· 408 408 static void mdio_write(struct net_device *dev, int phy_id, int location, int value); 409 409 static int rhine_open(struct net_device *dev); 410 410 static void rhine_tx_timeout(struct net_device *dev); 411 - static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev); 411 + static netdev_tx_t rhine_start_tx(struct sk_buff *skb, 412 + struct net_device *dev); 412 413 static irqreturn_t rhine_interrupt(int irq, void *dev_instance); 413 414 static void rhine_tx(struct net_device *dev); 414 415 static int rhine_rx(struct net_device *dev, int limit); ··· 1214 1213 netif_wake_queue(dev); 1215 1214 } 1216 1215 1217 - static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev) 1216 + static netdev_tx_t rhine_start_tx(struct sk_buff *skb, 1217 + struct net_device *dev) 1218 1218 { 1219 1219 struct rhine_private *rp = netdev_priv(dev); 1220 1220 void __iomem *ioaddr = rp->base;
+2 -1
drivers/net/via-velocity.c
··· 2465 2465 * Called by the networ layer to request a packet is queued to 2466 2466 * the velocity. Returns zero on success. 2467 2467 */ 2468 - static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) 2468 + static netdev_tx_t velocity_xmit(struct sk_buff *skb, 2469 + struct net_device *dev) 2469 2470 { 2470 2471 struct velocity_info *vptr = netdev_priv(dev); 2471 2472 int qnum = 0;
+1 -1
drivers/net/vxge/vxge-main.c
··· 812 812 * NOTE: when device cant queue the pkt, just the trans_start variable will 813 813 * not be upadted. 814 814 */ 815 - static int 815 + static netdev_tx_t 816 816 vxge_xmit(struct sk_buff *skb, struct net_device *dev) 817 817 { 818 818 struct vxge_fifo *fifo = NULL;
+4 -2
drivers/net/yellowfin.c
··· 347 347 static void yellowfin_timer(unsigned long data); 348 348 static void yellowfin_tx_timeout(struct net_device *dev); 349 349 static void yellowfin_init_ring(struct net_device *dev); 350 - static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev); 350 + static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb, 351 + struct net_device *dev); 351 352 static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance); 352 353 static int yellowfin_rx(struct net_device *dev); 353 354 static void yellowfin_error(struct net_device *dev, int intr_status); ··· 809 808 return; 810 809 } 811 810 812 - static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev) 811 + static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb, 812 + struct net_device *dev) 813 813 { 814 814 struct yellowfin_private *yp = netdev_priv(dev); 815 815 unsigned entry;
+3 -2
drivers/net/znet.c
··· 156 156 }; 157 157 158 158 static int znet_open(struct net_device *dev); 159 - static int znet_send_packet(struct sk_buff *skb, struct net_device *dev); 159 + static netdev_tx_t znet_send_packet(struct sk_buff *skb, 160 + struct net_device *dev); 160 161 static irqreturn_t znet_interrupt(int irq, void *dev_id); 161 162 static void znet_rx(struct net_device *dev); 162 163 static int znet_close(struct net_device *dev); ··· 535 534 netif_wake_queue (dev); 536 535 } 537 536 538 - static int znet_send_packet(struct sk_buff *skb, struct net_device *dev) 537 + static netdev_tx_t znet_send_packet(struct sk_buff *skb, struct net_device *dev) 539 538 { 540 539 int ioaddr = dev->base_addr; 541 540 struct znet_private *znet = netdev_priv(dev);
+2 -1
include/linux/arcdevice.h
··· 337 337 338 338 int arcnet_open(struct net_device *dev); 339 339 int arcnet_close(struct net_device *dev); 340 - int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev); 340 + netdev_tx_t arcnet_send_packet(struct sk_buff *skb, 341 + struct net_device *dev); 341 342 void arcnet_timeout(struct net_device *dev); 342 343 343 344 #endif /* __KERNEL__ */