Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

irda: convert to internal stats

Convert IRDA drivers to use already existing net_device_stats structure
in network device. This is a precursor to conversion to net_device
ops. Compile tested only.

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Stephen Hemminger and committed by
David S. Miller
af049081 46377bb3

+230 -391
+16 -29
drivers/net/irda/ali-ircc.c
··· 109 109 static int ali_ircc_net_close(struct net_device *dev); 110 110 static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 111 111 static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud); 112 - static struct net_device_stats *ali_ircc_net_get_stats(struct net_device *dev); 113 112 114 113 /* SIR function */ 115 114 static int ali_ircc_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev); ··· 365 366 dev->open = ali_ircc_net_open; 366 367 dev->stop = ali_ircc_net_close; 367 368 dev->do_ioctl = ali_ircc_net_ioctl; 368 - dev->get_stats = ali_ircc_net_get_stats; 369 369 370 370 err = register_netdev(dev); 371 371 if (err) { ··· 874 876 * async_unwrap_char will deliver all found frames 875 877 */ 876 878 do { 877 - async_unwrap_char(self->netdev, &self->stats, &self->rx_buff, 879 + async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff, 878 880 inb(iobase+UART_RX)); 879 881 880 882 /* Make sure we don't stay here too long */ ··· 941 943 netif_wake_queue(self->netdev); 942 944 } 943 945 944 - self->stats.tx_packets++; 946 + self->netdev->stats.tx_packets++; 945 947 946 948 /* Turn on receive interrupts */ 947 949 outb(UART_IER_RDI, iobase+UART_IER); ··· 1465 1467 self->tx_fifo.queue[self->tx_fifo.free].len = skb->len; 1466 1468 self->tx_fifo.tail += skb->len; 1467 1469 1468 - self->stats.tx_bytes += skb->len; 1470 + dev->stats.tx_bytes += skb->len; 1469 1471 1470 1472 skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start, 1471 1473 skb->len); ··· 1659 1661 1660 1662 { 1661 1663 IRDA_ERROR("%s(), ********* LSR_FRAME_ABORT *********\n", __func__); 1662 - self->stats.tx_errors++; 1663 - self->stats.tx_fifo_errors++; 1664 + self->netdev->stats.tx_errors++; 1665 + self->netdev->stats.tx_fifo_errors++; 1664 1666 } 1665 1667 else 1666 1668 { 1667 - self->stats.tx_packets++; 1669 + self->netdev->stats.tx_packets++; 1668 1670 } 1669 1671 1670 1672 /* Check if we need to change the speed 
*/ ··· 1829 1831 IRDA_DEBUG(0,"%s(), ************* RX Errors ************ \n", __func__ ); 1830 1832 1831 1833 /* Skip frame */ 1832 - self->stats.rx_errors++; 1834 + self->netdev->stats.rx_errors++; 1833 1835 1834 1836 self->rx_buff.data += len; 1835 1837 1836 1838 if (status & LSR_FIFO_UR) 1837 1839 { 1838 - self->stats.rx_frame_errors++; 1840 + self->netdev->stats.rx_frame_errors++; 1839 1841 IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************ \n", __func__ ); 1840 1842 } 1841 1843 if (status & LSR_FRAME_ERROR) 1842 1844 { 1843 - self->stats.rx_frame_errors++; 1845 + self->netdev->stats.rx_frame_errors++; 1844 1846 IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************ \n", __func__ ); 1845 1847 } 1846 1848 1847 1849 if (status & LSR_CRC_ERROR) 1848 1850 { 1849 - self->stats.rx_crc_errors++; 1851 + self->netdev->stats.rx_crc_errors++; 1850 1852 IRDA_DEBUG(0,"%s(), ************* CRC Errors ************ \n", __func__ ); 1851 1853 } 1852 1854 1853 1855 if(self->rcvFramesOverflow) 1854 1856 { 1855 - self->stats.rx_frame_errors++; 1857 + self->netdev->stats.rx_frame_errors++; 1856 1858 IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************ \n", __func__ ); 1857 1859 } 1858 1860 if(len == 0) 1859 1861 { 1860 - self->stats.rx_frame_errors++; 1862 + self->netdev->stats.rx_frame_errors++; 1861 1863 IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 ********* \n", __func__ ); 1862 1864 } 1863 1865 } ··· 1908 1910 IRDA_WARNING("%s(), memory squeeze, " 1909 1911 "dropping frame.\n", 1910 1912 __func__); 1911 - self->stats.rx_dropped++; 1913 + self->netdev->stats.rx_dropped++; 1912 1914 1913 1915 return FALSE; 1914 1916 } ··· 1922 1924 1923 1925 /* Move to next frame */ 1924 1926 self->rx_buff.data += len; 1925 - self->stats.rx_bytes += len; 1926 - self->stats.rx_packets++; 1927 + self->netdev->stats.rx_bytes += len; 1928 + self->netdev->stats.rx_packets++; 1927 1929 1928 1930 skb->dev = self->netdev; 1929 1931 skb_reset_mac_header(skb); ··· 1992 
1994 self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data, 1993 1995 self->tx_buff.truesize); 1994 1996 1995 - self->stats.tx_bytes += self->tx_buff.len; 1997 + self->netdev->stats.tx_bytes += self->tx_buff.len; 1996 1998 1997 1999 /* Turn on transmit finished interrupt. Will fire immediately! */ 1998 2000 outb(UART_IER_THRI, iobase+UART_IER); ··· 2107 2109 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); 2108 2110 2109 2111 return status; 2110 - } 2111 - 2112 - static struct net_device_stats *ali_ircc_net_get_stats(struct net_device *dev) 2113 - { 2114 - struct ali_ircc_cb *self = netdev_priv(dev); 2115 - 2116 - IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ ); 2117 - 2118 - IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ ); 2119 - 2120 - return &self->stats; 2121 2112 } 2122 2113 2123 2114 static int ali_ircc_suspend(struct platform_device *dev, pm_message_t state)
-1
drivers/net/irda/ali-ircc.h
··· 191 191 struct tx_fifo tx_fifo; /* Info about frames to be transmitted */ 192 192 193 193 struct net_device *netdev; /* Yes! we are some kind of netdevice */ 194 - struct net_device_stats stats; 195 194 196 195 struct irlap_cb *irlap; /* The link layer we are binded to */ 197 196 struct qos_info qos; /* QoS capabilities for this device */
-1
drivers/net/irda/au1000_ircc.h
··· 107 107 iobuff_t rx_buff; 108 108 109 109 struct net_device *netdev; 110 - struct net_device_stats stats; 111 110 112 111 struct timeval stamp; 113 112 struct timeval now;
-9
drivers/net/irda/au1k_ir.c
··· 53 53 static int au1k_irda_rx(struct net_device *); 54 54 static void au1k_irda_interrupt(int, void *); 55 55 static void au1k_tx_timeout(struct net_device *); 56 - static struct net_device_stats *au1k_irda_stats(struct net_device *); 57 56 static int au1k_irda_ioctl(struct net_device *, struct ifreq *, int); 58 57 static int au1k_irda_set_speed(struct net_device *dev, int speed); 59 58 ··· 212 213 dev->open = au1k_irda_start; 213 214 dev->hard_start_xmit = au1k_irda_hard_xmit; 214 215 dev->stop = au1k_irda_stop; 215 - dev->get_stats = au1k_irda_stats; 216 216 dev->do_ioctl = au1k_irda_ioctl; 217 217 dev->tx_timeout = au1k_tx_timeout; 218 218 ··· 828 830 break; 829 831 } 830 832 return ret; 831 - } 832 - 833 - 834 - static struct net_device_stats *au1k_irda_stats(struct net_device *dev) 835 - { 836 - struct au1k_private *aup = netdev_priv(dev); 837 - return &aup->stats; 838 833 } 839 834 840 835 MODULE_AUTHOR("Pete Popov <ppopov@mvista.com>");
-1
drivers/net/irda/donauboe.h
··· 308 308 struct toshoboe_cb 309 309 { 310 310 struct net_device *netdev; /* Yes! we are some kind of netdevice */ 311 - struct net_device_stats stats; 312 311 struct tty_driver ttydev; 313 312 314 313 struct irlap_cb *irlap; /* The link layer we are binded to */
+9 -19
drivers/net/irda/irda-usb.c
··· 122 122 static int irda_usb_net_close(struct net_device *dev); 123 123 static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 124 124 static void irda_usb_net_timeout(struct net_device *dev); 125 - static struct net_device_stats *irda_usb_net_get_stats(struct net_device *dev); 126 125 127 126 /************************ TRANSMIT ROUTINES ************************/ 128 127 /* ··· 524 525 /* Ask USB to send the packet - Irq disabled -> GFP_ATOMIC */ 525 526 if ((res = usb_submit_urb(urb, GFP_ATOMIC))) { 526 527 IRDA_WARNING("%s(), failed Tx URB\n", __func__); 527 - self->stats.tx_errors++; 528 + netdev->stats.tx_errors++; 528 529 /* Let USB recover : We will catch that in the watchdog */ 529 530 /*netif_start_queue(netdev);*/ 530 531 } else { 531 532 /* Increment packet stats */ 532 - self->stats.tx_packets++; 533 - self->stats.tx_bytes += skb->len; 533 + netdev->stats.tx_packets++; 534 + netdev->stats.tx_bytes += skb->len; 534 535 535 536 netdev->trans_start = jiffies; 536 537 } ··· 676 677 IRDA_DEBUG(0, "%s: Tx timed out, urb->status=%d, urb->transfer_flags=0x%04X\n", netdev->name, urb->status, urb->transfer_flags); 677 678 678 679 /* Increase error count */ 679 - self->stats.tx_errors++; 680 + netdev->stats.tx_errors++; 680 681 681 682 #ifdef IU_BUG_KICK_TIMEOUT 682 683 /* Can't be a bad idea to reset the speed ;-) - Jean II */ ··· 825 826 if (urb->status != 0) { 826 827 switch (urb->status) { 827 828 case -EILSEQ: 828 - self->stats.rx_crc_errors++; 829 + self->netdev->stats.rx_crc_errors++; 829 830 /* Also precursor to a hot-unplug on UHCI. */ 830 831 /* Fallthrough... */ 831 832 case -ECONNRESET: ··· 838 839 case -ETIME: 839 840 /* Usually precursor to a hot-unplug on OHCI. 
*/ 840 841 default: 841 - self->stats.rx_errors++; 842 + self->netdev->stats.rx_errors++; 842 843 IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X \n", __func__, urb->status, urb->transfer_flags); 843 844 break; 844 845 } ··· 889 890 IRDA_SKB_MAX_MTU); 890 891 891 892 if (!newskb) { 892 - self->stats.rx_dropped++; 893 + self->netdev->stats.rx_dropped++; 893 894 /* We could deliver the current skb, but this would stall 894 895 * the Rx path. Better drop the packet... Jean II */ 895 896 goto done; ··· 926 927 netif_rx(dataskb); 927 928 928 929 /* Keep stats up to date */ 929 - self->stats.rx_bytes += len; 930 - self->stats.rx_packets++; 930 + self->netdev->stats.rx_bytes += len; 931 + self->netdev->stats.rx_packets++; 931 932 932 933 done: 933 934 /* Note : at this point, the URB we've just received (urb) ··· 1341 1342 } 1342 1343 1343 1344 /*------------------------------------------------------------------*/ 1344 - /* 1345 - * Get device stats (for /proc/net/dev and ifconfig) 1346 - */ 1347 - static struct net_device_stats *irda_usb_net_get_stats(struct net_device *dev) 1348 - { 1349 - struct irda_usb_cb *self = netdev_priv(dev); 1350 - return &self->stats; 1351 - } 1352 1345 1353 1346 /********************* IRDA CONFIG SUBROUTINES *********************/ 1354 1347 /* ··· 1419 1428 netdev->watchdog_timeo = 250*HZ/1000; /* 250 ms > USB timeout */ 1420 1429 netdev->open = irda_usb_net_open; 1421 1430 netdev->stop = irda_usb_net_close; 1422 - netdev->get_stats = irda_usb_net_get_stats; 1423 1431 netdev->do_ioctl = irda_usb_net_ioctl; 1424 1432 1425 1433 return register_netdev(netdev);
-1
drivers/net/irda/irda-usb.h
··· 152 152 struct urb *speed_urb; /* URB used to send speed commands */ 153 153 154 154 struct net_device *netdev; /* Yes! we are some kind of netdev. */ 155 - struct net_device_stats stats; 156 155 struct irlap_cb *irlap; /* The link layer we are binded to */ 157 156 struct qos_info qos; 158 157 char *speed_buff; /* Buffer for speed changes */
+5 -15
drivers/net/irda/kingsun-sir.c
··· 105 105 struct usb_device *usbdev; /* init: probe_irda */ 106 106 struct net_device *netdev; /* network layer */ 107 107 struct irlap_cb *irlap; /* The link layer we are binded to */ 108 - struct net_device_stats stats; /* network statistics */ 108 + 109 109 struct qos_info qos; 110 110 111 111 __u8 *in_buf; /* receive buffer */ ··· 186 186 case -EPIPE: 187 187 break; 188 188 default: 189 - kingsun->stats.tx_errors++; 189 + netdev->stats.tx_errors++; 190 190 netif_start_queue(netdev); 191 191 } 192 192 } else { 193 - kingsun->stats.tx_packets++; 194 - kingsun->stats.tx_bytes += skb->len; 193 + netdev->stats.tx_packets++; 194 + netdev->stats.tx_bytes += skb->len; 195 195 } 196 196 197 197 dev_kfree_skb(skb); ··· 232 232 if (bytes[0] >= 1 && bytes[0] < kingsun->max_rx) { 233 233 for (i = 1; i <= bytes[0]; i++) { 234 234 async_unwrap_char(kingsun->netdev, 235 - &kingsun->stats, 235 + &kingsun->netdev->stats, 236 236 &kingsun->rx_buff, bytes[i]); 237 237 } 238 238 do_gettimeofday(&kingsun->rx_time); ··· 418 418 return ret; 419 419 } 420 420 421 - /* 422 - * Get device stats (for /proc/net/dev and ifconfig) 423 - */ 424 - static struct net_device_stats * 425 - kingsun_net_get_stats(struct net_device *netdev) 426 - { 427 - struct kingsun_cb *kingsun = netdev_priv(netdev); 428 - return &kingsun->stats; 429 - } 430 421 431 422 /* 432 423 * This routine is called by the USB subsystem for each new device ··· 523 532 net->hard_start_xmit = kingsun_hard_xmit; 524 533 net->open = kingsun_net_open; 525 534 net->stop = kingsun_net_close; 526 - net->get_stats = kingsun_net_get_stats; 527 535 net->do_ioctl = kingsun_net_ioctl; 528 536 529 537 ret = register_netdev(net);
+6 -16
drivers/net/irda/ks959-sir.c
··· 174 174 struct usb_device *usbdev; /* init: probe_irda */ 175 175 struct net_device *netdev; /* network layer */ 176 176 struct irlap_cb *irlap; /* The link layer we are binded to */ 177 - struct net_device_stats stats; /* network statistics */ 177 + 178 178 struct qos_info qos; 179 179 180 180 struct usb_ctrlrequest *tx_setuprequest; ··· 366 366 case -EPIPE: 367 367 break; 368 368 default: 369 - kingsun->stats.tx_errors++; 369 + netdev->stats.tx_errors++; 370 370 netif_start_queue(netdev); 371 371 } 372 372 } ··· 416 416 case -EPIPE: 417 417 break; 418 418 default: 419 - kingsun->stats.tx_errors++; 419 + netdev->stats.tx_errors++; 420 420 netif_start_queue(netdev); 421 421 } 422 422 } else { 423 - kingsun->stats.tx_packets++; 424 - kingsun->stats.tx_bytes += skb->len; 423 + netdev->stats.tx_packets++; 424 + netdev->stats.tx_bytes += skb->len; 425 425 426 426 } 427 427 ··· 469 469 */ 470 470 if (kingsun->rx_variable_xormask != 0) { 471 471 async_unwrap_char(kingsun->netdev, 472 - &kingsun->stats, 472 + &kingsun->netdev->stats, 473 473 &kingsun->rx_unwrap_buff, 474 474 bytes[i]); 475 475 } ··· 669 669 } 670 670 671 671 /* 672 - * Get device stats (for /proc/net/dev and ifconfig) 673 - */ 674 - static struct net_device_stats *ks959_net_get_stats(struct net_device *netdev) 675 - { 676 - struct ks959_cb *kingsun = netdev_priv(netdev); 677 - return &kingsun->stats; 678 - } 679 - 680 - /* 681 672 * This routine is called by the USB subsystem for each new device 682 673 * in the system. We need to check if the device is ours, and in 683 674 * this case start handling it. ··· 783 792 net->hard_start_xmit = ks959_hard_xmit; 784 793 net->open = ks959_net_open; 785 794 net->stop = ks959_net_close; 786 - net->get_stats = ks959_net_get_stats; 787 795 net->do_ioctl = ks959_net_ioctl; 788 796 789 797 ret = register_netdev(net);
+8 -18
drivers/net/irda/ksdazzle-sir.c
··· 140 140 struct usb_device *usbdev; /* init: probe_irda */ 141 141 struct net_device *netdev; /* network layer */ 142 142 struct irlap_cb *irlap; /* The link layer we are binded to */ 143 - struct net_device_stats stats; /* network statistics */ 143 + 144 144 struct qos_info qos; 145 145 146 146 struct urb *tx_urb; ··· 278 278 case -EPIPE: 279 279 break; 280 280 default: 281 - kingsun->stats.tx_errors++; 281 + netdev->stats.tx_errors++; 282 282 netif_start_queue(netdev); 283 283 } 284 284 } ··· 329 329 case -EPIPE: 330 330 break; 331 331 default: 332 - kingsun->stats.tx_errors++; 332 + netdev->stats.tx_errors++; 333 333 netif_start_queue(netdev); 334 334 } 335 335 } else { 336 - kingsun->stats.tx_packets++; 337 - kingsun->stats.tx_bytes += skb->len; 336 + netdev->stats.tx_packets++; 337 + netdev->stats.tx_bytes += skb->len; 338 338 339 339 } 340 340 ··· 348 348 static void ksdazzle_rcv_irq(struct urb *urb) 349 349 { 350 350 struct ksdazzle_cb *kingsun = urb->context; 351 + struct net_device *netdev = kingsun->netdev; 351 352 352 353 /* in process of stopping, just drop data */ 353 - if (!netif_running(kingsun->netdev)) { 354 + if (!netif_running(netdev)) { 354 355 kingsun->receiving = 0; 355 356 return; 356 357 } ··· 369 368 unsigned int i; 370 369 371 370 for (i = 0; i < urb->actual_length; i++) { 372 - async_unwrap_char(kingsun->netdev, &kingsun->stats, 371 + async_unwrap_char(netdev, &netdev->stats, 373 372 &kingsun->rx_unwrap_buff, bytes[i]); 374 373 } 375 374 kingsun->receiving = ··· 563 562 } 564 563 565 564 /* 566 - * Get device stats (for /proc/net/dev and ifconfig) 567 - */ 568 - static struct net_device_stats *ksdazzle_net_get_stats(struct net_device 569 - *netdev) 570 - { 571 - struct ksdazzle_cb *kingsun = netdev_priv(netdev); 572 - return &kingsun->stats; 573 - } 574 - 575 - /* 576 565 * This routine is called by the USB subsystem for each new device 577 566 * in the system. 
We need to check if the device is ours, and in 578 567 * this case start handling it. ··· 687 696 net->hard_start_xmit = ksdazzle_hard_xmit; 688 697 net->open = ksdazzle_net_open; 689 698 net->stop = ksdazzle_net_close; 690 - net->get_stats = ksdazzle_net_get_stats; 691 699 net->do_ioctl = ksdazzle_net_ioctl; 692 700 693 701 ret = register_netdev(net);
+20 -29
drivers/net/irda/mcs7780.c
··· 403 403 if(unlikely(new_len <= 0)) { 404 404 IRDA_ERROR("%s short frame length %d\n", 405 405 mcs->netdev->name, new_len); 406 - ++mcs->stats.rx_errors; 407 - ++mcs->stats.rx_length_errors; 406 + ++mcs->netdev->stats.rx_errors; 407 + ++mcs->netdev->stats.rx_length_errors; 408 408 return; 409 409 } 410 410 fcs = 0; ··· 413 413 if(fcs != GOOD_FCS) { 414 414 IRDA_ERROR("crc error calc 0x%x len %d\n", 415 415 fcs, new_len); 416 - mcs->stats.rx_errors++; 417 - mcs->stats.rx_crc_errors++; 416 + mcs->netdev->stats.rx_errors++; 417 + mcs->netdev->stats.rx_crc_errors++; 418 418 return; 419 419 } 420 420 421 421 skb = dev_alloc_skb(new_len + 1); 422 422 if(unlikely(!skb)) { 423 - ++mcs->stats.rx_dropped; 423 + ++mcs->netdev->stats.rx_dropped; 424 424 return; 425 425 } 426 426 ··· 433 433 434 434 netif_rx(skb); 435 435 436 - mcs->stats.rx_packets++; 437 - mcs->stats.rx_bytes += new_len; 436 + mcs->netdev->stats.rx_packets++; 437 + mcs->netdev->stats.rx_bytes += new_len; 438 438 439 439 return; 440 440 } ··· 458 458 if(unlikely(new_len <= 0)) { 459 459 IRDA_ERROR("%s short frame length %d\n", 460 460 mcs->netdev->name, new_len); 461 - ++mcs->stats.rx_errors; 462 - ++mcs->stats.rx_length_errors; 461 + ++mcs->netdev->stats.rx_errors; 462 + ++mcs->netdev->stats.rx_length_errors; 463 463 return; 464 464 } 465 465 466 466 fcs = ~(crc32_le(~0, buf, new_len)); 467 467 if(fcs != get_unaligned_le32(buf + new_len)) { 468 468 IRDA_ERROR("crc error calc 0x%x len %d\n", fcs, new_len); 469 - mcs->stats.rx_errors++; 470 - mcs->stats.rx_crc_errors++; 469 + mcs->netdev->stats.rx_errors++; 470 + mcs->netdev->stats.rx_crc_errors++; 471 471 return; 472 472 } 473 473 474 474 skb = dev_alloc_skb(new_len + 1); 475 475 if(unlikely(!skb)) { 476 - ++mcs->stats.rx_dropped; 476 + ++mcs->netdev->stats.rx_dropped; 477 477 return; 478 478 } 479 479 ··· 486 486 487 487 netif_rx(skb); 488 488 489 - mcs->stats.rx_packets++; 490 - mcs->stats.rx_bytes += new_len; 489 + mcs->netdev->stats.rx_packets++; 490 + 
mcs->netdev->stats.rx_bytes += new_len; 491 491 492 492 return; 493 493 } ··· 756 756 return ret; 757 757 } 758 758 759 - 760 - /* Get device stats for /proc/net/dev and ifconfig */ 761 - static struct net_device_stats *mcs_net_get_stats(struct net_device *netdev) 762 - { 763 - struct mcs_cb *mcs = netdev_priv(netdev); 764 - return &mcs->stats; 765 - } 766 - 767 759 /* Receive callback function. */ 768 760 static void mcs_receive_irq(struct urb *urb) 769 761 { ··· 778 786 */ 779 787 /* SIR speed */ 780 788 if(mcs->speed < 576000) { 781 - async_unwrap_char(mcs->netdev, &mcs->stats, 789 + async_unwrap_char(mcs->netdev, &mcs->netdev->stats, 782 790 &mcs->rx_buff, 0xc0); 783 791 784 792 for (i = 0; i < urb->actual_length; i++) 785 - async_unwrap_char(mcs->netdev, &mcs->stats, 793 + async_unwrap_char(mcs->netdev, &mcs->netdev->stats, 786 794 &mcs->rx_buff, bytes[i]); 787 795 788 - async_unwrap_char(mcs->netdev, &mcs->stats, 796 + async_unwrap_char(mcs->netdev, &mcs->netdev->stats, 789 797 &mcs->rx_buff, 0xc1); 790 798 } 791 799 /* MIR speed */ ··· 860 868 case -EPIPE: 861 869 break; 862 870 default: 863 - mcs->stats.tx_errors++; 871 + mcs->netdev->stats.tx_errors++; 864 872 netif_start_queue(ndev); 865 873 } 866 874 } else { 867 - mcs->stats.tx_packets++; 868 - mcs->stats.tx_bytes += skb->len; 875 + mcs->netdev->stats.tx_packets++; 876 + mcs->netdev->stats.tx_bytes += skb->len; 869 877 } 870 878 871 879 dev_kfree_skb(skb); ··· 923 931 ndev->hard_start_xmit = mcs_hard_xmit; 924 932 ndev->open = mcs_net_open; 925 933 ndev->stop = mcs_net_close; 926 - ndev->get_stats = mcs_net_get_stats; 927 934 ndev->do_ioctl = mcs_net_ioctl; 928 935 929 936 if (!intf->cur_altsetting)
-2
drivers/net/irda/mcs7780.h
··· 104 104 struct usb_device *usbdev; /* init: probe_irda */ 105 105 struct net_device *netdev; /* network layer */ 106 106 struct irlap_cb *irlap; /* The link layer we are binded to */ 107 - struct net_device_stats stats; /* network statistics */ 108 107 struct qos_info qos; 109 108 unsigned int speed; /* Current speed */ 110 109 unsigned int new_speed; /* new speed */ ··· 153 154 static int mcs_net_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd); 154 155 static int mcs_net_close(struct net_device *netdev); 155 156 static int mcs_net_open(struct net_device *netdev); 156 - static struct net_device_stats *mcs_net_get_stats(struct net_device *netdev); 157 157 158 158 static void mcs_receive_irq(struct urb *urb); 159 159 static void mcs_send_irq(struct urb *urb);
+18 -27
drivers/net/irda/nsc-ircc.c
··· 185 185 static int nsc_ircc_net_open(struct net_device *dev); 186 186 static int nsc_ircc_net_close(struct net_device *dev); 187 187 static int nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 188 - static struct net_device_stats *nsc_ircc_net_get_stats(struct net_device *dev); 189 188 190 189 /* Globals */ 191 190 static int pnp_registered; ··· 445 446 dev->open = nsc_ircc_net_open; 446 447 dev->stop = nsc_ircc_net_close; 447 448 dev->do_ioctl = nsc_ircc_net_ioctl; 448 - dev->get_stats = nsc_ircc_net_get_stats; 449 449 450 450 err = register_netdev(dev); 451 451 if (err) { ··· 1399 1401 self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data, 1400 1402 self->tx_buff.truesize); 1401 1403 1402 - self->stats.tx_bytes += self->tx_buff.len; 1404 + dev->stats.tx_bytes += self->tx_buff.len; 1403 1405 1404 1406 /* Add interrupt on tx low level (will fire immediately) */ 1405 1407 switch_bank(iobase, BANK0); ··· 1471 1473 self->tx_fifo.queue[self->tx_fifo.free].len = skb->len; 1472 1474 self->tx_fifo.tail += skb->len; 1473 1475 1474 - self->stats.tx_bytes += skb->len; 1476 + dev->stats.tx_bytes += skb->len; 1475 1477 1476 1478 skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start, 1477 1479 skb->len); ··· 1650 1652 1651 1653 /* Check for underrrun! 
*/ 1652 1654 if (inb(iobase+ASCR) & ASCR_TXUR) { 1653 - self->stats.tx_errors++; 1654 - self->stats.tx_fifo_errors++; 1655 + self->netdev->stats.tx_errors++; 1656 + self->netdev->stats.tx_fifo_errors++; 1655 1657 1656 1658 /* Clear bit, by writing 1 into it */ 1657 1659 outb(ASCR_TXUR, iobase+ASCR); 1658 1660 } else { 1659 - self->stats.tx_packets++; 1661 + self->netdev->stats.tx_packets++; 1660 1662 } 1661 1663 1662 1664 /* Finished with this frame, so prepare for next */ ··· 1791 1793 if (status & FRM_ST_ERR_MSK) { 1792 1794 if (status & FRM_ST_LOST_FR) { 1793 1795 /* Add number of lost frames to stats */ 1794 - self->stats.rx_errors += len; 1796 + self->netdev->stats.rx_errors += len; 1795 1797 } else { 1796 1798 /* Skip frame */ 1797 - self->stats.rx_errors++; 1799 + self->netdev->stats.rx_errors++; 1798 1800 1799 1801 self->rx_buff.data += len; 1800 1802 1801 1803 if (status & FRM_ST_MAX_LEN) 1802 - self->stats.rx_length_errors++; 1804 + self->netdev->stats.rx_length_errors++; 1803 1805 1804 1806 if (status & FRM_ST_PHY_ERR) 1805 - self->stats.rx_frame_errors++; 1807 + self->netdev->stats.rx_frame_errors++; 1806 1808 1807 1809 if (status & FRM_ST_BAD_CRC) 1808 - self->stats.rx_crc_errors++; 1810 + self->netdev->stats.rx_crc_errors++; 1809 1811 } 1810 1812 /* The errors below can be reported in both cases */ 1811 1813 if (status & FRM_ST_OVR1) 1812 - self->stats.rx_fifo_errors++; 1814 + self->netdev->stats.rx_fifo_errors++; 1813 1815 1814 1816 if (status & FRM_ST_OVR2) 1815 - self->stats.rx_fifo_errors++; 1817 + self->netdev->stats.rx_fifo_errors++; 1816 1818 } else { 1817 1819 /* 1818 1820 * First we must make sure that the frame we ··· 1861 1863 IRDA_WARNING("%s(), memory squeeze, " 1862 1864 "dropping frame.\n", 1863 1865 __func__); 1864 - self->stats.rx_dropped++; 1866 + self->netdev->stats.rx_dropped++; 1865 1867 1866 1868 /* Restore bank register */ 1867 1869 outb(bank, iobase+BSR); ··· 1887 1889 1888 1890 /* Move to next frame */ 1889 1891 
self->rx_buff.data += len; 1890 - self->stats.rx_bytes += len; 1891 - self->stats.rx_packets++; 1892 + self->netdev->stats.rx_bytes += len; 1893 + self->netdev->stats.rx_packets++; 1892 1894 1893 1895 skb->dev = self->netdev; 1894 1896 skb_reset_mac_header(skb); ··· 1918 1920 /* Receive all characters in Rx FIFO */ 1919 1921 do { 1920 1922 byte = inb(iobase+RXD); 1921 - async_unwrap_char(self->netdev, &self->stats, &self->rx_buff, 1922 - byte); 1923 + async_unwrap_char(self->netdev, &self->netdev->stats, 1924 + &self->rx_buff, byte); 1923 1925 } while (inb(iobase+LSR) & LSR_RXDA); /* Data available */ 1924 1926 } 1925 1927 ··· 1950 1952 self->ier = IER_TXLDL_IE; 1951 1953 else { 1952 1954 1953 - self->stats.tx_packets++; 1955 + self->netdev->stats.tx_packets++; 1954 1956 netif_wake_queue(self->netdev); 1955 1957 self->ier = IER_TXEMP_IE; 1956 1958 } ··· 2303 2305 ret = -EOPNOTSUPP; 2304 2306 } 2305 2307 return ret; 2306 - } 2307 - 2308 - static struct net_device_stats *nsc_ircc_net_get_stats(struct net_device *dev) 2309 - { 2310 - struct nsc_ircc_cb *self = netdev_priv(dev); 2311 - 2312 - return &self->stats; 2313 2308 } 2314 2309 2315 2310 static int nsc_ircc_suspend(struct platform_device *dev, pm_message_t state)
-1
drivers/net/irda/nsc-ircc.h
··· 251 251 struct tx_fifo tx_fifo; /* Info about frames to be transmitted */ 252 252 253 253 struct net_device *netdev; /* Yes! we are some kind of netdevice */ 254 - struct net_device_stats stats; 255 254 256 255 struct irlap_cb *irlap; /* The link layer we are binded to */ 257 256 struct qos_info qos; /* QoS capabilities for this device */
+22 -30
drivers/net/irda/pxaficp_ir.c
··· 108 108 int txdma; 109 109 int rxdma; 110 110 111 - struct net_device_stats stats; 112 111 struct irlap_cb *irlap; 113 112 struct qos_info qos; 114 113 ··· 257 258 data = STRBR; 258 259 if (lsr & (LSR_OE | LSR_PE | LSR_FE | LSR_BI)) { 259 260 printk(KERN_DEBUG "pxa_ir: sir receiving error\n"); 260 - si->stats.rx_errors++; 261 + dev->stats.rx_errors++; 261 262 if (lsr & LSR_FE) 262 - si->stats.rx_frame_errors++; 263 + dev->stats.rx_frame_errors++; 263 264 if (lsr & LSR_OE) 264 - si->stats.rx_fifo_errors++; 265 + dev->stats.rx_fifo_errors++; 265 266 } else { 266 - si->stats.rx_bytes++; 267 - async_unwrap_char(dev, &si->stats, &si->rx_buff, data); 267 + dev->stats.rx_bytes++; 268 + async_unwrap_char(dev, &dev->stats, 269 + &si->rx_buff, data); 268 270 } 269 271 lsr = STLSR; 270 272 } ··· 277 277 278 278 case 0x0C: /* Character Timeout Indication */ 279 279 do { 280 - si->stats.rx_bytes++; 281 - async_unwrap_char(dev, &si->stats, &si->rx_buff, STRBR); 280 + dev->stats.rx_bytes++; 281 + async_unwrap_char(dev, &dev->stats, &si->rx_buff, STRBR); 282 282 } while (STLSR & LSR_DR); 283 283 si->last_oscr = OSCR; 284 284 break; ··· 290 290 } 291 291 292 292 if (si->tx_buff.len == 0) { 293 - si->stats.tx_packets++; 294 - si->stats.tx_bytes += si->tx_buff.data - 295 - si->tx_buff.head; 293 + dev->stats.tx_packets++; 294 + dev->stats.tx_bytes += si->tx_buff.data - si->tx_buff.head; 296 295 297 296 /* We need to ensure that the transmitter has finished. 
*/ 298 297 while ((STLSR & LSR_TEMT) == 0) ··· 342 343 DCSR(channel) = dcsr & ~DCSR_RUN; 343 344 344 345 if (dcsr & DCSR_ENDINTR) { 345 - si->stats.tx_packets++; 346 - si->stats.tx_bytes += si->dma_tx_buff_len; 346 + dev->stats.tx_packets++; 347 + dev->stats.tx_bytes += si->dma_tx_buff_len; 347 348 } else { 348 - si->stats.tx_errors++; 349 + dev->stats.tx_errors++; 349 350 } 350 351 351 352 while (ICSR1 & ICSR1_TBY) ··· 391 392 data = ICDR; 392 393 393 394 if (stat & (ICSR1_CRE | ICSR1_ROR)) { 394 - si->stats.rx_errors++; 395 + dev->stats.rx_errors++; 395 396 if (stat & ICSR1_CRE) { 396 397 printk(KERN_DEBUG "pxa_ir: fir receive CRC error\n"); 397 - si->stats.rx_crc_errors++; 398 + dev->stats.rx_crc_errors++; 398 399 } 399 400 if (stat & ICSR1_ROR) { 400 401 printk(KERN_DEBUG "pxa_ir: fir receive overrun\n"); 401 - si->stats.rx_over_errors++; 402 + dev->stats.rx_over_errors++; 402 403 } 403 404 } else { 404 405 si->dma_rx_buff[len++] = data; ··· 414 415 415 416 if (icsr0 & ICSR0_FRE) { 416 417 printk(KERN_ERR "pxa_ir: dropping erroneous frame\n"); 417 - si->stats.rx_dropped++; 418 + dev->stats.rx_dropped++; 418 419 return; 419 420 } 420 421 421 422 skb = alloc_skb(len+1,GFP_ATOMIC); 422 423 if (!skb) { 423 424 printk(KERN_ERR "pxa_ir: fir out of memory for receive skb\n"); 424 - si->stats.rx_dropped++; 425 + dev->stats.rx_dropped++; 425 426 return; 426 427 } 427 428 ··· 436 437 skb->protocol = htons(ETH_P_IRDA); 437 438 netif_rx(skb); 438 439 439 - si->stats.rx_packets++; 440 - si->stats.rx_bytes += len; 440 + dev->stats.rx_packets++; 441 + dev->stats.rx_bytes += len; 441 442 } 442 443 } 443 444 ··· 456 457 if (icsr0 & (ICSR0_FRE | ICSR0_RAB)) { 457 458 if (icsr0 & ICSR0_FRE) { 458 459 printk(KERN_DEBUG "pxa_ir: fir receive frame error\n"); 459 - si->stats.rx_frame_errors++; 460 + dev->stats.rx_frame_errors++; 460 461 } else { 461 462 printk(KERN_DEBUG "pxa_ir: fir receive abort\n"); 462 - si->stats.rx_errors++; 463 + dev->stats.rx_errors++; 463 464 } 464 465 ICSR0 
= icsr0 & (ICSR0_FRE | ICSR0_RAB); 465 466 } ··· 586 587 } 587 588 588 589 return ret; 589 - } 590 - 591 - static struct net_device_stats *pxa_irda_stats(struct net_device *dev) 592 - { 593 - struct pxa_irda *si = netdev_priv(dev); 594 - return &si->stats; 595 590 } 596 591 597 592 static void pxa_irda_startup(struct pxa_irda *si) ··· 850 857 dev->open = pxa_irda_start; 851 858 dev->stop = pxa_irda_stop; 852 859 dev->do_ioctl = pxa_irda_ioctl; 853 - dev->get_stats = pxa_irda_stats; 854 860 855 861 irda_init_max_qos_capabilies(&si->qos); 856 862
+19 -27
drivers/net/irda/sa1100_ir.c
··· 60 60 dma_regs_t *txdma; 61 61 dma_regs_t *rxdma; 62 62 63 - struct net_device_stats stats; 64 63 struct device *dev; 65 64 struct irda_platform_data *pdata; 66 65 struct irlap_cb *irlap; ··· 374 375 data = Ser2UTDR; 375 376 376 377 if (stat & (UTSR1_FRE | UTSR1_ROR)) { 377 - si->stats.rx_errors++; 378 + dev->stats.rx_errors++; 378 379 if (stat & UTSR1_FRE) 379 - si->stats.rx_frame_errors++; 380 + dev->stats.rx_frame_errors++; 380 381 if (stat & UTSR1_ROR) 381 - si->stats.rx_fifo_errors++; 382 + dev->stats.rx_fifo_errors++; 382 383 } else 383 - async_unwrap_char(dev, &si->stats, &si->rx_buff, data); 384 + async_unwrap_char(dev, &dev->stats, &si->rx_buff, data); 384 385 385 386 status = Ser2UTSR0; 386 387 } ··· 395 396 * There are at least 4 bytes in the FIFO. Read 3 bytes 396 397 * and leave the rest to the block below. 397 398 */ 398 - async_unwrap_char(dev, &si->stats, &si->rx_buff, Ser2UTDR); 399 - async_unwrap_char(dev, &si->stats, &si->rx_buff, Ser2UTDR); 400 - async_unwrap_char(dev, &si->stats, &si->rx_buff, Ser2UTDR); 399 + async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR); 400 + async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR); 401 + async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR); 401 402 } 402 403 403 404 if (status & (UTSR0_RFS | UTSR0_RID)) { ··· 405 406 * Fifo contains more than 1 character. 
406 407 */ 407 408 do { 408 - async_unwrap_char(dev, &si->stats, &si->rx_buff, 409 + async_unwrap_char(dev, &dev->stats, &si->rx_buff, 409 410 Ser2UTDR); 410 411 } while (Ser2UTSR1 & UTSR1_RNE); 411 412 ··· 421 422 } while (Ser2UTSR1 & UTSR1_TNF && si->tx_buff.len); 422 423 423 424 if (si->tx_buff.len == 0) { 424 - si->stats.tx_packets++; 425 - si->stats.tx_bytes += si->tx_buff.data - 425 + dev->stats.tx_packets++; 426 + dev->stats.tx_bytes += si->tx_buff.data - 426 427 si->tx_buff.head; 427 428 428 429 /* ··· 481 482 data = Ser2HSDR; 482 483 483 484 if (stat & (HSSR1_CRE | HSSR1_ROR)) { 484 - si->stats.rx_errors++; 485 + dev->stats.rx_errors++; 485 486 if (stat & HSSR1_CRE) 486 - si->stats.rx_crc_errors++; 487 + dev->stats.rx_crc_errors++; 487 488 if (stat & HSSR1_ROR) 488 - si->stats.rx_frame_errors++; 489 + dev->stats.rx_frame_errors++; 489 490 } else 490 491 skb->data[len++] = data; 491 492 ··· 504 505 skb->dev = dev; 505 506 skb_reset_mac_header(skb); 506 507 skb->protocol = htons(ETH_P_IRDA); 507 - si->stats.rx_packets++; 508 - si->stats.rx_bytes += len; 508 + dev->stats.rx_packets++; 509 + dev->stats.rx_bytes += len; 509 510 510 511 /* 511 512 * Before we pass the buffer up, allocate a new one. ··· 544 545 * from the fifo. 545 546 */ 546 547 if (Ser2HSSR0 & (HSSR0_FRE | HSSR0_RAB)) { 547 - si->stats.rx_errors++; 548 + dev->stats.rx_errors++; 548 549 549 550 if (Ser2HSSR0 & HSSR0_FRE) 550 - si->stats.rx_frame_errors++; 551 + dev->stats.rx_frame_errors++; 551 552 552 553 /* 553 554 * Clear out the DMA... 
··· 632 633 */ 633 634 if (skb) { 634 635 dma_unmap_single(si->dev, si->txbuf_dma, skb->len, DMA_TO_DEVICE); 635 - si->stats.tx_packets ++; 636 - si->stats.tx_bytes += skb->len; 636 + dev->stats.tx_packets ++; 637 + dev->stats.tx_bytes += skb->len; 637 638 dev_kfree_skb_irq(skb); 638 639 } 639 640 ··· 759 760 } 760 761 761 762 return ret; 762 - } 763 - 764 - static struct net_device_stats *sa1100_irda_stats(struct net_device *dev) 765 - { 766 - struct sa1100_irda *si = netdev_priv(dev); 767 - return &si->stats; 768 763 } 769 764 770 765 static int sa1100_irda_start(struct net_device *dev) ··· 917 924 dev->open = sa1100_irda_start; 918 925 dev->stop = sa1100_irda_stop; 919 926 dev->do_ioctl = sa1100_irda_ioctl; 920 - dev->get_stats = sa1100_irda_stats; 921 927 dev->irq = IRQ_Ser2ICP; 922 928 923 929 irda_init_max_qos_capabilies(&si->qos);
-1
drivers/net/irda/sir-dev.h
··· 160 160 161 161 struct sir_dev { 162 162 struct net_device *netdev; 163 - struct net_device_stats stats; 164 163 165 164 struct irlap_cb *irlap; 166 165
+9 -17
drivers/net/irda/sir_dev.c
··· 455 455 if ((skb=dev->tx_skb) != NULL) { 456 456 dev->tx_skb = NULL; 457 457 dev_kfree_skb_any(skb); 458 - dev->stats.tx_errors++; 459 - dev->stats.tx_dropped++; 458 + dev->netdev->stats.tx_errors++; 459 + dev->netdev->stats.tx_dropped++; 460 460 } 461 461 dev->tx_buff.len = 0; 462 462 } ··· 493 493 494 494 if ((skb=dev->tx_skb) != NULL) { 495 495 dev->tx_skb = NULL; 496 - dev->stats.tx_packets++; 497 - dev->stats.tx_bytes += skb->len; 496 + dev->netdev->stats.tx_packets++; 497 + dev->netdev->stats.tx_bytes += skb->len; 498 498 dev_kfree_skb_any(skb); 499 499 } 500 500 ··· 548 548 * just update stats and set media busy 549 549 */ 550 550 irda_device_set_media_busy(dev->netdev, TRUE); 551 - dev->stats.rx_dropped++; 551 + dev->netdev->stats.rx_dropped++; 552 552 IRDA_DEBUG(0, "%s; rx-drop: %zd\n", __func__, count); 553 553 return 0; 554 554 } ··· 557 557 if (likely(atomic_read(&dev->enable_rx))) { 558 558 while (count--) 559 559 /* Unwrap and destuff one byte */ 560 - async_unwrap_char(dev->netdev, &dev->stats, 560 + async_unwrap_char(dev->netdev, &dev->netdev->stats, 561 561 &dev->rx_buff, *cp++); 562 562 } else { 563 563 while (count--) { ··· 581 581 /**********************************************************************/ 582 582 583 583 /* callbacks from network layer */ 584 - 585 - static struct net_device_stats *sirdev_get_stats(struct net_device *ndev) 586 - { 587 - struct sir_dev *dev = netdev_priv(ndev); 588 - 589 - return (dev) ? 
&dev->stats : NULL; 590 - } 591 584 592 585 static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev) 593 586 { ··· 647 654 */ 648 655 atomic_set(&dev->enable_rx, 0); 649 656 if (unlikely(sirdev_is_receiving(dev))) 650 - dev->stats.collisions++; 657 + dev->netdev->stats.collisions++; 651 658 652 659 actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len); 653 660 ··· 662 669 IRDA_ERROR("%s: drv->do_write failed (%d)\n", 663 670 __func__, actual); 664 671 dev_kfree_skb_any(skb); 665 - dev->stats.tx_errors++; 666 - dev->stats.tx_dropped++; 672 + dev->netdev->stats.tx_errors++; 673 + dev->netdev->stats.tx_dropped++; 667 674 netif_wake_queue(ndev); 668 675 } 669 676 spin_unlock_irqrestore(&dev->tx_lock, flags); ··· 911 918 ndev->hard_start_xmit = sirdev_hard_xmit; 912 919 ndev->open = sirdev_open; 913 920 ndev->stop = sirdev_close; 914 - ndev->get_stats = sirdev_get_stats; 915 921 ndev->do_ioctl = sirdev_ioctl; 916 922 917 923 if (register_netdev(ndev)) {
+14 -24
drivers/net/irda/smsc-ircc2.c
··· 150 150 /* Private data for each instance */ 151 151 struct smsc_ircc_cb { 152 152 struct net_device *netdev; /* Yes! we are some kind of netdevice */ 153 - struct net_device_stats stats; 154 153 struct irlap_cb *irlap; /* The link layer we are binded to */ 155 154 156 155 chipio_t io; /* IrDA controller information */ ··· 214 215 #if SMSC_IRCC2_C_NET_TIMEOUT 215 216 static void smsc_ircc_timeout(struct net_device *dev); 216 217 #endif 217 - static struct net_device_stats *smsc_ircc_net_get_stats(struct net_device *dev); 218 218 static int smsc_ircc_is_receiving(struct smsc_ircc_cb *self); 219 219 static void smsc_ircc_probe_transceiver(struct smsc_ircc_cb *self); 220 220 static void smsc_ircc_set_transceiver_for_speed(struct smsc_ircc_cb *self, u32 speed); ··· 527 529 dev->open = smsc_ircc_net_open; 528 530 dev->stop = smsc_ircc_net_close; 529 531 dev->do_ioctl = smsc_ircc_net_ioctl; 530 - dev->get_stats = smsc_ircc_net_get_stats; 531 532 532 533 self = netdev_priv(dev); 533 534 self->netdev = dev; ··· 831 834 return ret; 832 835 } 833 836 834 - static struct net_device_stats *smsc_ircc_net_get_stats(struct net_device *dev) 835 - { 836 - struct smsc_ircc_cb *self = netdev_priv(dev); 837 - 838 - return &self->stats; 839 - } 840 - 841 837 #if SMSC_IRCC2_C_NET_TIMEOUT 842 838 /* 843 839 * Function smsc_ircc_timeout (struct net_device *dev) ··· 910 920 self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data, 911 921 self->tx_buff.truesize); 912 922 913 - self->stats.tx_bytes += self->tx_buff.len; 923 + dev->stats.tx_bytes += self->tx_buff.len; 914 924 915 925 /* Turn on transmit finished interrupt. Will fire immediately! */ 916 926 outb(UART_IER_THRI, self->io.sir_base + UART_IER); ··· 1310 1320 /* Check for underrun! 
*/ 1311 1321 register_bank(iobase, 0); 1312 1322 if (inb(iobase + IRCC_LSR) & IRCC_LSR_UNDERRUN) { 1313 - self->stats.tx_errors++; 1314 - self->stats.tx_fifo_errors++; 1323 + self->netdev->stats.tx_errors++; 1324 + self->netdev->stats.tx_fifo_errors++; 1315 1325 1316 1326 /* Reset error condition */ 1317 1327 register_bank(iobase, 0); 1318 1328 outb(IRCC_MASTER_ERROR_RESET, iobase + IRCC_MASTER); 1319 1329 outb(0x00, iobase + IRCC_MASTER); 1320 1330 } else { 1321 - self->stats.tx_packets++; 1322 - self->stats.tx_bytes += self->tx_buff.len; 1331 + self->netdev->stats.tx_packets++; 1332 + self->netdev->stats.tx_bytes += self->tx_buff.len; 1323 1333 } 1324 1334 1325 1335 /* Check if it's time to change the speed */ ··· 1419 1429 1420 1430 /* Look for errors */ 1421 1431 if (lsr & (IRCC_LSR_FRAME_ERROR | IRCC_LSR_CRC_ERROR | IRCC_LSR_SIZE_ERROR)) { 1422 - self->stats.rx_errors++; 1432 + self->netdev->stats.rx_errors++; 1423 1433 if (lsr & IRCC_LSR_FRAME_ERROR) 1424 - self->stats.rx_frame_errors++; 1434 + self->netdev->stats.rx_frame_errors++; 1425 1435 if (lsr & IRCC_LSR_CRC_ERROR) 1426 - self->stats.rx_crc_errors++; 1436 + self->netdev->stats.rx_crc_errors++; 1427 1437 if (lsr & IRCC_LSR_SIZE_ERROR) 1428 - self->stats.rx_length_errors++; 1438 + self->netdev->stats.rx_length_errors++; 1429 1439 if (lsr & (IRCC_LSR_UNDERRUN | IRCC_LSR_OVERRUN)) 1430 - self->stats.rx_length_errors++; 1440 + self->netdev->stats.rx_length_errors++; 1431 1441 return; 1432 1442 } 1433 1443 ··· 1450 1460 skb_reserve(skb, 1); 1451 1461 1452 1462 memcpy(skb_put(skb, len), self->rx_buff.data, len); 1453 - self->stats.rx_packets++; 1454 - self->stats.rx_bytes += len; 1463 + self->netdev->stats.rx_packets++; 1464 + self->netdev->stats.rx_bytes += len; 1455 1465 1456 1466 skb->dev = self->netdev; 1457 1467 skb_reset_mac_header(skb); ··· 1479 1489 * async_unwrap_char will deliver all found frames 1480 1490 */ 1481 1491 do { 1482 - async_unwrap_char(self->netdev, &self->stats, &self->rx_buff, 1492 + 
async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff, 1483 1493 inb(iobase + UART_RX)); 1484 1494 1485 1495 /* Make sure we don't stay here to long */ ··· 1982 1992 /* Tell network layer that we want more frames */ 1983 1993 netif_wake_queue(self->netdev); 1984 1994 } 1985 - self->stats.tx_packets++; 1995 + self->netdev->stats.tx_packets++; 1986 1996 1987 1997 if (self->io.speed <= 115200) { 1988 1998 /*
+17 -27
drivers/net/irda/stir4200.c
··· 164 164 struct usb_device *usbdev; /* init: probe_irda */ 165 165 struct net_device *netdev; /* network layer */ 166 166 struct irlap_cb *irlap; /* The link layer we are binded to */ 167 - struct net_device_stats stats; /* network statistics */ 167 + 168 168 struct qos_info qos; 169 169 unsigned speed; /* Current speed */ 170 170 ··· 323 323 pr_debug("%s: short frame len %d\n", 324 324 stir->netdev->name, len); 325 325 326 - ++stir->stats.rx_errors; 327 - ++stir->stats.rx_length_errors; 326 + ++stir->netdev->stats.rx_errors; 327 + ++stir->netdev->stats.rx_length_errors; 328 328 return; 329 329 } 330 330 331 331 fcs = ~(crc32_le(~0, rx_buff->data, len)); 332 332 if (fcs != get_unaligned_le32(rx_buff->data + len)) { 333 333 pr_debug("crc error calc 0x%x len %d\n", fcs, len); 334 - stir->stats.rx_errors++; 335 - stir->stats.rx_crc_errors++; 334 + stir->netdev->stats.rx_errors++; 335 + stir->netdev->stats.rx_crc_errors++; 336 336 return; 337 337 } 338 338 ··· 340 340 if (len < IRDA_RX_COPY_THRESHOLD) { 341 341 nskb = dev_alloc_skb(len + 1); 342 342 if (unlikely(!nskb)) { 343 - ++stir->stats.rx_dropped; 343 + ++stir->netdev->stats.rx_dropped; 344 344 return; 345 345 } 346 346 skb_reserve(nskb, 1); ··· 349 349 } else { 350 350 nskb = dev_alloc_skb(rx_buff->truesize); 351 351 if (unlikely(!nskb)) { 352 - ++stir->stats.rx_dropped; 352 + ++stir->netdev->stats.rx_dropped; 353 353 return; 354 354 } 355 355 skb_reserve(nskb, 1); ··· 366 366 367 367 netif_rx(skb); 368 368 369 - stir->stats.rx_packets++; 370 - stir->stats.rx_bytes += len; 369 + stir->netdev->stats.rx_packets++; 370 + stir->netdev->stats.rx_bytes += len; 371 371 372 372 rx_buff->data = rx_buff->head; 373 373 rx_buff->len = 0; ··· 437 437 if (unlikely(rx_buff->len >= rx_buff->truesize)) { 438 438 pr_debug("%s: fir frame exceeds %d\n", 439 439 stir->netdev->name, rx_buff->truesize); 440 - ++stir->stats.rx_over_errors; 440 + ++stir->netdev->stats.rx_over_errors; 441 441 goto error_recovery; 442 442 } 443 443 ··· 
445 445 continue; 446 446 447 447 frame_error: 448 - ++stir->stats.rx_frame_errors; 448 + ++stir->netdev->stats.rx_frame_errors; 449 449 450 450 error_recovery: 451 - ++stir->stats.rx_errors; 451 + ++stir->netdev->stats.rx_errors; 452 452 rx_buff->state = OUTSIDE_FRAME; 453 453 rx_buff->in_frame = FALSE; 454 454 } ··· 461 461 int i; 462 462 463 463 for (i = 0; i < len; i++) 464 - async_unwrap_char(stir->netdev, &stir->stats, 464 + async_unwrap_char(stir->netdev, &stir->netdev->stats, 465 465 &stir->rx_buff, bytes[i]); 466 466 } 467 467 ··· 692 692 usb_kill_urb(stir->rx_urb); 693 693 694 694 if (stir->rx_buff.in_frame) 695 - stir->stats.collisions++; 695 + stir->netdev->stats.collisions++; 696 696 } 697 697 /* 698 698 * Wrap data in socket buffer and send it. ··· 718 718 if (!first_frame) 719 719 fifo_txwait(stir, wraplen); 720 720 721 - stir->stats.tx_packets++; 722 - stir->stats.tx_bytes += skb->len; 721 + stir->netdev->stats.tx_packets++; 722 + stir->netdev->stats.tx_bytes += skb->len; 723 723 stir->netdev->trans_start = jiffies; 724 724 pr_debug("send %d (%d)\n", skb->len, wraplen); 725 725 726 726 if (usb_bulk_msg(stir->usbdev, usb_sndbulkpipe(stir->usbdev, 1), 727 727 stir->io_buf, wraplen, 728 728 NULL, TRANSMIT_TIMEOUT)) 729 - stir->stats.tx_errors++; 729 + stir->netdev->stats.tx_errors++; 730 730 } 731 731 732 732 /* ··· 1008 1008 } 1009 1009 1010 1010 /* 1011 - * Get device stats (for /proc/net/dev and ifconfig) 1012 - */ 1013 - static struct net_device_stats *stir_net_get_stats(struct net_device *netdev) 1014 - { 1015 - struct stir_cb *stir = netdev_priv(netdev); 1016 - return &stir->stats; 1017 - } 1018 - 1019 - /* 1020 1011 * This routine is called by the USB subsystem for each new device 1021 1012 * in the system. We need to check if the device is ours, and in 1022 1013 * this case start handling it. 
··· 1057 1066 net->hard_start_xmit = stir_hard_xmit; 1058 1067 net->open = stir_net_open; 1059 1068 net->stop = stir_net_close; 1060 - net->get_stats = stir_net_get_stats; 1061 1069 net->do_ioctl = stir_net_ioctl; 1062 1070 1063 1071 ret = register_netdev(net);
+18 -29
drivers/net/irda/via-ircc.c
··· 101 101 static int via_ircc_net_close(struct net_device *dev); 102 102 static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, 103 103 int cmd); 104 - static struct net_device_stats *via_ircc_net_get_stats(struct net_device 105 - *dev); 106 104 static void via_ircc_change_dongle_speed(int iobase, int speed, 107 105 int dongle_id); 108 106 static int RxTimerHandler(struct via_ircc_cb *self, int iobase); ··· 432 434 dev->open = via_ircc_net_open; 433 435 dev->stop = via_ircc_net_close; 434 436 dev->do_ioctl = via_ircc_net_ioctl; 435 - dev->get_stats = via_ircc_net_get_stats; 436 437 437 438 err = register_netdev(dev); 438 439 if (err) ··· 852 855 async_wrap_skb(skb, self->tx_buff.data, 853 856 self->tx_buff.truesize); 854 857 855 - self->stats.tx_bytes += self->tx_buff.len; 858 + dev->stats.tx_bytes += self->tx_buff.len; 856 859 /* Send this frame with old speed */ 857 860 SetBaudRate(iobase, self->io.speed); 858 861 SetPulseWidth(iobase, 12); ··· 918 921 self->tx_fifo.queue[self->tx_fifo.free].len = skb->len; 919 922 920 923 self->tx_fifo.tail += skb->len; 921 - self->stats.tx_bytes += skb->len; 924 + dev->stats.tx_bytes += skb->len; 922 925 skb_copy_from_linear_data(skb, 923 926 self->tx_fifo.queue[self->tx_fifo.free].start, skb->len); 924 927 self->tx_fifo.len++; ··· 987 990 /* Clear bit, by writing 1 into it */ 988 991 Tx_status = GetTXStatus(iobase); 989 992 if (Tx_status & 0x08) { 990 - self->stats.tx_errors++; 991 - self->stats.tx_fifo_errors++; 993 + self->netdev->stats.tx_errors++; 994 + self->netdev->stats.tx_fifo_errors++; 992 995 hwreset(self); 993 996 // how to clear underrrun ? 
994 997 } else { 995 - self->stats.tx_packets++; 998 + self->netdev->stats.tx_packets++; 996 999 ResetChip(iobase, 3); 997 1000 ResetChip(iobase, 4); 998 1001 } ··· 1116 1119 } 1117 1120 // Move to next frame 1118 1121 self->rx_buff.data += len; 1119 - self->stats.rx_bytes += len; 1120 - self->stats.rx_packets++; 1122 + self->netdev->stats.rx_bytes += len; 1123 + self->netdev->stats.rx_packets++; 1121 1124 skb->dev = self->netdev; 1122 1125 skb_reset_mac_header(skb); 1123 1126 skb->protocol = htons(ETH_P_IRDA); ··· 1177 1180 */ 1178 1181 if ((skb == NULL) || (skb->data == NULL) 1179 1182 || (self->rx_buff.data == NULL) || (len < 6)) { 1180 - self->stats.rx_dropped++; 1183 + self->netdev->stats.rx_dropped++; 1181 1184 return TRUE; 1182 1185 } 1183 1186 skb_reserve(skb, 1); ··· 1189 1192 1190 1193 // Move to next frame 1191 1194 self->rx_buff.data += len; 1192 - self->stats.rx_bytes += len; 1193 - self->stats.rx_packets++; 1195 + self->netdev->stats.rx_bytes += len; 1196 + self->netdev->stats.rx_packets++; 1194 1197 skb->dev = self->netdev; 1195 1198 skb_reset_mac_header(skb); 1196 1199 skb->protocol = htons(ETH_P_IRDA); ··· 1217 1220 IRDA_DEBUG(2, "%s(): len=%x\n", __func__, len); 1218 1221 1219 1222 if ((len - 4) < 2) { 1220 - self->stats.rx_dropped++; 1223 + self->netdev->stats.rx_dropped++; 1221 1224 return FALSE; 1222 1225 } 1223 1226 1224 1227 skb = dev_alloc_skb(len + 1); 1225 1228 if (skb == NULL) { 1226 - self->stats.rx_dropped++; 1229 + self->netdev->stats.rx_dropped++; 1227 1230 return FALSE; 1228 1231 } 1229 1232 skb_reserve(skb, 1); ··· 1235 1238 st_fifo->tail = 0; 1236 1239 // Move to next frame 1237 1240 self->rx_buff.data += len; 1238 - self->stats.rx_bytes += len; 1239 - self->stats.rx_packets++; 1241 + self->netdev->stats.rx_bytes += len; 1242 + self->netdev->stats.rx_packets++; 1240 1243 skb->dev = self->netdev; 1241 1244 skb_reset_mac_header(skb); 1242 1245 skb->protocol = htons(ETH_P_IRDA); ··· 1292 1295 */ 1293 1296 if ((skb == NULL) || 
(skb->data == NULL) 1294 1297 || (self->rx_buff.data == NULL) || (len < 6)) { 1295 - self->stats.rx_dropped++; 1298 + self->netdev->stats.rx_dropped++; 1296 1299 continue; 1297 1300 } 1298 1301 skb_reserve(skb, 1); ··· 1304 1307 1305 1308 // Move to next frame 1306 1309 self->rx_buff.data += len; 1307 - self->stats.rx_bytes += len; 1308 - self->stats.rx_packets++; 1310 + self->netdev->stats.rx_bytes += len; 1311 + self->netdev->stats.rx_packets++; 1309 1312 skb->dev = self->netdev; 1310 1313 skb_reset_mac_header(skb); 1311 1314 skb->protocol = htons(ETH_P_IRDA); ··· 1520 1523 1521 1524 IRDA_ASSERT(dev != NULL, return -1;); 1522 1525 self = netdev_priv(dev); 1523 - self->stats.rx_packets = 0; 1526 + dev->stats.rx_packets = 0; 1524 1527 IRDA_ASSERT(self != NULL, return 0;); 1525 1528 iobase = self->io.fir_base; 1526 1529 if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) { ··· 1655 1658 out: 1656 1659 spin_unlock_irqrestore(&self->lock, flags); 1657 1660 return ret; 1658 - } 1659 - 1660 - static struct net_device_stats *via_ircc_net_get_stats(struct net_device 1661 - *dev) 1662 - { 1663 - struct via_ircc_cb *self = netdev_priv(dev); 1664 - 1665 - return &self->stats; 1666 1661 } 1667 1662 1668 1663 MODULE_AUTHOR("VIA Technologies,inc");
-1
drivers/net/irda/via-ircc.h
··· 95 95 struct tx_fifo tx_fifo; /* Info about frames to be transmitted */ 96 96 97 97 struct net_device *netdev; /* Yes! we are some kind of netdevice */ 98 - struct net_device_stats stats; 99 98 100 99 struct irlap_cb *irlap; /* The link layer we are binded to */ 101 100 struct qos_info qos; /* QoS capabilities for this device */
+36 -42
drivers/net/irda/vlsi_ir.c
··· 291 291 now.tv_sec - idev->last_rx.tv_sec - delta1, delta2); 292 292 293 293 seq_printf(seq, "RX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu", 294 - idev->stats.rx_packets, idev->stats.rx_bytes, idev->stats.rx_errors, 295 - idev->stats.rx_dropped); 294 + ndev->stats.rx_packets, ndev->stats.rx_bytes, ndev->stats.rx_errors, 295 + ndev->stats.rx_dropped); 296 296 seq_printf(seq, " / overrun=%lu / length=%lu / frame=%lu / crc=%lu\n", 297 - idev->stats.rx_over_errors, idev->stats.rx_length_errors, 298 - idev->stats.rx_frame_errors, idev->stats.rx_crc_errors); 297 + ndev->stats.rx_over_errors, ndev->stats.rx_length_errors, 298 + ndev->stats.rx_frame_errors, ndev->stats.rx_crc_errors); 299 299 seq_printf(seq, "TX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu / fifo=%lu\n", 300 - idev->stats.tx_packets, idev->stats.tx_bytes, idev->stats.tx_errors, 301 - idev->stats.tx_dropped, idev->stats.tx_fifo_errors); 300 + ndev->stats.tx_packets, ndev->stats.tx_bytes, ndev->stats.tx_errors, 301 + ndev->stats.tx_dropped, ndev->stats.tx_fifo_errors); 302 302 303 303 } 304 304 ··· 651 651 652 652 if (ret < 0) { 653 653 ret = -ret; 654 - idev->stats.rx_errors++; 654 + ndev->stats.rx_errors++; 655 655 if (ret & VLSI_RX_DROP) 656 - idev->stats.rx_dropped++; 656 + ndev->stats.rx_dropped++; 657 657 if (ret & VLSI_RX_OVER) 658 - idev->stats.rx_over_errors++; 658 + ndev->stats.rx_over_errors++; 659 659 if (ret & VLSI_RX_LENGTH) 660 - idev->stats.rx_length_errors++; 660 + ndev->stats.rx_length_errors++; 661 661 if (ret & VLSI_RX_FRAME) 662 - idev->stats.rx_frame_errors++; 662 + ndev->stats.rx_frame_errors++; 663 663 if (ret & VLSI_RX_CRC) 664 - idev->stats.rx_crc_errors++; 664 + ndev->stats.rx_crc_errors++; 665 665 } 666 666 else if (ret > 0) { 667 - idev->stats.rx_packets++; 668 - idev->stats.rx_bytes += ret; 667 + ndev->stats.rx_packets++; 668 + ndev->stats.rx_bytes += ret; 669 669 } 670 670 } 671 671 ··· 686 686 687 687 static void vlsi_unarm_rx(vlsi_irda_dev_t *idev) 688 
688 { 689 + struct net_device *ndev = pci_get_drvdata(idev->pdev); 689 690 struct vlsi_ring *r = idev->rx_ring; 690 691 struct ring_descr *rd; 691 692 int ret; ··· 712 711 713 712 if (ret < 0) { 714 713 ret = -ret; 715 - idev->stats.rx_errors++; 714 + ndev->stats.rx_errors++; 716 715 if (ret & VLSI_RX_DROP) 717 - idev->stats.rx_dropped++; 716 + ndev->stats.rx_dropped++; 718 717 if (ret & VLSI_RX_OVER) 719 - idev->stats.rx_over_errors++; 718 + ndev->stats.rx_over_errors++; 720 719 if (ret & VLSI_RX_LENGTH) 721 - idev->stats.rx_length_errors++; 720 + ndev->stats.rx_length_errors++; 722 721 if (ret & VLSI_RX_FRAME) 723 - idev->stats.rx_frame_errors++; 722 + ndev->stats.rx_frame_errors++; 724 723 if (ret & VLSI_RX_CRC) 725 - idev->stats.rx_crc_errors++; 724 + ndev->stats.rx_crc_errors++; 726 725 } 727 726 else if (ret > 0) { 728 - idev->stats.rx_packets++; 729 - idev->stats.rx_bytes += ret; 727 + ndev->stats.rx_packets++; 728 + ndev->stats.rx_bytes += ret; 730 729 } 731 730 } 732 731 } ··· 1051 1050 drop: 1052 1051 IRDA_WARNING("%s: dropping packet - %s\n", __func__, msg); 1053 1052 dev_kfree_skb_any(skb); 1054 - idev->stats.tx_errors++; 1055 - idev->stats.tx_dropped++; 1053 + ndev->stats.tx_errors++; 1054 + ndev->stats.tx_dropped++; 1056 1055 /* Don't even think about returning NET_XMIT_DROP (=1) here! 
1057 1056 * In fact any retval!=0 causes the packet scheduler to requeue the 1058 1057 * packet for later retry of transmission - which isn't exactly ··· 1079 1078 1080 1079 if (ret < 0) { 1081 1080 ret = -ret; 1082 - idev->stats.tx_errors++; 1081 + ndev->stats.tx_errors++; 1083 1082 if (ret & VLSI_TX_DROP) 1084 - idev->stats.tx_dropped++; 1083 + ndev->stats.tx_dropped++; 1085 1084 if (ret & VLSI_TX_FIFO) 1086 - idev->stats.tx_fifo_errors++; 1085 + ndev->stats.tx_fifo_errors++; 1087 1086 } 1088 1087 else if (ret > 0){ 1089 - idev->stats.tx_packets++; 1090 - idev->stats.tx_bytes += ret; 1088 + ndev->stats.tx_packets++; 1089 + ndev->stats.tx_bytes += ret; 1091 1090 } 1092 1091 } 1093 1092 ··· 1123 1122 1124 1123 static void vlsi_unarm_tx(vlsi_irda_dev_t *idev) 1125 1124 { 1125 + struct net_device *ndev = pci_get_drvdata(idev->pdev); 1126 1126 struct vlsi_ring *r = idev->tx_ring; 1127 1127 struct ring_descr *rd; 1128 1128 int ret; ··· 1147 1145 1148 1146 if (ret < 0) { 1149 1147 ret = -ret; 1150 - idev->stats.tx_errors++; 1148 + ndev->stats.tx_errors++; 1151 1149 if (ret & VLSI_TX_DROP) 1152 - idev->stats.tx_dropped++; 1150 + ndev->stats.tx_dropped++; 1153 1151 if (ret & VLSI_TX_FIFO) 1154 - idev->stats.tx_fifo_errors++; 1152 + ndev->stats.tx_fifo_errors++; 1155 1153 } 1156 1154 else if (ret > 0){ 1157 - idev->stats.tx_packets++; 1158 - idev->stats.tx_bytes += ret; 1155 + ndev->stats.tx_packets++; 1156 + ndev->stats.tx_bytes += ret; 1159 1157 } 1160 1158 } 1161 1159 ··· 1374 1372 } 1375 1373 1376 1374 /**************************************************************/ 1377 - 1378 - static struct net_device_stats * vlsi_get_stats(struct net_device *ndev) 1379 - { 1380 - vlsi_irda_dev_t *idev = netdev_priv(ndev); 1381 - 1382 - return &idev->stats; 1383 - } 1384 1375 1385 1376 static void vlsi_tx_timeout(struct net_device *ndev) 1386 1377 { ··· 1610 1615 1611 1616 ndev->open = vlsi_open; 1612 1617 ndev->stop = vlsi_close; 1613 - ndev->get_stats = vlsi_get_stats; 1614 1618 
ndev->hard_start_xmit = vlsi_hard_start_xmit; 1615 1619 ndev->do_ioctl = vlsi_ioctl; 1616 1620 ndev->tx_timeout = vlsi_tx_timeout;
-1
drivers/net/irda/vlsi_ir.h
··· 712 712 713 713 typedef struct vlsi_irda_dev { 714 714 struct pci_dev *pdev; 715 - struct net_device_stats stats; 716 715 717 716 struct irlap_cb *irlap; 718 717
+13 -22
drivers/net/irda/w83977af_ir.c
··· 102 102 static int w83977af_net_open(struct net_device *dev); 103 103 static int w83977af_net_close(struct net_device *dev); 104 104 static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 105 - static struct net_device_stats *w83977af_net_get_stats(struct net_device *dev); 106 105 107 106 /* 108 107 * Function w83977af_init () ··· 236 237 dev->open = w83977af_net_open; 237 238 dev->stop = w83977af_net_close; 238 239 dev->do_ioctl = w83977af_net_ioctl; 239 - dev->get_stats = w83977af_net_get_stats; 240 240 241 241 err = register_netdev(dev); 242 242 if (err) { ··· 700 702 if (inb(iobase+AUDR) & AUDR_UNDR) { 701 703 IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __func__ ); 702 704 703 - self->stats.tx_errors++; 704 - self->stats.tx_fifo_errors++; 705 + self->netdev->stats.tx_errors++; 706 + self->netdev->stats.tx_fifo_errors++; 705 707 706 708 /* Clear bit, by writing 1 to it */ 707 709 outb(AUDR_UNDR, iobase+AUDR); 708 710 } else 709 - self->stats.tx_packets++; 711 + self->netdev->stats.tx_packets++; 710 712 711 713 712 714 if (self->new_speed) { ··· 844 846 if (status & FS_FO_ERR_MSK) { 845 847 if (status & FS_FO_LST_FR) { 846 848 /* Add number of lost frames to stats */ 847 - self->stats.rx_errors += len; 849 + self->netdev->stats.rx_errors += len; 848 850 } else { 849 851 /* Skip frame */ 850 - self->stats.rx_errors++; 852 + self->netdev->stats.rx_errors++; 851 853 852 854 self->rx_buff.data += len; 853 855 854 856 if (status & FS_FO_MX_LEX) 855 - self->stats.rx_length_errors++; 857 + self->netdev->stats.rx_length_errors++; 856 858 857 859 if (status & FS_FO_PHY_ERR) 858 - self->stats.rx_frame_errors++; 860 + self->netdev->stats.rx_frame_errors++; 859 861 860 862 if (status & FS_FO_CRC_ERR) 861 - self->stats.rx_crc_errors++; 863 + self->netdev->stats.rx_crc_errors++; 862 864 } 863 865 /* The errors below can be reported in both cases */ 864 866 if (status & FS_FO_RX_OV) 865 - self->stats.rx_fifo_errors++; 867 + 
self->netdev->stats.rx_fifo_errors++; 866 868 867 869 if (status & FS_FO_FSF_OV) 868 - self->stats.rx_fifo_errors++; 870 + self->netdev->stats.rx_fifo_errors++; 869 871 870 872 } else { 871 873 /* Check if we have transferred all data to memory */ ··· 915 917 916 918 /* Move to next frame */ 917 919 self->rx_buff.data += len; 918 - self->stats.rx_packets++; 920 + self->netdev->stats.rx_packets++; 919 921 920 922 skb->dev = self->netdev; 921 923 skb_reset_mac_header(skb); ··· 949 951 /* Receive all characters in Rx FIFO */ 950 952 do { 951 953 byte = inb(iobase+RBR); 952 - async_unwrap_char(self->netdev, &self->stats, &self->rx_buff, 954 + async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff, 953 955 byte); 954 956 } while (inb(iobase+USR) & USR_RDR); /* Data available */ 955 957 } ··· 992 994 outb(AUDR_SFEND, iobase+AUDR); 993 995 outb(set, iobase+SSR); 994 996 995 - self->stats.tx_packets++; 997 + self->netdev->stats.tx_packets++; 996 998 997 999 /* Feed me more packets */ 998 1000 netif_wake_queue(self->netdev); ··· 1332 1334 out: 1333 1335 spin_unlock_irqrestore(&self->lock, flags); 1334 1336 return ret; 1335 - } 1336 - 1337 - static struct net_device_stats *w83977af_net_get_stats(struct net_device *dev) 1338 - { 1339 - struct w83977af_ir *self = netdev_priv(dev); 1340 - 1341 - return &self->stats; 1342 1337 } 1343 1338 1344 1339 MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
-1
drivers/net/irda/w83977af_ir.h
··· 172 172 int tx_len; /* Number of frames in tx_buff */ 173 173 174 174 struct net_device *netdev; /* Yes! we are some kind of netdevice */ 175 - struct net_device_stats stats; 176 175 177 176 struct irlap_cb *irlap; /* The link layer we are binded to */ 178 177 struct qos_info qos; /* QoS capabilities for this device */