Merge branch 'upstream-davem' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

+2343 -3218
+12 -26
Documentation/DocBook/z8530book.tmpl
··· 69 device to be used as both a tty interface and as a synchronous 70 controller is a project for Linux post the 2.4 release 71 </para> 72 - <para> 73 - The support code handles most common card configurations and 74 - supports running both Cisco HDLC and Synchronous PPP. With extra 75 - glue the frame relay and X.25 protocols can also be used with this 76 - driver. 77 - </para> 78 </chapter> 79 80 <chapter id="Driver_Modes"> ··· 173 <para> 174 If you wish to use the network interface facilities of the driver, 175 then you need to attach a network device to each channel that is 176 - present and in use. In addition to use the SyncPPP and Cisco HDLC 177 you need to follow some additional plumbing rules. They may seem 178 complex but a look at the example hostess_sv11 driver should 179 reassure you. 180 </para> 181 <para> 182 The network device used for each channel should be pointed to by 183 - the netdevice field of each channel. The dev-&gt; priv field of the 184 network device points to your private data - you will need to be 185 - able to find your ppp device from this. In addition to use the 186 - sync ppp layer the private data must start with a void * pointer 187 - to the syncppp structures. 188 </para> 189 <para> 190 The way most drivers approach this particular problem is to 191 create a structure holding the Z8530 device definition and 192 - put that and the syncppp pointer into the private field of 193 - the network device. The network device fields of the channels 194 - then point back to the network devices. The ppp_device can also 195 - be put in the private structure conveniently. 196 </para> 197 <para> 198 - If you wish to use the synchronous ppp then you need to attach 199 - the syncppp layer to the network device. You should do this before 200 - you register the network device. The 201 - <function>sppp_attach</function> requires that the first void * 202 - pointer in your private data is pointing to an empty struct 203 - ppp_device. The function fills in the initial data for the 204 - ppp/hdlc layer. 205 </para> 206 <para> 207 Before you register your network device you will also need to ··· 300 buffer in sk_buff format and queues it for transmission. The 301 caller must provide the entire packet with the exception of the 302 bitstuffing and CRC. This is normally done by the caller via 303 - the syncppp interface layer. It returns 0 if the buffer has been 304 - queued and non zero values for queue full. If the function accepts 305 - the buffer it becomes property of the Z8530 layer and the caller 306 - should not free it. 307 </para> 308 <para> 309 The function <function>z8530_get_stats</function> returns a pointer
··· 69 device to be used as both a tty interface and as a synchronous 70 controller is a project for Linux post the 2.4 release 71 </para> 72 </chapter> 73 74 <chapter id="Driver_Modes"> ··· 179 <para> 180 If you wish to use the network interface facilities of the driver, 181 then you need to attach a network device to each channel that is 182 + present and in use. In addition to use the generic HDLC 183 you need to follow some additional plumbing rules. They may seem 184 complex but a look at the example hostess_sv11 driver should 185 reassure you. 186 </para> 187 <para> 188 The network device used for each channel should be pointed to by 189 + the netdevice field of each channel. The hdlc-&gt; priv field of the 190 network device points to your private data - you will need to be 191 + able to find your private data from this. 192 </para> 193 <para> 194 The way most drivers approach this particular problem is to 195 create a structure holding the Z8530 device definition and 196 + put that into the private field of the network device. The 197 + network device fields of the channels then point back to the 198 + network devices. 199 </para> 200 <para> 201 + If you wish to use the generic HDLC then you need to register 202 + the HDLC device. 203 </para> 204 <para> 205 Before you register your network device you will also need to ··· 314 buffer in sk_buff format and queues it for transmission. The 315 caller must provide the entire packet with the exception of the 316 bitstuffing and CRC. This is normally done by the caller via 317 + the generic HDLC interface layer. It returns 0 if the buffer has been 318 + queued and non zero values for queue full. If the function accepts 319 + the buffer it becomes property of the Z8530 layer and the caller 320 + should not free it. 321 </para> 322 <para> 323 The function <function>z8530_get_stats</function> returns a pointer
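The updated documentation above stops at "register the HDLC device"; for readers following along, here is a hedged sketch of that registration sequence, modelled on the converted hostess_sv11 driver the text points to (error handling trimmed; "sv" stands for the driver's private Z8530 state, which alloc_hdlcdev() stores in the hdlc->priv field mentioned above):

    #include <linux/hdlc.h>

    struct net_device *netdev = alloc_hdlcdev(sv);     /* priv reachable via dev_to_hdlc(dev)->priv */

    dev_to_hdlc(netdev)->attach = hostess_attach;      /* encoding/parity setup callback */
    dev_to_hdlc(netdev)->xmit   = hostess_queue_xmit;  /* hands frames to the Z8530 layer */

    if (register_hdlc_device(netdev))
            free_netdev(netdev);                       /* registration failed */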
+11
arch/sh/include/asm/sh_eth.h
···
··· 1 + #ifndef __ASM_SH_ETH_H__ 2 + #define __ASM_SH_ETH_H__ 3 + 4 + enum {EDMAC_LITTLE_ENDIAN, EDMAC_BIG_ENDIAN}; 5 + 6 + struct sh_eth_plat_data { 7 + int phy; 8 + int edmac_endian; 9 + }; 10 + 11 + #endif
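For context, this new header is consumed by board setup code, which hands the structure to the driver through platform data; a minimal sketch, under the assumption that the driver binds by the platform name "sh-eth" (the values are placeholders, not taken from this commit):

    static struct sh_eth_plat_data sh_eth_pdata = {
            .phy          = 1,                   /* PHY address on the MDIO bus */
            .edmac_endian = EDMAC_LITTLE_ENDIAN, /* E-DMAC descriptor endianness */
    };

    static struct platform_device sh_eth_device = {
            .name = "sh-eth",                    /* assumed platform driver name */
            .id   = 0,
            .dev  = {
                    .platform_data = &sh_eth_pdata,
            },
    };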
-4
drivers/char/synclink.c
··· 304 305 /* generic HDLC device parts */ 306 int netcount; 307 - int dosyncppp; 308 spinlock_t netlock; 309 310 #if SYNCLINK_GENERIC_HDLC ··· 867 static int dma[MAX_ISA_DEVICES]; 868 static int debug_level; 869 static int maxframe[MAX_TOTAL_DEVICES]; 870 - static int dosyncppp[MAX_TOTAL_DEVICES]; 871 static int txdmabufs[MAX_TOTAL_DEVICES]; 872 static int txholdbufs[MAX_TOTAL_DEVICES]; 873 ··· 877 module_param_array(dma, int, NULL, 0); 878 module_param(debug_level, int, 0); 879 module_param_array(maxframe, int, NULL, 0); 880 - module_param_array(dosyncppp, int, NULL, 0); 881 module_param_array(txdmabufs, int, NULL, 0); 882 module_param_array(txholdbufs, int, NULL, 0); 883 ··· 4255 if (info->line < MAX_TOTAL_DEVICES) { 4256 if (maxframe[info->line]) 4257 info->max_frame_size = maxframe[info->line]; 4258 - info->dosyncppp = dosyncppp[info->line]; 4259 4260 if (txdmabufs[info->line]) { 4261 info->num_tx_dma_buffers = txdmabufs[info->line];
··· 304 305 /* generic HDLC device parts */ 306 int netcount; 307 spinlock_t netlock; 308 309 #if SYNCLINK_GENERIC_HDLC ··· 868 static int dma[MAX_ISA_DEVICES]; 869 static int debug_level; 870 static int maxframe[MAX_TOTAL_DEVICES]; 871 static int txdmabufs[MAX_TOTAL_DEVICES]; 872 static int txholdbufs[MAX_TOTAL_DEVICES]; 873 ··· 879 module_param_array(dma, int, NULL, 0); 880 module_param(debug_level, int, 0); 881 module_param_array(maxframe, int, NULL, 0); 882 module_param_array(txdmabufs, int, NULL, 0); 883 module_param_array(txholdbufs, int, NULL, 0); 884 ··· 4258 if (info->line < MAX_TOTAL_DEVICES) { 4259 if (maxframe[info->line]) 4260 info->max_frame_size = maxframe[info->line]; 4261 4262 if (txdmabufs[info->line]) { 4263 info->num_tx_dma_buffers = txdmabufs[info->line];
-4
drivers/char/synclinkmp.c
··· 270 271 /* SPPP/Cisco HDLC device parts */ 272 int netcount; 273 - int dosyncppp; 274 spinlock_t netlock; 275 276 #if SYNCLINK_GENERIC_HDLC ··· 468 */ 469 static int debug_level = 0; 470 static int maxframe[MAX_DEVICES] = {0,}; 471 - static int dosyncppp[MAX_DEVICES] = {0,}; 472 473 module_param(break_on_load, bool, 0); 474 module_param(ttymajor, int, 0); 475 module_param(debug_level, int, 0); 476 module_param_array(maxframe, int, NULL, 0); 477 - module_param_array(dosyncppp, int, NULL, 0); 478 479 static char *driver_name = "SyncLink MultiPort driver"; 480 static char *driver_version = "$Revision: 4.38 $"; ··· 3749 if (info->line < MAX_DEVICES) { 3750 if (maxframe[info->line]) 3751 info->max_frame_size = maxframe[info->line]; 3752 - info->dosyncppp = dosyncppp[info->line]; 3753 } 3754 3755 synclinkmp_device_count++;
··· 270 271 /* SPPP/Cisco HDLC device parts */ 272 int netcount; 273 spinlock_t netlock; 274 275 #if SYNCLINK_GENERIC_HDLC ··· 469 */ 470 static int debug_level = 0; 471 static int maxframe[MAX_DEVICES] = {0,}; 472 473 module_param(break_on_load, bool, 0); 474 module_param(ttymajor, int, 0); 475 module_param(debug_level, int, 0); 476 module_param_array(maxframe, int, NULL, 0); 477 478 static char *driver_name = "SyncLink MultiPort driver"; 479 static char *driver_version = "$Revision: 4.38 $"; ··· 3752 if (info->line < MAX_DEVICES) { 3753 if (maxframe[info->line]) 3754 info->max_frame_size = maxframe[info->line]; 3755 } 3756 3757 synclinkmp_device_count++;
+1 -3
drivers/net/3c523.c
··· 640 cfg_cmd->time_low = 0x00; 641 cfg_cmd->time_high = 0xf2; 642 cfg_cmd->promisc = 0; 643 - if (dev->flags & (IFF_ALLMULTI | IFF_PROMISC)) { 644 cfg_cmd->promisc = 1; 645 - dev->flags |= IFF_PROMISC; 646 - } 647 cfg_cmd->carr_coll = 0x00; 648 649 p->scb->cbl_offset = make16(cfg_cmd);
··· 640 cfg_cmd->time_low = 0x00; 641 cfg_cmd->time_high = 0xf2; 642 cfg_cmd->promisc = 0; 643 + if (dev->flags & (IFF_ALLMULTI | IFF_PROMISC)) 644 cfg_cmd->promisc = 1; 645 cfg_cmd->carr_coll = 0x00; 646 647 p->scb->cbl_offset = make16(cfg_cmd);
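This fix, repeated in 3c527.c, atp.c and de620.c below, enforces one rule: a driver's multicast handler may read dev->flags to pick a hardware filter mode, but must never write IFF_PROMISC back into them, since the flags are owned by the network core. A rough sketch of the corrected shape (all helper names hypothetical):

    static void example_set_rx_mode(struct net_device *dev)
    {
            /* derive the filter from the flags; never modify them */
            if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
                dev->mc_count > EXAMPLE_HW_MC_LIMIT)
                    example_hw_set_filter(dev, EXAMPLE_FILTER_PROMISC);
            else
                    example_hw_set_filter(dev, EXAMPLE_FILTER_NORMAL);
    }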
+3 -6
drivers/net/3c527.c
··· 1521 struct mc32_local *lp = netdev_priv(dev); 1522 u16 filt = (1<<2); /* Save Bad Packets, for stats purposes */ 1523 1524 - if (dev->flags&IFF_PROMISC) 1525 /* Enable promiscuous mode */ 1526 filt |= 1; 1527 - else if((dev->flags&IFF_ALLMULTI) || dev->mc_count > 10) 1528 - { 1529 - dev->flags|=IFF_PROMISC; 1530 - filt |= 1; 1531 - } 1532 else if(dev->mc_count) 1533 { 1534 unsigned char block[62];
··· 1521 struct mc32_local *lp = netdev_priv(dev); 1522 u16 filt = (1<<2); /* Save Bad Packets, for stats purposes */ 1523 1524 + if ((dev->flags&IFF_PROMISC) || 1525 + (dev->flags&IFF_ALLMULTI) || 1526 + dev->mc_count > 10) 1527 /* Enable promiscuous mode */ 1528 filt |= 1; 1529 else if(dev->mc_count) 1530 { 1531 unsigned char block[62];
+8 -6
drivers/net/3c59x.c
··· 1692 vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1)); 1693 vp->rx_ring[i].status = 0; /* Clear complete bit. */ 1694 vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG); 1695 - skb = dev_alloc_skb(PKT_BUF_SZ); 1696 vp->rx_skbuff[i] = skb; 1697 if (skb == NULL) 1698 break; /* Bad news! */ 1699 - skb->dev = dev; /* Mark as being used by this device. */ 1700 - skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 1701 vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); 1702 } 1703 if (i != RX_RING_SIZE) { ··· 2540 struct sk_buff *skb; 2541 entry = vp->dirty_rx % RX_RING_SIZE; 2542 if (vp->rx_skbuff[entry] == NULL) { 2543 - skb = dev_alloc_skb(PKT_BUF_SZ); 2544 if (skb == NULL) { 2545 static unsigned long last_jif; 2546 if (time_after(jiffies, last_jif + 10 * HZ)) { ··· 2551 mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1)); 2552 break; /* Bad news! */ 2553 } 2554 - skb->dev = dev; /* Mark as being used by this device. */ 2555 - skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 2556 vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); 2557 vp->rx_skbuff[entry] = skb; 2558 }
··· 1692 vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1)); 1693 vp->rx_ring[i].status = 0; /* Clear complete bit. */ 1694 vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG); 1695 + 1696 + skb = __netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN, 1697 + GFP_KERNEL); 1698 vp->rx_skbuff[i] = skb; 1699 if (skb == NULL) 1700 break; /* Bad news! */ 1701 + 1702 + skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */ 1703 vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); 1704 } 1705 if (i != RX_RING_SIZE) { ··· 2538 struct sk_buff *skb; 2539 entry = vp->dirty_rx % RX_RING_SIZE; 2540 if (vp->rx_skbuff[entry] == NULL) { 2541 + skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN); 2542 if (skb == NULL) { 2543 static unsigned long last_jif; 2544 if (time_after(jiffies, last_jif + 10 * HZ)) { ··· 2549 mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1)); 2550 break; /* Bad news! */ 2551 } 2552 + 2553 + skb_reserve(skb, NET_IP_ALIGN); 2554 vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); 2555 vp->rx_skbuff[entry] = skb; 2556 }
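The allocation change above is the standard receive-refill idiom: netdev_alloc_skb() associates the skb with the device, and reserving NET_IP_ALIGN bytes (2 on most platforms) ahead of the 14-byte Ethernet header leaves the IP header word-aligned. Condensed:

    skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
    if (skb == NULL)
            break;                          /* out of memory; retry later */
    skb_reserve(skb, NET_IP_ALIGN);         /* IP header lands aligned */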
+5 -8
drivers/net/8390.c
··· 9 { 10 return __ei_open(dev); 11 } 12 13 int ei_close(struct net_device *dev) 14 { 15 return __ei_close(dev); 16 } 17 18 irqreturn_t ei_interrupt(int irq, void *dev_id) 19 { 20 return __ei_interrupt(irq, dev_id); 21 } 22 23 #ifdef CONFIG_NET_POLL_CONTROLLER 24 void ei_poll(struct net_device *dev) 25 { 26 __ei_poll(dev); 27 } 28 #endif 29 30 struct net_device *__alloc_ei_netdev(int size) 31 { 32 return ____alloc_ei_netdev(size); 33 } 34 35 void NS8390_init(struct net_device *dev, int startp) 36 { 37 __NS8390_init(dev, startp); 38 } 39 - 40 - EXPORT_SYMBOL(ei_open); 41 - EXPORT_SYMBOL(ei_close); 42 - EXPORT_SYMBOL(ei_interrupt); 43 - #ifdef CONFIG_NET_POLL_CONTROLLER 44 - EXPORT_SYMBOL(ei_poll); 45 - #endif 46 EXPORT_SYMBOL(NS8390_init); 47 - EXPORT_SYMBOL(__alloc_ei_netdev); 48 49 #if defined(MODULE) 50
··· 9 { 10 return __ei_open(dev); 11 } 12 + EXPORT_SYMBOL(ei_open); 13 14 int ei_close(struct net_device *dev) 15 { 16 return __ei_close(dev); 17 } 18 + EXPORT_SYMBOL(ei_close); 19 20 irqreturn_t ei_interrupt(int irq, void *dev_id) 21 { 22 return __ei_interrupt(irq, dev_id); 23 } 24 + EXPORT_SYMBOL(ei_interrupt); 25 26 #ifdef CONFIG_NET_POLL_CONTROLLER 27 void ei_poll(struct net_device *dev) 28 { 29 __ei_poll(dev); 30 } 31 + EXPORT_SYMBOL(ei_poll); 32 #endif 33 34 struct net_device *__alloc_ei_netdev(int size) 35 { 36 return ____alloc_ei_netdev(size); 37 } 38 + EXPORT_SYMBOL(__alloc_ei_netdev); 39 40 void NS8390_init(struct net_device *dev, int startp) 41 { 42 __NS8390_init(dev, startp); 43 } 44 EXPORT_SYMBOL(NS8390_init); 45 46 #if defined(MODULE) 47
+8 -11
drivers/net/8390p.c
··· 4 "8390p.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; 5 6 #define ei_inb(_p) inb(_p) 7 - #define ei_outb(_v,_p) outb(_v,_p) 8 #define ei_inb_p(_p) inb_p(_p) 9 - #define ei_outb_p(_v,_p) outb_p(_v,_p) 10 11 #include "lib8390.c" 12 ··· 14 { 15 return __ei_open(dev); 16 } 17 18 int eip_close(struct net_device *dev) 19 { 20 return __ei_close(dev); 21 } 22 23 irqreturn_t eip_interrupt(int irq, void *dev_id) 24 { 25 return __ei_interrupt(irq, dev_id); 26 } 27 28 #ifdef CONFIG_NET_POLL_CONTROLLER 29 void eip_poll(struct net_device *dev) 30 { 31 __ei_poll(dev); 32 } 33 #endif 34 35 struct net_device *__alloc_eip_netdev(int size) 36 { 37 return ____alloc_ei_netdev(size); 38 } 39 40 void NS8390p_init(struct net_device *dev, int startp) 41 { 42 - return __NS8390_init(dev, startp); 43 } 44 - 45 - EXPORT_SYMBOL(eip_open); 46 - EXPORT_SYMBOL(eip_close); 47 - EXPORT_SYMBOL(eip_interrupt); 48 - #ifdef CONFIG_NET_POLL_CONTROLLER 49 - EXPORT_SYMBOL(eip_poll); 50 - #endif 51 EXPORT_SYMBOL(NS8390p_init); 52 - EXPORT_SYMBOL(__alloc_eip_netdev); 53 54 #if defined(MODULE) 55
··· 4 "8390p.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; 5 6 #define ei_inb(_p) inb(_p) 7 + #define ei_outb(_v, _p) outb(_v, _p) 8 #define ei_inb_p(_p) inb_p(_p) 9 + #define ei_outb_p(_v, _p) outb_p(_v, _p) 10 11 #include "lib8390.c" 12 ··· 14 { 15 return __ei_open(dev); 16 } 17 + EXPORT_SYMBOL(eip_open); 18 19 int eip_close(struct net_device *dev) 20 { 21 return __ei_close(dev); 22 } 23 + EXPORT_SYMBOL(eip_close); 24 25 irqreturn_t eip_interrupt(int irq, void *dev_id) 26 { 27 return __ei_interrupt(irq, dev_id); 28 } 29 + EXPORT_SYMBOL(eip_interrupt); 30 31 #ifdef CONFIG_NET_POLL_CONTROLLER 32 void eip_poll(struct net_device *dev) 33 { 34 __ei_poll(dev); 35 } 36 + EXPORT_SYMBOL(eip_poll); 37 #endif 38 39 struct net_device *__alloc_eip_netdev(int size) 40 { 41 return ____alloc_ei_netdev(size); 42 } 43 + EXPORT_SYMBOL(__alloc_eip_netdev); 44 45 void NS8390p_init(struct net_device *dev, int startp) 46 { 47 + __NS8390_init(dev, startp); 48 } 49 EXPORT_SYMBOL(NS8390p_init); 50 51 #if defined(MODULE) 52
+3 -2
drivers/net/Kconfig
··· 510 config SH_ETH 511 tristate "Renesas SuperH Ethernet support" 512 depends on SUPERH && \ 513 - (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || CPU_SUBTYPE_SH7763) 514 select CRC32 515 select MII 516 select MDIO_BITBANG 517 select PHYLIB 518 help 519 Renesas SuperH Ethernet device driver. 520 - This driver support SH7710, SH7712 and SH7763. 521 522 config SUNLANCE 523 tristate "Sun LANCE support"
··· 510 config SH_ETH 511 tristate "Renesas SuperH Ethernet support" 512 depends on SUPERH && \ 513 + (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || CPU_SUBTYPE_SH7763 || \ 514 + CPU_SUBTYPE_SH7619) 515 select CRC32 516 select MII 517 select MDIO_BITBANG 518 select PHYLIB 519 help 520 Renesas SuperH Ethernet device driver. 521 + This driver support SH7710, SH7712, SH7763 and SH7619. 522 523 config SUNLANCE 524 tristate "Sun LANCE support"
+11 -8
drivers/net/atlx/atl1.c
··· 1790 { 1791 struct pci_dev *pdev = adapter->pdev; 1792 1793 skb->ip_summed = CHECKSUM_NONE; 1794 1795 if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { ··· 1827 return; 1828 } 1829 1830 - /* IPv4, but hardware thinks its checksum is wrong */ 1831 - if (netif_msg_rx_err(adapter)) 1832 - dev_printk(KERN_DEBUG, &pdev->dev, 1833 - "hw csum wrong, pkt_flag:%x, err_flag:%x\n", 1834 - rrd->pkt_flg, rrd->err_flg); 1835 - skb->ip_summed = CHECKSUM_COMPLETE; 1836 - skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum); 1837 - adapter->hw_csum_err++; 1838 return; 1839 } 1840
··· 1790 { 1791 struct pci_dev *pdev = adapter->pdev; 1792 1793 + /* 1794 + * The L1 hardware contains a bug that erroneously sets the 1795 + * PACKET_FLAG_ERR and ERR_FLAG_L4_CHKSUM bits whenever a 1796 + * fragmented IP packet is received, even though the packet 1797 + * is perfectly valid and its checksum is correct. There's 1798 + * no way to distinguish between one of these good packets 1799 + * and a packet that actually contains a TCP/UDP checksum 1800 + * error, so all we can do is allow it to be handed up to 1801 + * the higher layers and let it be sorted out there. 1802 + */ 1803 + 1804 skb->ip_summed = CHECKSUM_NONE; 1805 1806 if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { ··· 1816 return; 1817 } 1818 1819 return; 1820 } 1821
+2 -7
drivers/net/atp.c
··· 854 struct net_local *lp = netdev_priv(dev); 855 long ioaddr = dev->base_addr; 856 857 - if ( dev->mc_count > 0 || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC))) { 858 - /* We must make the kernel realise we had to move 859 - * into promisc mode or we start all out war on 860 - * the cable. - AC 861 - */ 862 - dev->flags|=IFF_PROMISC; 863 lp->addr_mode = CMR2h_PROMISC; 864 - } else 865 lp->addr_mode = CMR2h_Normal; 866 write_reg_high(ioaddr, CMR2, lp->addr_mode); 867 }
··· 854 struct net_local *lp = netdev_priv(dev); 855 long ioaddr = dev->base_addr; 856 857 + if (dev->mc_count > 0 || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC))) 858 lp->addr_mode = CMR2h_PROMISC; 859 + else 860 lp->addr_mode = CMR2h_Normal; 861 write_reg_high(ioaddr, CMR2, lp->addr_mode); 862 }
+1
drivers/net/bonding/bond_3ad.c
··· 2107 aggregator = __get_first_agg(port); 2108 ad_agg_selection_logic(aggregator); 2109 } 2110 } 2111 2112 // for each port run the state machines
··· 2107 aggregator = __get_first_agg(port); 2108 ad_agg_selection_logic(aggregator); 2109 } 2110 + bond_3ad_set_carrier(bond); 2111 } 2112 2113 // for each port run the state machines
+173 -223
drivers/net/bonding/bond_main.c
··· 2223 2224 /*-------------------------------- Monitoring -------------------------------*/ 2225 2226 - /* 2227 - * if !have_locks, return nonzero if a failover is necessary. if 2228 - * have_locks, do whatever failover activities are needed. 2229 - * 2230 - * This is to separate the inspection and failover steps for locking 2231 - * purposes; failover requires rtnl, but acquiring it for every 2232 - * inspection is undesirable, so a wrapper first does inspection, and 2233 - * the acquires the necessary locks and calls again to perform 2234 - * failover if needed. Since all locks are dropped, a complete 2235 - * restart is needed between calls. 2236 - */ 2237 - static int __bond_mii_monitor(struct bonding *bond, int have_locks) 2238 { 2239 - struct slave *slave, *oldcurrent; 2240 - int do_failover = 0; 2241 - int i; 2242 - 2243 - if (bond->slave_cnt == 0) 2244 - goto out; 2245 - 2246 - /* we will try to read the link status of each of our slaves, and 2247 - * set their IFF_RUNNING flag appropriately. For each slave not 2248 - * supporting MII status, we won't do anything so that a user-space 2249 - * program could monitor the link itself if needed. 2250 - */ 2251 - 2252 - read_lock(&bond->curr_slave_lock); 2253 - oldcurrent = bond->curr_active_slave; 2254 - read_unlock(&bond->curr_slave_lock); 2255 2256 bond_for_each_slave(bond, slave, i) { 2257 - struct net_device *slave_dev = slave->dev; 2258 - int link_state; 2259 - u16 old_speed = slave->speed; 2260 - u8 old_duplex = slave->duplex; 2261 2262 - link_state = bond_check_dev_link(bond, slave_dev, 0); 2263 2264 switch (slave->link) { 2265 - case BOND_LINK_UP: /* the link was up */ 2266 - if (link_state == BMSR_LSTATUS) { 2267 - if (!oldcurrent) { 2268 - if (!have_locks) 2269 - return 1; 2270 - do_failover = 1; 2271 - } 2272 - break; 2273 - } else { /* link going down */ 2274 - slave->link = BOND_LINK_FAIL; 2275 - slave->delay = bond->params.downdelay; 2276 2277 - if (slave->link_failure_count < UINT_MAX) { 2278 - slave->link_failure_count++; 2279 - } 2280 - 2281 - if (bond->params.downdelay) { 2282 - printk(KERN_INFO DRV_NAME 2283 - ": %s: link status down for %s " 2284 - "interface %s, disabling it in " 2285 - "%d ms.\n", 2286 - bond->dev->name, 2287 - IS_UP(slave_dev) 2288 - ? ((bond->params.mode == BOND_MODE_ACTIVEBACKUP) 2289 - ? ((slave == oldcurrent) 2290 - ? "active " : "backup ") 2291 - : "") 2292 - : "idle ", 2293 - slave_dev->name, 2294 - bond->params.downdelay * bond->params.miimon); 2295 - } 2296 } 2297 - /* no break ! 
fall through the BOND_LINK_FAIL test to 2298 - ensure proper action to be taken 2299 - */ 2300 - case BOND_LINK_FAIL: /* the link has just gone down */ 2301 - if (link_state != BMSR_LSTATUS) { 2302 - /* link stays down */ 2303 - if (slave->delay <= 0) { 2304 - if (!have_locks) 2305 - return 1; 2306 - 2307 - /* link down for too long time */ 2308 - slave->link = BOND_LINK_DOWN; 2309 - 2310 - /* in active/backup mode, we must 2311 - * completely disable this interface 2312 - */ 2313 - if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP) || 2314 - (bond->params.mode == BOND_MODE_8023AD)) { 2315 - bond_set_slave_inactive_flags(slave); 2316 - } 2317 - 2318 - printk(KERN_INFO DRV_NAME 2319 - ": %s: link status definitely " 2320 - "down for interface %s, " 2321 - "disabling it\n", 2322 - bond->dev->name, 2323 - slave_dev->name); 2324 - 2325 - /* notify ad that the link status has changed */ 2326 - if (bond->params.mode == BOND_MODE_8023AD) { 2327 - bond_3ad_handle_link_change(slave, BOND_LINK_DOWN); 2328 - } 2329 - 2330 - if ((bond->params.mode == BOND_MODE_TLB) || 2331 - (bond->params.mode == BOND_MODE_ALB)) { 2332 - bond_alb_handle_link_change(bond, slave, BOND_LINK_DOWN); 2333 - } 2334 - 2335 - if (slave == oldcurrent) { 2336 - do_failover = 1; 2337 - } 2338 - } else { 2339 - slave->delay--; 2340 - } 2341 - } else { 2342 - /* link up again */ 2343 - slave->link = BOND_LINK_UP; 2344 slave->jiffies = jiffies; 2345 printk(KERN_INFO DRV_NAME 2346 ": %s: link status up again after %d " 2347 "ms for interface %s.\n", 2348 bond->dev->name, 2349 - (bond->params.downdelay - slave->delay) * bond->params.miimon, 2350 - slave_dev->name); 2351 } 2352 break; 2353 - case BOND_LINK_DOWN: /* the link was down */ 2354 - if (link_state != BMSR_LSTATUS) { 2355 - /* the link stays down, nothing more to do */ 2356 - break; 2357 - } else { /* link going up */ 2358 - slave->link = BOND_LINK_BACK; 2359 - slave->delay = bond->params.updelay; 2360 2361 - if (bond->params.updelay) { 2362 - /* if updelay == 0, no need to 2363 - advertise about a 0 ms delay */ 2364 - printk(KERN_INFO DRV_NAME 2365 - ": %s: link status up for " 2366 - "interface %s, enabling it " 2367 - "in %d ms.\n", 2368 - bond->dev->name, 2369 - slave_dev->name, 2370 - bond->params.updelay * bond->params.miimon); 2371 - } 2372 } 2373 - /* no break ! fall through the BOND_LINK_BACK state in 2374 - case there's something to do. 
2375 - */ 2376 - case BOND_LINK_BACK: /* the link has just come back */ 2377 - if (link_state != BMSR_LSTATUS) { 2378 - /* link down again */ 2379 - slave->link = BOND_LINK_DOWN; 2380 - 2381 printk(KERN_INFO DRV_NAME 2382 ": %s: link status down again after %d " 2383 "ms for interface %s.\n", 2384 bond->dev->name, 2385 - (bond->params.updelay - slave->delay) * bond->params.miimon, 2386 - slave_dev->name); 2387 - } else { 2388 - /* link stays up */ 2389 - if (slave->delay == 0) { 2390 - if (!have_locks) 2391 - return 1; 2392 2393 - /* now the link has been up for long time enough */ 2394 - slave->link = BOND_LINK_UP; 2395 - slave->jiffies = jiffies; 2396 - 2397 - if (bond->params.mode == BOND_MODE_8023AD) { 2398 - /* prevent it from being the active one */ 2399 - slave->state = BOND_STATE_BACKUP; 2400 - } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) { 2401 - /* make it immediately active */ 2402 - slave->state = BOND_STATE_ACTIVE; 2403 - } else if (slave != bond->primary_slave) { 2404 - /* prevent it from being the active one */ 2405 - slave->state = BOND_STATE_BACKUP; 2406 - } 2407 - 2408 - printk(KERN_INFO DRV_NAME 2409 - ": %s: link status definitely " 2410 - "up for interface %s.\n", 2411 - bond->dev->name, 2412 - slave_dev->name); 2413 - 2414 - /* notify ad that the link status has changed */ 2415 - if (bond->params.mode == BOND_MODE_8023AD) { 2416 - bond_3ad_handle_link_change(slave, BOND_LINK_UP); 2417 - } 2418 - 2419 - if ((bond->params.mode == BOND_MODE_TLB) || 2420 - (bond->params.mode == BOND_MODE_ALB)) { 2421 - bond_alb_handle_link_change(bond, slave, BOND_LINK_UP); 2422 - } 2423 - 2424 - if ((!oldcurrent) || 2425 - (slave == bond->primary_slave)) { 2426 - do_failover = 1; 2427 - } 2428 - } else { 2429 - slave->delay--; 2430 - } 2431 } 2432 break; 2433 default: 2434 - /* Should not happen */ 2435 printk(KERN_ERR DRV_NAME 2436 - ": %s: Error: %s Illegal value (link=%d)\n", 2437 - bond->dev->name, 2438 - slave->dev->name, 2439 - slave->link); 2440 - goto out; 2441 - } /* end of switch (slave->link) */ 2442 2443 - bond_update_speed_duplex(slave); 2444 - 2445 - if (bond->params.mode == BOND_MODE_8023AD) { 2446 - if (old_speed != slave->speed) { 2447 - bond_3ad_adapter_speed_changed(slave); 2448 - } 2449 - 2450 - if (old_duplex != slave->duplex) { 2451 - bond_3ad_adapter_duplex_changed(slave); 2452 - } 2453 } 2454 2455 - } /* end of for */ 2456 - 2457 - if (do_failover) { 2458 ASSERT_RTNL(); 2459 - 2460 write_lock_bh(&bond->curr_slave_lock); 2461 - 2462 bond_select_active_slave(bond); 2463 - 2464 write_unlock_bh(&bond->curr_slave_lock); 2465 2466 - } else 2467 - bond_set_carrier(bond); 2468 - 2469 - out: 2470 - return 0; 2471 } 2472 2473 /* 2474 * bond_mii_monitor 2475 * 2476 * Really a wrapper that splits the mii monitor into two phases: an 2477 - * inspection, then (if inspection indicates something needs to be 2478 - * done) an acquisition of appropriate locks followed by another pass 2479 - * to implement whatever link state changes are indicated. 
2480 */ 2481 void bond_mii_monitor(struct work_struct *work) 2482 { 2483 struct bonding *bond = container_of(work, struct bonding, 2484 mii_work.work); 2485 - unsigned long delay; 2486 2487 read_lock(&bond->lock); 2488 - if (bond->kill_timers) { 2489 - read_unlock(&bond->lock); 2490 - return; 2491 - } 2492 2493 if (bond->send_grat_arp) { 2494 read_lock(&bond->curr_slave_lock); ··· 2441 read_unlock(&bond->curr_slave_lock); 2442 } 2443 2444 - if (__bond_mii_monitor(bond, 0)) { 2445 read_unlock(&bond->lock); 2446 rtnl_lock(); 2447 read_lock(&bond->lock); 2448 - __bond_mii_monitor(bond, 1); 2449 read_unlock(&bond->lock); 2450 rtnl_unlock(); /* might sleep, hold no other locks */ 2451 read_lock(&bond->lock); 2452 } 2453 2454 - delay = msecs_to_jiffies(bond->params.miimon); 2455 read_unlock(&bond->lock); 2456 - queue_delayed_work(bond->wq, &bond->mii_work, delay); 2457 } 2458 2459 static __be32 bond_glean_dev_ip(struct net_device *dev)
··· 2223 2224 /*-------------------------------- Monitoring -------------------------------*/ 2225 2226 + 2227 + static int bond_miimon_inspect(struct bonding *bond) 2228 { 2229 + struct slave *slave; 2230 + int i, link_state, commit = 0; 2231 2232 bond_for_each_slave(bond, slave, i) { 2233 + slave->new_link = BOND_LINK_NOCHANGE; 2234 2235 + link_state = bond_check_dev_link(bond, slave->dev, 0); 2236 2237 switch (slave->link) { 2238 + case BOND_LINK_UP: 2239 + if (link_state) 2240 + continue; 2241 2242 + slave->link = BOND_LINK_FAIL; 2243 + slave->delay = bond->params.downdelay; 2244 + if (slave->delay) { 2245 + printk(KERN_INFO DRV_NAME 2246 + ": %s: link status down for %s" 2247 + "interface %s, disabling it in %d ms.\n", 2248 + bond->dev->name, 2249 + (bond->params.mode == 2250 + BOND_MODE_ACTIVEBACKUP) ? 2251 + ((slave->state == BOND_STATE_ACTIVE) ? 2252 + "active " : "backup ") : "", 2253 + slave->dev->name, 2254 + bond->params.downdelay * bond->params.miimon); 2255 } 2256 + /*FALLTHRU*/ 2257 + case BOND_LINK_FAIL: 2258 + if (link_state) { 2259 + /* 2260 + * recovered before downdelay expired 2261 + */ 2262 + slave->link = BOND_LINK_UP; 2263 slave->jiffies = jiffies; 2264 printk(KERN_INFO DRV_NAME 2265 ": %s: link status up again after %d " 2266 "ms for interface %s.\n", 2267 bond->dev->name, 2268 + (bond->params.downdelay - slave->delay) * 2269 + bond->params.miimon, 2270 + slave->dev->name); 2271 + continue; 2272 } 2273 + 2274 + if (slave->delay <= 0) { 2275 + slave->new_link = BOND_LINK_DOWN; 2276 + commit++; 2277 + continue; 2278 + } 2279 + 2280 + slave->delay--; 2281 break; 2282 2283 + case BOND_LINK_DOWN: 2284 + if (!link_state) 2285 + continue; 2286 + 2287 + slave->link = BOND_LINK_BACK; 2288 + slave->delay = bond->params.updelay; 2289 + 2290 + if (slave->delay) { 2291 + printk(KERN_INFO DRV_NAME 2292 + ": %s: link status up for " 2293 + "interface %s, enabling it in %d ms.\n", 2294 + bond->dev->name, slave->dev->name, 2295 + bond->params.updelay * 2296 + bond->params.miimon); 2297 } 2298 + /*FALLTHRU*/ 2299 + case BOND_LINK_BACK: 2300 + if (!link_state) { 2301 + slave->link = BOND_LINK_DOWN; 2302 printk(KERN_INFO DRV_NAME 2303 ": %s: link status down again after %d " 2304 "ms for interface %s.\n", 2305 bond->dev->name, 2306 + (bond->params.updelay - slave->delay) * 2307 + bond->params.miimon, 2308 + slave->dev->name); 2309 2310 + continue; 2311 } 2312 + 2313 + if (slave->delay <= 0) { 2314 + slave->new_link = BOND_LINK_UP; 2315 + commit++; 2316 + continue; 2317 + } 2318 + 2319 + slave->delay--; 2320 break; 2321 + } 2322 + } 2323 + 2324 + return commit; 2325 + } 2326 + 2327 + static void bond_miimon_commit(struct bonding *bond) 2328 + { 2329 + struct slave *slave; 2330 + int i; 2331 + 2332 + bond_for_each_slave(bond, slave, i) { 2333 + switch (slave->new_link) { 2334 + case BOND_LINK_NOCHANGE: 2335 + continue; 2336 + 2337 + case BOND_LINK_UP: 2338 + slave->link = BOND_LINK_UP; 2339 + slave->jiffies = jiffies; 2340 + 2341 + if (bond->params.mode == BOND_MODE_8023AD) { 2342 + /* prevent it from being the active one */ 2343 + slave->state = BOND_STATE_BACKUP; 2344 + } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) { 2345 + /* make it immediately active */ 2346 + slave->state = BOND_STATE_ACTIVE; 2347 + } else if (slave != bond->primary_slave) { 2348 + /* prevent it from being the active one */ 2349 + slave->state = BOND_STATE_BACKUP; 2350 + } 2351 + 2352 + printk(KERN_INFO DRV_NAME 2353 + ": %s: link status definitely " 2354 + "up for interface %s.\n", 2355 + 
bond->dev->name, slave->dev->name); 2356 + 2357 + /* notify ad that the link status has changed */ 2358 + if (bond->params.mode == BOND_MODE_8023AD) 2359 + bond_3ad_handle_link_change(slave, BOND_LINK_UP); 2360 + 2361 + if ((bond->params.mode == BOND_MODE_TLB) || 2362 + (bond->params.mode == BOND_MODE_ALB)) 2363 + bond_alb_handle_link_change(bond, slave, 2364 + BOND_LINK_UP); 2365 + 2366 + if (!bond->curr_active_slave || 2367 + (slave == bond->primary_slave)) 2368 + goto do_failover; 2369 + 2370 + continue; 2371 + 2372 + case BOND_LINK_DOWN: 2373 + slave->link = BOND_LINK_DOWN; 2374 + 2375 + if (bond->params.mode == BOND_MODE_ACTIVEBACKUP || 2376 + bond->params.mode == BOND_MODE_8023AD) 2377 + bond_set_slave_inactive_flags(slave); 2378 + 2379 + printk(KERN_INFO DRV_NAME 2380 + ": %s: link status definitely down for " 2381 + "interface %s, disabling it\n", 2382 + bond->dev->name, slave->dev->name); 2383 + 2384 + if (bond->params.mode == BOND_MODE_8023AD) 2385 + bond_3ad_handle_link_change(slave, 2386 + BOND_LINK_DOWN); 2387 + 2388 + if (bond->params.mode == BOND_MODE_TLB || 2389 + bond->params.mode == BOND_MODE_ALB) 2390 + bond_alb_handle_link_change(bond, slave, 2391 + BOND_LINK_DOWN); 2392 + 2393 + if (slave == bond->curr_active_slave) 2394 + goto do_failover; 2395 + 2396 + continue; 2397 + 2398 default: 2399 printk(KERN_ERR DRV_NAME 2400 + ": %s: invalid new link %d on slave %s\n", 2401 + bond->dev->name, slave->new_link, 2402 + slave->dev->name); 2403 + slave->new_link = BOND_LINK_NOCHANGE; 2404 2405 + continue; 2406 } 2407 2408 + do_failover: 2409 ASSERT_RTNL(); 2410 write_lock_bh(&bond->curr_slave_lock); 2411 bond_select_active_slave(bond); 2412 write_unlock_bh(&bond->curr_slave_lock); 2413 + } 2414 2415 + bond_set_carrier(bond); 2416 } 2417 2418 /* 2419 * bond_mii_monitor 2420 * 2421 * Really a wrapper that splits the mii monitor into two phases: an 2422 + * inspection, then (if inspection indicates something needs to be done) 2423 + * an acquisition of appropriate locks followed by a commit phase to 2424 + * implement whatever link state changes are indicated. 2425 */ 2426 void bond_mii_monitor(struct work_struct *work) 2427 { 2428 struct bonding *bond = container_of(work, struct bonding, 2429 mii_work.work); 2430 2431 read_lock(&bond->lock); 2432 + if (bond->kill_timers) 2433 + goto out; 2434 + 2435 + if (bond->slave_cnt == 0) 2436 + goto re_arm; 2437 2438 if (bond->send_grat_arp) { 2439 read_lock(&bond->curr_slave_lock); ··· 2496 read_unlock(&bond->curr_slave_lock); 2497 } 2498 2499 + if (bond_miimon_inspect(bond)) { 2500 read_unlock(&bond->lock); 2501 rtnl_lock(); 2502 read_lock(&bond->lock); 2503 + 2504 + bond_miimon_commit(bond); 2505 + 2506 read_unlock(&bond->lock); 2507 rtnl_unlock(); /* might sleep, hold no other locks */ 2508 read_lock(&bond->lock); 2509 } 2510 2511 + re_arm: 2512 + if (bond->params.miimon) 2513 + queue_delayed_work(bond->wq, &bond->mii_work, 2514 + msecs_to_jiffies(bond->params.miimon)); 2515 + out: 2516 read_unlock(&bond->lock); 2517 } 2518 2519 static __be32 bond_glean_dev_ip(struct net_device *dev)
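Abstracting away the bonding specifics, the lock choreography the rewritten monitor performs is the inspect/commit pattern sketched below (a condensation of the code above, not additional driver logic):

    read_lock(&bond->lock);
    if (bond_miimon_inspect(bond)) {        /* cheap scan under the read lock */
            read_unlock(&bond->lock);
            rtnl_lock();                    /* commit may reconfigure devices */
            read_lock(&bond->lock);
            bond_miimon_commit(bond);       /* apply recorded new_link states */
            read_unlock(&bond->lock);
            rtnl_unlock();                  /* might sleep, hold no other locks */
            read_lock(&bond->lock);
    }
    read_unlock(&bond->lock);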
-3
drivers/net/bonding/bond_sysfs.c
··· 350 if (dev) { 351 printk(KERN_INFO DRV_NAME ": %s: Removing slave %s\n", 352 bond->dev->name, dev->name); 353 - if (bond->setup_by_slave) 354 - res = bond_release_and_destroy(bond->dev, dev); 355 - else 356 res = bond_release(bond->dev, dev); 357 if (res) { 358 ret = res;
··· 350 if (dev) { 351 printk(KERN_INFO DRV_NAME ": %s: Removing slave %s\n", 352 bond->dev->name, dev->name); 353 res = bond_release(bond->dev, dev); 354 if (res) { 355 ret = res;
-7
drivers/net/de620.c
··· 488 { 489 if (dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC)) 490 { /* Enable promiscuous mode */ 491 - /* 492 - * We must make the kernel realise we had to move 493 - * into promisc mode or we start all out war on 494 - * the cable. - AC 495 - */ 496 - dev->flags|=IFF_PROMISC; 497 - 498 de620_set_register(dev, W_TCR, (TCR_DEF & ~RXPBM) | RXALL); 499 } 500 else
··· 488 { 489 if (dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC)) 490 { /* Enable promiscuous mode */ 491 de620_set_register(dev, W_TCR, (TCR_DEF & ~RXPBM) | RXALL); 492 } 493 else
+5
drivers/net/dm9000.c
··· 1374 for (i = 0; i < 6; i += 2) 1375 dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i); 1376 1377 if (!is_valid_ether_addr(ndev->dev_addr)) { 1378 /* try reading from mac */ 1379
··· 1374 for (i = 0; i < 6; i += 2) 1375 dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i); 1376 1377 + if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) { 1378 + mac_src = "platform data"; 1379 + memcpy(ndev->dev_addr, pdata->dev_addr, 6); 1380 + } 1381 + 1382 if (!is_valid_ether_addr(ndev->dev_addr)) { 1383 /* try reading from mac */ 1384
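The new fallback consults platform data when the EEPROM yields no valid address; a board file would supply it roughly as follows (a sketch: the MAC bytes and flags are placeholders, assuming struct dm9000_plat_data carries the dev_addr array used above):

    static struct dm9000_plat_data board_dm9000_pdata = {
            .flags    = DM9000_PLATF_16BITONLY,
            .dev_addr = { 0x00, 0x16, 0x76, 0x00, 0x00, 0x01 },
    };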
+14 -17
drivers/net/e1000e/e1000.h
··· 41 42 struct e1000_info; 43 44 - #define ndev_printk(level, netdev, format, arg...) \ 45 - printk(level "%s: " format, (netdev)->name, ## arg) 46 47 #ifdef DEBUG 48 - #define ndev_dbg(netdev, format, arg...) \ 49 - ndev_printk(KERN_DEBUG , netdev, format, ## arg) 50 #else 51 - #define ndev_dbg(netdev, format, arg...) do { (void)(netdev); } while (0) 52 #endif 53 54 - #define ndev_err(netdev, format, arg...) \ 55 - ndev_printk(KERN_ERR , netdev, format, ## arg) 56 - #define ndev_info(netdev, format, arg...) \ 57 - ndev_printk(KERN_INFO , netdev, format, ## arg) 58 - #define ndev_warn(netdev, format, arg...) \ 59 - ndev_printk(KERN_WARNING , netdev, format, ## arg) 60 - #define ndev_notice(netdev, format, arg...) \ 61 - ndev_printk(KERN_NOTICE , netdev, format, ## arg) 62 63 64 /* Tx/Rx descriptor defines */ ··· 284 unsigned long led_status; 285 286 unsigned int flags; 287 - 288 - /* for ioport free */ 289 - int bars; 290 - int need_ioport; 291 }; 292 293 struct e1000_info {
··· 41 42 struct e1000_info; 43 44 + #define e_printk(level, adapter, format, arg...) \ 45 + printk(level "%s: %s: " format, pci_name(adapter->pdev), \ 46 + adapter->netdev->name, ## arg) 47 48 #ifdef DEBUG 49 + #define e_dbg(format, arg...) \ 50 + e_printk(KERN_DEBUG , adapter, format, ## arg) 51 #else 52 + #define e_dbg(format, arg...) do { (void)(adapter); } while (0) 53 #endif 54 55 + #define e_err(format, arg...) \ 56 + e_printk(KERN_ERR, adapter, format, ## arg) 57 + #define e_info(format, arg...) \ 58 + e_printk(KERN_INFO, adapter, format, ## arg) 59 + #define e_warn(format, arg...) \ 60 + e_printk(KERN_WARNING, adapter, format, ## arg) 61 + #define e_notice(format, arg...) \ 62 + e_printk(KERN_NOTICE, adapter, format, ## arg) 63 64 65 /* Tx/Rx descriptor defines */ ··· 283 unsigned long led_status; 284 285 unsigned int flags; 286 }; 287 288 struct e1000_info {
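One subtlety worth noting: unlike the removed ndev_* macros, which took the net_device as an argument, the new e_* macros expand against a variable literally named adapter in the calling scope, so they are only usable inside functions that have one. A usage sketch (the function itself is made up):

    static void example_report(struct e1000_adapter *adapter)
    {
            /* expands to printk(KERN_INFO "%s: %s: ...",
             * pci_name(adapter->pdev), adapter->netdev->name, ...) */
            e_info("link is %s\n",
                   netif_carrier_ok(adapter->netdev) ? "up" : "down");
            if (!adapter->hw.hw_addr)
                    e_err("BAR 0 not mapped\n");
    }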
+18 -26
drivers/net/e1000e/ethtool.c
··· 189 /* Fiber NICs only allow 1000 gbps Full duplex */ 190 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) && 191 spddplx != (SPEED_1000 + DUPLEX_FULL)) { 192 - ndev_err(adapter->netdev, "Unsupported Speed/Duplex " 193 - "configuration\n"); 194 return -EINVAL; 195 } 196 ··· 212 break; 213 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 214 default: 215 - ndev_err(adapter->netdev, "Unsupported Speed/Duplex " 216 - "configuration\n"); 217 return -EINVAL; 218 } 219 return 0; ··· 229 * cannot be changed 230 */ 231 if (e1000_check_reset_block(hw)) { 232 - ndev_err(netdev, "Cannot change link " 233 - "characteristics when SoL/IDER is active.\n"); 234 return -EINVAL; 235 } 236 ··· 378 netdev->features &= ~NETIF_F_TSO6; 379 } 380 381 - ndev_info(netdev, "TSO is %s\n", 382 - data ? "Enabled" : "Disabled"); 383 adapter->flags |= FLAG_TSO_FORCE; 384 return 0; 385 } ··· 719 (test[pat] & write)); 720 val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset); 721 if (val != (test[pat] & write & mask)) { 722 - ndev_err(adapter->netdev, "pattern test reg %04X " 723 - "failed: got 0x%08X expected 0x%08X\n", 724 - reg + offset, 725 - val, (test[pat] & write & mask)); 726 *data = reg; 727 return 1; 728 } ··· 736 __ew32(&adapter->hw, reg, write & mask); 737 val = __er32(&adapter->hw, reg); 738 if ((write & mask) != (val & mask)) { 739 - ndev_err(adapter->netdev, "set/check reg %04X test failed: " 740 - "got 0x%08X expected 0x%08X\n", reg, (val & mask), 741 - (write & mask)); 742 *data = reg; 743 return 1; 744 } ··· 761 { 762 struct e1000_hw *hw = &adapter->hw; 763 struct e1000_mac_info *mac = &adapter->hw.mac; 764 - struct net_device *netdev = adapter->netdev; 765 u32 value; 766 u32 before; 767 u32 after; ··· 793 ew32(STATUS, toggle); 794 after = er32(STATUS) & toggle; 795 if (value != after) { 796 - ndev_err(netdev, "failed STATUS register test got: " 797 - "0x%08X expected: 0x%08X\n", after, value); 798 *data = 1; 799 return 1; 800 } ··· 897 *data = 1; 898 return -1; 899 } 900 - ndev_info(netdev, "testing %s interrupt\n", 901 - (shared_int ? "shared" : "unshared")); 902 903 /* Disable all the interrupts */ 904 ew32(IMC, 0xFFFFFFFF); ··· 1519 * sessions are active 1520 */ 1521 if (e1000_check_reset_block(&adapter->hw)) { 1522 - ndev_err(adapter->netdev, "Cannot do PHY loopback test " 1523 - "when SoL/IDER is active.\n"); 1524 *data = 0; 1525 goto out; 1526 } ··· 1604 forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; 1605 autoneg = adapter->hw.mac.autoneg; 1606 1607 - ndev_info(netdev, "offline testing starting\n"); 1608 1609 /* 1610 * Link test performed before hardware reset so autoneg doesn't ··· 1650 if (if_running) 1651 dev_open(netdev); 1652 } else { 1653 - ndev_info(netdev, "online testing starting\n"); 1654 /* Online tests */ 1655 if (e1000_link_test(adapter, &data[4])) 1656 eth_test->flags |= ETH_TEST_FL_FAILED; ··· 1686 wol->supported &= ~WAKE_UCAST; 1687 1688 if (adapter->wol & E1000_WUFC_EX) 1689 - ndev_err(netdev, "Interface does not support " 1690 - "directed (unicast) frame wake-up packets\n"); 1691 } 1692 1693 if (adapter->wol & E1000_WUFC_EX)
··· 189 /* Fiber NICs only allow 1000 gbps Full duplex */ 190 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) && 191 spddplx != (SPEED_1000 + DUPLEX_FULL)) { 192 + e_err("Unsupported Speed/Duplex configuration\n"); 193 return -EINVAL; 194 } 195 ··· 213 break; 214 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 215 default: 216 + e_err("Unsupported Speed/Duplex configuration\n"); 217 return -EINVAL; 218 } 219 return 0; ··· 231 * cannot be changed 232 */ 233 if (e1000_check_reset_block(hw)) { 234 + e_err("Cannot change link characteristics when SoL/IDER is " 235 + "active.\n"); 236 return -EINVAL; 237 } 238 ··· 380 netdev->features &= ~NETIF_F_TSO6; 381 } 382 383 + e_info("TSO is %s\n", data ? "Enabled" : "Disabled"); 384 adapter->flags |= FLAG_TSO_FORCE; 385 return 0; 386 } ··· 722 (test[pat] & write)); 723 val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset); 724 if (val != (test[pat] & write & mask)) { 725 + e_err("pattern test reg %04X failed: got 0x%08X " 726 + "expected 0x%08X\n", reg + offset, val, 727 + (test[pat] & write & mask)); 728 *data = reg; 729 return 1; 730 } ··· 740 __ew32(&adapter->hw, reg, write & mask); 741 val = __er32(&adapter->hw, reg); 742 if ((write & mask) != (val & mask)) { 743 + e_err("set/check reg %04X test failed: got 0x%08X " 744 + "expected 0x%08X\n", reg, (val & mask), (write & mask)); 745 *data = reg; 746 return 1; 747 } ··· 766 { 767 struct e1000_hw *hw = &adapter->hw; 768 struct e1000_mac_info *mac = &adapter->hw.mac; 769 u32 value; 770 u32 before; 771 u32 after; ··· 799 ew32(STATUS, toggle); 800 after = er32(STATUS) & toggle; 801 if (value != after) { 802 + e_err("failed STATUS register test got: 0x%08X expected: " 803 + "0x%08X\n", after, value); 804 *data = 1; 805 return 1; 806 } ··· 903 *data = 1; 904 return -1; 905 } 906 + e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared")); 907 908 /* Disable all the interrupts */ 909 ew32(IMC, 0xFFFFFFFF); ··· 1526 * sessions are active 1527 */ 1528 if (e1000_check_reset_block(&adapter->hw)) { 1529 + e_err("Cannot do PHY loopback test when SoL/IDER is active.\n"); 1530 *data = 0; 1531 goto out; 1532 } ··· 1612 forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; 1613 autoneg = adapter->hw.mac.autoneg; 1614 1615 + e_info("offline testing starting\n"); 1616 1617 /* 1618 * Link test performed before hardware reset so autoneg doesn't ··· 1658 if (if_running) 1659 dev_open(netdev); 1660 } else { 1661 + e_info("online testing starting\n"); 1662 /* Online tests */ 1663 if (e1000_link_test(adapter, &data[4])) 1664 eth_test->flags |= ETH_TEST_FL_FAILED; ··· 1694 wol->supported &= ~WAKE_UCAST; 1695 1696 if (adapter->wol & E1000_WUFC_EX) 1697 + e_err("Interface does not support directed (unicast) " 1698 + "frame wake-up packets\n"); 1699 } 1700 1701 if (adapter->wol & E1000_WUFC_EX)
+110 -136
drivers/net/e1000e/netdev.c
··· 484 * packet, also make sure the frame isn't just CRC only */ 485 if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) { 486 /* All receives must fit into a single buffer */ 487 - ndev_dbg(netdev, "%s: Receive packet consumed " 488 - "multiple buffers\n", netdev->name); 489 /* recycle */ 490 buffer_info->skb = skb; 491 goto next_desc; ··· 576 unsigned int i = tx_ring->next_to_clean; 577 unsigned int eop = tx_ring->buffer_info[i].next_to_watch; 578 struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop); 579 - struct net_device *netdev = adapter->netdev; 580 581 /* detected Tx unit hang */ 582 - ndev_err(netdev, 583 - "Detected Tx Unit Hang:\n" 584 - " TDH <%x>\n" 585 - " TDT <%x>\n" 586 - " next_to_use <%x>\n" 587 - " next_to_clean <%x>\n" 588 - "buffer_info[next_to_clean]:\n" 589 - " time_stamp <%lx>\n" 590 - " next_to_watch <%x>\n" 591 - " jiffies <%lx>\n" 592 - " next_to_watch.status <%x>\n", 593 - readl(adapter->hw.hw_addr + tx_ring->head), 594 - readl(adapter->hw.hw_addr + tx_ring->tail), 595 - tx_ring->next_to_use, 596 - tx_ring->next_to_clean, 597 - tx_ring->buffer_info[eop].time_stamp, 598 - eop, 599 - jiffies, 600 - eop_desc->upper.fields.status); 601 } 602 603 /** ··· 745 buffer_info->dma = 0; 746 747 if (!(staterr & E1000_RXD_STAT_EOP)) { 748 - ndev_dbg(netdev, "%s: Packet Split buffers didn't pick " 749 - "up the full packet\n", netdev->name); 750 dev_kfree_skb_irq(skb); 751 goto next_desc; 752 } ··· 759 length = le16_to_cpu(rx_desc->wb.middle.length0); 760 761 if (!length) { 762 - ndev_dbg(netdev, "%s: Last part of the packet spanning" 763 - " multiple descriptors\n", netdev->name); 764 dev_kfree_skb_irq(skb); 765 goto next_desc; 766 } ··· 1009 1010 /* eth type trans needs skb->data to point to something */ 1011 if (!pskb_may_pull(skb, ETH_HLEN)) { 1012 - ndev_err(netdev, "pskb_may_pull failed.\n"); 1013 dev_kfree_skb(skb); 1014 goto next_desc; 1015 } ··· 1249 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, 1250 netdev); 1251 if (err) { 1252 - ndev_err(netdev, 1253 - "Unable to allocate %s interrupt (return: %d)\n", 1254 - adapter->flags & FLAG_MSI_ENABLED ? 
"MSI":"INTx", 1255 - err); 1256 if (adapter->flags & FLAG_MSI_ENABLED) 1257 pci_disable_msi(adapter->pdev); 1258 } ··· 1391 return 0; 1392 err: 1393 vfree(tx_ring->buffer_info); 1394 - ndev_err(adapter->netdev, 1395 - "Unable to allocate memory for the transmit descriptor ring\n"); 1396 return err; 1397 } 1398 ··· 1445 } 1446 err: 1447 vfree(rx_ring->buffer_info); 1448 - ndev_err(adapter->netdev, 1449 - "Unable to allocate memory for the transmit descriptor ring\n"); 1450 return err; 1451 } 1452 ··· 2444 * For parts with AMT enabled, let the firmware know 2445 * that the network interface is in control 2446 */ 2447 - if ((adapter->flags & FLAG_HAS_AMT) && e1000e_check_mng_mode(hw)) 2448 e1000_get_hw_control(adapter); 2449 2450 ew32(WUC, 0); 2451 2452 if (mac->ops.init_hw(hw)) 2453 - ndev_err(adapter->netdev, "Hardware Error\n"); 2454 2455 e1000_update_mng_vlan(adapter); 2456 ··· 2585 return 0; 2586 2587 err: 2588 - ndev_err(netdev, "Unable to allocate memory for queues\n"); 2589 kfree(adapter->rx_ring); 2590 kfree(adapter->tx_ring); 2591 return -ENOMEM; ··· 2634 * If AMT is enabled, let the firmware know that the network 2635 * interface is now open 2636 */ 2637 - if ((adapter->flags & FLAG_HAS_AMT) && 2638 - e1000e_check_mng_mode(&adapter->hw)) 2639 e1000_get_hw_control(adapter); 2640 2641 /* ··· 2712 * If AMT is enabled, let the firmware know that the network 2713 * interface is now closed 2714 */ 2715 - if ((adapter->flags & FLAG_HAS_AMT) && 2716 - e1000e_check_mng_mode(&adapter->hw)) 2717 e1000_release_hw_control(adapter); 2718 2719 return 0; ··· 2909 ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000); 2910 ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus); 2911 if (ret_val) 2912 - ndev_warn(adapter->netdev, 2913 - "Error reading PHY register\n"); 2914 } else { 2915 /* 2916 * Do not read PHY registers if link is not up ··· 2934 static void e1000_print_link_info(struct e1000_adapter *adapter) 2935 { 2936 struct e1000_hw *hw = &adapter->hw; 2937 - struct net_device *netdev = adapter->netdev; 2938 u32 ctrl = er32(CTRL); 2939 2940 - ndev_info(netdev, 2941 - "Link is Up %d Mbps %s, Flow Control: %s\n", 2942 - adapter->link_speed, 2943 - (adapter->link_duplex == FULL_DUPLEX) ? 2944 - "Full Duplex" : "Half Duplex", 2945 - ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ? 2946 - "RX/TX" : 2947 - ((ctrl & E1000_CTRL_RFCE) ? "RX" : 2948 - ((ctrl & E1000_CTRL_TFCE) ? 
"TX" : "None" ))); 2949 } 2950 2951 static bool e1000_has_link(struct e1000_adapter *adapter) ··· 2983 if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && 2984 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { 2985 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ 2986 - ndev_info(adapter->netdev, 2987 - "Gigabit has been disabled, downgrading speed\n"); 2988 } 2989 2990 return link_active; ··· 3084 switch (adapter->link_speed) { 3085 case SPEED_10: 3086 case SPEED_100: 3087 - ndev_info(netdev, 3088 - "10/100 speed: disabling TSO\n"); 3089 netdev->features &= ~NETIF_F_TSO; 3090 netdev->features &= ~NETIF_F_TSO6; 3091 break; ··· 3117 if (netif_carrier_ok(netdev)) { 3118 adapter->link_speed = 0; 3119 adapter->link_duplex = 0; 3120 - ndev_info(netdev, "Link is Down\n"); 3121 netif_carrier_off(netdev); 3122 netif_tx_stop_all_queues(netdev); 3123 if (!test_bit(__E1000_DOWN, &adapter->state)) ··· 3591 3592 pull_size = min((unsigned int)4, skb->data_len); 3593 if (!__pskb_pull_tail(skb, pull_size)) { 3594 - ndev_err(netdev, 3595 - "__pskb_pull_tail failed.\n"); 3596 dev_kfree_skb_any(skb); 3597 return NETDEV_TX_OK; 3598 } ··· 3723 3724 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || 3725 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 3726 - ndev_err(netdev, "Invalid MTU setting\n"); 3727 return -EINVAL; 3728 } 3729 3730 /* Jumbo frame size limits */ 3731 if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) { 3732 if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { 3733 - ndev_err(netdev, "Jumbo Frames not supported.\n"); 3734 return -EINVAL; 3735 } 3736 if (adapter->hw.phy.type == e1000_phy_ife) { 3737 - ndev_err(netdev, "Jumbo Frames not supported.\n"); 3738 return -EINVAL; 3739 } 3740 } 3741 3742 #define MAX_STD_JUMBO_FRAME_SIZE 9234 3743 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { 3744 - ndev_err(netdev, "MTU > 9216 not supported.\n"); 3745 return -EINVAL; 3746 } 3747 ··· 3778 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN 3779 + ETH_FCS_LEN; 3780 3781 - ndev_info(netdev, "changing MTU from %d to %d\n", 3782 - netdev->mtu, new_mtu); 3783 netdev->mtu = new_mtu; 3784 3785 if (netif_running(netdev)) ··· 3991 pci_restore_state(pdev); 3992 e1000e_disable_l1aspm(pdev); 3993 3994 - if (adapter->need_ioport) 3995 - err = pci_enable_device(pdev); 3996 - else 3997 - err = pci_enable_device_mem(pdev); 3998 if (err) { 3999 dev_err(&pdev->dev, 4000 "Cannot enable PCI device from suspend\n"); ··· 4025 * is up. For all other cases, let the f/w know that the h/w is now 4026 * under the control of the driver. 4027 */ 4028 - if (!(adapter->flags & FLAG_HAS_AMT) || !e1000e_check_mng_mode(&adapter->hw)) 4029 e1000_get_hw_control(adapter); 4030 4031 return 0; ··· 4093 int err; 4094 4095 e1000e_disable_l1aspm(pdev); 4096 - if (adapter->need_ioport) 4097 - err = pci_enable_device(pdev); 4098 - else 4099 - err = pci_enable_device_mem(pdev); 4100 if (err) { 4101 dev_err(&pdev->dev, 4102 "Cannot re-enable PCI device after reset.\n"); ··· 4141 * is up. For all other cases, let the f/w know that the h/w is now 4142 * under the control of the driver. 4143 */ 4144 - if (!(adapter->flags & FLAG_HAS_AMT) || 4145 - !e1000e_check_mng_mode(&adapter->hw)) 4146 e1000_get_hw_control(adapter); 4147 4148 } ··· 4153 u32 pba_num; 4154 4155 /* print bus type/speed/width info */ 4156 - ndev_info(netdev, "(PCI Express:2.5GB/s:%s) " 4157 - "%02x:%02x:%02x:%02x:%02x:%02x\n", 4158 - /* bus width */ 4159 - ((hw->bus.width == e1000_bus_width_pcie_x4) ? 
"Width x4" : 4160 - "Width x1"), 4161 - /* MAC address */ 4162 - netdev->dev_addr[0], netdev->dev_addr[1], 4163 - netdev->dev_addr[2], netdev->dev_addr[3], 4164 - netdev->dev_addr[4], netdev->dev_addr[5]); 4165 - ndev_info(netdev, "Intel(R) PRO/%s Network Connection\n", 4166 - (hw->phy.type == e1000_phy_ife) 4167 - ? "10/100" : "1000"); 4168 e1000e_read_pba_num(hw, &pba_num); 4169 - ndev_info(netdev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", 4170 - hw->mac.type, hw->phy.type, 4171 - (pba_num >> 8), (pba_num & 0xff)); 4172 } 4173 4174 - /** 4175 - * e1000e_is_need_ioport - determine if an adapter needs ioport resources or not 4176 - * @pdev: PCI device information struct 4177 - * 4178 - * Returns true if an adapters needs ioport resources 4179 - **/ 4180 - static int e1000e_is_need_ioport(struct pci_dev *pdev) 4181 { 4182 - switch (pdev->device) { 4183 - /* Currently there are no adapters that need ioport resources */ 4184 - default: 4185 - return false; 4186 } 4187 } 4188 ··· 4215 int i, err, pci_using_dac; 4216 u16 eeprom_data = 0; 4217 u16 eeprom_apme_mask = E1000_EEPROM_APME; 4218 - int bars, need_ioport; 4219 4220 e1000e_disable_l1aspm(pdev); 4221 4222 - /* do not allocate ioport bars when not needed */ 4223 - need_ioport = e1000e_is_need_ioport(pdev); 4224 - if (need_ioport) { 4225 - bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); 4226 - err = pci_enable_device(pdev); 4227 - } else { 4228 - bars = pci_select_bars(pdev, IORESOURCE_MEM); 4229 - err = pci_enable_device_mem(pdev); 4230 - } 4231 if (err) 4232 return err; 4233 ··· 4241 } 4242 } 4243 4244 - err = pci_request_selected_regions(pdev, bars, e1000e_driver_name); 4245 if (err) 4246 goto err_pci_reg; 4247 ··· 4268 adapter->hw.adapter = adapter; 4269 adapter->hw.mac.type = ei->mac; 4270 adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1; 4271 - adapter->bars = bars; 4272 - adapter->need_ioport = need_ioport; 4273 4274 mmio_start = pci_resource_start(pdev, 0); 4275 mmio_len = pci_resource_len(pdev, 0); ··· 4339 } 4340 4341 if (e1000_check_reset_block(&adapter->hw)) 4342 - ndev_info(netdev, 4343 - "PHY reset is blocked due to SOL/IDER session.\n"); 4344 4345 netdev->features = NETIF_F_SG | 4346 NETIF_F_HW_CSUM | ··· 4383 if (e1000_validate_nvm_checksum(&adapter->hw) >= 0) 4384 break; 4385 if (i == 2) { 4386 - ndev_err(netdev, "The NVM Checksum Is Not Valid\n"); 4387 err = -EIO; 4388 goto err_eeprom; 4389 } 4390 } 4391 4392 /* copy the MAC address out of the NVM */ 4393 if (e1000e_read_mac_addr(&adapter->hw)) 4394 - ndev_err(netdev, "NVM Read Error while reading MAC address\n"); 4395 4396 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); 4397 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); 4398 4399 if (!is_valid_ether_addr(netdev->perm_addr)) { 4400 - ndev_err(netdev, "Invalid MAC Address: " 4401 - "%02x:%02x:%02x:%02x:%02x:%02x\n", 4402 - netdev->perm_addr[0], netdev->perm_addr[1], 4403 - netdev->perm_addr[2], netdev->perm_addr[3], 4404 - netdev->perm_addr[4], netdev->perm_addr[5]); 4405 err = -EIO; 4406 goto err_eeprom; 4407 } ··· 4472 * is up. For all other cases, let the f/w know that the h/w is now 4473 * under the control of the driver. 
4474 */ 4475 - if (!(adapter->flags & FLAG_HAS_AMT) || 4476 - !e1000e_check_mng_mode(&adapter->hw)) 4477 e1000_get_hw_control(adapter); 4478 4479 /* tell the stack to leave us alone until e1000_open() is called */ ··· 4489 return 0; 4490 4491 err_register: 4492 - err_hw_init: 4493 - e1000_release_hw_control(adapter); 4494 err_eeprom: 4495 if (!e1000_check_reset_block(&adapter->hw)) 4496 e1000_phy_hw_reset(&adapter->hw); 4497 4498 - if (adapter->hw.flash_address) 4499 - iounmap(adapter->hw.flash_address); 4500 - 4501 - err_flashmap: 4502 kfree(adapter->tx_ring); 4503 kfree(adapter->rx_ring); 4504 err_sw_init: 4505 iounmap(adapter->hw.hw_addr); 4506 err_ioremap: 4507 free_netdev(netdev); 4508 err_alloc_etherdev: 4509 - pci_release_selected_regions(pdev, bars); 4510 err_pci_reg: 4511 err_dma: 4512 pci_disable_device(pdev); ··· 4555 iounmap(adapter->hw.hw_addr); 4556 if (adapter->hw.flash_address) 4557 iounmap(adapter->hw.flash_address); 4558 - pci_release_selected_regions(pdev, adapter->bars); 4559 4560 free_netdev(netdev); 4561
··· 484 * packet, also make sure the frame isn't just CRC only */ 485 if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) { 486 /* All receives must fit into a single buffer */ 487 + e_dbg("%s: Receive packet consumed multiple buffers\n", 488 + netdev->name); 489 /* recycle */ 490 buffer_info->skb = skb; 491 goto next_desc; ··· 576 unsigned int i = tx_ring->next_to_clean; 577 unsigned int eop = tx_ring->buffer_info[i].next_to_watch; 578 struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop); 579 580 /* detected Tx unit hang */ 581 + e_err("Detected Tx Unit Hang:\n" 582 + " TDH <%x>\n" 583 + " TDT <%x>\n" 584 + " next_to_use <%x>\n" 585 + " next_to_clean <%x>\n" 586 + "buffer_info[next_to_clean]:\n" 587 + " time_stamp <%lx>\n" 588 + " next_to_watch <%x>\n" 589 + " jiffies <%lx>\n" 590 + " next_to_watch.status <%x>\n", 591 + readl(adapter->hw.hw_addr + tx_ring->head), 592 + readl(adapter->hw.hw_addr + tx_ring->tail), 593 + tx_ring->next_to_use, 594 + tx_ring->next_to_clean, 595 + tx_ring->buffer_info[eop].time_stamp, 596 + eop, 597 + jiffies, 598 + eop_desc->upper.fields.status); 599 } 600 601 /** ··· 747 buffer_info->dma = 0; 748 749 if (!(staterr & E1000_RXD_STAT_EOP)) { 750 + e_dbg("%s: Packet Split buffers didn't pick up the " 751 + "full packet\n", netdev->name); 752 dev_kfree_skb_irq(skb); 753 goto next_desc; 754 } ··· 761 length = le16_to_cpu(rx_desc->wb.middle.length0); 762 763 if (!length) { 764 + e_dbg("%s: Last part of the packet spanning multiple " 765 + "descriptors\n", netdev->name); 766 dev_kfree_skb_irq(skb); 767 goto next_desc; 768 } ··· 1011 1012 /* eth type trans needs skb->data to point to something */ 1013 if (!pskb_may_pull(skb, ETH_HLEN)) { 1014 + e_err("pskb_may_pull failed.\n"); 1015 dev_kfree_skb(skb); 1016 goto next_desc; 1017 } ··· 1251 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, 1252 netdev); 1253 if (err) { 1254 + e_err("Unable to allocate %s interrupt (return: %d)\n", 1255 + adapter->flags & FLAG_MSI_ENABLED ? 
"MSI":"INTx", err); 1256 if (adapter->flags & FLAG_MSI_ENABLED) 1257 pci_disable_msi(adapter->pdev); 1258 } ··· 1395 return 0; 1396 err: 1397 vfree(tx_ring->buffer_info); 1398 + e_err("Unable to allocate memory for the transmit descriptor ring\n"); 1399 return err; 1400 } 1401 ··· 1450 } 1451 err: 1452 vfree(rx_ring->buffer_info); 1453 + e_err("Unable to allocate memory for the transmit descriptor ring\n"); 1454 return err; 1455 } 1456 ··· 2450 * For parts with AMT enabled, let the firmware know 2451 * that the network interface is in control 2452 */ 2453 + if (adapter->flags & FLAG_HAS_AMT) 2454 e1000_get_hw_control(adapter); 2455 2456 ew32(WUC, 0); 2457 2458 if (mac->ops.init_hw(hw)) 2459 + e_err("Hardware Error\n"); 2460 2461 e1000_update_mng_vlan(adapter); 2462 ··· 2591 return 0; 2592 2593 err: 2594 + e_err("Unable to allocate memory for queues\n"); 2595 kfree(adapter->rx_ring); 2596 kfree(adapter->tx_ring); 2597 return -ENOMEM; ··· 2640 * If AMT is enabled, let the firmware know that the network 2641 * interface is now open 2642 */ 2643 + if (adapter->flags & FLAG_HAS_AMT) 2644 e1000_get_hw_control(adapter); 2645 2646 /* ··· 2719 * If AMT is enabled, let the firmware know that the network 2720 * interface is now closed 2721 */ 2722 + if (adapter->flags & FLAG_HAS_AMT) 2723 e1000_release_hw_control(adapter); 2724 2725 return 0; ··· 2917 ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000); 2918 ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus); 2919 if (ret_val) 2920 + e_warn("Error reading PHY register\n"); 2921 } else { 2922 /* 2923 * Do not read PHY registers if link is not up ··· 2943 static void e1000_print_link_info(struct e1000_adapter *adapter) 2944 { 2945 struct e1000_hw *hw = &adapter->hw; 2946 u32 ctrl = er32(CTRL); 2947 2948 + e_info("Link is Up %d Mbps %s, Flow Control: %s\n", 2949 + adapter->link_speed, 2950 + (adapter->link_duplex == FULL_DUPLEX) ? 2951 + "Full Duplex" : "Half Duplex", 2952 + ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ? 2953 + "RX/TX" : 2954 + ((ctrl & E1000_CTRL_RFCE) ? "RX" : 2955 + ((ctrl & E1000_CTRL_TFCE) ? 
"TX" : "None" ))); 2956 } 2957 2958 static bool e1000_has_link(struct e1000_adapter *adapter) ··· 2994 if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && 2995 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { 2996 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ 2997 + e_info("Gigabit has been disabled, downgrading speed\n"); 2998 } 2999 3000 return link_active; ··· 3096 switch (adapter->link_speed) { 3097 case SPEED_10: 3098 case SPEED_100: 3099 + e_info("10/100 speed: disabling TSO\n"); 3100 netdev->features &= ~NETIF_F_TSO; 3101 netdev->features &= ~NETIF_F_TSO6; 3102 break; ··· 3130 if (netif_carrier_ok(netdev)) { 3131 adapter->link_speed = 0; 3132 adapter->link_duplex = 0; 3133 + e_info("Link is Down\n"); 3134 netif_carrier_off(netdev); 3135 netif_tx_stop_all_queues(netdev); 3136 if (!test_bit(__E1000_DOWN, &adapter->state)) ··· 3604 3605 pull_size = min((unsigned int)4, skb->data_len); 3606 if (!__pskb_pull_tail(skb, pull_size)) { 3607 + e_err("__pskb_pull_tail failed.\n"); 3608 dev_kfree_skb_any(skb); 3609 return NETDEV_TX_OK; 3610 } ··· 3737 3738 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || 3739 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 3740 + e_err("Invalid MTU setting\n"); 3741 return -EINVAL; 3742 } 3743 3744 /* Jumbo frame size limits */ 3745 if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) { 3746 if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { 3747 + e_err("Jumbo Frames not supported.\n"); 3748 return -EINVAL; 3749 } 3750 if (adapter->hw.phy.type == e1000_phy_ife) { 3751 + e_err("Jumbo Frames not supported.\n"); 3752 return -EINVAL; 3753 } 3754 } 3755 3756 #define MAX_STD_JUMBO_FRAME_SIZE 9234 3757 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { 3758 + e_err("MTU > 9216 not supported.\n"); 3759 return -EINVAL; 3760 } 3761 ··· 3792 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN 3793 + ETH_FCS_LEN; 3794 3795 + e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu); 3796 netdev->mtu = new_mtu; 3797 3798 if (netif_running(netdev)) ··· 4006 pci_restore_state(pdev); 4007 e1000e_disable_l1aspm(pdev); 4008 4009 + err = pci_enable_device_mem(pdev); 4010 if (err) { 4011 dev_err(&pdev->dev, 4012 "Cannot enable PCI device from suspend\n"); ··· 4043 * is up. For all other cases, let the f/w know that the h/w is now 4044 * under the control of the driver. 4045 */ 4046 + if (!(adapter->flags & FLAG_HAS_AMT)) 4047 e1000_get_hw_control(adapter); 4048 4049 return 0; ··· 4111 int err; 4112 4113 e1000e_disable_l1aspm(pdev); 4114 + err = pci_enable_device_mem(pdev); 4115 if (err) { 4116 dev_err(&pdev->dev, 4117 "Cannot re-enable PCI device after reset.\n"); ··· 4162 * is up. For all other cases, let the f/w know that the h/w is now 4163 * under the control of the driver. 4164 */ 4165 + if (!(adapter->flags & FLAG_HAS_AMT)) 4166 e1000_get_hw_control(adapter); 4167 4168 } ··· 4175 u32 pba_num; 4176 4177 /* print bus type/speed/width info */ 4178 + e_info("(PCI Express:2.5GB/s:%s) %02x:%02x:%02x:%02x:%02x:%02x\n", 4179 + /* bus width */ 4180 + ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : 4181 + "Width x1"), 4182 + /* MAC address */ 4183 + netdev->dev_addr[0], netdev->dev_addr[1], 4184 + netdev->dev_addr[2], netdev->dev_addr[3], 4185 + netdev->dev_addr[4], netdev->dev_addr[5]); 4186 + e_info("Intel(R) PRO/%s Network Connection\n", 4187 + (hw->phy.type == e1000_phy_ife) ? 
"10/100" : "1000"); 4188 e1000e_read_pba_num(hw, &pba_num); 4189 + e_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n", 4190 + hw->mac.type, hw->phy.type, (pba_num >> 8), (pba_num & 0xff)); 4191 } 4192 4193 + static void e1000_eeprom_checks(struct e1000_adapter *adapter) 4194 { 4195 + struct e1000_hw *hw = &adapter->hw; 4196 + int ret_val; 4197 + u16 buf = 0; 4198 + 4199 + if (hw->mac.type != e1000_82573) 4200 + return; 4201 + 4202 + ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf); 4203 + if (!(le16_to_cpu(buf) & (1 << 0))) { 4204 + /* Deep Smart Power Down (DSPD) */ 4205 + e_warn("Warning: detected DSPD enabled in EEPROM\n"); 4206 + } 4207 + 4208 + ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf); 4209 + if (le16_to_cpu(buf) & (3 << 2)) { 4210 + /* ASPM enable */ 4211 + e_warn("Warning: detected ASPM enabled in EEPROM\n"); 4212 } 4213 } 4214 ··· 4233 int i, err, pci_using_dac; 4234 u16 eeprom_data = 0; 4235 u16 eeprom_apme_mask = E1000_EEPROM_APME; 4236 4237 e1000e_disable_l1aspm(pdev); 4238 4239 + err = pci_enable_device_mem(pdev); 4240 if (err) 4241 return err; 4242 ··· 4268 } 4269 } 4270 4271 + err = pci_request_selected_regions(pdev, 4272 + pci_select_bars(pdev, IORESOURCE_MEM), 4273 + e1000e_driver_name); 4274 if (err) 4275 goto err_pci_reg; 4276 ··· 4293 adapter->hw.adapter = adapter; 4294 adapter->hw.mac.type = ei->mac; 4295 adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1; 4296 4297 mmio_start = pci_resource_start(pdev, 0); 4298 mmio_len = pci_resource_len(pdev, 0); ··· 4366 } 4367 4368 if (e1000_check_reset_block(&adapter->hw)) 4369 + e_info("PHY reset is blocked due to SOL/IDER session.\n"); 4370 4371 netdev->features = NETIF_F_SG | 4372 NETIF_F_HW_CSUM | ··· 4411 if (e1000_validate_nvm_checksum(&adapter->hw) >= 0) 4412 break; 4413 if (i == 2) { 4414 + e_err("The NVM Checksum Is Not Valid\n"); 4415 err = -EIO; 4416 goto err_eeprom; 4417 } 4418 } 4419 4420 + e1000_eeprom_checks(adapter); 4421 + 4422 /* copy the MAC address out of the NVM */ 4423 if (e1000e_read_mac_addr(&adapter->hw)) 4424 + e_err("NVM Read Error while reading MAC address\n"); 4425 4426 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); 4427 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); 4428 4429 if (!is_valid_ether_addr(netdev->perm_addr)) { 4430 + e_err("Invalid MAC Address: %02x:%02x:%02x:%02x:%02x:%02x\n", 4431 + netdev->perm_addr[0], netdev->perm_addr[1], 4432 + netdev->perm_addr[2], netdev->perm_addr[3], 4433 + netdev->perm_addr[4], netdev->perm_addr[5]); 4434 err = -EIO; 4435 goto err_eeprom; 4436 } ··· 4499 * is up. For all other cases, let the f/w know that the h/w is now 4500 * under the control of the driver. 
4501 */ 4502 + if (!(adapter->flags & FLAG_HAS_AMT)) 4503 e1000_get_hw_control(adapter); 4504 4505 /* tell the stack to leave us alone until e1000_open() is called */ ··· 4517 return 0; 4518 4519 err_register: 4520 + if (!(adapter->flags & FLAG_HAS_AMT)) 4521 + e1000_release_hw_control(adapter); 4522 err_eeprom: 4523 if (!e1000_check_reset_block(&adapter->hw)) 4524 e1000_phy_hw_reset(&adapter->hw); 4525 + err_hw_init: 4526 4527 kfree(adapter->tx_ring); 4528 kfree(adapter->rx_ring); 4529 err_sw_init: 4530 + if (adapter->hw.flash_address) 4531 + iounmap(adapter->hw.flash_address); 4532 + err_flashmap: 4533 iounmap(adapter->hw.hw_addr); 4534 err_ioremap: 4535 free_netdev(netdev); 4536 err_alloc_etherdev: 4537 + pci_release_selected_regions(pdev, 4538 + pci_select_bars(pdev, IORESOURCE_MEM)); 4539 err_pci_reg: 4540 err_dma: 4541 pci_disable_device(pdev); ··· 4582 iounmap(adapter->hw.hw_addr); 4583 if (adapter->hw.flash_address) 4584 iounmap(adapter->hw.flash_address); 4585 + pci_release_selected_regions(pdev, 4586 + pci_select_bars(pdev, IORESOURCE_MEM)); 4587 4588 free_netdev(netdev); 4589
+12 -19
drivers/net/e1000e/param.c
··· 27 *******************************************************************************/ 28 29 #include <linux/netdevice.h> 30 31 #include "e1000.h" 32 ··· 163 case enable_option: 164 switch (*value) { 165 case OPTION_ENABLED: 166 - ndev_info(adapter->netdev, "%s Enabled\n", opt->name); 167 return 0; 168 case OPTION_DISABLED: 169 - ndev_info(adapter->netdev, "%s Disabled\n", opt->name); 170 return 0; 171 } 172 break; 173 case range_option: 174 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { 175 - ndev_info(adapter->netdev, 176 - "%s set to %i\n", opt->name, *value); 177 return 0; 178 } 179 break; ··· 184 ent = &opt->arg.l.p[i]; 185 if (*value == ent->i) { 186 if (ent->str[0] != '\0') 187 - ndev_info(adapter->netdev, "%s\n", 188 - ent->str); 189 return 0; 190 } 191 } ··· 194 BUG(); 195 } 196 197 - ndev_info(adapter->netdev, "Invalid %s value specified (%i) %s\n", 198 - opt->name, *value, opt->err); 199 *value = opt->def; 200 return -1; 201 } ··· 212 void __devinit e1000e_check_options(struct e1000_adapter *adapter) 213 { 214 struct e1000_hw *hw = &adapter->hw; 215 - struct net_device *netdev = adapter->netdev; 216 int bd = adapter->bd_number; 217 218 if (bd >= E1000_MAX_NIC) { 219 - ndev_notice(netdev, 220 - "Warning: no configuration for board #%i\n", bd); 221 - ndev_notice(netdev, "Using defaults for all values\n"); 222 } 223 224 { /* Transmit Interrupt Delay */ ··· 310 adapter->itr = InterruptThrottleRate[bd]; 311 switch (adapter->itr) { 312 case 0: 313 - ndev_info(netdev, "%s turned off\n", 314 - opt.name); 315 break; 316 case 1: 317 - ndev_info(netdev, 318 - "%s set to dynamic mode\n", 319 - opt.name); 320 adapter->itr_setting = adapter->itr; 321 adapter->itr = 20000; 322 break; 323 case 3: 324 - ndev_info(netdev, 325 - "%s set to dynamic conservative mode\n", 326 opt.name); 327 adapter->itr_setting = adapter->itr; 328 adapter->itr = 20000;
··· 27 *******************************************************************************/ 28 29 #include <linux/netdevice.h> 30 + #include <linux/pci.h> 31 32 #include "e1000.h" 33 ··· 162 case enable_option: 163 switch (*value) { 164 case OPTION_ENABLED: 165 + e_info("%s Enabled\n", opt->name); 166 return 0; 167 case OPTION_DISABLED: 168 + e_info("%s Disabled\n", opt->name); 169 return 0; 170 } 171 break; 172 case range_option: 173 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { 174 + e_info("%s set to %i\n", opt->name, *value); 175 return 0; 176 } 177 break; ··· 184 ent = &opt->arg.l.p[i]; 185 if (*value == ent->i) { 186 if (ent->str[0] != '\0') 187 + e_info("%s\n", ent->str); 188 return 0; 189 } 190 } ··· 195 BUG(); 196 } 197 198 + e_info("Invalid %s value specified (%i) %s\n", opt->name, *value, 199 + opt->err); 200 *value = opt->def; 201 return -1; 202 } ··· 213 void __devinit e1000e_check_options(struct e1000_adapter *adapter) 214 { 215 struct e1000_hw *hw = &adapter->hw; 216 int bd = adapter->bd_number; 217 218 if (bd >= E1000_MAX_NIC) { 219 + e_notice("Warning: no configuration for board #%i\n", bd); 220 + e_notice("Using defaults for all values\n"); 221 } 222 223 { /* Transmit Interrupt Delay */ ··· 313 adapter->itr = InterruptThrottleRate[bd]; 314 switch (adapter->itr) { 315 case 0: 316 + e_info("%s turned off\n", opt.name); 317 break; 318 case 1: 319 + e_info("%s set to dynamic mode\n", opt.name); 320 adapter->itr_setting = adapter->itr; 321 adapter->itr = 20000; 322 break; 323 case 3: 324 + e_info("%s set to dynamic conservative mode\n", 325 opt.name); 326 adapter->itr_setting = adapter->itr; 327 adapter->itr = 20000;
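The param.c conversion compiles because e_info()/e_notice() pick up the adapter context implicitly from the enclosing scope, which is why the local netdev variable could be deleted. The macro definitions are not part of this diff; a plausible sketch (names and format are assumptions, the real definitions live in the driver's private header):

	/* Hypothetical sketch: driver-local logging macros that expand
	 * against a variable named 'adapter' in the calling function. */
	#define e_printk(level, adapter, format, arg...) \
		printk(level "%s: %s: " format, pci_name((adapter)->pdev), \
		       (adapter)->netdev->name, ## arg)
	#define e_info(format, arg...) \
		e_printk(KERN_INFO, adapter, format, ## arg)
	#define e_notice(format, arg...) \
		e_printk(KERN_NOTICE, adapter, format, ## arg)

A definition along these lines would also explain the new #include <linux/pci.h>: pci_name() needs it.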
-8
drivers/net/eepro.c
··· 1283 1284 if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || dev->mc_count > 63) 1285 { 1286 - /* 1287 - * We must make the kernel realise we had to move 1288 - * into promisc mode or we start all out war on 1289 - * the cable. If it was a promisc request the 1290 - * flag is already set. If not we assert it. 1291 - */ 1292 - dev->flags|=IFF_PROMISC; 1293 - 1294 eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ 1295 mode = inb(ioaddr + REG2); 1296 outb(mode | PRMSC_Mode, ioaddr + REG2);
··· 1283 1284 if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || dev->mc_count > 63) 1285 { 1286 eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ 1287 mode = inb(ioaddr + REG2); 1288 outb(mode | PRMSC_Mode, ioaddr + REG2);
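This eepro hunk, and the eth16i and lp486e hunks further down, delete code that forced IFF_PROMISC from inside the driver. dev->flags is owned by the network core; a set_multicast_list handler should only translate the flags it is handed into hardware state, never write them back. The surviving shape is the idiomatic one (the helper names here are illustrative, not from this diff):

	/* Program the receive mode from the flags the core set. */
	if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
	    dev->mc_count > HW_MCAST_LIMIT)	/* HW_MCAST_LIMIT: illustrative */
		hw_enable_promisc_rx(dev);	/* hypothetical helper */
	else
		hw_load_mc_filter(dev);		/* hypothetical helper */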
-1
drivers/net/eth16i.c
··· 1356 1357 if(dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC)) 1358 { 1359 - dev->flags|=IFF_PROMISC; /* Must do this */ 1360 outb(3, ioaddr + RECEIVE_MODE_REG); 1361 } else { 1362 outb(2, ioaddr + RECEIVE_MODE_REG);
··· 1356 1357 if(dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC)) 1358 { 1359 outb(3, ioaddr + RECEIVE_MODE_REG); 1360 } else { 1361 outb(2, ioaddr + RECEIVE_MODE_REG);
+70 -40
drivers/net/forcedeth.c
··· 77 * Hardware access: 78 */ 79 80 - #define DEV_NEED_TIMERIRQ 0x00001 /* set the timer irq flag in the irq mask */ 81 - #define DEV_NEED_LINKTIMER 0x00002 /* poll link settings. Relies on the timer irq */ 82 - #define DEV_HAS_LARGEDESC 0x00004 /* device supports jumbo frames and needs packet format 2 */ 83 - #define DEV_HAS_HIGH_DMA 0x00008 /* device supports 64bit dma */ 84 - #define DEV_HAS_CHECKSUM 0x00010 /* device supports tx and rx checksum offloads */ 85 - #define DEV_HAS_VLAN 0x00020 /* device supports vlan tagging and striping */ 86 - #define DEV_HAS_MSI 0x00040 /* device supports MSI */ 87 - #define DEV_HAS_MSI_X 0x00080 /* device supports MSI-X */ 88 - #define DEV_HAS_POWER_CNTRL 0x00100 /* device supports power savings */ 89 - #define DEV_HAS_STATISTICS_V1 0x00200 /* device supports hw statistics version 1 */ 90 - #define DEV_HAS_STATISTICS_V2 0x00400 /* device supports hw statistics version 2 */ 91 - #define DEV_HAS_TEST_EXTENDED 0x00800 /* device supports extended diagnostic test */ 92 - #define DEV_HAS_MGMT_UNIT 0x01000 /* device supports management unit */ 93 - #define DEV_HAS_CORRECT_MACADDR 0x02000 /* device supports correct mac address order */ 94 - #define DEV_HAS_COLLISION_FIX 0x04000 /* device supports tx collision fix */ 95 - #define DEV_HAS_PAUSEFRAME_TX_V1 0x08000 /* device supports tx pause frames version 1 */ 96 - #define DEV_HAS_PAUSEFRAME_TX_V2 0x10000 /* device supports tx pause frames version 2 */ 97 - #define DEV_HAS_PAUSEFRAME_TX_V3 0x20000 /* device supports tx pause frames version 3 */ 98 - #define DEV_NEED_TX_LIMIT 0x40000 /* device needs to limit tx */ 99 - #define DEV_HAS_GEAR_MODE 0x80000 /* device supports gear mode */ 100 101 enum { 102 NvRegIrqStatus = 0x000, ··· 249 #define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010 250 #define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0 251 #define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880 252 NvRegMIIStatus = 0x180, 253 #define NVREG_MIISTAT_ERROR 0x0001 254 #define NVREG_MIISTAT_LINKCHANGE 0x0008 ··· 273 #define NVREG_MIICTL_WRITE 0x00400 274 #define NVREG_MIICTL_ADDRSHIFT 5 275 NvRegMIIData = 0x194, 276 NvRegWakeUpFlags = 0x200, 277 #define NVREG_WAKEUPFLAGS_VAL 0x7770 278 #define NVREG_WAKEUPFLAGS_BUSYSHIFT 24 ··· 408 #define NV_RX_FRAMINGERR (1<<29) 409 #define NV_RX_ERROR (1<<30) 410 #define NV_RX_AVAIL (1<<31) 411 412 #define NV_RX2_CHECKSUMMASK (0x1C000000) 413 #define NV_RX2_CHECKSUM_IP (0x10000000) ··· 426 /* error and avail are the same for both */ 427 #define NV_RX2_ERROR (1<<30) 428 #define NV_RX2_AVAIL (1<<31) 429 430 #define NV_RX3_VLAN_TAG_PRESENT (1<<16) 431 #define NV_RX3_VLAN_TAG_MASK (0x0000FFFF) ··· 624 { "rx_bytes" }, 625 { "tx_pause" }, 626 { "rx_pause" }, 627 - { "rx_drop_frame" } 628 }; 629 630 struct nv_ethtool_stats { ··· 665 u64 tx_pause; 666 u64 rx_pause; 667 u64 rx_drop_frame; 668 }; 669 670 - #define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64)) 671 #define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6) 672 673 /* diagnostics */ ··· 1647 np->estats.rx_pause += readl(base + NvRegRxPause); 1648 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame); 1649 } 1650 } 1651 1652 /* ··· 1666 struct fe_priv *np = netdev_priv(dev); 1667 1668 /* If the nic supports hw counters then retrieve latest values */ 1669 - if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) { 1670 nv_get_hw_stats(dev); 1671 1672 /* copy to net_device stats */ ··· 2657 if (likely(flags & NV_RX_DESCRIPTORVALID)) { 2658 len = flags & LEN_MASK_V1; 2659 if 
(unlikely(flags & NV_RX_ERROR)) { 2660 - if (flags & NV_RX_ERROR4) { 2661 len = nv_getlen(dev, skb->data, len); 2662 if (len < 0) { 2663 dev->stats.rx_errors++; ··· 2666 } 2667 } 2668 /* framing errors are soft errors */ 2669 - else if (flags & NV_RX_FRAMINGERR) { 2670 if (flags & NV_RX_SUBSTRACT1) { 2671 len--; 2672 } ··· 2692 if (likely(flags & NV_RX2_DESCRIPTORVALID)) { 2693 len = flags & LEN_MASK_V2; 2694 if (unlikely(flags & NV_RX2_ERROR)) { 2695 - if (flags & NV_RX2_ERROR4) { 2696 len = nv_getlen(dev, skb->data, len); 2697 if (len < 0) { 2698 dev->stats.rx_errors++; ··· 2701 } 2702 } 2703 /* framing errors are soft errors */ 2704 - else if (flags & NV_RX2_FRAMINGERR) { 2705 if (flags & NV_RX2_SUBSTRACT1) { 2706 len--; 2707 } ··· 2791 if (likely(flags & NV_RX2_DESCRIPTORVALID)) { 2792 len = flags & LEN_MASK_V2; 2793 if (unlikely(flags & NV_RX2_ERROR)) { 2794 - if (flags & NV_RX2_ERROR4) { 2795 len = nv_getlen(dev, skb->data, len); 2796 if (len < 0) { 2797 dev_kfree_skb(skb); ··· 2799 } 2800 } 2801 /* framing errors are soft errors */ 2802 - else if (flags & NV_RX2_FRAMINGERR) { 2803 if (flags & NV_RX2_SUBSTRACT1) { 2804 len--; 2805 } ··· 3078 u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1; 3079 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) 3080 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2; 3081 - if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) 3082 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3; 3083 writel(pause_enable, base + NvRegTxPauseFrame); 3084 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1); 3085 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; ··· 4768 return NV_DEV_STATISTICS_V1_COUNT; 4769 else if (np->driver_data & DEV_HAS_STATISTICS_V2) 4770 return NV_DEV_STATISTICS_V2_COUNT; 4771 else 4772 return 0; 4773 default: ··· 5354 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 5355 5356 /* start statistics timer */ 5357 - if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) 5358 mod_timer(&np->stats_poll, 5359 round_jiffies(jiffies + STATS_INTERVAL)); 5360 ··· 5458 if (err < 0) 5459 goto out_disable; 5460 5461 - if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2)) 5462 np->register_size = NV_PCI_REGSZ_VER3; 5463 else if (id->driver_data & DEV_HAS_STATISTICS_V1) 5464 np->register_size = NV_PCI_REGSZ_VER2; ··· 6113 }, 6114 { /* MCP77 Ethernet Controller */ 6115 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32), 6116 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6117 }, 6118 { /* MCP77 Ethernet Controller */ 6119 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33), 6120 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6121 }, 6122 { /* MCP77 Ethernet Controller */ 6123 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34), 6124 - .driver_data = 
DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6125 }, 6126 { /* MCP77 Ethernet Controller */ 6127 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35), 6128 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6129 }, 6130 { /* MCP79 Ethernet Controller */ 6131 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36), 6132 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6133 }, 6134 { /* MCP79 Ethernet Controller */ 6135 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37), 6136 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6137 }, 6138 { /* MCP79 Ethernet Controller */ 6139 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38), 6140 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6141 }, 6142 { /* MCP79 Ethernet Controller */ 6143 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39), 6144 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6145 }, 6146 {0,}, 6147 };
··· 77 * Hardware access: 78 */ 79 80 + #define DEV_NEED_TIMERIRQ 0x000001 /* set the timer irq flag in the irq mask */ 81 + #define DEV_NEED_LINKTIMER 0x000002 /* poll link settings. Relies on the timer irq */ 82 + #define DEV_HAS_LARGEDESC 0x000004 /* device supports jumbo frames and needs packet format 2 */ 83 + #define DEV_HAS_HIGH_DMA 0x000008 /* device supports 64bit dma */ 84 + #define DEV_HAS_CHECKSUM 0x000010 /* device supports tx and rx checksum offloads */ 85 + #define DEV_HAS_VLAN 0x000020 /* device supports vlan tagging and striping */ 86 + #define DEV_HAS_MSI 0x000040 /* device supports MSI */ 87 + #define DEV_HAS_MSI_X 0x000080 /* device supports MSI-X */ 88 + #define DEV_HAS_POWER_CNTRL 0x000100 /* device supports power savings */ 89 + #define DEV_HAS_STATISTICS_V1 0x000200 /* device supports hw statistics version 1 */ 90 + #define DEV_HAS_STATISTICS_V2 0x000400 /* device supports hw statistics version 2 */ 91 + #define DEV_HAS_STATISTICS_V3 0x000800 /* device supports hw statistics version 3 */ 92 + #define DEV_HAS_TEST_EXTENDED 0x001000 /* device supports extended diagnostic test */ 93 + #define DEV_HAS_MGMT_UNIT 0x002000 /* device supports management unit */ 94 + #define DEV_HAS_CORRECT_MACADDR 0x004000 /* device supports correct mac address order */ 95 + #define DEV_HAS_COLLISION_FIX 0x008000 /* device supports tx collision fix */ 96 + #define DEV_HAS_PAUSEFRAME_TX_V1 0x010000 /* device supports tx pause frames version 1 */ 97 + #define DEV_HAS_PAUSEFRAME_TX_V2 0x020000 /* device supports tx pause frames version 2 */ 98 + #define DEV_HAS_PAUSEFRAME_TX_V3 0x040000 /* device supports tx pause frames version 3 */ 99 + #define DEV_NEED_TX_LIMIT 0x080000 /* device needs to limit tx */ 100 + #define DEV_HAS_GEAR_MODE 0x100000 /* device supports gear mode */ 101 102 enum { 103 NvRegIrqStatus = 0x000, ··· 248 #define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010 249 #define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0 250 #define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880 251 + NvRegTxPauseFrameLimit = 0x174, 252 + #define NVREG_TX_PAUSEFRAMELIMIT_ENABLE 0x00010000 253 NvRegMIIStatus = 0x180, 254 #define NVREG_MIISTAT_ERROR 0x0001 255 #define NVREG_MIISTAT_LINKCHANGE 0x0008 ··· 270 #define NVREG_MIICTL_WRITE 0x00400 271 #define NVREG_MIICTL_ADDRSHIFT 5 272 NvRegMIIData = 0x194, 273 + NvRegTxUnicast = 0x1a0, 274 + NvRegTxMulticast = 0x1a4, 275 + NvRegTxBroadcast = 0x1a8, 276 NvRegWakeUpFlags = 0x200, 277 #define NVREG_WAKEUPFLAGS_VAL 0x7770 278 #define NVREG_WAKEUPFLAGS_BUSYSHIFT 24 ··· 402 #define NV_RX_FRAMINGERR (1<<29) 403 #define NV_RX_ERROR (1<<30) 404 #define NV_RX_AVAIL (1<<31) 405 + #define NV_RX_ERROR_MASK (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR) 406 407 #define NV_RX2_CHECKSUMMASK (0x1C000000) 408 #define NV_RX2_CHECKSUM_IP (0x10000000) ··· 419 /* error and avail are the same for both */ 420 #define NV_RX2_ERROR (1<<30) 421 #define NV_RX2_AVAIL (1<<31) 422 + #define NV_RX2_ERROR_MASK (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR) 423 424 #define NV_RX3_VLAN_TAG_PRESENT (1<<16) 425 #define NV_RX3_VLAN_TAG_MASK (0x0000FFFF) ··· 616 { "rx_bytes" }, 617 { "tx_pause" }, 618 { "rx_pause" }, 619 + { "rx_drop_frame" }, 620 + 621 + /* version 3 stats */ 622 + { "tx_unicast" }, 623 + { "tx_multicast" }, 624 + { "tx_broadcast" } 625 }; 626 627 struct nv_ethtool_stats { ··· 652 u64 tx_pause; 653 u64 rx_pause; 654 u64 rx_drop_frame; 655 + 656 + /* version 3 stats */ 657 + u64 
tx_unicast; 658 + u64 tx_multicast; 659 + u64 tx_broadcast; 660 }; 661 662 + #define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64)) 663 + #define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3) 664 #define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6) 665 666 /* diagnostics */ ··· 1628 np->estats.rx_pause += readl(base + NvRegRxPause); 1629 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame); 1630 } 1631 + 1632 + if (np->driver_data & DEV_HAS_STATISTICS_V3) { 1633 + np->estats.tx_unicast += readl(base + NvRegTxUnicast); 1634 + np->estats.tx_multicast += readl(base + NvRegTxMulticast); 1635 + np->estats.tx_broadcast += readl(base + NvRegTxBroadcast); 1636 + } 1637 } 1638 1639 /* ··· 1641 struct fe_priv *np = netdev_priv(dev); 1642 1643 /* If the nic supports hw counters then retrieve latest values */ 1644 + if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) { 1645 nv_get_hw_stats(dev); 1646 1647 /* copy to net_device stats */ ··· 2632 if (likely(flags & NV_RX_DESCRIPTORVALID)) { 2633 len = flags & LEN_MASK_V1; 2634 if (unlikely(flags & NV_RX_ERROR)) { 2635 + if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) { 2636 len = nv_getlen(dev, skb->data, len); 2637 if (len < 0) { 2638 dev->stats.rx_errors++; ··· 2641 } 2642 } 2643 /* framing errors are soft errors */ 2644 + else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) { 2645 if (flags & NV_RX_SUBSTRACT1) { 2646 len--; 2647 } ··· 2667 if (likely(flags & NV_RX2_DESCRIPTORVALID)) { 2668 len = flags & LEN_MASK_V2; 2669 if (unlikely(flags & NV_RX2_ERROR)) { 2670 + if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) { 2671 len = nv_getlen(dev, skb->data, len); 2672 if (len < 0) { 2673 dev->stats.rx_errors++; ··· 2676 } 2677 } 2678 /* framing errors are soft errors */ 2679 + else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) { 2680 if (flags & NV_RX2_SUBSTRACT1) { 2681 len--; 2682 } ··· 2766 if (likely(flags & NV_RX2_DESCRIPTORVALID)) { 2767 len = flags & LEN_MASK_V2; 2768 if (unlikely(flags & NV_RX2_ERROR)) { 2769 + if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) { 2770 len = nv_getlen(dev, skb->data, len); 2771 if (len < 0) { 2772 dev_kfree_skb(skb); ··· 2774 } 2775 } 2776 /* framing errors are soft errors */ 2777 + else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) { 2778 if (flags & NV_RX2_SUBSTRACT1) { 2779 len--; 2780 } ··· 3053 u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1; 3054 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) 3055 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2; 3056 + if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) { 3057 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3; 3058 + /* limit the number of tx pause frames to a default of 8 */ 3059 + writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit); 3060 + } 3061 writel(pause_enable, base + NvRegTxPauseFrame); 3062 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1); 3063 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; ··· 4740 return NV_DEV_STATISTICS_V1_COUNT; 4741 else if (np->driver_data & DEV_HAS_STATISTICS_V2) 4742 return NV_DEV_STATISTICS_V2_COUNT; 4743 + else if (np->driver_data & DEV_HAS_STATISTICS_V3) 4744 + return NV_DEV_STATISTICS_V3_COUNT; 4745 else 4746 return 0; 4747 default: ··· 5324 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); 5325 5326 /* start statistics timer */ 5327 + if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) 5328 mod_timer(&np->stats_poll, 
5329 round_jiffies(jiffies + STATS_INTERVAL)); 5330 ··· 5428 if (err < 0) 5429 goto out_disable; 5430 5431 + if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) 5432 np->register_size = NV_PCI_REGSZ_VER3; 5433 else if (id->driver_data & DEV_HAS_STATISTICS_V1) 5434 np->register_size = NV_PCI_REGSZ_VER2; ··· 6083 }, 6084 { /* MCP77 Ethernet Controller */ 6085 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32), 6086 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6087 }, 6088 { /* MCP77 Ethernet Controller */ 6089 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33), 6090 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6091 }, 6092 { /* MCP77 Ethernet Controller */ 6093 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34), 6094 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6095 }, 6096 { /* MCP77 Ethernet Controller */ 6097 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35), 6098 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6099 }, 6100 { /* MCP79 Ethernet Controller */ 6101 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36), 6102 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6103 }, 6104 { /* MCP79 Ethernet Controller */ 6105 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37), 6106 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6107 }, 6108 { /* MCP79 Ethernet Controller */ 6109 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38), 6110 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6111 }, 6112 { /* MCP79 Ethernet Controller */ 6113 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39), 6114 + .driver_data = 
DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6115 }, 6116 {0,}, 6117 };
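Two details in the forcedeth update are worth calling out. The driver_data flag space is widened from five to six hex digits so DEV_HAS_STATISTICS_V3 can be inserted while every later flag shifts up one bit, and the receive error checks change from single-bit tests to exact matches against the new error masks:

	/* Exact-match test: true only when ERROR4 is the sole error bit
	 * set, i.e. a recoverable length error with no hard error
	 * (CRC, overflow, framing) alongside it. */
	static int nv_rx_len_error_only(u32 flags)
	{
		return (flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4;
	}

The old test, flags & NV_RX_ERROR4, would also accept frames whose descriptors carried additional hard errors, which must be dropped.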
+1 -1
drivers/net/fs_enet/mac-fcc.c
··· 126 #define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB) 127 #define FCC_RX_EVENT (FCC_ENET_RXF) 128 #define FCC_TX_EVENT (FCC_ENET_TXB) 129 - #define FCC_ERR_EVENT_MSK (FCC_ENET_TXE | FCC_ENET_BSY) 130 131 static int setup_data(struct net_device *dev) 132 {
··· 126 #define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB) 127 #define FCC_RX_EVENT (FCC_ENET_RXF) 128 #define FCC_TX_EVENT (FCC_ENET_TXB) 129 + #define FCC_ERR_EVENT_MSK (FCC_ENET_TXE) 130 131 static int setup_data(struct net_device *dev) 132 {
-4
drivers/net/gianfar.c
··· 414 spin_unlock(&priv->rxlock); 415 spin_unlock_irqrestore(&priv->txlock, flags); 416 417 - #ifdef CONFIG_GFAR_NAPI 418 napi_disable(&priv->napi); 419 - #endif 420 421 if (magic_packet) { 422 /* Enable interrupt on Magic Packet */ ··· 467 468 netif_device_attach(dev); 469 470 - #ifdef CONFIG_GFAR_NAPI 471 napi_enable(&priv->napi); 472 - #endif 473 474 return 0; 475 }
··· 414 spin_unlock(&priv->rxlock); 415 spin_unlock_irqrestore(&priv->txlock, flags); 416 417 napi_disable(&priv->napi); 418 419 if (magic_packet) { 420 /* Enable interrupt on Magic Packet */ ··· 469 470 netif_device_attach(dev); 471 472 napi_enable(&priv->napi); 473 474 return 0; 475 }
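The gianfar suspend/resume hunks drop the CONFIG_GFAR_NAPI conditionals, reflecting that the driver now uses NAPI unconditionally: the polling context must always be quiesced before the hardware sleeps and restarted after it wakes. A sketch of the suspend side, assuming the priv layout shown above:

	static int gfar_suspend_sketch(struct net_device *dev)
	{
		struct gfar_private *priv = netdev_priv(dev);

		netif_device_detach(dev);
		napi_disable(&priv->napi);	/* now unconditional */
		return 0;
	}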
+1 -1
drivers/net/hamradio/mkiss.c
··· 548 } 549 550 printk(KERN_ERR "mkiss: %s: transmit timed out, %s?\n", dev->name, 551 - (ax->tty->ops->chars_in_buffer(ax->tty) || ax->xleft) ? 552 "bad line quality" : "driver error"); 553 554 ax->xleft = 0;
··· 548 } 549 550 printk(KERN_ERR "mkiss: %s: transmit timed out, %s?\n", dev->name, 551 + (tty_chars_in_buffer(ax->tty) || ax->xleft) ? 552 "bad line quality" : "driver error"); 553 554 ax->xleft = 0;
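mkiss now calls tty_chars_in_buffer() rather than dereferencing tty->ops directly. The helper centralizes the check for tty drivers that do not implement the operation; a sketch of what such an accessor looks like (the real one lives in the tty core and may differ):

	/* Treat a missing chars_in_buffer op as an empty buffer
	 * instead of a NULL dereference. */
	int tty_chars_in_buffer(struct tty_struct *tty)
	{
		if (tty->ops->chars_in_buffer)
			return tty->ops->chars_in_buffer(tty);
		return 0;
	}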
+7 -65
drivers/net/igb/e1000_82575.c
··· 850 for (; mc_addr_count > 0; mc_addr_count--) { 851 hash_value = igb_hash_mc_addr(hw, mc_addr_list); 852 hw_dbg("Hash value = 0x%03X\n", hash_value); 853 - hw->mac.ops.mta_set(hw, hash_value); 854 mc_addr_list += ETH_ALEN; 855 } 856 } ··· 1136 E1000_PCS_LCTL_FORCE_LINK; /* Force Link */ 1137 hw_dbg("Configuring Forced Link; PCS_LCTL = 0x%08X\n", reg); 1138 } 1139 wr32(E1000_PCS_LCTL, reg); 1140 1141 return 0; ··· 1235 1236 out: 1237 return ret_val; 1238 - } 1239 - 1240 - /** 1241 - * igb_translate_register_82576 - Translate the proper register offset 1242 - * @reg: e1000 register to be read 1243 - * 1244 - * Registers in 82576 are located in different offsets than other adapters 1245 - * even though they function in the same manner. This function takes in 1246 - * the name of the register to read and returns the correct offset for 1247 - * 82576 silicon. 1248 - **/ 1249 - u32 igb_translate_register_82576(u32 reg) 1250 - { 1251 - /* 1252 - * Some of the Kawela registers are located at different 1253 - * offsets than they are in older adapters. 1254 - * Despite the difference in location, the registers 1255 - * function in the same manner. 1256 - */ 1257 - switch (reg) { 1258 - case E1000_TDBAL(0): 1259 - reg = 0x0E000; 1260 - break; 1261 - case E1000_TDBAH(0): 1262 - reg = 0x0E004; 1263 - break; 1264 - case E1000_TDLEN(0): 1265 - reg = 0x0E008; 1266 - break; 1267 - case E1000_TDH(0): 1268 - reg = 0x0E010; 1269 - break; 1270 - case E1000_TDT(0): 1271 - reg = 0x0E018; 1272 - break; 1273 - case E1000_TXDCTL(0): 1274 - reg = 0x0E028; 1275 - break; 1276 - case E1000_RDBAL(0): 1277 - reg = 0x0C000; 1278 - break; 1279 - case E1000_RDBAH(0): 1280 - reg = 0x0C004; 1281 - break; 1282 - case E1000_RDLEN(0): 1283 - reg = 0x0C008; 1284 - break; 1285 - case E1000_RDH(0): 1286 - reg = 0x0C010; 1287 - break; 1288 - case E1000_RDT(0): 1289 - reg = 0x0C018; 1290 - break; 1291 - case E1000_RXDCTL(0): 1292 - reg = 0x0C028; 1293 - break; 1294 - case E1000_SRRCTL(0): 1295 - reg = 0x0C00C; 1296 - break; 1297 - default: 1298 - break; 1299 - } 1300 - 1301 - return reg; 1302 } 1303 1304 /**
··· 850 for (; mc_addr_count > 0; mc_addr_count--) { 851 hash_value = igb_hash_mc_addr(hw, mc_addr_list); 852 hw_dbg("Hash value = 0x%03X\n", hash_value); 853 + igb_mta_set(hw, hash_value); 854 mc_addr_list += ETH_ALEN; 855 } 856 } ··· 1136 E1000_PCS_LCTL_FORCE_LINK; /* Force Link */ 1137 hw_dbg("Configuring Forced Link; PCS_LCTL = 0x%08X\n", reg); 1138 } 1139 + 1140 + if (hw->mac.type == e1000_82576) { 1141 + reg |= E1000_PCS_LCTL_FORCE_FCTRL; 1142 + igb_force_mac_fc(hw); 1143 + } 1144 + 1145 wr32(E1000_PCS_LCTL, reg); 1146 1147 return 0; ··· 1229 1230 out: 1231 return ret_val; 1232 } 1233 1234 /**
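The serdes hunk adds flow-control forcing for the 82576: when link is forced, pause parameters cannot be resolved by autonegotiation, so the PCS is told to force flow control and igb_force_mac_fc() makes the MAC agree. A hedged sketch of what that entails, assuming the usual e1000-family CTRL programming (the function body is not part of this diff):

	/* Program CTRL.RFCE/TFCE from the requested mode instead of
	 * waiting for an autoneg resolution that will never come. */
	ctrl = rd32(E1000_CTRL);
	switch (hw->fc.type) {
	case e1000_fc_none:
		ctrl &= ~(E1000_CTRL_RFCE | E1000_CTRL_TFCE);
		break;
	case e1000_fc_rx_pause:
		ctrl &= ~E1000_CTRL_TFCE;
		ctrl |= E1000_CTRL_RFCE;
		break;
	case e1000_fc_tx_pause:
		ctrl &= ~E1000_CTRL_RFCE;
		ctrl |= E1000_CTRL_TFCE;
		break;
	default:	/* e1000_fc_full */
		ctrl |= E1000_CTRL_RFCE | E1000_CTRL_TFCE;
	}
	wr32(E1000_CTRL, ctrl);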
-1
drivers/net/igb/e1000_82575.h
··· 28 #ifndef _E1000_82575_H_ 29 #define _E1000_82575_H_ 30 31 - u32 igb_translate_register_82576(u32 reg); 32 void igb_update_mc_addr_list_82575(struct e1000_hw*, u8*, u32, u32, u32); 33 extern void igb_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw); 34 extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
··· 28 #ifndef _E1000_82575_H_ 29 #define _E1000_82575_H_ 30 31 void igb_update_mc_addr_list_82575(struct e1000_hw*, u8*, u32, u32, u32); 32 extern void igb_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw); 33 extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
+1
drivers/net/igb/e1000_defines.h
··· 257 #define E1000_PCS_LCTL_FDV_FULL 8 258 #define E1000_PCS_LCTL_FSD 0x10 259 #define E1000_PCS_LCTL_FORCE_LINK 0x20 260 #define E1000_PCS_LCTL_AN_ENABLE 0x10000 261 #define E1000_PCS_LCTL_AN_RESTART 0x20000 262 #define E1000_PCS_LCTL_AN_TIMEOUT 0x40000
··· 257 #define E1000_PCS_LCTL_FDV_FULL 8 258 #define E1000_PCS_LCTL_FSD 0x10 259 #define E1000_PCS_LCTL_FORCE_LINK 0x20 260 + #define E1000_PCS_LCTL_FORCE_FCTRL 0x80 261 #define E1000_PCS_LCTL_AN_ENABLE 0x10000 262 #define E1000_PCS_LCTL_AN_RESTART 0x20000 263 #define E1000_PCS_LCTL_AN_TIMEOUT 0x40000
-1
drivers/net/igb/e1000_hw.h
··· 420 void (*rar_set)(struct e1000_hw *, u8 *, u32); 421 s32 (*read_mac_addr)(struct e1000_hw *); 422 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *); 423 - void (*mta_set)(struct e1000_hw *, u32); 424 }; 425 426 struct e1000_phy_operations {
··· 420 void (*rar_set)(struct e1000_hw *, u8 *, u32); 421 s32 (*read_mac_addr)(struct e1000_hw *); 422 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *); 423 }; 424 425 struct e1000_phy_operations {
+1 -83
drivers/net/igb/e1000_mac.c
··· 144 } 145 146 /** 147 - * igb_init_rx_addrs - Initialize receive address's 148 - * @hw: pointer to the HW structure 149 - * @rar_count: receive address registers 150 - * 151 - * Setups the receive address registers by setting the base receive address 152 - * register to the devices MAC address and clearing all the other receive 153 - * address registers to 0. 154 - **/ 155 - void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) 156 - { 157 - u32 i; 158 - 159 - /* Setup the receive address */ 160 - hw_dbg("Programming MAC Address into RAR[0]\n"); 161 - 162 - hw->mac.ops.rar_set(hw, hw->mac.addr, 0); 163 - 164 - /* Zero out the other (rar_entry_count - 1) receive addresses */ 165 - hw_dbg("Clearing RAR[1-%u]\n", rar_count-1); 166 - for (i = 1; i < rar_count; i++) { 167 - array_wr32(E1000_RA, (i << 1), 0); 168 - wrfl(); 169 - array_wr32(E1000_RA, ((i << 1) + 1), 0); 170 - wrfl(); 171 - } 172 - } 173 - 174 - /** 175 * igb_check_alt_mac_addr - Check for alternate MAC addr 176 * @hw: pointer to the HW structure 177 * ··· 243 * current value is read, the new bit is OR'd in and the new value is 244 * written back into the register. 245 **/ 246 - static void igb_mta_set(struct e1000_hw *hw, u32 hash_value) 247 { 248 u32 hash_bit, hash_reg, mta; 249 ··· 266 267 array_wr32(E1000_MTA, hash_reg, mta); 268 wrfl(); 269 - } 270 - 271 - /** 272 - * igb_update_mc_addr_list - Update Multicast addresses 273 - * @hw: pointer to the HW structure 274 - * @mc_addr_list: array of multicast addresses to program 275 - * @mc_addr_count: number of multicast addresses to program 276 - * @rar_used_count: the first RAR register free to program 277 - * @rar_count: total number of supported Receive Address Registers 278 - * 279 - * Updates the Receive Address Registers and Multicast Table Array. 280 - * The caller must have a packed mc_addr_list of multicast addresses. 281 - * The parameter rar_count will usually be hw->mac.rar_entry_count 282 - * unless there are workarounds that change this. 283 - **/ 284 - void igb_update_mc_addr_list(struct e1000_hw *hw, 285 - u8 *mc_addr_list, u32 mc_addr_count, 286 - u32 rar_used_count, u32 rar_count) 287 - { 288 - u32 hash_value; 289 - u32 i; 290 - 291 - /* 292 - * Load the first set of multicast addresses into the exact 293 - * filters (RAR). If there are not enough to fill the RAR 294 - * array, clear the filters. 295 - */ 296 - for (i = rar_used_count; i < rar_count; i++) { 297 - if (mc_addr_count) { 298 - hw->mac.ops.rar_set(hw, mc_addr_list, i); 299 - mc_addr_count--; 300 - mc_addr_list += ETH_ALEN; 301 - } else { 302 - array_wr32(E1000_RA, i << 1, 0); 303 - wrfl(); 304 - array_wr32(E1000_RA, (i << 1) + 1, 0); 305 - wrfl(); 306 - } 307 - } 308 - 309 - /* Clear the old settings from the MTA */ 310 - hw_dbg("Clearing MTA\n"); 311 - for (i = 0; i < hw->mac.mta_reg_count; i++) { 312 - array_wr32(E1000_MTA, i, 0); 313 - wrfl(); 314 - } 315 - 316 - /* Load any remaining multicast addresses into the hash table. */ 317 - for (; mc_addr_count > 0; mc_addr_count--) { 318 - hash_value = igb_hash_mc_addr(hw, mc_addr_list); 319 - hw_dbg("Hash value = 0x%03X\n", hash_value); 320 - igb_mta_set(hw, hash_value); 321 - mc_addr_list += ETH_ALEN; 322 - } 323 } 324 325 /**
··· 144 } 145 146 /** 147 * igb_check_alt_mac_addr - Check for alternate MAC addr 148 * @hw: pointer to the HW structure 149 * ··· 271 * current value is read, the new bit is OR'd in and the new value is 272 * written back into the register. 273 **/ 274 + void igb_mta_set(struct e1000_hw *hw, u32 hash_value) 275 { 276 u32 hash_bit, hash_reg, mta; 277 ··· 294 295 array_wr32(E1000_MTA, hash_reg, mta); 296 wrfl(); 297 } 298 299 /**
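With the mta_set function pointer gone from the ops table (see the e1000_hw.h hunk above), igb_mta_set() loses its static qualifier so 82575.c can call it directly. Its elided body follows the usual e1000-family split of the multicast hash; a sketch under that assumption, using the array_rd32() counterpart of the array_wr32() shown:

	/* Upper bits select one 32-bit MTA register, the low 5 bits
	 * select the bit within it; read-modify-write preserves the
	 * other hash groups. */
	hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
	hash_bit = hash_value & 0x1F;
	mta = array_rd32(E1000_MTA, hash_reg);
	mta |= (1 << hash_bit);
	array_wr32(E1000_MTA, hash_reg, mta);
	wrfl();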
+1 -4
drivers/net/igb/e1000_mac.h
··· 51 u16 *duplex); 52 s32 igb_id_led_init(struct e1000_hw *hw); 53 s32 igb_led_off(struct e1000_hw *hw); 54 - void igb_update_mc_addr_list(struct e1000_hw *hw, 55 - u8 *mc_addr_list, u32 mc_addr_count, 56 - u32 rar_used_count, u32 rar_count); 57 s32 igb_setup_link(struct e1000_hw *hw); 58 s32 igb_validate_mdi_setting(struct e1000_hw *hw); 59 s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, ··· 59 void igb_clear_hw_cntrs_base(struct e1000_hw *hw); 60 void igb_clear_vfta(struct e1000_hw *hw); 61 void igb_config_collision_dist(struct e1000_hw *hw); 62 - void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); 63 void igb_put_hw_semaphore(struct e1000_hw *hw); 64 void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); 65 s32 igb_check_alt_mac_addr(struct e1000_hw *hw);
··· 51 u16 *duplex); 52 s32 igb_id_led_init(struct e1000_hw *hw); 53 s32 igb_led_off(struct e1000_hw *hw); 54 s32 igb_setup_link(struct e1000_hw *hw); 55 s32 igb_validate_mdi_setting(struct e1000_hw *hw); 56 s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, ··· 62 void igb_clear_hw_cntrs_base(struct e1000_hw *hw); 63 void igb_clear_vfta(struct e1000_hw *hw); 64 void igb_config_collision_dist(struct e1000_hw *hw); 65 + void igb_mta_set(struct e1000_hw *hw, u32 hash_value); 66 void igb_put_hw_semaphore(struct e1000_hw *hw); 67 void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); 68 s32 igb_check_alt_mac_addr(struct e1000_hw *hw);
-3
drivers/net/igb/e1000_regs.h
··· 262 #define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) 263 #define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */ 264 265 - #define E1000_REGISTER(a, reg) (((a)->mac.type < e1000_82576) \ 266 - ? reg : e1000_translate_register_82576(reg)) 267 - 268 #define wr32(reg, value) (writel(value, hw->hw_addr + reg)) 269 #define rd32(reg) (readl(hw->hw_addr + reg)) 270 #define wrfl() ((void)rd32(E1000_STATUS))
··· 262 #define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) 263 #define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */ 264 265 #define wr32(reg, value) (writel(value, hw->hw_addr + reg)) 266 #define rd32(reg) (readl(hw->hw_addr + reg)) 267 #define wrfl() ((void)rd32(E1000_STATUS))
+3 -27
drivers/net/igb/igb_main.c
··· 311 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); 312 break; 313 case e1000_82576: 314 - /* Kawela uses a table-based method for assigning vectors. 315 Each queue has a single entry in the table to which we write 316 a vector number along with a "valid" bit. Sadly, the layout 317 of the table is somewhat counterintuitive. */ ··· 720 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 721 } 722 723 - static void igb_init_manageability(struct igb_adapter *adapter) 724 - { 725 - struct e1000_hw *hw = &adapter->hw; 726 - 727 - if (adapter->en_mng_pt) { 728 - u32 manc2h = rd32(E1000_MANC2H); 729 - u32 manc = rd32(E1000_MANC); 730 - 731 - /* enable receiving management packets to the host */ 732 - /* this will probably generate destination unreachable messages 733 - * from the host OS, but the packets will be handled on SMBUS */ 734 - manc |= E1000_MANC_EN_MNG2HOST; 735 - #define E1000_MNG2HOST_PORT_623 (1 << 5) 736 - #define E1000_MNG2HOST_PORT_664 (1 << 6) 737 - manc2h |= E1000_MNG2HOST_PORT_623; 738 - manc2h |= E1000_MNG2HOST_PORT_664; 739 - wr32(E1000_MANC2H, manc2h); 740 - 741 - wr32(E1000_MANC, manc); 742 - } 743 - } 744 - 745 /** 746 * igb_configure - configure the hardware for RX and TX 747 * @adapter: private board structure ··· 733 igb_set_multi(netdev); 734 735 igb_restore_vlan(adapter); 736 - igb_init_manageability(adapter); 737 738 igb_configure_tx(adapter); 739 igb_setup_rctl(adapter); ··· 1349 1350 unregister_netdev(netdev); 1351 1352 - if (!igb_check_reset_block(&adapter->hw)) 1353 adapter->hw.phy.ops.reset_phy(&adapter->hw); 1354 1355 igb_remove_device(&adapter->hw); ··· 4500 { 4501 struct net_device *netdev = pci_get_drvdata(pdev); 4502 struct igb_adapter *adapter = netdev_priv(netdev); 4503 - 4504 - igb_init_manageability(adapter); 4505 4506 if (netif_running(netdev)) { 4507 if (igb_up(adapter)) {
··· 311 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); 312 break; 313 case e1000_82576: 314 + /* The 82576 uses a table-based method for assigning vectors. 315 Each queue has a single entry in the table to which we write 316 a vector number along with a "valid" bit. Sadly, the layout 317 of the table is somewhat counterintuitive. */ ··· 720 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 721 } 722 723 /** 724 * igb_configure - configure the hardware for RX and TX 725 * @adapter: private board structure ··· 755 igb_set_multi(netdev); 756 757 igb_restore_vlan(adapter); 758 759 igb_configure_tx(adapter); 760 igb_setup_rctl(adapter); ··· 1372 1373 unregister_netdev(netdev); 1374 1375 + if (adapter->hw.phy.ops.reset_phy && 1376 + !igb_check_reset_block(&adapter->hw)) 1377 adapter->hw.phy.ops.reset_phy(&adapter->hw); 1378 1379 igb_remove_device(&adapter->hw); ··· 4522 { 4523 struct net_device *netdev = pci_get_drvdata(pdev); 4524 struct igb_adapter *adapter = netdev_priv(netdev); 4525 4526 if (netif_running(netdev)) { 4527 if (igb_up(adapter)) {
-2
drivers/net/lp486e.c
··· 1272 return; 1273 } 1274 if (dev->mc_count == 0 && !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) { 1275 - if (dev->flags & IFF_ALLMULTI) 1276 - dev->flags |= IFF_PROMISC; 1277 lp->i596_config[8] &= ~0x01; 1278 } else { 1279 lp->i596_config[8] |= 0x01;
··· 1272 return; 1273 } 1274 if (dev->mc_count == 0 && !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) { 1275 lp->i596_config[8] &= ~0x01; 1276 } else { 1277 lp->i596_config[8] |= 0x01;
+1 -1
drivers/net/meth.c
··· 100 DPRINTK("Loading MAC Address: %s\n", print_mac(mac, dev->dev_addr)); 101 macaddr = 0; 102 for (i = 0; i < 6; i++) 103 - macaddr |= dev->dev_addr[i] << ((5 - i) * 8); 104 105 mace->eth.mac_addr = macaddr; 106 }
··· 100 DPRINTK("Loading MAC Address: %s\n", print_mac(mac, dev->dev_addr)); 101 macaddr = 0; 102 for (i = 0; i < 6; i++) 103 + macaddr |= (u64)dev->dev_addr[i] << ((5 - i) * 8); 104 105 mace->eth.mac_addr = macaddr; 106 }
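The meth.c change is a classic integer-promotion fix: dev_addr[i] is a u8, which promotes to 32-bit int before the shift, so the i = 0 and i = 1 iterations shift by 40 and 32 bits, undefined behavior that in practice drops the top two bytes of the MAC address. Casting to u64 widens the operand first:

	/* Demonstration with an illustrative address. */
	u8 addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	u64 macaddr = 0;
	int i;

	for (i = 0; i < 6; i++)
		macaddr |= (u64)addr[i] << ((5 - i) * 8);
	/* macaddr == 0x001122334455ULL; without the cast the two high
	 * bytes would be lost, or worse, since the shift itself is UB. */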
+1
drivers/net/myri10ge/myri10ge.c
··· 3699 dev_err(&pdev->dev, "Error %d setting DMA mask\n", status); 3700 goto abort_with_netdev; 3701 } 3702 mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd), 3703 &mgp->cmd_bus, GFP_KERNEL); 3704 if (mgp->cmd == NULL)
··· 3699 dev_err(&pdev->dev, "Error %d setting DMA mask\n", status); 3700 goto abort_with_netdev; 3701 } 3702 + (void)pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 3703 mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd), 3704 &mgp->cmd_bus, GFP_KERNEL); 3705 if (mgp->cmd == NULL)
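myri10ge allocates its command block with dma_alloc_coherent(), which is limited by the device's consistent DMA mask; that mask stays at the 32-bit default even after pci_set_dma_mask() succeeds for streaming mappings. The added call raises it to 64 bits, and the (void) cast signals that failure is tolerable since the 32-bit default still works. The usual probe-time pairing, with the 2.6-era constants used here:

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
		(void)pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
	else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK))
		goto abort_with_netdev;	/* no usable DMA addressing */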
+47 -5
drivers/net/myri10ge/myri10ge_mcp.h
··· 101 #define MXGEFW_ETH_SEND_3 0x2c0000 102 #define MXGEFW_ETH_RECV_SMALL 0x300000 103 #define MXGEFW_ETH_RECV_BIG 0x340000 104 105 #define MXGEFW_ETH_SEND(n) (0x200000 + (((n) & 0x03) * 0x40000)) 106 #define MXGEFW_ETH_SEND_OFFSET(n) (MXGEFW_ETH_SEND(n) - MXGEFW_ETH_SEND_4) ··· 122 * MXGEFW_CMD_RESET is issued */ 123 124 MXGEFW_CMD_SET_INTRQ_DMA, 125 MXGEFW_CMD_SET_BIG_BUFFER_SIZE, /* in bytes, power of 2 */ 126 MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, /* in bytes */ 127 ··· 136 MXGEFW_CMD_GET_SEND_OFFSET, 137 MXGEFW_CMD_GET_SMALL_RX_OFFSET, 138 MXGEFW_CMD_GET_BIG_RX_OFFSET, 139 MXGEFW_CMD_GET_IRQ_ACK_OFFSET, 140 MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, 141 ··· 209 MXGEFW_CMD_SET_STATS_DMA_V2, 210 /* data0, data1 = bus addr, 211 * data2 = sizeof(struct mcp_irq_data) from driver point of view, allows 212 - * adding new stuff to mcp_irq_data without changing the ABI */ 213 214 MXGEFW_CMD_UNALIGNED_TEST, 215 /* same than DMA_TEST (same args) but abort with UNALIGNED on unaligned ··· 236 MXGEFW_CMD_GET_MAX_RSS_QUEUES, 237 MXGEFW_CMD_ENABLE_RSS_QUEUES, 238 /* data0 = number of slices n (0, 1, ..., n-1) to enable 239 - * data1 = interrupt mode. 240 - * 0=share one INTx/MSI, 1=use one MSI-X per queue. 241 * If all queues share one interrupt, the driver must have set 242 * RSS_SHARED_INTERRUPT_DMA before enabling queues. 243 */ 244 - #define MXGEFW_SLICE_INTR_MODE_SHARED 0 245 - #define MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE 1 246 247 MXGEFW_CMD_GET_RSS_SHARED_INTERRUPT_MASK_OFFSET, 248 MXGEFW_CMD_SET_RSS_SHARED_INTERRUPT_DMA, ··· 269 * 2: TCP_IPV4 (required by RSS) 270 * 3: IPV4 | TCP_IPV4 (required by RSS) 271 * 4: source port 272 */ 273 #define MXGEFW_RSS_HASH_TYPE_IPV4 0x1 274 #define MXGEFW_RSS_HASH_TYPE_TCP_IPV4 0x2 275 #define MXGEFW_RSS_HASH_TYPE_SRC_PORT 0x4 276 277 MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE, 278 /* Return data = the max. size of the entire headers of a IPv6 TSO packet. ··· 351 352 MXGEFW_CMD_GET_DCA_OFFSET, 353 /* offset of dca control for WDMAs */ 354 }; 355 356 enum myri10ge_mcp_cmd_status { ··· 416 u8 stats_updated; 417 u8 valid; 418 }; 419 420 #endif /* __MYRI10GE_MCP_H__ */
··· 101 #define MXGEFW_ETH_SEND_3 0x2c0000 102 #define MXGEFW_ETH_RECV_SMALL 0x300000 103 #define MXGEFW_ETH_RECV_BIG 0x340000 104 + #define MXGEFW_ETH_SEND_GO 0x380000 105 + #define MXGEFW_ETH_SEND_STOP 0x3C0000 106 107 #define MXGEFW_ETH_SEND(n) (0x200000 + (((n) & 0x03) * 0x40000)) 108 #define MXGEFW_ETH_SEND_OFFSET(n) (MXGEFW_ETH_SEND(n) - MXGEFW_ETH_SEND_4) ··· 120 * MXGEFW_CMD_RESET is issued */ 121 122 MXGEFW_CMD_SET_INTRQ_DMA, 123 + /* data0 = LSW of the host address 124 + * data1 = MSW of the host address 125 + * data2 = slice number if multiple slices are used 126 + */ 127 + 128 MXGEFW_CMD_SET_BIG_BUFFER_SIZE, /* in bytes, power of 2 */ 129 MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, /* in bytes */ 130 ··· 129 MXGEFW_CMD_GET_SEND_OFFSET, 130 MXGEFW_CMD_GET_SMALL_RX_OFFSET, 131 MXGEFW_CMD_GET_BIG_RX_OFFSET, 132 + /* data0 = slice number if multiple slices are used */ 133 + 134 MXGEFW_CMD_GET_IRQ_ACK_OFFSET, 135 MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, 136 ··· 200 MXGEFW_CMD_SET_STATS_DMA_V2, 201 /* data0, data1 = bus addr, 202 * data2 = sizeof(struct mcp_irq_data) from driver point of view, allows 203 + * adding new stuff to mcp_irq_data without changing the ABI 204 + * 205 + * If multiple slices are used, data2 contains both the size of the 206 + * structure (in the lower 16 bits) and the slice number 207 + * (in the upper 16 bits). 208 + */ 209 210 MXGEFW_CMD_UNALIGNED_TEST, 211 /* same than DMA_TEST (same args) but abort with UNALIGNED on unaligned ··· 222 MXGEFW_CMD_GET_MAX_RSS_QUEUES, 223 MXGEFW_CMD_ENABLE_RSS_QUEUES, 224 /* data0 = number of slices n (0, 1, ..., n-1) to enable 225 + * data1 = interrupt mode | use of multiple transmit queues. 226 + * 0=share one INTx/MSI. 227 + * 1=use one MSI-X per queue. 228 * If all queues share one interrupt, the driver must have set 229 * RSS_SHARED_INTERRUPT_DMA before enabling queues. 230 + * 2=enable both receive and send queues. 231 + * Without this bit set, only one send queue (slice 0's send queue) 232 + * is enabled. The receive queues are always enabled. 233 */ 234 + #define MXGEFW_SLICE_INTR_MODE_SHARED 0x0 235 + #define MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE 0x1 236 + #define MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES 0x2 237 238 MXGEFW_CMD_GET_RSS_SHARED_INTERRUPT_MASK_OFFSET, 239 MXGEFW_CMD_SET_RSS_SHARED_INTERRUPT_DMA, ··· 250 * 2: TCP_IPV4 (required by RSS) 251 * 3: IPV4 | TCP_IPV4 (required by RSS) 252 * 4: source port 253 + * 5: source port + destination port 254 */ 255 #define MXGEFW_RSS_HASH_TYPE_IPV4 0x1 256 #define MXGEFW_RSS_HASH_TYPE_TCP_IPV4 0x2 257 #define MXGEFW_RSS_HASH_TYPE_SRC_PORT 0x4 258 + #define MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT 0x5 259 + #define MXGEFW_RSS_HASH_TYPE_MAX 0x5 260 261 MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE, 262 /* Return data = the max. size of the entire headers of a IPv6 TSO packet. 
··· 329 330 MXGEFW_CMD_GET_DCA_OFFSET, 331 /* offset of dca control for WDMAs */ 332 + 333 + /* VMWare NetQueue commands */ 334 + MXGEFW_CMD_NETQ_GET_FILTERS_PER_QUEUE, 335 + MXGEFW_CMD_NETQ_ADD_FILTER, 336 + /* data0 = filter_id << 16 | queue << 8 | type */ 337 + /* data1 = MS4 of MAC Addr */ 338 + /* data2 = LS2_MAC << 16 | VLAN_tag */ 339 + MXGEFW_CMD_NETQ_DEL_FILTER, 340 + /* data0 = filter_id */ 341 + MXGEFW_CMD_NETQ_QUERY1, 342 + MXGEFW_CMD_NETQ_QUERY2, 343 + MXGEFW_CMD_NETQ_QUERY3, 344 + MXGEFW_CMD_NETQ_QUERY4, 345 + 346 }; 347 348 enum myri10ge_mcp_cmd_status { ··· 380 u8 stats_updated; 381 u8 valid; 382 }; 383 + 384 + /* definitions for NETQ filter type */ 385 + #define MXGEFW_NETQ_FILTERTYPE_NONE 0 386 + #define MXGEFW_NETQ_FILTERTYPE_MACADDR 1 387 + #define MXGEFW_NETQ_FILTERTYPE_VLAN 2 388 + #define MXGEFW_NETQ_FILTERTYPE_VLANMACADDR 3 389 390 #endif /* __MYRI10GE_MCP_H__ */
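The new NetQueue commands document their argument packing only in comments; spelling the data0 layout out as code makes it concrete (filter_id and queue are illustrative values):

	/* data0 = filter_id << 16 | queue << 8 | type, per the comment
	 * on MXGEFW_CMD_NETQ_ADD_FILTER above. */
	u32 filter_id = 3, queue = 1;
	u32 data0 = (filter_id << 16) | (queue << 8) |
		    MXGEFW_NETQ_FILTERTYPE_MACADDR;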
+1 -1
drivers/net/myri10ge/myri10ge_mcp_gen_header.h
··· 35 unsigned char mcp_index; 36 unsigned char disable_rabbit; 37 unsigned char unaligned_tlp; 38 - unsigned char pad1; 39 unsigned counters_addr; 40 unsigned copy_block_info; /* for small mcps loaded with "lload -d" */ 41 unsigned short handoff_id_major; /* must be equal */
··· 35 unsigned char mcp_index; 36 unsigned char disable_rabbit; 37 unsigned char unaligned_tlp; 38 + unsigned char pcie_link_algo; 39 unsigned counters_addr; 40 unsigned copy_block_info; /* for small mcps loaded with "lload -d" */ 41 unsigned short handoff_id_major; /* must be equal */
+38 -3
drivers/net/netxen/netxen_nic.h
··· 508 NETXEN_BRDTYPE_P3_10000_BASE_T = 0x0027, 509 NETXEN_BRDTYPE_P3_XG_LOM = 0x0028, 510 NETXEN_BRDTYPE_P3_4_GB_MM = 0x0029, 511 NETXEN_BRDTYPE_P3_10G_CX4 = 0x0031, 512 NETXEN_BRDTYPE_P3_10G_XFP = 0x0032 513 ··· 1172 nx_nic_intr_coalesce_data_t irq; 1173 } nx_nic_intr_coalesce_t; 1174 1175 typedef struct { 1176 u64 qhdr; 1177 u64 req_hdr; ··· 1320 int (*disable_phy_interrupts) (struct netxen_adapter *); 1321 int (*macaddr_set) (struct netxen_adapter *, netxen_ethernet_macaddr_t); 1322 int (*set_mtu) (struct netxen_adapter *, int); 1323 - int (*set_promisc) (struct netxen_adapter *, netxen_niu_prom_mode_t); 1324 int (*phy_read) (struct netxen_adapter *, long reg, u32 *); 1325 int (*phy_write) (struct netxen_adapter *, long reg, u32 val); 1326 int (*init_port) (struct netxen_adapter *, int); ··· 1497 u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctx, int max); 1498 void netxen_p2_nic_set_multi(struct net_device *netdev); 1499 void netxen_p3_nic_set_multi(struct net_device *netdev); 1500 int netxen_config_intr_coalesce(struct netxen_adapter *adapter); 1501 1502 - u32 nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, u32 mtu); 1503 int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu); 1504 1505 int netxen_nic_set_mac(struct net_device *netdev, void *p); ··· 1535 {NETXEN_BRDTYPE_P3_10G_SFP_PLUS, 2, "Dual XGb SFP+ LP"}, 1536 {NETXEN_BRDTYPE_P3_10000_BASE_T, 1, "XGB 10G BaseT LP"}, 1537 {NETXEN_BRDTYPE_P3_XG_LOM, 2, "Dual XGb LOM"}, 1538 - {NETXEN_BRDTYPE_P3_4_GB_MM, 4, "Quad GB - March Madness"}, 1539 {NETXEN_BRDTYPE_P3_10G_CX4, 2, "Reference Dual CX4 Option"}, 1540 {NETXEN_BRDTYPE_P3_10G_XFP, 1, "Reference Single XFP Option"} 1541 };
··· 508 NETXEN_BRDTYPE_P3_10000_BASE_T = 0x0027, 509 NETXEN_BRDTYPE_P3_XG_LOM = 0x0028, 510 NETXEN_BRDTYPE_P3_4_GB_MM = 0x0029, 511 + NETXEN_BRDTYPE_P3_10G_SFP_CT = 0x002a, 512 + NETXEN_BRDTYPE_P3_10G_SFP_QT = 0x002b, 513 NETXEN_BRDTYPE_P3_10G_CX4 = 0x0031, 514 NETXEN_BRDTYPE_P3_10G_XFP = 0x0032 515 ··· 1170 nx_nic_intr_coalesce_data_t irq; 1171 } nx_nic_intr_coalesce_t; 1172 1173 + #define NX_HOST_REQUEST 0x13 1174 + #define NX_NIC_REQUEST 0x14 1175 + 1176 + #define NX_MAC_EVENT 0x1 1177 + 1178 + enum { 1179 + NX_NIC_H2C_OPCODE_START = 0, 1180 + NX_NIC_H2C_OPCODE_CONFIG_RSS, 1181 + NX_NIC_H2C_OPCODE_CONFIG_RSS_TBL, 1182 + NX_NIC_H2C_OPCODE_CONFIG_INTR_COALESCE, 1183 + NX_NIC_H2C_OPCODE_CONFIG_LED, 1184 + NX_NIC_H2C_OPCODE_CONFIG_PROMISCUOUS, 1185 + NX_NIC_H2C_OPCODE_CONFIG_L2_MAC, 1186 + NX_NIC_H2C_OPCODE_LRO_REQUEST, 1187 + NX_NIC_H2C_OPCODE_GET_SNMP_STATS, 1188 + NX_NIC_H2C_OPCODE_PROXY_START_REQUEST, 1189 + NX_NIC_H2C_OPCODE_PROXY_STOP_REQUEST, 1190 + NX_NIC_H2C_OPCODE_PROXY_SET_MTU, 1191 + NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE, 1192 + NX_H2P_OPCODE_GET_FINGER_PRINT_REQUEST, 1193 + NX_H2P_OPCODE_INSTALL_LICENSE_REQUEST, 1194 + NX_H2P_OPCODE_GET_LICENSE_CAPABILITY_REQUEST, 1195 + NX_NIC_H2C_OPCODE_GET_NET_STATS, 1196 + NX_NIC_H2C_OPCODE_LAST 1197 + }; 1198 + 1199 + #define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */ 1200 + #define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */ 1201 + #define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */ 1202 + 1203 typedef struct { 1204 u64 qhdr; 1205 u64 req_hdr; ··· 1288 int (*disable_phy_interrupts) (struct netxen_adapter *); 1289 int (*macaddr_set) (struct netxen_adapter *, netxen_ethernet_macaddr_t); 1290 int (*set_mtu) (struct netxen_adapter *, int); 1291 + int (*set_promisc) (struct netxen_adapter *, u32); 1292 int (*phy_read) (struct netxen_adapter *, long reg, u32 *); 1293 int (*phy_write) (struct netxen_adapter *, long reg, u32 val); 1294 int (*init_port) (struct netxen_adapter *, int); ··· 1465 u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctx, int max); 1466 void netxen_p2_nic_set_multi(struct net_device *netdev); 1467 void netxen_p3_nic_set_multi(struct net_device *netdev); 1468 + int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32); 1469 int netxen_config_intr_coalesce(struct netxen_adapter *adapter); 1470 1471 + int nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu); 1472 int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu); 1473 1474 int netxen_nic_set_mac(struct net_device *netdev, void *p); ··· 1502 {NETXEN_BRDTYPE_P3_10G_SFP_PLUS, 2, "Dual XGb SFP+ LP"}, 1503 {NETXEN_BRDTYPE_P3_10000_BASE_T, 1, "XGB 10G BaseT LP"}, 1504 {NETXEN_BRDTYPE_P3_XG_LOM, 2, "Dual XGb LOM"}, 1505 + {NETXEN_BRDTYPE_P3_4_GB_MM, 4, "NX3031 Gigabit Ethernet"}, 1506 + {NETXEN_BRDTYPE_P3_10G_SFP_CT, 2, "NX3031 10 Gigabit Ethernet"}, 1507 + {NETXEN_BRDTYPE_P3_10G_SFP_QT, 2, "Quanta Dual XGb SFP+"}, 1508 {NETXEN_BRDTYPE_P3_10G_CX4, 2, "Reference Dual CX4 Option"}, 1509 {NETXEN_BRDTYPE_P3_10G_XFP, 1, "Reference Single XFP Option"} 1510 };
+6 -3
drivers/net/netxen/netxen_nic_ctx.c
··· 145 return rcode; 146 } 147 148 - u32 149 - nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, u32 mtu) 150 { 151 u32 rcode = NX_RCODE_SUCCESS; 152 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx[0]; ··· 160 0, 161 NX_CDRP_CMD_SET_MTU); 162 163 - return rcode; 164 } 165 166 static int
··· 145 return rcode; 146 } 147 148 + int 149 + nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu) 150 { 151 u32 rcode = NX_RCODE_SUCCESS; 152 struct netxen_recv_context *recv_ctx = &adapter->recv_ctx[0]; ··· 160 0, 161 NX_CDRP_CMD_SET_MTU); 162 163 + if (rcode != NX_RCODE_SUCCESS) 164 + return -EIO; 165 + 166 + return 0; 167 } 168 169 static int
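Note: nx_fw_cmd_set_mtu() now reports failure as a standard errno instead of a raw firmware code, so netxen_nic_change_mtu() (see the netxen_nic_hw.c hunk below) can propagate the result unmodified. The caller side, in sketch form:

	/* Sketch of the calling pattern after this change: the firmware
	 * status is hidden behind 0 / -EIO and can be returned as-is. */
	rc = adapter->set_mtu(adapter, mtu);	/* nx_fw_cmd_set_mtu on P3 */
	if (!rc)
		netdev->mtu = mtu;
	return rc;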
+26 -9
drivers/net/netxen/netxen_nic_ethtool.c
··· 140 if (netif_running(dev)) { 141 ecmd->speed = adapter->link_speed; 142 ecmd->duplex = adapter->link_duplex; 143 - } else 144 - return -EIO; /* link absent */ 145 } else if (adapter->ahw.board_type == NETXEN_NIC_XGBE) { 146 - ecmd->supported = (SUPPORTED_TP | 147 - SUPPORTED_1000baseT_Full | 148 - SUPPORTED_10000baseT_Full); 149 - ecmd->advertising = (ADVERTISED_TP | 150 - ADVERTISED_1000baseT_Full | 151 - ADVERTISED_10000baseT_Full); 152 ecmd->port = PORT_TP; 153 154 - ecmd->speed = SPEED_10000; 155 ecmd->duplex = DUPLEX_FULL; 156 ecmd->autoneg = AUTONEG_DISABLE; 157 } else ··· 207 break; 208 case NETXEN_BRDTYPE_P2_SB31_10G: 209 case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: 210 case NETXEN_BRDTYPE_P3_10G_XFP: 211 ecmd->supported |= SUPPORTED_FIBRE; 212 ecmd->advertising |= ADVERTISED_FIBRE;
··· 140 if (netif_running(dev)) { 141 ecmd->speed = adapter->link_speed; 142 ecmd->duplex = adapter->link_duplex; 143 + ecmd->autoneg = adapter->link_autoneg; 144 + } 145 + 146 } else if (adapter->ahw.board_type == NETXEN_NIC_XGBE) { 147 + u32 val; 148 + 149 + adapter->hw_read_wx(adapter, NETXEN_PORT_MODE_ADDR, &val, 4); 150 + if (val == NETXEN_PORT_MODE_802_3_AP) { 151 + ecmd->supported = SUPPORTED_1000baseT_Full; 152 + ecmd->advertising = ADVERTISED_1000baseT_Full; 153 + } else { 154 + ecmd->supported = SUPPORTED_10000baseT_Full; 155 + ecmd->advertising = ADVERTISED_10000baseT_Full; 156 + } 157 + 158 ecmd->port = PORT_TP; 159 160 + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { 161 + u16 pcifn = adapter->ahw.pci_func; 162 + 163 + adapter->hw_read_wx(adapter, 164 + P3_LINK_SPEED_REG(pcifn), &val, 4); 165 + ecmd->speed = P3_LINK_SPEED_MHZ * 166 + P3_LINK_SPEED_VAL(pcifn, val); 167 + } else 168 + ecmd->speed = SPEED_10000; 169 + 170 ecmd->duplex = DUPLEX_FULL; 171 ecmd->autoneg = AUTONEG_DISABLE; 172 } else ··· 192 break; 193 case NETXEN_BRDTYPE_P2_SB31_10G: 194 case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: 195 + case NETXEN_BRDTYPE_P3_10G_SFP_CT: 196 + case NETXEN_BRDTYPE_P3_10G_SFP_QT: 197 case NETXEN_BRDTYPE_P3_10G_XFP: 198 ecmd->supported |= SUPPORTED_FIBRE; 199 ecmd->advertising |= ADVERTISED_FIBRE;
+10
drivers/net/netxen/netxen_nic_hdr.h
··· 724 #define XG_LINK_STATE_P3(pcifn,val) \ 725 (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK) 726 727 #define NETXEN_CAM_RAM_BASE (NETXEN_CRB_CAM + 0x02000) 728 #define NETXEN_CAM_RAM(reg) (NETXEN_CAM_RAM_BASE + (reg)) 729 #define NETXEN_FW_VERSION_MAJOR (NETXEN_CAM_RAM(0x150)) ··· 843 844 #define PCIE_SETUP_FUNCTION (0x12040) 845 #define PCIE_SETUP_FUNCTION2 (0x12048) 846 #define PCIE_TGT_SPLIT_CHICKEN (0x12080) 847 #define PCIE_CHICKEN3 (0x120c8) 848 849 #define PCIE_MAX_MASTER_SPLIT (0x14048) 850 851 #define NETXEN_PORT_MODE_NONE 0 ··· 863 #define NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL (0x14) 864 865 #define ISR_MSI_INT_TRIGGER(FUNC) (NETXEN_PCIX_PS_REG(PCIX_MSI_F(FUNC))) 866 867 /* 868 * PCI Interrupt Vector Values.
··· 724 #define XG_LINK_STATE_P3(pcifn,val) \ 725 (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK) 726 727 + #define P3_LINK_SPEED_MHZ 100 728 + #define P3_LINK_SPEED_MASK 0xff 729 + #define P3_LINK_SPEED_REG(pcifn) \ 730 + (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4)) 731 + #define P3_LINK_SPEED_VAL(pcifn, reg) \ 732 + (((reg) >> (8 * ((pcifn) & 0x3))) & P3_LINK_SPEED_MASK) 733 + 734 #define NETXEN_CAM_RAM_BASE (NETXEN_CRB_CAM + 0x02000) 735 #define NETXEN_CAM_RAM(reg) (NETXEN_CAM_RAM_BASE + (reg)) 736 #define NETXEN_FW_VERSION_MAJOR (NETXEN_CAM_RAM(0x150)) ··· 836 837 #define PCIE_SETUP_FUNCTION (0x12040) 838 #define PCIE_SETUP_FUNCTION2 (0x12048) 839 + #define PCIE_MISCCFG_RC (0x1206c) 840 #define PCIE_TGT_SPLIT_CHICKEN (0x12080) 841 #define PCIE_CHICKEN3 (0x120c8) 842 843 + #define ISR_INT_STATE_REG (NETXEN_PCIX_PS_REG(PCIE_MISCCFG_RC)) 844 #define PCIE_MAX_MASTER_SPLIT (0x14048) 845 846 #define NETXEN_PORT_MODE_NONE 0 ··· 854 #define NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL (0x14) 855 856 #define ISR_MSI_INT_TRIGGER(FUNC) (NETXEN_PCIX_PS_REG(PCIX_MSI_F(FUNC))) 857 + #define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200) 858 859 /* 860 * PCI Interrupt Vector Values.
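Note: the new P3_LINK_SPEED_* macros decode one speed byte per PCI function, in units of 100 MHz: functions 0-3 live in CRB_PF_LINK_SPEED_1 and functions 4-7 in CRB_PF_LINK_SPEED_2 (4 bytes higher). A worked example of the decode, matching its use in the ethtool hunk above:

	/* For pcifn = 5: P3_LINK_SPEED_REG(5) selects CRB_PF_LINK_SPEED_2
	 * ((5 / 4) * 4 = 4 bytes past _1), and P3_LINK_SPEED_VAL() shifts
	 * by 8 * (5 & 0x3) = 8 bits before masking.  A raw byte of 100
	 * then decodes to 100 * 100 MHz, i.e. ecmd->speed = 10000. */
	u32 val;

	adapter->hw_read_wx(adapter, P3_LINK_SPEED_REG(pcifn), &val, 4);
	ecmd->speed = P3_LINK_SPEED_MHZ * P3_LINK_SPEED_VAL(pcifn, val);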
+62 -41
drivers/net/netxen/netxen_nic_hw.c
··· 285 #define ADDR_IN_RANGE(addr, low, high) \ 286 (((addr) <= (high)) && ((addr) >= (low))) 287 288 - #define NETXEN_MAX_MTU 8000 + NETXEN_ENET_HEADER_SIZE + NETXEN_ETH_FCS_SIZE 289 - #define NETXEN_MIN_MTU 64 290 - #define NETXEN_ETH_FCS_SIZE 4 291 - #define NETXEN_ENET_HEADER_SIZE 14 292 #define NETXEN_WINDOW_ONE 0x2000000 /*CRB Window: bit 25 of CRB address */ 293 - #define NETXEN_FIRMWARE_LEN ((16 * 1024) / 4) 294 - #define NETXEN_NIU_HDRSIZE (0x1 << 6) 295 - #define NETXEN_NIU_TLRSIZE (0x1 << 5) 296 297 #define NETXEN_NIC_ZERO_PAUSE_ADDR 0ULL 298 #define NETXEN_NIC_UNIT_PAUSE_ADDR 0x200ULL ··· 534 return 0; 535 } 536 537 - #define NIC_REQUEST 0x14 538 - #define NETXEN_MAC_EVENT 0x1 539 - 540 static int nx_p3_sre_macaddr_change(struct net_device *dev, 541 u8 *addr, unsigned op) 542 { ··· 543 int rv; 544 545 memset(&req, 0, sizeof(nx_nic_req_t)); 546 - req.qhdr |= (NIC_REQUEST << 23); 547 - req.req_hdr |= NETXEN_MAC_EVENT; 548 req.req_hdr |= ((u64)adapter->portnum << 16); 549 mac_req.op = op; 550 memcpy(&mac_req.mac_addr, addr, 6); ··· 565 nx_mac_list_t *cur, *next, *del_list, *add_list = NULL; 566 struct dev_mc_list *mc_ptr; 567 u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 568 - 569 - adapter->set_promisc(adapter, NETXEN_NIU_PROMISC_MODE); 570 - 571 - /* 572 - * Programming mac addresses will automaticly enabling L2 filtering. 573 - * HW will replace timestamp with L2 conid when L2 filtering is 574 - * enabled. This causes problem for LSA. Do not enabling L2 filtering 575 - * until that problem is fixed. 576 - */ 577 - if ((netdev->flags & IFF_PROMISC) || 578 - (netdev->mc_count > adapter->max_mc_count)) 579 - return; 580 581 del_list = adapter->mac_list; 582 adapter->mac_list = NULL; 583 584 nx_p3_nic_add_mac(adapter, netdev->dev_addr, &add_list, &del_list); 585 if (netdev->mc_count > 0) { 586 - nx_p3_nic_add_mac(adapter, bcast_addr, &add_list, &del_list); 587 for (mc_ptr = netdev->mc_list; mc_ptr; 588 mc_ptr = mc_ptr->next) { 589 nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr, 590 &add_list, &del_list); 591 } 592 } 593 for (cur = del_list; cur;) { 594 nx_p3_sre_macaddr_change(netdev, cur->mac_addr, NETXEN_MAC_DEL); 595 next = cur->next; ··· 609 } 610 } 611 612 #define NETXEN_CONFIG_INTR_COALESCE 3 613 614 /* ··· 636 637 memset(&req, 0, sizeof(nx_nic_req_t)); 638 639 - req.qhdr |= (NIC_REQUEST << 23); 640 req.req_hdr |= NETXEN_CONFIG_INTR_COALESCE; 641 req.req_hdr |= ((u64)adapter->portnum << 16); 642 ··· 662 { 663 struct netxen_adapter *adapter = netdev_priv(netdev); 664 int max_mtu; 665 666 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 667 max_mtu = P3_MAX_MTU; ··· 676 } 677 678 if (adapter->set_mtu) 679 - adapter->set_mtu(adapter, mtu); 680 - netdev->mtu = mtu; 681 682 - mtu += MTU_FUDGE_FACTOR; 683 - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 684 - nx_fw_cmd_set_mtu(adapter, mtu); 685 - else if (adapter->set_mtu) 686 - adapter->set_mtu(adapter, mtu); 687 688 - return 0; 689 } 690 691 int netxen_is_flash_supported(struct netxen_adapter *adapter) ··· 1417 (netxen_nic_pci_is_same_window(adapter, off+size-1) == 0)) { 1418 write_unlock_irqrestore(&adapter->adapter_lock, flags); 1419 printk(KERN_ERR "%s out of bound pci memory access. " 1420 - "offset is 0x%llx\n", netxen_nic_driver_name, off); 1421 return -1; 1422 } 1423 ··· 1491 (netxen_nic_pci_is_same_window(adapter, off+size-1) == 0)) { 1492 write_unlock_irqrestore(&adapter->adapter_lock, flags); 1493 printk(KERN_ERR "%s out of bound pci memory access. " 1494 - "offset is 0x%llx\n", netxen_nic_driver_name, off); 1495 return -1; 1496 } 1497 ··· 2024 case NETXEN_BRDTYPE_P3_10G_CX4_LP: 2025 case NETXEN_BRDTYPE_P3_IMEZ: 2026 case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: 2027 case NETXEN_BRDTYPE_P3_10G_XFP: 2028 case NETXEN_BRDTYPE_P3_10000_BASE_T: 2029 ··· 2044 default: 2045 printk("%s: Unknown(%x)\n", netxen_nic_driver_name, 2046 boardinfo->board_type); 2047 break; 2048 } 2049 ··· 2055 2056 int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu) 2057 { 2058 netxen_nic_write_w0(adapter, 2059 NETXEN_NIU_GB_MAX_FRAME_SIZE(adapter->physical_port), 2060 new_mtu); ··· 2064 2065 int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu) 2066 { 2067 - new_mtu += NETXEN_NIU_HDRSIZE + NETXEN_NIU_TLRSIZE; 2068 if (adapter->physical_port == 0) 2069 netxen_nic_write_w0(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE, 2070 new_mtu); ··· 2086 __u32 status; 2087 __u32 autoneg; 2088 __u32 mode; 2089 2090 netxen_nic_read_w0(adapter, NETXEN_NIU_MODE, &mode); 2091 if (netxen_get_niu_enable_ge(mode)) { /* Gb 10/100/1000 Mbps mode */ 2092 if (adapter->phy_read 2093 - && adapter-> 2094 - phy_read(adapter, 2095 NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, 2096 &status) == 0) { 2097 if (netxen_get_phy_link(status)) { ··· 2131 break; 2132 } 2133 if (adapter->phy_read 2134 - && adapter-> 2135 - phy_read(adapter, 2136 NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG, 2137 &autoneg) != 0) 2138 adapter->link_autoneg = autoneg;
··· 285 #define ADDR_IN_RANGE(addr, low, high) \ 286 (((addr) <= (high)) && ((addr) >= (low))) 287 288 #define NETXEN_WINDOW_ONE 0x2000000 /*CRB Window: bit 25 of CRB address */ 289 290 #define NETXEN_NIC_ZERO_PAUSE_ADDR 0ULL 291 #define NETXEN_NIC_UNIT_PAUSE_ADDR 0x200ULL ··· 541 return 0; 542 } 543 544 static int nx_p3_sre_macaddr_change(struct net_device *dev, 545 u8 *addr, unsigned op) 546 { ··· 553 int rv; 554 555 memset(&req, 0, sizeof(nx_nic_req_t)); 556 + req.qhdr |= (NX_NIC_REQUEST << 23); 557 + req.req_hdr |= NX_MAC_EVENT; 558 req.req_hdr |= ((u64)adapter->portnum << 16); 559 mac_req.op = op; 560 memcpy(&mac_req.mac_addr, addr, 6); ··· 575 nx_mac_list_t *cur, *next, *del_list, *add_list = NULL; 576 struct dev_mc_list *mc_ptr; 577 u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 578 + u32 mode = VPORT_MISS_MODE_DROP; 579 580 del_list = adapter->mac_list; 581 adapter->mac_list = NULL; 582 583 nx_p3_nic_add_mac(adapter, netdev->dev_addr, &add_list, &del_list); 584 + nx_p3_nic_add_mac(adapter, bcast_addr, &add_list, &del_list); 585 + 586 + if (netdev->flags & IFF_PROMISC) { 587 + mode = VPORT_MISS_MODE_ACCEPT_ALL; 588 + goto send_fw_cmd; 589 + } 590 + 591 + if ((netdev->flags & IFF_ALLMULTI) || 592 + (netdev->mc_count > adapter->max_mc_count)) { 593 + mode = VPORT_MISS_MODE_ACCEPT_MULTI; 594 + goto send_fw_cmd; 595 + } 596 + 597 if (netdev->mc_count > 0) { 598 for (mc_ptr = netdev->mc_list; mc_ptr; 599 mc_ptr = mc_ptr->next) { 600 nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr, 601 &add_list, &del_list); 602 } 603 } 604 + 605 + send_fw_cmd: 606 + adapter->set_promisc(adapter, mode); 607 for (cur = del_list; cur;) { 608 nx_p3_sre_macaddr_change(netdev, cur->mac_addr, NETXEN_MAC_DEL); 609 next = cur->next; ··· 615 } 616 } 617 618 + int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32 mode) 619 + { 620 + nx_nic_req_t req; 621 + 622 + memset(&req, 0, sizeof(nx_nic_req_t)); 623 + 624 + req.qhdr |= (NX_HOST_REQUEST << 23); 625 + req.req_hdr |= NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE; 626 + req.req_hdr |= ((u64)adapter->portnum << 16); 627 + req.words[0] = cpu_to_le64(mode); 628 + 629 + return netxen_send_cmd_descs(adapter, 630 + (struct cmd_desc_type0 *)&req, 1); 631 + } 632 + 633 #define NETXEN_CONFIG_INTR_COALESCE 3 634 635 /* ··· 627 628 memset(&req, 0, sizeof(nx_nic_req_t)); 629 630 + req.qhdr |= (NX_NIC_REQUEST << 23); 631 req.req_hdr |= NETXEN_CONFIG_INTR_COALESCE; 632 req.req_hdr |= ((u64)adapter->portnum << 16); 633 ··· 653 { 654 struct netxen_adapter *adapter = netdev_priv(netdev); 655 int max_mtu; 656 + int rc = 0; 657 658 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 659 max_mtu = P3_MAX_MTU; ··· 666 } 667 668 if (adapter->set_mtu) 669 + rc = adapter->set_mtu(adapter, mtu); 670 671 + if (!rc) 672 + netdev->mtu = mtu; 673 674 + return rc; 675 } 676 677 int netxen_is_flash_supported(struct netxen_adapter *adapter) ··· 1411 (netxen_nic_pci_is_same_window(adapter, off+size-1) == 0)) { 1412 write_unlock_irqrestore(&adapter->adapter_lock, flags); 1413 printk(KERN_ERR "%s out of bound pci memory access. " 1414 + "offset is 0x%llx\n", netxen_nic_driver_name, 1415 + (unsigned long long)off); 1416 return -1; 1417 } 1418 ··· 1484 (netxen_nic_pci_is_same_window(adapter, off+size-1) == 0)) { 1485 write_unlock_irqrestore(&adapter->adapter_lock, flags); 1486 printk(KERN_ERR "%s out of bound pci memory access. " 1487 + "offset is 0x%llx\n", netxen_nic_driver_name, 1488 + (unsigned long long)off); 1489 return -1; 1490 } 1491 ··· 2016 case NETXEN_BRDTYPE_P3_10G_CX4_LP: 2017 case NETXEN_BRDTYPE_P3_IMEZ: 2018 case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: 2019 + case NETXEN_BRDTYPE_P3_10G_SFP_CT: 2020 + case NETXEN_BRDTYPE_P3_10G_SFP_QT: 2021 case NETXEN_BRDTYPE_P3_10G_XFP: 2022 case NETXEN_BRDTYPE_P3_10000_BASE_T: 2023 ··· 2034 default: 2035 printk("%s: Unknown(%x)\n", netxen_nic_driver_name, 2036 boardinfo->board_type); 2037 + rv = -ENODEV; 2038 break; 2039 } 2040 ··· 2044 2045 int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu) 2046 { 2047 + new_mtu += MTU_FUDGE_FACTOR; 2048 netxen_nic_write_w0(adapter, 2049 NETXEN_NIU_GB_MAX_FRAME_SIZE(adapter->physical_port), 2050 new_mtu); ··· 2052 2053 int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu) 2054 { 2055 + new_mtu += MTU_FUDGE_FACTOR; 2056 if (adapter->physical_port == 0) 2057 netxen_nic_write_w0(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE, 2058 new_mtu); ··· 2074 __u32 status; 2075 __u32 autoneg; 2076 __u32 mode; 2077 + __u32 port_mode; 2078 2079 netxen_nic_read_w0(adapter, NETXEN_NIU_MODE, &mode); 2080 if (netxen_get_niu_enable_ge(mode)) { /* Gb 10/100/1000 Mbps mode */ 2081 + 2082 + adapter->hw_read_wx(adapter, 2083 + NETXEN_PORT_MODE_ADDR, &port_mode, 4); 2084 + if (port_mode == NETXEN_PORT_MODE_802_3_AP) { 2085 + adapter->link_speed = SPEED_1000; 2086 + adapter->link_duplex = DUPLEX_FULL; 2087 + adapter->link_autoneg = AUTONEG_DISABLE; 2088 + return; 2089 + } 2090 + 2091 if (adapter->phy_read 2092 + && adapter->phy_read(adapter, 2093 NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, 2094 &status) == 0) { 2095 if (netxen_get_phy_link(status)) { ··· 2109 break; 2110 } 2111 if (adapter->phy_read 2112 + && adapter->phy_read(adapter, 2113 NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG, 2114 &autoneg) != 0) 2115 adapter->link_autoneg = autoneg;
+5 -8
drivers/net/netxen/netxen_nic_hw.h
··· 419 #define netxen_get_niu_enable_ge(config_word) \ 420 _netxen_crb_get_bit(config_word, 1) 421 422 - /* Promiscous mode options (GbE mode only) */ 423 - typedef enum { 424 - NETXEN_NIU_PROMISC_MODE = 0, 425 - NETXEN_NIU_NON_PROMISC_MODE, 426 - NETXEN_NIU_ALLMULTI_MODE 427 - } netxen_niu_prom_mode_t; 428 429 /* 430 * NIU GB Drop CRC Register ··· 468 469 /* Set promiscuous mode for a GbE interface */ 470 int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, 471 - netxen_niu_prom_mode_t mode); 472 int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter, 473 - netxen_niu_prom_mode_t mode); 474 475 /* set the MAC address for a given MAC */ 476 int netxen_niu_macaddr_set(struct netxen_adapter *adapter,
··· 419 #define netxen_get_niu_enable_ge(config_word) \ 420 _netxen_crb_get_bit(config_word, 1) 421 422 + #define NETXEN_NIU_NON_PROMISC_MODE 0 423 + #define NETXEN_NIU_PROMISC_MODE 1 424 + #define NETXEN_NIU_ALLMULTI_MODE 2 425 426 /* 427 * NIU GB Drop CRC Register ··· 471 472 /* Set promiscuous mode for a GbE interface */ 473 int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, 474 + u32 mode); 475 int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter, 476 + u32 mode); 477 478 /* set the MAC address for a given MAC */ 479 int netxen_niu_macaddr_set(struct netxen_adapter *adapter,
+5
drivers/net/netxen/netxen_nic_init.c
··· 364 default: 365 break; 366 } 367 } 368 369 /*
··· 364 default: 365 break; 366 } 367 + 368 + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { 369 + adapter->set_mtu = nx_fw_cmd_set_mtu; 370 + adapter->set_promisc = netxen_p3_nic_set_promisc; 371 + } 372 } 373 374 /*
+56 -43
drivers/net/netxen/netxen_nic_main.c
··· 166 if (!NETXEN_IS_MSI_FAMILY(adapter)) { 167 do { 168 adapter->pci_write_immediate(adapter, 169 - ISR_INT_TARGET_STATUS, 0xffffffff); 170 mask = adapter->pci_read_immediate(adapter, 171 ISR_INT_VECTOR); 172 if (!(mask & 0x80)) ··· 176 } while (--retries); 177 178 if (!retries) { 179 - printk(KERN_NOTICE "%s: Failed to disable interrupt completely\n", 180 netxen_nic_driver_name); 181 } 182 } else { ··· 190 static void netxen_nic_enable_int(struct netxen_adapter *adapter) 191 { 192 u32 mask; 193 - 194 - DPRINTK(1, INFO, "Entered ISR Enable \n"); 195 196 if (adapter->intr_scheme != -1 && 197 adapter->intr_scheme != INTR_SCHEME_PERPORT) { ··· 212 213 if (!NETXEN_IS_MSI_FAMILY(adapter)) { 214 mask = 0xbff; 215 - if (adapter->intr_scheme != -1 && 216 - adapter->intr_scheme != INTR_SCHEME_PERPORT) { 217 adapter->pci_write_normalize(adapter, 218 CRB_INT_VECTOR, 0); 219 - } 220 - adapter->pci_write_immediate(adapter, 221 - ISR_INT_TARGET_MASK, mask); 222 } 223 - 224 - DPRINTK(1, INFO, "Done with enable Int\n"); 225 } 226 227 static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id) ··· 280 case NETXEN_BRDTYPE_P3_10G_CX4_LP: 281 case NETXEN_BRDTYPE_P3_IMEZ: 282 case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: 283 case NETXEN_BRDTYPE_P3_10G_XFP: 284 case NETXEN_BRDTYPE_P3_10000_BASE_T: 285 adapter->msix_supported = !!use_msi_x; ··· 299 case NETXEN_BRDTYPE_P3_REF_QG: 300 case NETXEN_BRDTYPE_P3_4_GB: 301 case NETXEN_BRDTYPE_P3_4_GB_MM: 302 case NETXEN_BRDTYPE_P2_SB35_4G: 303 case NETXEN_BRDTYPE_P2_SB31_2G: 304 adapter->msix_supported = 0; ··· 702 adapter->status &= ~NETXEN_NETDEV_STATUS; 703 adapter->rx_csum = 1; 704 adapter->mc_enabled = 0; 705 - if (NX_IS_REVISION_P3(revision_id)) { 706 adapter->max_mc_count = 38; 707 - adapter->max_rds_rings = 2; 708 - } else { 709 adapter->max_mc_count = 16; 710 - adapter->max_rds_rings = 3; 711 - } 712 713 netdev->open = netxen_nic_open; 714 netdev->stop = netxen_nic_close; ··· 778 if (adapter->portnum == 0) 779 first_driver = 1; 780 } 781 - adapter->crb_addr_cmd_producer = crb_cmd_producer[adapter->portnum]; 782 - adapter->crb_addr_cmd_consumer = crb_cmd_consumer[adapter->portnum]; 783 - netxen_nic_update_cmd_producer(adapter, 0); 784 - netxen_nic_update_cmd_consumer(adapter, 0); 785 786 if (first_driver) { 787 first_boot = adapter->pci_read_normalize(adapter, ··· 1048 return -EIO; 1049 } 1050 1051 err = netxen_alloc_sw_resources(adapter); 1052 if (err) { 1053 printk(KERN_ERR "%s: Error in setting sw resources\n", ··· 1074 crb_cmd_producer[adapter->portnum]; 1075 adapter->crb_addr_cmd_consumer = 1076 crb_cmd_consumer[adapter->portnum]; 1077 - } 1078 1079 - netxen_nic_update_cmd_producer(adapter, 0); 1080 - netxen_nic_update_cmd_consumer(adapter, 0); 1081 1082 for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) { 1083 for (ring = 0; ring < adapter->max_rds_rings; ring++) ··· 1113 netxen_nic_set_link_parameters(adapter); 1114 1115 netdev->set_multicast_list(netdev); 1116 - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 1117 - nx_fw_cmd_set_mtu(adapter, netdev->mtu); 1118 - else 1119 adapter->set_mtu(adapter, netdev->mtu); 1120 1121 mod_timer(&adapter->watchdog_timer, jiffies); ··· 1408 1409 port = adapter->physical_port; 1410 1411 - if (adapter->ahw.board_type == NETXEN_NIC_GBE) { 1412 - val = adapter->pci_read_normalize(adapter, CRB_XG_STATE); 1413 - linkup = (val >> port) & 1; 1414 } else { 1415 - if (adapter->fw_major < 4) { 1416 - val = adapter->pci_read_normalize(adapter, 1417 - CRB_XG_STATE); 1418 val = (val >> port*8) & 0xff; 1419 linkup = (val == XG_LINK_UP); 1420 - } else { 1421 - val = adapter->pci_read_normalize(adapter, 1422 - CRB_XG_STATE_P3); 1423 - val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val); 1424 - linkup = (val == XG_LINK_UP_P3); 1425 } 1426 } ··· 1530 struct netxen_adapter *adapter = data; 1531 u32 our_int = 0; 1532 1533 - our_int = adapter->pci_read_normalize(adapter, CRB_INT_VECTOR); 1534 - /* not our interrupt */ 1535 - if ((our_int & (0x80 << adapter->portnum)) == 0) 1536 return IRQ_NONE; 1537 1538 - if (adapter->intr_scheme == INTR_SCHEME_PERPORT) { 1539 - /* claim interrupt */ 1540 - adapter->pci_write_normalize(adapter, CRB_INT_VECTOR, 1541 our_int & ~((u32)(0x80 << adapter->portnum))); 1542 } 1543 1544 netxen_handle_int(adapter);
··· 166 if (!NETXEN_IS_MSI_FAMILY(adapter)) { 167 do { 168 adapter->pci_write_immediate(adapter, 169 + adapter->legacy_intr.tgt_status_reg, 170 + 0xffffffff); 171 mask = adapter->pci_read_immediate(adapter, 172 ISR_INT_VECTOR); 173 if (!(mask & 0x80)) ··· 175 } while (--retries); 176 177 if (!retries) { 178 + printk(KERN_NOTICE "%s: Failed to disable interrupt\n", 179 netxen_nic_driver_name); 180 } 181 } else { ··· 189 static void netxen_nic_enable_int(struct netxen_adapter *adapter) 190 { 191 u32 mask; 192 193 if (adapter->intr_scheme != -1 && 194 adapter->intr_scheme != INTR_SCHEME_PERPORT) { ··· 213 214 if (!NETXEN_IS_MSI_FAMILY(adapter)) { 215 mask = 0xbff; 216 + if (adapter->intr_scheme == INTR_SCHEME_PERPORT) 217 + adapter->pci_write_immediate(adapter, 218 + adapter->legacy_intr.tgt_mask_reg, mask); 219 + else 220 adapter->pci_write_normalize(adapter, 221 CRB_INT_VECTOR, 0); 222 } 223 } 224 225 static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id) ··· 284 case NETXEN_BRDTYPE_P3_10G_CX4_LP: 285 case NETXEN_BRDTYPE_P3_IMEZ: 286 case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: 287 + case NETXEN_BRDTYPE_P3_10G_SFP_QT: 288 + case NETXEN_BRDTYPE_P3_10G_SFP_CT: 289 case NETXEN_BRDTYPE_P3_10G_XFP: 290 case NETXEN_BRDTYPE_P3_10000_BASE_T: 291 adapter->msix_supported = !!use_msi_x; ··· 301 case NETXEN_BRDTYPE_P3_REF_QG: 302 case NETXEN_BRDTYPE_P3_4_GB: 303 case NETXEN_BRDTYPE_P3_4_GB_MM: 304 + adapter->msix_supported = 0; 305 + adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_10G; 306 + break; 307 + 308 case NETXEN_BRDTYPE_P2_SB35_4G: 309 case NETXEN_BRDTYPE_P2_SB31_2G: 310 adapter->msix_supported = 0; ··· 700 adapter->status &= ~NETXEN_NETDEV_STATUS; 701 adapter->rx_csum = 1; 702 adapter->mc_enabled = 0; 703 + if (NX_IS_REVISION_P3(revision_id)) 704 adapter->max_mc_count = 38; 705 + else 706 adapter->max_mc_count = 16; 707 708 netdev->open = netxen_nic_open; 709 netdev->stop = netxen_nic_close; ··· 779 if (adapter->portnum == 0) 780 first_driver = 1; 781 } 782 783 if (first_driver) { 784 first_boot = adapter->pci_read_normalize(adapter, ··· 1053 return -EIO; 1054 } 1055 1056 + if (adapter->fw_major < 4) 1057 + adapter->max_rds_rings = 3; 1058 + else 1059 + adapter->max_rds_rings = 2; 1060 + 1061 err = netxen_alloc_sw_resources(adapter); 1062 if (err) { 1063 printk(KERN_ERR "%s: Error in setting sw resources\n", ··· 1074 crb_cmd_producer[adapter->portnum]; 1075 adapter->crb_addr_cmd_consumer = 1076 crb_cmd_consumer[adapter->portnum]; 1077 1078 + netxen_nic_update_cmd_producer(adapter, 0); 1079 + netxen_nic_update_cmd_consumer(adapter, 0); 1080 + } 1081 1082 for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) { 1083 for (ring = 0; ring < adapter->max_rds_rings; ring++) ··· 1113 netxen_nic_set_link_parameters(adapter); 1114 1115 netdev->set_multicast_list(netdev); 1116 + if (adapter->set_mtu) 1117 adapter->set_mtu(adapter, netdev->mtu); 1118 1119 mod_timer(&adapter->watchdog_timer, jiffies); ··· 1410 1411 port = adapter->physical_port; 1412 1413 + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { 1414 + val = adapter->pci_read_normalize(adapter, CRB_XG_STATE_P3); 1415 + val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val); 1416 + linkup = (val == XG_LINK_UP_P3); 1417 } else { 1418 + val = adapter->pci_read_normalize(adapter, CRB_XG_STATE); 1419 + if (adapter->ahw.board_type == NETXEN_NIC_GBE) 1420 + linkup = (val >> port) & 1; 1421 + else { 1422 val = (val >> port*8) & 0xff; 1423 linkup = (val == XG_LINK_UP); 1424 } 1425 } ··· 1535 struct netxen_adapter *adapter = data; 1536 u32 our_int = 0; 1537 1538 + u32 status = 0; 1539 1540 + status = adapter->pci_read_immediate(adapter, ISR_INT_VECTOR); 1541 1542 + if (!(status & adapter->legacy_intr.int_vec_bit)) 1543 return IRQ_NONE; 1544 1545 + if (adapter->ahw.revision_id >= NX_P3_B1) { 1546 + /* check interrupt state machine, to be sure */ 1547 + status = adapter->pci_read_immediate(adapter, 1548 + ISR_INT_STATE_REG); 1549 + if (!ISR_LEGACY_INT_TRIGGERED(status)) 1550 + return IRQ_NONE; 1551 + 1552 + } else if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { 1553 + 1554 + our_int = adapter->pci_read_normalize(adapter, CRB_INT_VECTOR); 1555 + /* not our interrupt */ 1556 + if ((our_int & (0x80 << adapter->portnum)) == 0) 1557 + return IRQ_NONE; 1558 + 1559 + if (adapter->intr_scheme == INTR_SCHEME_PERPORT) { 1560 + /* claim interrupt */ 1561 + adapter->pci_write_normalize(adapter, 1562 + CRB_INT_VECTOR, 1563 our_int & ~((u32)(0x80 << adapter->portnum))); 1564 + } 1565 } 1566 1567 netxen_handle_int(adapter);
+14 -2
drivers/net/netxen/netxen_nic_niu.c
··· 610 int i; 611 DECLARE_MAC_BUF(mac); 612 613 for (i = 0; i < 10; i++) { 614 temp[0] = temp[1] = 0; 615 memcpy(temp + 2, addr, 2); ··· 730 __u32 mac_cfg0; 731 u32 port = adapter->physical_port; 732 733 if (port > NETXEN_NIU_MAX_GBE_PORTS) 734 return -EINVAL; 735 mac_cfg0 = 0; ··· 749 __u32 mac_cfg; 750 u32 port = adapter->physical_port; 751 752 if (port > NETXEN_NIU_MAX_XG_PORTS) 753 return -EINVAL; 754 ··· 764 765 /* Set promiscuous mode for a GbE interface */ 766 int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, 767 - netxen_niu_prom_mode_t mode) 768 { 769 __u32 reg; 770 u32 port = adapter->physical_port; ··· 827 int phy = adapter->physical_port; 828 u8 temp[4]; 829 u32 val; 830 831 if ((phy < 0) || (phy > NETXEN_NIU_MAX_XG_PORTS)) 832 return -EIO; ··· 906 #endif /* 0 */ 907 908 int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter, 909 - netxen_niu_prom_mode_t mode) 910 { 911 __u32 reg; 912 u32 port = adapter->physical_port;
··· 610 int i; 611 DECLARE_MAC_BUF(mac); 612 613 + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 614 + return 0; 615 + 616 for (i = 0; i < 10; i++) { 617 temp[0] = temp[1] = 0; 618 memcpy(temp + 2, addr, 2); ··· 727 __u32 mac_cfg0; 728 u32 port = adapter->physical_port; 729 730 + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 731 + return 0; 732 + 733 if (port > NETXEN_NIU_MAX_GBE_PORTS) 734 return -EINVAL; 735 mac_cfg0 = 0; ··· 743 __u32 mac_cfg; 744 u32 port = adapter->physical_port; 745 746 + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 747 + return 0; 748 + 749 if (port > NETXEN_NIU_MAX_XG_PORTS) 750 return -EINVAL; 751 ··· 755 756 /* Set promiscuous mode for a GbE interface */ 757 int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, 758 + u32 mode) 759 { 760 __u32 reg; 761 u32 port = adapter->physical_port; ··· 818 int phy = adapter->physical_port; 819 u8 temp[4]; 820 u32 val; 821 + 822 + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 823 + return 0; 824 825 if ((phy < 0) || (phy > NETXEN_NIU_MAX_XG_PORTS)) 826 return -EIO; ··· 894 #endif /* 0 */ 895 896 int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter, 897 + u32 mode) 898 { 899 __u32 reg; 900 u32 port = adapter->physical_port;
+2 -2
drivers/net/netxen/netxen_nic_phan_reg.h
··· 95 #define CRB_HOST_STS_PROD NETXEN_NIC_REG(0xdc) 96 #define CRB_HOST_STS_CONS NETXEN_NIC_REG(0xe0) 97 #define CRB_PEG_CMD_PROD NETXEN_NIC_REG(0xe4) 98 - #define CRB_PEG_CMD_CONS NETXEN_NIC_REG(0xe8) 99 - #define CRB_HOST_BUFFER_PROD NETXEN_NIC_REG(0xec) 100 #define CRB_HOST_BUFFER_CONS NETXEN_NIC_REG(0xf0) 101 #define CRB_JUMBO_BUFFER_PROD NETXEN_NIC_REG(0xf4) 102 #define CRB_JUMBO_BUFFER_CONS NETXEN_NIC_REG(0xf8)
··· 95 #define CRB_HOST_STS_PROD NETXEN_NIC_REG(0xdc) 96 #define CRB_HOST_STS_CONS NETXEN_NIC_REG(0xe0) 97 #define CRB_PEG_CMD_PROD NETXEN_NIC_REG(0xe4) 98 + #define CRB_PF_LINK_SPEED_1 NETXEN_NIC_REG(0xe8) 99 + #define CRB_PF_LINK_SPEED_2 NETXEN_NIC_REG(0xec) 100 #define CRB_HOST_BUFFER_CONS NETXEN_NIC_REG(0xf0) 101 #define CRB_JUMBO_BUFFER_PROD NETXEN_NIC_REG(0xf4) 102 #define CRB_JUMBO_BUFFER_CONS NETXEN_NIC_REG(0xf8)
-1
drivers/net/ni5010.c
··· 648 PRINTK2((KERN_DEBUG "%s: entering set_multicast_list\n", dev->name)); 649 650 if (dev->flags&IFF_PROMISC || dev->flags&IFF_ALLMULTI || dev->mc_list) { 651 - dev->flags |= IFF_PROMISC; 652 outb(RMD_PROMISC, EDLC_RMODE); /* Enable promiscuous mode */ 653 PRINTK((KERN_DEBUG "%s: Entering promiscuous mode\n", dev->name)); 654 } else {
··· 648 PRINTK2((KERN_DEBUG "%s: entering set_multicast_list\n", dev->name)); 649 650 if (dev->flags&IFF_PROMISC || dev->flags&IFF_ALLMULTI || dev->mc_list) { 651 outb(RMD_PROMISC, EDLC_RMODE); /* Enable promiscuous mode */ 652 PRINTK((KERN_DEBUG "%s: Entering promiscuous mode\n", dev->name)); 653 } else {
+1 -1
drivers/net/ni52.c
··· 621 if (num_addrs > len) { 622 printk(KERN_ERR "%s: switching to promisc. mode\n", 623 dev->name); 624 - dev->flags |= IFF_PROMISC; 625 } 626 } 627 if (dev->flags & IFF_PROMISC)
··· 621 if (num_addrs > len) { 622 printk(KERN_ERR "%s: switching to promisc. mode\n", 623 dev->name); 624 + writeb(0x01, &cfg_cmd->promisc); 625 } 626 } 627 if (dev->flags & IFF_PROMISC)
+6 -17
drivers/net/qla3xxx.c
··· 38 39 #define DRV_NAME "qla3xxx" 40 #define DRV_STRING "QLogic ISP3XXX Network Driver" 41 - #define DRV_VERSION "v2.03.00-k4" 42 #define PFX DRV_NAME " " 43 44 static const char ql3xxx_driver_name[] = DRV_NAME; ··· 3495 case ISP_CONTROL_FN0_NET: 3496 qdev->mac_index = 0; 3497 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; 3498 - qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number; 3499 - qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number; 3500 qdev->mb_bit_mask = FN0_MA_BITS_MASK; 3501 qdev->PHYAddr = PORT0_PHY_ADDRESS; 3502 if (port_status & PORT_STATUS_SM0) ··· 3506 case ISP_CONTROL_FN1_NET: 3507 qdev->mac_index = 1; 3508 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; 3509 - qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number; 3510 - qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number; 3511 qdev->mb_bit_mask = FN1_MA_BITS_MASK; 3512 qdev->PHYAddr = PORT1_PHY_ADDRESS; 3513 if (port_status & PORT_STATUS_SM1) ··· 3724 { 3725 struct ql3_adapter *qdev = netdev_priv(ndev); 3726 return (ql_adapter_up(qdev)); 3727 - } 3728 - 3729 - static void ql3xxx_set_multicast_list(struct net_device *ndev) 3730 - { 3731 - /* 3732 - * We are manually parsing the list in the net_device structure. 3733 - */ 3734 - return; 3735 } 3736 3737 static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) ··· 3995 ndev->open = ql3xxx_open; 3996 ndev->hard_start_xmit = ql3xxx_send; 3997 ndev->stop = ql3xxx_close; 3998 - ndev->set_multicast_list = ql3xxx_set_multicast_list; 3999 SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops); 4000 ndev->set_mac_address = ql3xxx_set_mac_address; 4001 ndev->tx_timeout = ql3xxx_tx_timeout; ··· 4031 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); 4032 4033 ndev->tx_queue_len = NUM_REQ_Q_ENTRIES; 4034 - 4035 - /* Turn off support for multicasting */ 4036 - ndev->flags &= ~IFF_MULTICAST; 4037 4038 /* Record PCI bus information. */ 4039 ql_get_board_info(qdev);
··· 38 39 #define DRV_NAME "qla3xxx" 40 #define DRV_STRING "QLogic ISP3XXX Network Driver" 41 + #define DRV_VERSION "v2.03.00-k5" 42 #define PFX DRV_NAME " " 43 44 static const char ql3xxx_driver_name[] = DRV_NAME; ··· 3495 case ISP_CONTROL_FN0_NET: 3496 qdev->mac_index = 0; 3497 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; 3498 qdev->mb_bit_mask = FN0_MA_BITS_MASK; 3499 qdev->PHYAddr = PORT0_PHY_ADDRESS; 3500 if (port_status & PORT_STATUS_SM0) ··· 3508 case ISP_CONTROL_FN1_NET: 3509 qdev->mac_index = 1; 3510 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; 3511 qdev->mb_bit_mask = FN1_MA_BITS_MASK; 3512 qdev->PHYAddr = PORT1_PHY_ADDRESS; 3513 if (port_status & PORT_STATUS_SM1) ··· 3728 { 3729 struct ql3_adapter *qdev = netdev_priv(ndev); 3730 return (ql_adapter_up(qdev)); 3731 } 3732 3733 static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) ··· 4007 ndev->open = ql3xxx_open; 4008 ndev->hard_start_xmit = ql3xxx_send; 4009 ndev->stop = ql3xxx_close; 4010 + /* ndev->set_multicast_list 4011 + * This device is one side of a two-function adapter 4012 + * (NIC and iSCSI). Promiscuous mode setting/clearing is 4013 + * not allowed from the NIC side. 4014 + */ 4015 SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops); 4016 ndev->set_mac_address = ql3xxx_set_mac_address; 4017 ndev->tx_timeout = ql3xxx_tx_timeout; ··· 4039 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); 4040 4041 ndev->tx_queue_len = NUM_REQ_Q_ENTRIES; 4042 4043 /* Record PCI bus information. */ 4044 ql_get_board_info(qdev);
-105
drivers/net/qla3xxx.h
··· 14 15 #define OPCODE_OB_MAC_IOCB_FN0 0x01 16 #define OPCODE_OB_MAC_IOCB_FN2 0x21 17 - #define OPCODE_OB_TCP_IOCB_FN0 0x03 18 - #define OPCODE_OB_TCP_IOCB_FN2 0x23 19 - #define OPCODE_UPDATE_NCB_IOCB_FN0 0x00 20 - #define OPCODE_UPDATE_NCB_IOCB_FN2 0x20 21 22 - #define OPCODE_UPDATE_NCB_IOCB 0xF0 23 #define OPCODE_IB_MAC_IOCB 0xF9 24 #define OPCODE_IB_3032_MAC_IOCB 0x09 25 #define OPCODE_IB_IP_IOCB 0xFA 26 #define OPCODE_IB_3032_IP_IOCB 0x0A 27 - #define OPCODE_IB_TCP_IOCB 0xFB 28 - #define OPCODE_DUMP_PROTO_IOCB 0xFE 29 - #define OPCODE_BUFFER_ALERT_IOCB 0xFB 30 31 #define OPCODE_FUNC_ID_MASK 0x30 32 #define OUTBOUND_MAC_IOCB 0x01 /* plus function bits */ 33 - #define OUTBOUND_TCP_IOCB 0x03 /* plus function bits */ 34 - #define UPDATE_NCB_IOCB 0x00 /* plus function bits */ 35 36 #define FN0_MA_BITS_MASK 0x00 37 #define FN1_MA_BITS_MASK 0x80 ··· 149 __le32 reserved2; 150 }; 151 152 - struct ob_tcp_iocb_req { 153 - u8 opcode; 154 - 155 - u8 flags0; 156 - #define OB_TCP_IOCB_REQ_P 0x80 157 - #define OB_TCP_IOCB_REQ_CI 0x20 158 - #define OB_TCP_IOCB_REQ_H 0x10 159 - #define OB_TCP_IOCB_REQ_LN 0x08 160 - #define OB_TCP_IOCB_REQ_K 0x04 161 - #define OB_TCP_IOCB_REQ_D 0x02 162 - #define OB_TCP_IOCB_REQ_I 0x01 163 - 164 - u8 flags1; 165 - #define OB_TCP_IOCB_REQ_OSM 0x40 166 - #define OB_TCP_IOCB_REQ_URG 0x20 167 - #define OB_TCP_IOCB_REQ_ACK 0x10 168 - #define OB_TCP_IOCB_REQ_PSH 0x08 169 - #define OB_TCP_IOCB_REQ_RST 0x04 170 - #define OB_TCP_IOCB_REQ_SYN 0x02 171 - #define OB_TCP_IOCB_REQ_FIN 0x01 172 - 173 - u8 options_len; 174 - #define OB_TCP_IOCB_REQ_OMASK 0xF0 175 - #define OB_TCP_IOCB_REQ_SHIFT 4 176 - 177 - __le32 transaction_id; 178 - __le32 data_len; 179 - __le32 hncb_ptr_low; 180 - __le32 hncb_ptr_high; 181 - __le32 buf_addr0_low; 182 - __le32 buf_addr0_high; 183 - __le32 buf_0_len; 184 - __le32 buf_addr1_low; 185 - __le32 buf_addr1_high; 186 - __le32 buf_1_len; 187 - __le32 buf_addr2_low; 188 - __le32 buf_addr2_high; 189 - __le32 buf_2_len; 190 - __le32 time_stamp; 191 - __le32 reserved1; 192 - }; 193 - 194 - struct ob_tcp_iocb_rsp { 195 - u8 opcode; 196 - 197 - u8 flags0; 198 - #define OB_TCP_IOCB_RSP_C 0x20 199 - #define OB_TCP_IOCB_RSP_H 0x10 200 - #define OB_TCP_IOCB_RSP_LN 0x08 201 - #define OB_TCP_IOCB_RSP_K 0x04 202 - #define OB_TCP_IOCB_RSP_D 0x02 203 - #define OB_TCP_IOCB_RSP_I 0x01 204 - 205 - u8 flags1; 206 - #define OB_TCP_IOCB_RSP_E 0x10 207 - #define OB_TCP_IOCB_RSP_W 0x08 208 - #define OB_TCP_IOCB_RSP_P 0x04 209 - #define OB_TCP_IOCB_RSP_T 0x02 210 - #define OB_TCP_IOCB_RSP_F 0x01 211 - 212 - u8 state; 213 - #define OB_TCP_IOCB_RSP_SMASK 0xF0 214 - #define OB_TCP_IOCB_RSP_SHIFT 4 215 - 216 - __le32 transaction_id; 217 - __le32 local_ncb_ptr; 218 - __le32 reserved0; 219 - }; 220 - 221 struct ib_ip_iocb_rsp { 222 u8 opcode; 223 #define IB_IP_IOCB_RSP_3032_V 0x80 ··· 173 #define IB_IP_IOCB_RSP_3032_IPE 0x20 174 __le16 reserved; 175 #define IB_IP_IOCB_RSP_R 0x01 176 - __le32 ial_low; 177 - __le32 ial_high; 178 - }; 179 - 180 - struct ib_tcp_iocb_rsp { 181 - u8 opcode; 182 - u8 flags; 183 - #define IB_TCP_IOCB_RSP_P 0x80 184 - #define IB_TCP_IOCB_RSP_T 0x40 185 - #define IB_TCP_IOCB_RSP_D 0x20 186 - #define IB_TCP_IOCB_RSP_N 0x10 187 - #define IB_TCP_IOCB_RSP_IP 0x03 188 - #define IB_TCP_FLAG_MASK 0xf0 189 - #define IB_TCP_FLAG_IOCB_SYN 0x00 190 - 191 - #define TCP_IB_RSP_FLAGS(x) (x->flags & ~IB_TCP_FLAG_MASK) 192 - 193 - __le16 length; 194 - __le32 hncb_ref_num; 195 __le32 ial_low; 196 __le32 ial_high; 197 }; ··· 1168 u32 small_buf_release_cnt; 1169 u32 small_buf_total_size; 1170 1171 - /* ISR related, saves status for DPC. */ 1172 - u32 control_status; 1173 - 1174 struct eeprom_data nvram_data; 1175 - struct timer_list ioctl_timer; 1176 u32 port_link_state; 1177 - u32 last_rsp_offset; 1178 1179 /* 4022 specific */ 1180 u32 mac_index; /* Driver's MAC number can be 0 or 1 for first and second networking functions respectively */ 1181 u32 PHYAddr; /* Address of PHY 0x1e00 Port 0 and 0x1f00 Port 1 */ 1182 u32 mac_ob_opcode; /* Opcode to use on mac transmission */ 1183 - u32 tcp_ob_opcode; /* Opcode to use on tcp transmission */ 1184 - u32 update_ob_opcode; /* Opcode to use for updating NCB */ 1185 u32 mb_bit_mask; /* MA Bits mask to use on transmission */ 1186 u32 numPorts; 1187 struct workqueue_struct *workqueue;
··· 14 15 #define OPCODE_OB_MAC_IOCB_FN0 0x01 16 #define OPCODE_OB_MAC_IOCB_FN2 0x21 17 18 #define OPCODE_IB_MAC_IOCB 0xF9 19 #define OPCODE_IB_3032_MAC_IOCB 0x09 20 #define OPCODE_IB_IP_IOCB 0xFA 21 #define OPCODE_IB_3032_IP_IOCB 0x0A 22 23 #define OPCODE_FUNC_ID_MASK 0x30 24 #define OUTBOUND_MAC_IOCB 0x01 /* plus function bits */ 25 26 #define FN0_MA_BITS_MASK 0x00 27 #define FN1_MA_BITS_MASK 0x80 ··· 159 __le32 reserved2; 160 }; 161 162 struct ib_ip_iocb_rsp { 163 u8 opcode; 164 #define IB_IP_IOCB_RSP_3032_V 0x80 ··· 252 #define IB_IP_IOCB_RSP_3032_IPE 0x20 253 __le16 reserved; 254 #define IB_IP_IOCB_RSP_R 0x01 255 __le32 ial_low; 256 __le32 ial_high; 257 }; ··· 1266 u32 small_buf_release_cnt; 1267 u32 small_buf_total_size; 1268 1269 struct eeprom_data nvram_data; 1270 u32 port_link_state; 1271 1272 /* 4022 specific */ 1273 u32 mac_index; /* Driver's MAC number can be 0 or 1 for first and second networking functions respectively */ 1274 u32 PHYAddr; /* Address of PHY 0x1e00 Port 0 and 0x1f00 Port 1 */ 1275 u32 mac_ob_opcode; /* Opcode to use on mac transmission */ 1276 u32 mb_bit_mask; /* MA Bits mask to use on transmission */ 1277 u32 numPorts; 1278 struct workqueue_struct *workqueue;
+50 -19
drivers/net/sh_eth.c
··· 34 35 #include "sh_eth.h" 36 37 /* 38 * Program the hardware MAC address from dev->dev_addr. 39 */ ··· 263 /* RX descriptor */ 264 rxdesc = &mdp->rx_ring[i]; 265 rxdesc->addr = (u32)skb->data & ~0x3UL; 266 - rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP); 267 268 /* The size of the buffer is 16 byte boundary. */ 269 rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F; ··· 285 mdp->dirty_rx = (u32) (i - RX_RING_SIZE); 286 287 /* Mark the last entry as wrapping the ring. */ 288 - rxdesc->status |= cpu_to_le32(RD_RDEL); 289 290 memset(mdp->tx_ring, 0, tx_ringsize); 291 ··· 293 for (i = 0; i < TX_RING_SIZE; i++) { 294 mdp->tx_skbuff[i] = NULL; 295 txdesc = &mdp->tx_ring[i]; 296 - txdesc->status = cpu_to_le32(TD_TFP); 297 txdesc->buffer_length = 0; 298 if (i == 0) { 299 - /* Rx descriptor address set */ 300 ctrl_outl((u32)txdesc, ioaddr + TDLAR); 301 #if defined(CONFIG_CPU_SUBTYPE_SH7763) 302 ctrl_outl((u32)txdesc, ioaddr + TDFAR); ··· 304 } 305 } 306 307 - /* Rx descriptor address set */ 308 #if defined(CONFIG_CPU_SUBTYPE_SH7763) 309 ctrl_outl((u32)txdesc, ioaddr + TDFXR); 310 ctrl_outl(0x1, ioaddr + TDFFR); 311 #endif 312 313 - txdesc->status |= cpu_to_le32(TD_TDLE); 314 } 315 316 /* Get skb and descriptor buffer */ ··· 478 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { 479 entry = mdp->dirty_tx % TX_RING_SIZE; 480 txdesc = &mdp->tx_ring[entry]; 481 - if (txdesc->status & cpu_to_le32(TD_TACT)) 482 break; 483 /* Free the original skb. */ 484 if (mdp->tx_skbuff[entry]) { ··· 486 mdp->tx_skbuff[entry] = NULL; 487 freeNum++; 488 } 489 - txdesc->status = cpu_to_le32(TD_TFP); 490 if (entry >= TX_RING_SIZE - 1) 491 - txdesc->status |= cpu_to_le32(TD_TDLE); 492 493 mdp->stats.tx_packets++; 494 mdp->stats.tx_bytes += txdesc->buffer_length; ··· 509 u32 desc_status, reserve = 0; 510 511 rxdesc = &mdp->rx_ring[entry]; 512 - while (!(rxdesc->status & cpu_to_le32(RD_RACT))) { 513 - desc_status = le32_to_cpu(rxdesc->status); 514 pkt_len = rxdesc->frame_length; 515 516 if (--boguscnt < 0) ··· 545 mdp->stats.rx_packets++; 546 mdp->stats.rx_bytes += pkt_len; 547 } 548 - rxdesc->status |= cpu_to_le32(RD_RACT); 549 entry = (++mdp->cur_rx) % RX_RING_SIZE; 550 } 551 ··· 575 } 576 if (entry >= RX_RING_SIZE - 1) 577 rxdesc->status |= 578 - cpu_to_le32(RD_RACT | RD_RFP | RD_RDEL); 579 else 580 rxdesc->status |= 581 - cpu_to_le32(RD_RACT | RD_RFP); 582 } 583 584 /* Restart Rx engine if stopped. 
*/ ··· 954 txdesc->buffer_length = skb->len; 955 956 if (entry >= TX_RING_SIZE - 1) 957 - txdesc->status |= cpu_to_le32(TD_TACT | TD_TDLE); 958 else 959 - txdesc->status |= cpu_to_le32(TD_TACT); 960 961 mdp->cur_tx++; 962 ··· 1182 struct resource *res; 1183 struct net_device *ndev = NULL; 1184 struct sh_eth_private *mdp; 1185 1186 /* get base addr */ 1187 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ··· 1220 mdp = netdev_priv(ndev); 1221 spin_lock_init(&mdp->lock); 1222 1223 /* get PHY ID */ 1224 - mdp->phy_id = (int)pdev->dev.platform_data; 1225 1226 /* set function */ 1227 ndev->open = sh_eth_open; ··· 1244 1245 /* First device only init */ 1246 if (!devno) { 1247 /* reset device */ 1248 ctrl_outl(ARSTR_ARSTR, ARSTR); 1249 mdelay(1); 1250 1251 /* TSU init (Init only)*/ 1252 sh_eth_tsu_init(SH_TSU_ADDR); 1253 } 1254 1255 /* network device register */ ··· 1271 ndev->name, CARDNAME, (u32) ndev->base_addr); 1272 1273 for (i = 0; i < 5; i++) 1274 - printk(KERN_INFO "%02X:", ndev->dev_addr[i]); 1275 - printk(KERN_INFO "%02X, IRQ %d.\n", ndev->dev_addr[i], ndev->irq); 1276 1277 platform_set_drvdata(pdev, ndev); 1278
··· 34 35 #include "sh_eth.h" 36 37 + /* CPU <-> EDMAC endian convert */ 38 + static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x) 39 + { 40 + switch (mdp->edmac_endian) { 41 + case EDMAC_LITTLE_ENDIAN: 42 + return cpu_to_le32(x); 43 + case EDMAC_BIG_ENDIAN: 44 + return cpu_to_be32(x); 45 + } 46 + return x; 47 + } 48 + 49 + static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x) 50 + { 51 + switch (mdp->edmac_endian) { 52 + case EDMAC_LITTLE_ENDIAN: 53 + return le32_to_cpu(x); 54 + case EDMAC_BIG_ENDIAN: 55 + return be32_to_cpu(x); 56 + } 57 + return x; 58 + } 59 + 60 /* 61 * Program the hardware MAC address from dev->dev_addr. 62 */ ··· 240 /* RX descriptor */ 241 rxdesc = &mdp->rx_ring[i]; 242 rxdesc->addr = (u32)skb->data & ~0x3UL; 243 + rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); 244 245 /* The size of the buffer is 16 byte boundary. */ 246 rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F; ··· 262 mdp->dirty_rx = (u32) (i - RX_RING_SIZE); 263 264 /* Mark the last entry as wrapping the ring. */ 265 + rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL); 266 267 memset(mdp->tx_ring, 0, tx_ringsize); 268 ··· 270 for (i = 0; i < TX_RING_SIZE; i++) { 271 mdp->tx_skbuff[i] = NULL; 272 txdesc = &mdp->tx_ring[i]; 273 + txdesc->status = cpu_to_edmac(mdp, TD_TFP); 274 txdesc->buffer_length = 0; 275 if (i == 0) { 276 + /* Tx descriptor address set */ 277 ctrl_outl((u32)txdesc, ioaddr + TDLAR); 278 #if defined(CONFIG_CPU_SUBTYPE_SH7763) 279 ctrl_outl((u32)txdesc, ioaddr + TDFAR); ··· 281 } 282 } 283 284 + /* Tx descriptor address set */ 285 #if defined(CONFIG_CPU_SUBTYPE_SH7763) 286 ctrl_outl((u32)txdesc, ioaddr + TDFXR); 287 ctrl_outl(0x1, ioaddr + TDFFR); 288 #endif 289 290 + txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); 291 } 292 293 /* Get skb and descriptor buffer */ ··· 455 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { 456 entry = mdp->dirty_tx % TX_RING_SIZE; 457 txdesc = &mdp->tx_ring[entry]; 458 + if (txdesc->status & cpu_to_edmac(mdp, TD_TACT)) 459 break; 460 /* Free the original skb. */ 461 if (mdp->tx_skbuff[entry]) { ··· 463 mdp->tx_skbuff[entry] = NULL; 464 freeNum++; 465 } 466 + txdesc->status = cpu_to_edmac(mdp, TD_TFP); 467 if (entry >= TX_RING_SIZE - 1) 468 + txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); 469 470 mdp->stats.tx_packets++; 471 mdp->stats.tx_bytes += txdesc->buffer_length; ··· 486 u32 desc_status, reserve = 0; 487 488 rxdesc = &mdp->rx_ring[entry]; 489 + while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { 490 + desc_status = edmac_to_cpu(mdp, rxdesc->status); 491 pkt_len = rxdesc->frame_length; 492 493 if (--boguscnt < 0) ··· 522 mdp->stats.rx_packets++; 523 mdp->stats.rx_bytes += pkt_len; 524 } 525 + rxdesc->status |= cpu_to_edmac(mdp, RD_RACT); 526 entry = (++mdp->cur_rx) % RX_RING_SIZE; 527 } 528 ··· 552 } 553 if (entry >= RX_RING_SIZE - 1) 554 rxdesc->status |= 555 + cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL); 556 else 557 rxdesc->status |= 558 + cpu_to_edmac(mdp, RD_RACT | RD_RFP); 559 } 560 561 /* Restart Rx engine if stopped. 
*/ ··· 931 txdesc->buffer_length = skb->len; 932 933 if (entry >= TX_RING_SIZE - 1) 934 + txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); 935 else 936 + txdesc->status |= cpu_to_edmac(mdp, TD_TACT); 937 938 mdp->cur_tx++; 939 ··· 1159 struct resource *res; 1160 struct net_device *ndev = NULL; 1161 struct sh_eth_private *mdp; 1162 + struct sh_eth_plat_data *pd; 1163 1164 /* get base addr */ 1165 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ··· 1196 mdp = netdev_priv(ndev); 1197 spin_lock_init(&mdp->lock); 1198 1199 + pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data); 1200 /* get PHY ID */ 1201 + mdp->phy_id = pd->phy; 1202 + /* EDMAC endian */ 1203 + mdp->edmac_endian = pd->edmac_endian; 1204 1205 /* set function */ 1206 ndev->open = sh_eth_open; ··· 1217 1218 /* First device only init */ 1219 if (!devno) { 1220 + #if defined(ARSTR) 1221 /* reset device */ 1222 ctrl_outl(ARSTR_ARSTR, ARSTR); 1223 mdelay(1); 1224 + #endif 1225 1226 + #if defined(SH_TSU_ADDR) 1227 /* TSU init (Init only)*/ 1228 sh_eth_tsu_init(SH_TSU_ADDR); 1229 + #endif 1230 } 1231 1232 /* network device register */ ··· 1240 ndev->name, CARDNAME, (u32) ndev->base_addr); 1241 1242 for (i = 0; i < 5; i++) 1243 + printk("%02X:", ndev->dev_addr[i]); 1244 + printk("%02X, IRQ %d.\n", ndev->dev_addr[i], ndev->irq); 1245 1246 platform_set_drvdata(pdev, ndev); 1247
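Note: the probe routine now expects a struct sh_eth_plat_data instead of a bare PHY id cast into the platform_data pointer, since the EDMAC byte order has to travel with the PHY address. A sketch of what a board file might pass; the two field names come from the hunk above, while the device-registration details are assumptions.

	/* Hypothetical board-file fragment */
	static struct sh_eth_plat_data sh_eth_pdata = {
		.phy		= 1,			/* PHY address on the MDIO bus */
		.edmac_endian	= EDMAC_LITTLE_ENDIAN,	/* descriptor byte order */
	};

	static struct platform_device sh_eth_device = {
		.name	= "sh-eth",
		.id	= 0,
		.dev	= {
			.platform_data = &sh_eth_pdata,
		},
	};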
+18 -4
drivers/net/sh_eth.h
··· 30 #include <linux/netdevice.h> 31 #include <linux/phy.h> 32 33 #define CARDNAME "sh-eth" 34 #define TX_TIMEOUT (5*HZ) 35 #define TX_RING_SIZE 64 /* Tx ring size */ ··· 145 146 #else /* CONFIG_CPU_SUBTYPE_SH7763 */ 147 # define RX_OFFSET 2 /* skb offset */ 148 /* Chip base address */ 149 # define SH_TSU_ADDR 0xA7000804 150 # define ARSTR 0xA7000800 151 - 152 /* Chip Registers */ 153 /* E-DMAC */ 154 # define EDMR 0x0000 ··· 387 FCFTR_RFD1 = 0x00000002, FCFTR_RFD0 = 0x00000001, 388 }; 389 #define FIFO_F_D_RFF (FCFTR_RFF2|FCFTR_RFF1|FCFTR_RFF0) 390 #define FIFO_F_D_RFD (FCFTR_RFD2|FCFTR_RFD1|FCFTR_RFD0) 391 392 /* Transfer descriptor bit */ 393 enum TD_STS_BIT { ··· 421 #ifdef CONFIG_CPU_SUBTYPE_SH7763 422 #define ECMR_CHG_DM (ECMR_TRCCM | ECMR_RZPF | ECMR_ZPF |\ 423 ECMR_PFR | ECMR_RXF | ECMR_TXF | ECMR_MCT) 424 #else 425 - #define ECMR_CHG_DM (ECMR_ZPF | ECMR_PFR ECMR_RXF | ECMR_TXF | ECMR_MCT) 426 #endif 427 428 /* ECSR */ ··· 494 495 /* FDR */ 496 enum FIFO_SIZE_BIT { 497 FIFO_SIZE_T = 0x00000700, FIFO_SIZE_R = 0x00000007, 498 }; 499 enum phy_offsets { 500 PHY_CTRL = 0, PHY_STAT = 1, PHY_IDT1 = 2, PHY_IDT2 = 3, ··· 614 #endif 615 u32 addr; /* TD2 */ 616 u32 pad1; /* padding data */ 617 - }; 618 619 /* 620 * The sh ether Rx buffer descriptors. ··· 631 #endif 632 u32 addr; /* RD2 */ 633 u32 pad0; /* padding data */ 634 - }; 635 636 struct sh_eth_private { 637 dma_addr_t rx_desc_dma; ··· 646 u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */ 647 u32 cur_tx, dirty_tx; 648 u32 rx_buf_sz; /* Based on MTU+slack. */ 649 /* MII transceiver section. */ 650 u32 phy_id; /* PHY ID */ 651 struct mii_bus *mii_bus; /* MDIO bus control */
··· 30 #include <linux/netdevice.h> 31 #include <linux/phy.h> 32 33 + #include <asm/sh_eth.h> 34 + 35 #define CARDNAME "sh-eth" 36 #define TX_TIMEOUT (5*HZ) 37 #define TX_RING_SIZE 64 /* Tx ring size */ ··· 143 144 #else /* CONFIG_CPU_SUBTYPE_SH7763 */ 145 # define RX_OFFSET 2 /* skb offset */ 146 + #ifndef CONFIG_CPU_SUBTYPE_SH7619 147 /* Chip base address */ 148 # define SH_TSU_ADDR 0xA7000804 149 # define ARSTR 0xA7000800 150 + #endif 151 /* Chip Registers */ 152 /* E-DMAC */ 153 # define EDMR 0x0000 ··· 384 FCFTR_RFD1 = 0x00000002, FCFTR_RFD0 = 0x00000001, 385 }; 386 #define FIFO_F_D_RFF (FCFTR_RFF2|FCFTR_RFF1|FCFTR_RFF0) 387 + #ifndef CONFIG_CPU_SUBTYPE_SH7619 388 #define FIFO_F_D_RFD (FCFTR_RFD2|FCFTR_RFD1|FCFTR_RFD0) 389 + #else 390 + #define FIFO_F_D_RFD (FCFTR_RFD0) 391 + #endif 392 393 /* Transfer descriptor bit */ 394 enum TD_STS_BIT { ··· 414 #ifdef CONFIG_CPU_SUBTYPE_SH7763 415 #define ECMR_CHG_DM (ECMR_TRCCM | ECMR_RZPF | ECMR_ZPF |\ 416 ECMR_PFR | ECMR_RXF | ECMR_TXF | ECMR_MCT) 417 + #elif CONFIG_CPU_SUBTYPE_SH7619 418 + #define ECMR_CHG_DM (ECMR_ZPF | ECMR_PFR | ECMR_RXF | ECMR_TXF) 419 #else 420 + #define ECMR_CHG_DM (ECMR_ZPF | ECMR_PFR | ECMR_RXF | ECMR_TXF | ECMR_MCT) 421 #endif 422 423 /* ECSR */ ··· 485 486 /* FDR */ 487 enum FIFO_SIZE_BIT { 488 + #ifndef CONFIG_CPU_SUBTYPE_SH7619 489 FIFO_SIZE_T = 0x00000700, FIFO_SIZE_R = 0x00000007, 490 + #else 491 + FIFO_SIZE_T = 0x00000100, FIFO_SIZE_R = 0x00000001, 492 + #endif 493 }; 494 enum phy_offsets { 495 PHY_CTRL = 0, PHY_STAT = 1, PHY_IDT1 = 2, PHY_IDT2 = 3, ··· 601 #endif 602 u32 addr; /* TD2 */ 603 u32 pad1; /* padding data */ 604 + } __attribute__((aligned(2), packed)); 605 606 /* 607 * The sh ether Rx buffer descriptors. ··· 618 #endif 619 u32 addr; /* RD2 */ 620 u32 pad0; /* padding data */ 621 + } __attribute__((aligned(2), packed)); 622 623 struct sh_eth_private { 624 dma_addr_t rx_desc_dma; ··· 633 u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */ 634 u32 cur_tx, dirty_tx; 635 u32 rx_buf_sz; /* Based on MTU+slack. */ 636 + int edmac_endian; 637 /* MII transceiver section. */ 638 u32 phy_id; /* PHY ID */ 639 struct mii_bus *mii_bus; /* MDIO bus control */
+10 -93
drivers/net/sky2.c
··· 275 PC_VAUX_ON | PC_VCC_OFF)); 276 } 277 278 - static void sky2_power_state(struct sky2_hw *hw, pci_power_t state) 279 - { 280 - u16 power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_CTRL); 281 - int pex = pci_find_capability(hw->pdev, PCI_CAP_ID_EXP); 282 - u32 reg; 283 - 284 - sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 285 - 286 - switch (state) { 287 - case PCI_D0: 288 - break; 289 - 290 - case PCI_D1: 291 - power_control |= 1; 292 - break; 293 - 294 - case PCI_D2: 295 - power_control |= 2; 296 - break; 297 - 298 - case PCI_D3hot: 299 - case PCI_D3cold: 300 - power_control |= 3; 301 - if (hw->flags & SKY2_HW_ADV_POWER_CTL) { 302 - /* additional power saving measurements */ 303 - reg = sky2_pci_read32(hw, PCI_DEV_REG4); 304 - 305 - /* set gating core clock for LTSSM in L1 state */ 306 - reg |= P_PEX_LTSSM_STAT(P_PEX_LTSSM_L1_STAT) | 307 - /* auto clock gated scheme controlled by CLKREQ */ 308 - P_ASPM_A1_MODE_SELECT | 309 - /* enable Gate Root Core Clock */ 310 - P_CLK_GATE_ROOT_COR_ENA; 311 - 312 - if (pex && (hw->flags & SKY2_HW_CLK_POWER)) { 313 - /* enable Clock Power Management (CLKREQ) */ 314 - u16 ctrl = sky2_pci_read16(hw, pex + PCI_EXP_DEVCTL); 315 - 316 - ctrl |= PCI_EXP_DEVCTL_AUX_PME; 317 - sky2_pci_write16(hw, pex + PCI_EXP_DEVCTL, ctrl); 318 - } else 319 - /* force CLKREQ Enable in Our4 (A1b only) */ 320 - reg |= P_ASPM_FORCE_CLKREQ_ENA; 321 - 322 - /* set Mask Register for Release/Gate Clock */ 323 - sky2_pci_write32(hw, PCI_DEV_REG5, 324 - P_REL_PCIE_EXIT_L1_ST | P_GAT_PCIE_ENTER_L1_ST | 325 - P_REL_PCIE_RX_EX_IDLE | P_GAT_PCIE_RX_EL_IDLE | 326 - P_REL_GPHY_LINK_UP | P_GAT_GPHY_LINK_DOWN); 327 - } else 328 - sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_CLK_HALT); 329 - 330 - /* put CPU into reset state */ 331 - sky2_write8(hw, B28_Y2_ASF_STAT_CMD, HCU_CCSR_ASF_RESET); 332 - if (hw->chip_id == CHIP_ID_YUKON_SUPR && hw->chip_rev == CHIP_REV_YU_SU_A0) 333 - /* put CPU into halt state */ 334 - sky2_write8(hw, B28_Y2_ASF_STAT_CMD, HCU_CCSR_ASF_HALTED); 335 - 336 - if (pex && !(hw->flags & SKY2_HW_RAM_BUFFER)) { 337 - reg = sky2_pci_read32(hw, PCI_DEV_REG1); 338 - /* force to PCIe L1 */ 339 - reg |= PCI_FORCE_PEX_L1; 340 - sky2_pci_write32(hw, PCI_DEV_REG1, reg); 341 - } 342 - break; 343 - 344 - default: 345 - dev_warn(&hw->pdev->dev, PFX "Invalid power state (%d) ", 346 - state); 347 - return; 348 - } 349 - 350 - power_control |= PCI_PM_CTRL_PME_ENABLE; 351 - /* Finally, set the new power state. */ 352 - sky2_pci_write32(hw, hw->pm_cap + PCI_PM_CTRL, power_control); 353 - 354 - sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 355 - sky2_pci_read32(hw, B0_CTST); 356 - } 357 - 358 static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port) 359 { 360 u16 reg; ··· 629 sky2_pci_write32(hw, PCI_DEV_REG1, reg1); 630 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 631 sky2_pci_read32(hw, PCI_DEV_REG1); 632 } 633 634 static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port) ··· 2780 hw->flags = SKY2_HW_GIGABIT 2781 | SKY2_HW_NEWER_PHY 2782 | SKY2_HW_ADV_POWER_CTL; 2783 - 2784 - /* check for Rev. A1 dev 4200 */ 2785 - if (sky2_read16(hw, Q_ADDR(Q_XA1, Q_WM)) == 0) 2786 - hw->flags |= SKY2_HW_CLK_POWER; 2787 break; 2788 2789 case CHIP_ID_YUKON_EX: ··· 2834 hw->pmd_type = sky2_read8(hw, B2_PMD_TYP); 2835 if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P') 2836 hw->flags |= SKY2_HW_FIBRE_PHY; 2837 - 2838 - hw->pm_cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PM); 2839 - if (hw->pm_cap == 0) { 2840 - dev_err(&hw->pdev->dev, "cannot find PowerManagement capability\n"); 2841 - return -EIO; 2842 - } 2843 2844 hw->ports = 1; 2845 t8 = sky2_read8(hw, B2_Y2_HW_RES); ··· 4427 4428 pci_save_state(pdev); 4429 pci_enable_wake(pdev, pci_choose_state(pdev, state), wol); 4430 - sky2_power_state(hw, pci_choose_state(pdev, state)); 4431 4432 return 0; 4433 } ··· 4440 if (!hw) 4441 return 0; 4442 4443 - sky2_power_state(hw, PCI_D0); 4444 4445 err = pci_restore_state(pdev); 4446 if (err) ··· 4512 pci_enable_wake(pdev, PCI_D3cold, wol); 4513 4514 pci_disable_device(pdev); 4515 - sky2_power_state(hw, PCI_D3hot); 4516 } 4517 4518 static struct pci_driver sky2_driver = {
··· 275 PC_VAUX_ON | PC_VCC_OFF)); 276 } 277 278 static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port) 279 { 280 u16 reg; ··· 709 sky2_pci_write32(hw, PCI_DEV_REG1, reg1); 710 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 711 sky2_pci_read32(hw, PCI_DEV_REG1); 712 + 713 + if (hw->chip_id == CHIP_ID_YUKON_FE) 714 + gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_ANE); 715 + else if (hw->flags & SKY2_HW_ADV_POWER_CTL) 716 + sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR); 717 } 718 719 static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port) ··· 2855 hw->flags = SKY2_HW_GIGABIT 2856 | SKY2_HW_NEWER_PHY 2857 | SKY2_HW_ADV_POWER_CTL; 2858 break; 2859 2860 case CHIP_ID_YUKON_EX: ··· 2913 hw->pmd_type = sky2_read8(hw, B2_PMD_TYP); 2914 if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P') 2915 hw->flags |= SKY2_HW_FIBRE_PHY; 2916 2917 hw->ports = 1; 2918 t8 = sky2_read8(hw, B2_Y2_HW_RES); ··· 4512 4513 pci_save_state(pdev); 4514 pci_enable_wake(pdev, pci_choose_state(pdev, state), wol); 4515 + pci_set_power_state(pdev, pci_choose_state(pdev, state)); 4516 4517 return 0; 4518 } ··· 4525 if (!hw) 4526 return 0; 4527 4528 + err = pci_set_power_state(pdev, PCI_D0); 4529 + if (err) 4530 + goto out; 4531 4532 err = pci_restore_state(pdev); 4533 if (err) ··· 4595 pci_enable_wake(pdev, PCI_D3cold, wol); 4596 4597 pci_disable_device(pdev); 4598 + pci_set_power_state(pdev, PCI_D3hot); 4599 } 4600 4601 static struct pci_driver sky2_driver = {
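The sky2 diff deletes the driver's roughly eighty-line hand-rolled sky2_power_state() -- which looked up the PM capability itself, wrote PCI_PM_CTRL directly and poked chip-specific clock-gating registers -- and calls the PCI core's pci_set_power_state() instead. That in turn lets hw->pm_cap and the SKY2_HW_CLK_POWER flag go away (see the sky2.h hunk that follows), and the PHY power-up path gains explicit re-enable writes (PHY_CT_ANE on Yukon FE, a GPHY reset-clear on ADV_POWER_CTL chips). A sketch of the resulting suspend/resume shape, assuming era-typical driver hooks; the my_* names and WoL flag are stand-ins:

        #include <linux/pci.h>

        static bool my_wol;     /* stand-in for per-device WoL state */

        static int my_suspend(struct pci_dev *pdev, pm_message_t state)
        {
                pci_save_state(pdev);
                pci_enable_wake(pdev, pci_choose_state(pdev, state), my_wol);
                /* The PCI core locates the PM capability and performs the
                 * D-state write that sky2_power_state() used to open-code. */
                return pci_set_power_state(pdev, pci_choose_state(pdev, state));
        }

        static int my_resume(struct pci_dev *pdev)
        {
                int err = pci_set_power_state(pdev, PCI_D0);

                if (err)
                        return err;
                return pci_restore_state(pdev);
        }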
-2
drivers/net/sky2.h
··· 2072 #define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */ 2073 #define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */ 2074 #define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */ 2075 - #define SKY2_HW_CLK_POWER 0x00000100 /* clock power management */ 2076 2077 - int pm_cap; 2078 u8 chip_id; 2079 u8 chip_rev; 2080 u8 pmd_type;
··· 2072 #define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */ 2073 #define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */ 2074 #define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */ 2075 2076 u8 chip_id; 2077 u8 chip_rev; 2078 u8 pmd_type;
+2 -5
drivers/net/sun3_82586.c
··· 425 int len = ((char *) p->iscp - (char *) ptr - 8) / 6; 426 if(num_addrs > len) { 427 printk("%s: switching to promisc. mode\n",dev->name); 428 - dev->flags|=IFF_PROMISC; 429 } 430 } 431 if(dev->flags&IFF_PROMISC) 432 - { 433 - cfg_cmd->promisc=1; 434 - dev->flags|=IFF_PROMISC; 435 - } 436 cfg_cmd->carr_coll = 0x00; 437 438 p->scb->cbl_offset = make16(cfg_cmd);
··· 425 int len = ((char *) p->iscp - (char *) ptr - 8) / 6; 426 if(num_addrs > len) { 427 printk("%s: switching to promisc. mode\n",dev->name); 428 + cfg_cmd->promisc = 1; 429 } 430 } 431 if(dev->flags&IFF_PROMISC) 432 + cfg_cmd->promisc = 1; 433 cfg_cmd->carr_coll = 0x00; 434 435 p->scb->cbl_offset = make16(cfg_cmd);
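The sun3_82586 fix is small but behavioral: on multicast-table overflow the driver used to force IFF_PROMISC into dev->flags, mutating state that belongs to the network core (and that user space reads back); now it only sets the promisc bit in the chip's configure block and leaves dev->flags alone. The usual shape of that fallback in a set_multicast_list-style path, sketched with hypothetical names:

        #include <linux/netdevice.h>

        /* Hypothetical i82586-style configure block; just the bit we need. */
        struct my_cfg_cmd {
                unsigned char promisc;
        };

        static void my_fill_rx_mode(struct net_device *dev,
                                    struct my_cfg_cmd *cfg, int hw_mc_slots)
        {
                cfg->promisc = 0;
                if (dev->flags & IFF_PROMISC)
                        cfg->promisc = 1;       /* user asked for promisc */
                else if (dev->mc_count > hw_mc_slots) {
                        printk(KERN_INFO "%s: multicast table full, enabling "
                               "promiscuous mode in hardware only\n", dev->name);
                        cfg->promisc = 1;       /* dev->flags stays untouched */
                }
        }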
+21
drivers/net/usb/pegasus.c
··· 1285 } 1286 } 1287 1288 static int pegasus_probe(struct usb_interface *intf, 1289 const struct usb_device_id *id) 1290 { ··· 1311 DECLARE_MAC_BUF(mac); 1312 1313 usb_get_dev(dev); 1314 net = alloc_etherdev(sizeof(struct pegasus)); 1315 if (!net) { 1316 dev_err(&intf->dev, "can't allocate %s\n", "device");
··· 1285 } 1286 } 1287 1288 + static int pegasus_blacklisted(struct usb_device *udev) 1289 + { 1290 + struct usb_device_descriptor *udd = &udev->descriptor; 1291 + 1292 + /* Special quirk to keep the driver from handling the Belkin Bluetooth 1293 + * dongle which happens to have the same ID. 1294 + */ 1295 + if ((udd->idVendor == VENDOR_BELKIN && udd->idProduct == 0x0121) && 1296 + (udd->bDeviceClass == USB_CLASS_WIRELESS_CONTROLLER) && 1297 + (udd->bDeviceProtocol == 1)) 1298 + return 1; 1299 + 1300 + return 0; 1301 + } 1302 + 1303 static int pegasus_probe(struct usb_interface *intf, 1304 const struct usb_device_id *id) 1305 { ··· 1296 DECLARE_MAC_BUF(mac); 1297 1298 usb_get_dev(dev); 1299 + 1300 + if (pegasus_blacklisted(dev)) { 1301 + res = -ENODEV; 1302 + goto out; 1303 + } 1304 + 1305 net = alloc_etherdev(sizeof(struct pegasus)); 1306 if (!net) { 1307 dev_err(&intf->dev, "can't allocate %s\n", "device");
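The pegasus hunk works around a USB ID collision: a Belkin Bluetooth dongle reuses the VENDOR_BELKIN:0x0121 vendor/product pair that the pegasus match table claims for an Ethernet adapter, so probe() now rejects the impostor early -- keyed on descriptor fields (device class and protocol) that the Ethernet parts don't share -- and returns -ENODEV so the USB core can offer the device to other drivers. Should more collisions turn up, the open-coded test generalizes to a table; a hedged sketch with illustrative my_* names, the one entry mirroring the quirk above (0x050d being Belkin's USB vendor ID):

        #include <linux/usb.h>

        struct my_usb_quirk {
                u16 vendor, product;
                u8  dev_class, dev_protocol;
        };

        static const struct my_usb_quirk my_blacklist[] = {
                /* Belkin Bluetooth dongle sharing a pegasus VID:PID */
                { 0x050d, 0x0121, USB_CLASS_WIRELESS_CONTROLLER, 1 },
        };

        static bool my_blacklisted(struct usb_device *udev)
        {
                const struct usb_device_descriptor *d = &udev->descriptor;
                int i;

                for (i = 0; i < ARRAY_SIZE(my_blacklist); i++)
                        if (le16_to_cpu(d->idVendor) == my_blacklist[i].vendor &&
                            le16_to_cpu(d->idProduct) == my_blacklist[i].product &&
                            d->bDeviceClass == my_blacklist[i].dev_class &&
                            d->bDeviceProtocol == my_blacklist[i].dev_protocol)
                                return true;
                return false;
        }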
+173 -132
drivers/net/via-velocity.c
··· 662 spin_unlock_irq(&vptr->lock); 663 } 664 665 666 /** 667 * velocity_rx_reset - handle a receive reset ··· 681 struct mac_regs __iomem * regs = vptr->mac_regs; 682 int i; 683 684 - vptr->rd_dirty = vptr->rd_filled = vptr->rd_curr = 0; 685 686 /* 687 * Init state, all RD entries belong to the NIC 688 */ 689 for (i = 0; i < vptr->options.numrx; ++i) 690 - vptr->rd_ring[i].rdesc0.len |= OWNED_BY_NIC; 691 692 writew(vptr->options.numrx, &regs->RBRDU); 693 - writel(vptr->rd_pool_dma, &regs->RDBaseLo); 694 writew(0, &regs->RDIdx); 695 writew(vptr->options.numrx - 1, &regs->RDCSize); 696 } ··· 783 784 vptr->int_mask = INT_MASK_DEF; 785 786 - writel(vptr->rd_pool_dma, &regs->RDBaseLo); 787 writew(vptr->options.numrx - 1, &regs->RDCSize); 788 mac_rx_queue_run(regs); 789 mac_rx_queue_wake(regs); 790 791 writew(vptr->options.numtx - 1, &regs->TDCSize); 792 793 - for (i = 0; i < vptr->num_txq; i++) { 794 - writel(vptr->td_pool_dma[i], &regs->TDBaseLo[i]); 795 mac_tx_queue_run(regs, i); 796 } 797 ··· 1051 1052 vptr->pdev = pdev; 1053 vptr->chip_id = info->chip_id; 1054 - vptr->num_txq = info->txqueue; 1055 vptr->multicast_limit = MCAM_SIZE; 1056 spin_lock_init(&vptr->lock); 1057 INIT_LIST_HEAD(&vptr->list); ··· 1097 } 1098 1099 /** 1100 - * velocity_init_rings - set up DMA rings 1101 * @vptr: Velocity to set up 1102 * 1103 * Allocate PCI mapped DMA rings for the receive and transmit layer 1104 * to use. 1105 */ 1106 1107 - static int velocity_init_rings(struct velocity_info *vptr) 1108 { 1109 struct velocity_opt *opt = &vptr->options; 1110 const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc); ··· 1120 * pci_alloc_consistent() fulfills the requirement for 64 bytes 1121 * alignment 1122 */ 1123 - pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->num_txq + 1124 rx_ring_size, &pool_dma); 1125 if (!pool) { 1126 dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n", ··· 1128 return -ENOMEM; 1129 } 1130 1131 - vptr->rd_ring = pool; 1132 - vptr->rd_pool_dma = pool_dma; 1133 1134 pool += rx_ring_size; 1135 pool_dma += rx_ring_size; 1136 1137 - for (i = 0; i < vptr->num_txq; i++) { 1138 - vptr->td_rings[i] = pool; 1139 - vptr->td_pool_dma[i] = pool_dma; 1140 pool += tx_ring_size; 1141 pool_dma += tx_ring_size; 1142 } ··· 1145 } 1146 1147 /** 1148 - * velocity_free_rings - free PCI ring pointers 1149 * @vptr: Velocity to free from 1150 * 1151 * Clean up the PCI ring buffers allocated to this velocity. 1152 */ 1153 1154 - static void velocity_free_rings(struct velocity_info *vptr) 1155 { 1156 const int size = vptr->options.numrx * sizeof(struct rx_desc) + 1157 - vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq; 1158 1159 - pci_free_consistent(vptr->pdev, size, vptr->rd_ring, vptr->rd_pool_dma); 1160 } 1161 1162 static void velocity_give_many_rx_descs(struct velocity_info *vptr) ··· 1168 * RD number must be equal to 4X per hardware spec 1169 * (programming guide rev 1.20, p.13) 1170 */ 1171 - if (vptr->rd_filled < 4) 1172 return; 1173 1174 wmb(); 1175 1176 - unusable = vptr->rd_filled & 0x0003; 1177 - dirty = vptr->rd_dirty - unusable; 1178 - for (avail = vptr->rd_filled & 0xfffc; avail; avail--) { 1179 dirty = (dirty > 0) ? 
dirty - 1 : vptr->options.numrx - 1; 1180 - vptr->rd_ring[dirty].rdesc0.len |= OWNED_BY_NIC; 1181 } 1182 1183 - writew(vptr->rd_filled & 0xfffc, &regs->RBRDU); 1184 - vptr->rd_filled = unusable; 1185 } 1186 1187 static int velocity_rx_refill(struct velocity_info *vptr) 1188 { 1189 - int dirty = vptr->rd_dirty, done = 0; 1190 1191 do { 1192 - struct rx_desc *rd = vptr->rd_ring + dirty; 1193 1194 /* Fine for an all zero Rx desc at init time as well */ 1195 if (rd->rdesc0.len & OWNED_BY_NIC) 1196 break; 1197 1198 - if (!vptr->rd_info[dirty].skb) { 1199 if (velocity_alloc_rx_buf(vptr, dirty) < 0) 1200 break; 1201 } 1202 done++; 1203 dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0; 1204 - } while (dirty != vptr->rd_curr); 1205 1206 if (done) { 1207 - vptr->rd_dirty = dirty; 1208 - vptr->rd_filled += done; 1209 } 1210 1211 return done; ··· 1213 1214 static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu) 1215 { 1216 - vptr->rx_buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32; 1217 } 1218 1219 /** ··· 1228 { 1229 int ret = -ENOMEM; 1230 1231 - vptr->rd_info = kcalloc(vptr->options.numrx, 1232 sizeof(struct velocity_rd_info), GFP_KERNEL); 1233 - if (!vptr->rd_info) 1234 goto out; 1235 1236 - vptr->rd_filled = vptr->rd_dirty = vptr->rd_curr = 0; 1237 1238 if (velocity_rx_refill(vptr) != vptr->options.numrx) { 1239 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR ··· 1259 { 1260 int i; 1261 1262 - if (vptr->rd_info == NULL) 1263 return; 1264 1265 for (i = 0; i < vptr->options.numrx; i++) { 1266 - struct velocity_rd_info *rd_info = &(vptr->rd_info[i]); 1267 - struct rx_desc *rd = vptr->rd_ring + i; 1268 1269 memset(rd, 0, sizeof(*rd)); 1270 1271 if (!rd_info->skb) 1272 continue; 1273 - pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz, 1274 PCI_DMA_FROMDEVICE); 1275 rd_info->skb_dma = (dma_addr_t) NULL; 1276 ··· 1278 rd_info->skb = NULL; 1279 } 1280 1281 - kfree(vptr->rd_info); 1282 - vptr->rd_info = NULL; 1283 } 1284 1285 /** ··· 1297 unsigned int j; 1298 1299 /* Init the TD ring entries */ 1300 - for (j = 0; j < vptr->num_txq; j++) { 1301 - curr = vptr->td_pool_dma[j]; 1302 1303 - vptr->td_infos[j] = kcalloc(vptr->options.numtx, 1304 sizeof(struct velocity_td_info), 1305 GFP_KERNEL); 1306 - if (!vptr->td_infos[j]) { 1307 while(--j >= 0) 1308 - kfree(vptr->td_infos[j]); 1309 return -ENOMEM; 1310 } 1311 1312 - vptr->td_tail[j] = vptr->td_curr[j] = vptr->td_used[j] = 0; 1313 } 1314 return 0; 1315 } ··· 1321 static void velocity_free_td_ring_entry(struct velocity_info *vptr, 1322 int q, int n) 1323 { 1324 - struct velocity_td_info * td_info = &(vptr->td_infos[q][n]); 1325 int i; 1326 1327 if (td_info == NULL) ··· 1353 { 1354 int i, j; 1355 1356 - for (j = 0; j < vptr->num_txq; j++) { 1357 - if (vptr->td_infos[j] == NULL) 1358 continue; 1359 for (i = 0; i < vptr->options.numtx; i++) { 1360 velocity_free_td_ring_entry(vptr, j, i); 1361 1362 } 1363 - kfree(vptr->td_infos[j]); 1364 - vptr->td_infos[j] = NULL; 1365 } 1366 } 1367 ··· 1378 static int velocity_rx_srv(struct velocity_info *vptr, int status) 1379 { 1380 struct net_device_stats *stats = &vptr->stats; 1381 - int rd_curr = vptr->rd_curr; 1382 int works = 0; 1383 1384 do { 1385 - struct rx_desc *rd = vptr->rd_ring + rd_curr; 1386 1387 - if (!vptr->rd_info[rd_curr].skb) 1388 break; 1389 1390 if (rd->rdesc0.len & OWNED_BY_NIC) ··· 1416 rd_curr = 0; 1417 } while (++works <= 15); 1418 1419 - vptr->rd_curr = rd_curr; 1420 1421 if ((works > 0) && (velocity_rx_refill(vptr) > 0)) 1422 velocity_give_many_rx_descs(vptr); ··· 
1514 { 1515 void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int); 1516 struct net_device_stats *stats = &vptr->stats; 1517 - struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]); 1518 - struct rx_desc *rd = &(vptr->rd_ring[idx]); 1519 int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff; 1520 struct sk_buff *skb; 1521 ··· 1531 skb = rd_info->skb; 1532 1533 pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma, 1534 - vptr->rx_buf_sz, PCI_DMA_FROMDEVICE); 1535 1536 /* 1537 * Drop frame not meeting IEEE 802.3 ··· 1554 rd_info->skb = NULL; 1555 } 1556 1557 - pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz, 1558 PCI_DMA_FROMDEVICE); 1559 1560 skb_put(skb, pkt_len - 4); ··· 1584 1585 static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx) 1586 { 1587 - struct rx_desc *rd = &(vptr->rd_ring[idx]); 1588 - struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]); 1589 1590 - rd_info->skb = netdev_alloc_skb(vptr->dev, vptr->rx_buf_sz + 64); 1591 if (rd_info->skb == NULL) 1592 return -ENOMEM; 1593 ··· 1596 * 64byte alignment. 1597 */ 1598 skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63); 1599 - rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE); 1600 1601 /* 1602 * Fill in the descriptor to match 1603 - */ 1604 1605 *((u32 *) & (rd->rdesc0)) = 0; 1606 - rd->size = cpu_to_le16(vptr->rx_buf_sz) | RX_INTEN; 1607 rd->pa_low = cpu_to_le32(rd_info->skb_dma); 1608 rd->pa_high = 0; 1609 return 0; ··· 1630 struct velocity_td_info *tdinfo; 1631 struct net_device_stats *stats = &vptr->stats; 1632 1633 - for (qnum = 0; qnum < vptr->num_txq; qnum++) { 1634 - for (idx = vptr->td_tail[qnum]; vptr->td_used[qnum] > 0; 1635 idx = (idx + 1) % vptr->options.numtx) { 1636 1637 /* 1638 * Get Tx Descriptor 1639 */ 1640 - td = &(vptr->td_rings[qnum][idx]); 1641 - tdinfo = &(vptr->td_infos[qnum][idx]); 1642 1643 if (td->tdesc0.len & OWNED_BY_NIC) 1644 break; ··· 1662 stats->tx_bytes += tdinfo->skb->len; 1663 } 1664 velocity_free_tx_buf(vptr, tdinfo); 1665 - vptr->td_used[qnum]--; 1666 } 1667 - vptr->td_tail[qnum] = idx; 1668 1669 if (AVAIL_TD(vptr, qnum) < 1) { 1670 full = 1; ··· 1851 tdinfo->skb = NULL; 1852 } 1853 1854 /** 1855 * velocity_open - interface activation callback 1856 * @dev: network layer device to open ··· 1901 struct velocity_info *vptr = netdev_priv(dev); 1902 int ret; 1903 1904 - velocity_set_rxbufsize(vptr, dev->mtu); 1905 - 1906 - ret = velocity_init_rings(vptr); 1907 if (ret < 0) 1908 goto out; 1909 - 1910 - ret = velocity_init_rd_ring(vptr); 1911 - if (ret < 0) 1912 - goto err_free_desc_rings; 1913 - 1914 - ret = velocity_init_td_ring(vptr); 1915 - if (ret < 0) 1916 - goto err_free_rd_ring; 1917 1918 /* Ensure chip is running */ 1919 pci_set_power_state(vptr->pdev, PCI_D0); ··· 1917 if (ret < 0) { 1918 /* Power down the chip */ 1919 pci_set_power_state(vptr->pdev, PCI_D3hot); 1920 - goto err_free_td_ring; 1921 } 1922 1923 mac_enable_int(vptr->mac_regs); ··· 1926 vptr->flags |= VELOCITY_FLAGS_OPENED; 1927 out: 1928 return ret; 1929 - 1930 - err_free_td_ring: 1931 - velocity_free_td_ring(vptr); 1932 - err_free_rd_ring: 1933 - velocity_free_rd_ring(vptr); 1934 - err_free_desc_rings: 1935 - velocity_free_rings(vptr); 1936 - goto out; 1937 } 1938 1939 /** ··· 1941 static int velocity_change_mtu(struct net_device *dev, int new_mtu) 1942 { 1943 struct velocity_info *vptr = netdev_priv(dev); 1944 - unsigned long flags; 1945 - int oldmtu = dev->mtu; 1946 int ret = 0; 1947 1948 if ((new_mtu < 
VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) { 1949 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n", 1950 vptr->dev->name); 1951 - return -EINVAL; 1952 } 1953 1954 if (!netif_running(dev)) { 1955 dev->mtu = new_mtu; 1956 - return 0; 1957 } 1958 1959 - if (new_mtu != oldmtu) { 1960 spin_lock_irqsave(&vptr->lock, flags); 1961 1962 netif_stop_queue(dev); 1963 velocity_shutdown(vptr); 1964 1965 - velocity_free_td_ring(vptr); 1966 - velocity_free_rd_ring(vptr); 1967 1968 dev->mtu = new_mtu; 1969 1970 - velocity_set_rxbufsize(vptr, new_mtu); 1971 - 1972 - ret = velocity_init_rd_ring(vptr); 1973 - if (ret < 0) 1974 - goto out_unlock; 1975 - 1976 - ret = velocity_init_td_ring(vptr); 1977 - if (ret < 0) 1978 - goto out_unlock; 1979 1980 velocity_init_registers(vptr, VELOCITY_INIT_COLD); 1981 1982 mac_enable_int(vptr->mac_regs); 1983 netif_start_queue(dev); 1984 - out_unlock: 1985 - spin_unlock_irqrestore(&vptr->lock, flags); 1986 - } 1987 1988 return ret; 1989 } 1990 ··· 2052 /* Power down the chip */ 2053 pci_set_power_state(vptr->pdev, PCI_D3hot); 2054 2055 - /* Free the resources */ 2056 - velocity_free_td_ring(vptr); 2057 - velocity_free_rd_ring(vptr); 2058 velocity_free_rings(vptr); 2059 2060 vptr->flags &= (~VELOCITY_FLAGS_OPENED); ··· 2097 2098 spin_lock_irqsave(&vptr->lock, flags); 2099 2100 - index = vptr->td_curr[qnum]; 2101 - td_ptr = &(vptr->td_rings[qnum][index]); 2102 - tdinfo = &(vptr->td_infos[qnum][index]); 2103 2104 td_ptr->tdesc1.TCR = TCR0_TIC; 2105 td_ptr->td_buf[0].size &= ~TD_QUEUE; ··· 2112 skb_copy_from_linear_data(skb, tdinfo->buf, skb->len); 2113 tdinfo->skb_dma[0] = tdinfo->buf_dma; 2114 td_ptr->tdesc0.len = len; 2115 - td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); 2116 - td_ptr->td_buf[0].pa_high = 0; 2117 - td_ptr->td_buf[0].size = len; /* queue is 0 anyway */ 2118 tdinfo->nskb_dma = 1; 2119 } else { 2120 int i = 0; ··· 2125 td_ptr->tdesc0.len = len; 2126 2127 /* FIXME: support 48bit DMA later */ 2128 - td_ptr->td_buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma); 2129 - td_ptr->td_buf[i].pa_high = 0; 2130 - td_ptr->td_buf[i].size = cpu_to_le16(skb_headlen(skb)); 2131 2132 for (i = 0; i < nfrags; i++) { 2133 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; ··· 2135 2136 tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE); 2137 2138 - td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]); 2139 - td_ptr->td_buf[i + 1].pa_high = 0; 2140 - td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size); 2141 } 2142 tdinfo->nskb_dma = i - 1; 2143 } ··· 2183 if (prev < 0) 2184 prev = vptr->options.numtx - 1; 2185 td_ptr->tdesc0.len |= OWNED_BY_NIC; 2186 - vptr->td_used[qnum]++; 2187 - vptr->td_curr[qnum] = (index + 1) % vptr->options.numtx; 2188 2189 if (AVAIL_TD(vptr, qnum) < 1) 2190 netif_stop_queue(dev); 2191 2192 - td_ptr = &(vptr->td_rings[qnum][prev]); 2193 td_ptr->td_buf[0].size |= TD_QUEUE; 2194 mac_tx_queue_wake(vptr->mac_regs, qnum); 2195 } ··· 3446 3447 velocity_tx_srv(vptr, 0); 3448 3449 - for (i = 0; i < vptr->num_txq; i++) { 3450 - if (vptr->td_used[i]) { 3451 mac_tx_queue_wake(vptr->mac_regs, i); 3452 } 3453 }
··· 662 spin_unlock_irq(&vptr->lock); 663 } 664 665 + static void velocity_init_rx_ring_indexes(struct velocity_info *vptr) 666 + { 667 + vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0; 668 + } 669 670 /** 671 * velocity_rx_reset - handle a receive reset ··· 677 struct mac_regs __iomem * regs = vptr->mac_regs; 678 int i; 679 680 + velocity_init_rx_ring_indexes(vptr); 681 682 /* 683 * Init state, all RD entries belong to the NIC 684 */ 685 for (i = 0; i < vptr->options.numrx; ++i) 686 + vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC; 687 688 writew(vptr->options.numrx, &regs->RBRDU); 689 + writel(vptr->rx.pool_dma, &regs->RDBaseLo); 690 writew(0, &regs->RDIdx); 691 writew(vptr->options.numrx - 1, &regs->RDCSize); 692 } ··· 779 780 vptr->int_mask = INT_MASK_DEF; 781 782 + writel(vptr->rx.pool_dma, &regs->RDBaseLo); 783 writew(vptr->options.numrx - 1, &regs->RDCSize); 784 mac_rx_queue_run(regs); 785 mac_rx_queue_wake(regs); 786 787 writew(vptr->options.numtx - 1, &regs->TDCSize); 788 789 + for (i = 0; i < vptr->tx.numq; i++) { 790 + writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]); 791 mac_tx_queue_run(regs, i); 792 } 793 ··· 1047 1048 vptr->pdev = pdev; 1049 vptr->chip_id = info->chip_id; 1050 + vptr->tx.numq = info->txqueue; 1051 vptr->multicast_limit = MCAM_SIZE; 1052 spin_lock_init(&vptr->lock); 1053 INIT_LIST_HEAD(&vptr->list); ··· 1093 } 1094 1095 /** 1096 + * velocity_init_dma_rings - set up DMA rings 1097 * @vptr: Velocity to set up 1098 * 1099 * Allocate PCI mapped DMA rings for the receive and transmit layer 1100 * to use. 1101 */ 1102 1103 + static int velocity_init_dma_rings(struct velocity_info *vptr) 1104 { 1105 struct velocity_opt *opt = &vptr->options; 1106 const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc); ··· 1116 * pci_alloc_consistent() fulfills the requirement for 64 bytes 1117 * alignment 1118 */ 1119 + pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq + 1120 rx_ring_size, &pool_dma); 1121 if (!pool) { 1122 dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n", ··· 1124 return -ENOMEM; 1125 } 1126 1127 + vptr->rx.ring = pool; 1128 + vptr->rx.pool_dma = pool_dma; 1129 1130 pool += rx_ring_size; 1131 pool_dma += rx_ring_size; 1132 1133 + for (i = 0; i < vptr->tx.numq; i++) { 1134 + vptr->tx.rings[i] = pool; 1135 + vptr->tx.pool_dma[i] = pool_dma; 1136 pool += tx_ring_size; 1137 pool_dma += tx_ring_size; 1138 } ··· 1141 } 1142 1143 /** 1144 + * velocity_free_dma_rings - free PCI ring pointers 1145 * @vptr: Velocity to free from 1146 * 1147 * Clean up the PCI ring buffers allocated to this velocity. 1148 */ 1149 1150 + static void velocity_free_dma_rings(struct velocity_info *vptr) 1151 { 1152 const int size = vptr->options.numrx * sizeof(struct rx_desc) + 1153 + vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq; 1154 1155 + pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma); 1156 } 1157 1158 static void velocity_give_many_rx_descs(struct velocity_info *vptr) ··· 1164 * RD number must be equal to 4X per hardware spec 1165 * (programming guide rev 1.20, p.13) 1166 */ 1167 + if (vptr->rx.filled < 4) 1168 return; 1169 1170 wmb(); 1171 1172 + unusable = vptr->rx.filled & 0x0003; 1173 + dirty = vptr->rx.dirty - unusable; 1174 + for (avail = vptr->rx.filled & 0xfffc; avail; avail--) { 1175 dirty = (dirty > 0) ? 
dirty - 1 : vptr->options.numrx - 1; 1176 + vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC; 1177 } 1178 1179 + writew(vptr->rx.filled & 0xfffc, &regs->RBRDU); 1180 + vptr->rx.filled = unusable; 1181 } 1182 1183 static int velocity_rx_refill(struct velocity_info *vptr) 1184 { 1185 + int dirty = vptr->rx.dirty, done = 0; 1186 1187 do { 1188 + struct rx_desc *rd = vptr->rx.ring + dirty; 1189 1190 /* Fine for an all zero Rx desc at init time as well */ 1191 if (rd->rdesc0.len & OWNED_BY_NIC) 1192 break; 1193 1194 + if (!vptr->rx.info[dirty].skb) { 1195 if (velocity_alloc_rx_buf(vptr, dirty) < 0) 1196 break; 1197 } 1198 done++; 1199 dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0; 1200 + } while (dirty != vptr->rx.curr); 1201 1202 if (done) { 1203 + vptr->rx.dirty = dirty; 1204 + vptr->rx.filled += done; 1205 } 1206 1207 return done; ··· 1209 1210 static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu) 1211 { 1212 + vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32; 1213 } 1214 1215 /** ··· 1224 { 1225 int ret = -ENOMEM; 1226 1227 + vptr->rx.info = kcalloc(vptr->options.numrx, 1228 sizeof(struct velocity_rd_info), GFP_KERNEL); 1229 + if (!vptr->rx.info) 1230 goto out; 1231 1232 + velocity_init_rx_ring_indexes(vptr); 1233 1234 if (velocity_rx_refill(vptr) != vptr->options.numrx) { 1235 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR ··· 1255 { 1256 int i; 1257 1258 + if (vptr->rx.info == NULL) 1259 return; 1260 1261 for (i = 0; i < vptr->options.numrx; i++) { 1262 + struct velocity_rd_info *rd_info = &(vptr->rx.info[i]); 1263 + struct rx_desc *rd = vptr->rx.ring + i; 1264 1265 memset(rd, 0, sizeof(*rd)); 1266 1267 if (!rd_info->skb) 1268 continue; 1269 + pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz, 1270 PCI_DMA_FROMDEVICE); 1271 rd_info->skb_dma = (dma_addr_t) NULL; 1272 ··· 1274 rd_info->skb = NULL; 1275 } 1276 1277 + kfree(vptr->rx.info); 1278 + vptr->rx.info = NULL; 1279 } 1280 1281 /** ··· 1293 unsigned int j; 1294 1295 /* Init the TD ring entries */ 1296 + for (j = 0; j < vptr->tx.numq; j++) { 1297 + curr = vptr->tx.pool_dma[j]; 1298 1299 + vptr->tx.infos[j] = kcalloc(vptr->options.numtx, 1300 sizeof(struct velocity_td_info), 1301 GFP_KERNEL); 1302 + if (!vptr->tx.infos[j]) { 1303 while(--j >= 0) 1304 + kfree(vptr->tx.infos[j]); 1305 return -ENOMEM; 1306 } 1307 1308 + vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0; 1309 } 1310 return 0; 1311 } ··· 1317 static void velocity_free_td_ring_entry(struct velocity_info *vptr, 1318 int q, int n) 1319 { 1320 + struct velocity_td_info * td_info = &(vptr->tx.infos[q][n]); 1321 int i; 1322 1323 if (td_info == NULL) ··· 1349 { 1350 int i, j; 1351 1352 + for (j = 0; j < vptr->tx.numq; j++) { 1353 + if (vptr->tx.infos[j] == NULL) 1354 continue; 1355 for (i = 0; i < vptr->options.numtx; i++) { 1356 velocity_free_td_ring_entry(vptr, j, i); 1357 1358 } 1359 + kfree(vptr->tx.infos[j]); 1360 + vptr->tx.infos[j] = NULL; 1361 } 1362 } 1363 ··· 1374 static int velocity_rx_srv(struct velocity_info *vptr, int status) 1375 { 1376 struct net_device_stats *stats = &vptr->stats; 1377 + int rd_curr = vptr->rx.curr; 1378 int works = 0; 1379 1380 do { 1381 + struct rx_desc *rd = vptr->rx.ring + rd_curr; 1382 1383 + if (!vptr->rx.info[rd_curr].skb) 1384 break; 1385 1386 if (rd->rdesc0.len & OWNED_BY_NIC) ··· 1412 rd_curr = 0; 1413 } while (++works <= 15); 1414 1415 + vptr->rx.curr = rd_curr; 1416 1417 if ((works > 0) && (velocity_rx_refill(vptr) > 0)) 1418 velocity_give_many_rx_descs(vptr); ··· 1510 { 1511 void 
(*pci_action)(struct pci_dev *, dma_addr_t, size_t, int); 1512 struct net_device_stats *stats = &vptr->stats; 1513 + struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); 1514 + struct rx_desc *rd = &(vptr->rx.ring[idx]); 1515 int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff; 1516 struct sk_buff *skb; 1517 ··· 1527 skb = rd_info->skb; 1528 1529 pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma, 1530 + vptr->rx.buf_sz, PCI_DMA_FROMDEVICE); 1531 1532 /* 1533 * Drop frame not meeting IEEE 802.3 ··· 1550 rd_info->skb = NULL; 1551 } 1552 1553 + pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz, 1554 PCI_DMA_FROMDEVICE); 1555 1556 skb_put(skb, pkt_len - 4); ··· 1580 1581 static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx) 1582 { 1583 + struct rx_desc *rd = &(vptr->rx.ring[idx]); 1584 + struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); 1585 1586 + rd_info->skb = dev_alloc_skb(vptr->rx.buf_sz + 64); 1587 if (rd_info->skb == NULL) 1588 return -ENOMEM; 1589 ··· 1592 * 64byte alignment. 1593 */ 1594 skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63); 1595 + rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, 1596 + vptr->rx.buf_sz, PCI_DMA_FROMDEVICE); 1597 1598 /* 1599 * Fill in the descriptor to match 1600 + */ 1601 1602 *((u32 *) & (rd->rdesc0)) = 0; 1603 + rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN; 1604 rd->pa_low = cpu_to_le32(rd_info->skb_dma); 1605 rd->pa_high = 0; 1606 return 0; ··· 1625 struct velocity_td_info *tdinfo; 1626 struct net_device_stats *stats = &vptr->stats; 1627 1628 + for (qnum = 0; qnum < vptr->tx.numq; qnum++) { 1629 + for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0; 1630 idx = (idx + 1) % vptr->options.numtx) { 1631 1632 /* 1633 * Get Tx Descriptor 1634 */ 1635 + td = &(vptr->tx.rings[qnum][idx]); 1636 + tdinfo = &(vptr->tx.infos[qnum][idx]); 1637 1638 if (td->tdesc0.len & OWNED_BY_NIC) 1639 break; ··· 1657 stats->tx_bytes += tdinfo->skb->len; 1658 } 1659 velocity_free_tx_buf(vptr, tdinfo); 1660 + vptr->tx.used[qnum]--; 1661 } 1662 + vptr->tx.tail[qnum] = idx; 1663 1664 if (AVAIL_TD(vptr, qnum) < 1) { 1665 full = 1; ··· 1846 tdinfo->skb = NULL; 1847 } 1848 1849 + static int velocity_init_rings(struct velocity_info *vptr, int mtu) 1850 + { 1851 + int ret; 1852 + 1853 + velocity_set_rxbufsize(vptr, mtu); 1854 + 1855 + ret = velocity_init_dma_rings(vptr); 1856 + if (ret < 0) 1857 + goto out; 1858 + 1859 + ret = velocity_init_rd_ring(vptr); 1860 + if (ret < 0) 1861 + goto err_free_dma_rings_0; 1862 + 1863 + ret = velocity_init_td_ring(vptr); 1864 + if (ret < 0) 1865 + goto err_free_rd_ring_1; 1866 + out: 1867 + return ret; 1868 + 1869 + err_free_rd_ring_1: 1870 + velocity_free_rd_ring(vptr); 1871 + err_free_dma_rings_0: 1872 + velocity_free_dma_rings(vptr); 1873 + goto out; 1874 + } 1875 + 1876 + static void velocity_free_rings(struct velocity_info *vptr) 1877 + { 1878 + velocity_free_td_ring(vptr); 1879 + velocity_free_rd_ring(vptr); 1880 + velocity_free_dma_rings(vptr); 1881 + } 1882 + 1883 /** 1884 * velocity_open - interface activation callback 1885 * @dev: network layer device to open ··· 1862 struct velocity_info *vptr = netdev_priv(dev); 1863 int ret; 1864 1865 + ret = velocity_init_rings(vptr, dev->mtu); 1866 if (ret < 0) 1867 goto out; 1868 1869 /* Ensure chip is running */ 1870 pci_set_power_state(vptr->pdev, PCI_D0); ··· 1888 if (ret < 0) { 1889 /* Power down the chip */ 1890 pci_set_power_state(vptr->pdev, PCI_D3hot); 1891 + velocity_free_rings(vptr); 1892 + goto out; 1893 } 
1894 1895 mac_enable_int(vptr->mac_regs); ··· 1896 vptr->flags |= VELOCITY_FLAGS_OPENED; 1897 out: 1898 return ret; 1899 } 1900 1901 /** ··· 1919 static int velocity_change_mtu(struct net_device *dev, int new_mtu) 1920 { 1921 struct velocity_info *vptr = netdev_priv(dev); 1922 int ret = 0; 1923 1924 if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) { 1925 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n", 1926 vptr->dev->name); 1927 + ret = -EINVAL; 1928 + goto out_0; 1929 } 1930 1931 if (!netif_running(dev)) { 1932 dev->mtu = new_mtu; 1933 + goto out_0; 1934 } 1935 1936 + if (dev->mtu != new_mtu) { 1937 + struct velocity_info *tmp_vptr; 1938 + unsigned long flags; 1939 + struct rx_info rx; 1940 + struct tx_info tx; 1941 + 1942 + tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL); 1943 + if (!tmp_vptr) { 1944 + ret = -ENOMEM; 1945 + goto out_0; 1946 + } 1947 + 1948 + tmp_vptr->dev = dev; 1949 + tmp_vptr->pdev = vptr->pdev; 1950 + tmp_vptr->options = vptr->options; 1951 + tmp_vptr->tx.numq = vptr->tx.numq; 1952 + 1953 + ret = velocity_init_rings(tmp_vptr, new_mtu); 1954 + if (ret < 0) 1955 + goto out_free_tmp_vptr_1; 1956 + 1957 spin_lock_irqsave(&vptr->lock, flags); 1958 1959 netif_stop_queue(dev); 1960 velocity_shutdown(vptr); 1961 1962 + rx = vptr->rx; 1963 + tx = vptr->tx; 1964 + 1965 + vptr->rx = tmp_vptr->rx; 1966 + vptr->tx = tmp_vptr->tx; 1967 + 1968 + tmp_vptr->rx = rx; 1969 + tmp_vptr->tx = tx; 1970 1971 dev->mtu = new_mtu; 1972 1973 + velocity_give_many_rx_descs(vptr); 1974 1975 velocity_init_registers(vptr, VELOCITY_INIT_COLD); 1976 1977 mac_enable_int(vptr->mac_regs); 1978 netif_start_queue(dev); 1979 1980 + spin_unlock_irqrestore(&vptr->lock, flags); 1981 + 1982 + velocity_free_rings(tmp_vptr); 1983 + 1984 + out_free_tmp_vptr_1: 1985 + kfree(tmp_vptr); 1986 + } 1987 + out_0: 1988 return ret; 1989 } 1990 ··· 2008 /* Power down the chip */ 2009 pci_set_power_state(vptr->pdev, PCI_D3hot); 2010 2011 velocity_free_rings(vptr); 2012 2013 vptr->flags &= (~VELOCITY_FLAGS_OPENED); ··· 2056 2057 spin_lock_irqsave(&vptr->lock, flags); 2058 2059 + index = vptr->tx.curr[qnum]; 2060 + td_ptr = &(vptr->tx.rings[qnum][index]); 2061 + tdinfo = &(vptr->tx.infos[qnum][index]); 2062 2063 td_ptr->tdesc1.TCR = TCR0_TIC; 2064 td_ptr->td_buf[0].size &= ~TD_QUEUE; ··· 2071 skb_copy_from_linear_data(skb, tdinfo->buf, skb->len); 2072 tdinfo->skb_dma[0] = tdinfo->buf_dma; 2073 td_ptr->tdesc0.len = len; 2074 + td_ptr->tx.buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); 2075 + td_ptr->tx.buf[0].pa_high = 0; 2076 + td_ptr->tx.buf[0].size = len; /* queue is 0 anyway */ 2077 tdinfo->nskb_dma = 1; 2078 } else { 2079 int i = 0; ··· 2084 td_ptr->tdesc0.len = len; 2085 2086 /* FIXME: support 48bit DMA later */ 2087 + td_ptr->tx.buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma); 2088 + td_ptr->tx.buf[i].pa_high = 0; 2089 + td_ptr->tx.buf[i].size = cpu_to_le16(skb_headlen(skb)); 2090 2091 for (i = 0; i < nfrags; i++) { 2092 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; ··· 2094 2095 tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE); 2096 2097 + td_ptr->tx.buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]); 2098 + td_ptr->tx.buf[i + 1].pa_high = 0; 2099 + td_ptr->tx.buf[i + 1].size = cpu_to_le16(frag->size); 2100 } 2101 tdinfo->nskb_dma = i - 1; 2102 } ··· 2142 if (prev < 0) 2143 prev = vptr->options.numtx - 1; 2144 td_ptr->tdesc0.len |= OWNED_BY_NIC; 2145 + vptr->tx.used[qnum]++; 2146 + vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx; 2147 
2148 if (AVAIL_TD(vptr, qnum) < 1) 2149 netif_stop_queue(dev); 2150 2151 + td_ptr = &(vptr->tx.rings[qnum][prev]); 2152 td_ptr->td_buf[0].size |= TD_QUEUE; 2153 mac_tx_queue_wake(vptr->mac_regs, qnum); 2154 } ··· 3405 3406 velocity_tx_srv(vptr, 0); 3407 3408 + for (i = 0; i < vptr->tx.numq; i++) { 3409 + if (vptr->tx.used[i]) { 3410 mac_tx_queue_wake(vptr->mac_regs, i); 3411 } 3412 }
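The via-velocity rework is two changes in one. Mechanically, the scattered vptr->rd_*/td_* fields become vptr->rx and vptr->tx containers (see the header hunk that follows), with ring setup and teardown centralized in velocity_init_rings()/velocity_free_rings(), which also simplifies velocity_open()'s error unwinding. Functionally, velocity_change_mtu() no longer frees and rebuilds the rings in place -- where an allocation failure mid-change left the interface wedged -- but allocates a complete second ring set in a temporary velocity_info and swaps the containers under the lock only once everything has succeeded. (One blemish worth flagging: the new xmit path reads td_ptr->tx.buf[] although struct tx_desc, per the header diff, still declares td_buf[]; that looks like the rename reaching one pattern too far and would need a follow-up.) A condensed sketch of the allocate-then-swap pattern, with illustrative my_* types and helpers:

        #include <linux/netdevice.h>
        #include <linux/spinlock.h>

        /* Illustrative stand-ins for the velocity rx/tx containers. */
        struct my_rings { void *rx_ring; void *tx_ring; /* ... */ };
        struct my_priv  { spinlock_t lock; struct my_rings rings; };

        static int my_alloc_rings(struct my_rings *r, int mtu);  /* sketch */
        static void my_free_rings(struct my_rings *r);
        static void my_hw_stop(struct my_priv *p);
        static void my_hw_restart(struct my_priv *p);

        static int my_change_mtu(struct net_device *dev, int new_mtu)
        {
                struct my_priv *p = netdev_priv(dev);
                struct my_rings fresh, old;
                int err;

                err = my_alloc_rings(&fresh, new_mtu);  /* may fail harmlessly */
                if (err)
                        return err;

                spin_lock_irq(&p->lock);
                netif_stop_queue(dev);
                my_hw_stop(p);

                old = p->rings;                 /* struct copies do the swap */
                p->rings = fresh;
                dev->mtu = new_mtu;

                my_hw_restart(p);
                netif_start_queue(dev);
                spin_unlock_irq(&p->lock);

                my_free_rings(&old);            /* old rings freed outside lock */
                return 0;
        }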
+24 -20
drivers/net/via-velocity.h
··· 1494 u32 flags; 1495 }; 1496 1497 struct velocity_info { 1498 struct list_head list; 1499 1500 struct pci_dev *pdev; 1501 struct net_device *dev; 1502 struct net_device_stats stats; 1503 - 1504 - dma_addr_t rd_pool_dma; 1505 - dma_addr_t td_pool_dma[TX_QUEUE_NO]; 1506 1507 struct vlan_group *vlgrp; 1508 u8 ip_addr[4]; ··· 1513 unsigned long memaddr; 1514 unsigned long ioaddr; 1515 1516 - u8 rev_id; 1517 1518 - #define AVAIL_TD(p,q) ((p)->options.numtx-((p)->td_used[(q)])) 1519 1520 - int num_txq; 1521 1522 - volatile int td_used[TX_QUEUE_NO]; 1523 - int td_curr[TX_QUEUE_NO]; 1524 - int td_tail[TX_QUEUE_NO]; 1525 - struct tx_desc *td_rings[TX_QUEUE_NO]; 1526 - struct velocity_td_info *td_infos[TX_QUEUE_NO]; 1527 1528 - int rd_curr; 1529 - int rd_dirty; 1530 - u32 rd_filled; 1531 - struct rx_desc *rd_ring; 1532 - struct velocity_rd_info *rd_info; /* It's an array */ 1533 - 1534 - #define GET_RD_BY_IDX(vptr, idx) (vptr->rd_ring[idx]) 1535 u32 mib_counter[MAX_HW_MIB_COUNTER]; 1536 struct velocity_opt options; 1537 ··· 1543 1544 u32 flags; 1545 1546 - int rx_buf_sz; 1547 u32 mii_status; 1548 u32 phy_id; 1549 int multicast_limit; ··· 1558 struct velocity_context context; 1559 1560 u32 ticks; 1561 - u32 rx_bytes; 1562 1563 }; 1564 1565 /**
··· 1494 u32 flags; 1495 }; 1496 1497 + #define AVAIL_TD(p,q) ((p)->options.numtx-((p)->tx.used[(q)])) 1498 + 1499 + #define GET_RD_BY_IDX(vptr, idx) (vptr->rd_ring[idx]) 1500 + 1501 struct velocity_info { 1502 struct list_head list; 1503 1504 struct pci_dev *pdev; 1505 struct net_device *dev; 1506 struct net_device_stats stats; 1507 1508 struct vlan_group *vlgrp; 1509 u8 ip_addr[4]; ··· 1512 unsigned long memaddr; 1513 unsigned long ioaddr; 1514 1515 + struct tx_info { 1516 + int numq; 1517 1518 + /* FIXME: the locality of the data seems rather poor. */ 1519 + int used[TX_QUEUE_NO]; 1520 + int curr[TX_QUEUE_NO]; 1521 + int tail[TX_QUEUE_NO]; 1522 + struct tx_desc *rings[TX_QUEUE_NO]; 1523 + struct velocity_td_info *infos[TX_QUEUE_NO]; 1524 + dma_addr_t pool_dma[TX_QUEUE_NO]; 1525 + } tx; 1526 1527 + struct rx_info { 1528 + int buf_sz; 1529 1530 + int dirty; 1531 + int curr; 1532 + u32 filled; 1533 + struct rx_desc *ring; 1534 + struct velocity_rd_info *info; /* It's an array */ 1535 + dma_addr_t pool_dma; 1536 + } rx; 1537 1538 u32 mib_counter[MAX_HW_MIB_COUNTER]; 1539 struct velocity_opt options; 1540 ··· 1538 1539 u32 flags; 1540 1541 u32 mii_status; 1542 u32 phy_id; 1543 int multicast_limit; ··· 1554 struct velocity_context context; 1555 1556 u32 ticks; 1557 1558 + u8 rev_id; 1559 }; 1560 1561 /**
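On the header side, everything indexed per TX queue (used/curr/tail counters, ring and info pointers, DMA handles) moves into struct tx_info, and the RX ring state into struct rx_info; because each container is one embedded struct, it can be copied by plain assignment, which is exactly what the new velocity_change_mtu() relies on. Two leftovers are visible in the hunk: GET_RD_BY_IDX was hoisted but still dereferences vptr->rd_ring, a field that no longer exists (harmless only while the macro has no users), and the FIXME is candid about the poor data locality of keeping parallel arrays inside tx_info. A toy illustration of why the grouping pays off, with illustrative fields:

        #include <linux/types.h>

        /* Grouped state swaps via whole-struct assignment instead of a
         * dozen field-by-field copies. */
        struct my_rx_state {
                int curr, dirty;
                u32 filled;
        };

        static inline void my_swap_rx_state(struct my_rx_state *a,
                                            struct my_rx_state *b)
        {
                struct my_rx_state tmp = *a;

                *a = *b;
                *b = tmp;
        }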
+6 -9
drivers/net/wan/Kconfig
··· 25 # There is no way to detect a comtrol sv11 - force it modular for now. 26 config HOSTESS_SV11 27 tristate "Comtrol Hostess SV-11 support" 28 - depends on ISA && m && ISA_DMA_API && INET 29 help 30 Driver for Comtrol Hostess SV-11 network card which 31 operates on low speed synchronous serial links at up to ··· 37 # The COSA/SRP driver has not been tested as non-modular yet. 38 config COSA 39 tristate "COSA/SRP sync serial boards support" 40 - depends on ISA && m && ISA_DMA_API 41 ---help--- 42 Driver for COSA and SRP synchronous serial boards. 43 ··· 61 # 62 config LANMEDIA 63 tristate "LanMedia Corp. SSI/V.35, T1/E1, HSSI, T3 boards" 64 - depends on PCI && VIRT_TO_BUS 65 ---help--- 66 Driver for the following Lan Media family of serial boards: 67 ··· 78 - LMC 5245 board connects directly to a T3 circuit saving the 79 additional external hardware. 80 81 - To change setting such as syncPPP vs Cisco HDLC or clock source you 82 - will need lmcctl. It is available at <ftp://ftp.lanmedia.com/> 83 - (broken link). 84 85 To compile this driver as a module, choose M here: the 86 module will be called lmc. ··· 87 # There is no way to detect a Sealevel board. Force it modular 88 config SEALEVEL_4021 89 tristate "Sealevel Systems 4021 support" 90 - depends on ISA && m && ISA_DMA_API && INET 91 help 92 This is a driver for the Sealevel Systems ACB 56 serial I/O adapter. 93 ··· 152 depends on HDLC 153 help 154 Generic HDLC driver supporting PPP over WAN connections. 155 - 156 - It will be replaced by new PPP implementation in Linux 2.6.26. 157 158 If unsure, say N. 159
··· 25 # There is no way to detect a comtrol sv11 - force it modular for now. 26 config HOSTESS_SV11 27 tristate "Comtrol Hostess SV-11 support" 28 + depends on ISA && m && ISA_DMA_API && INET && HDLC 29 help 30 Driver for Comtrol Hostess SV-11 network card which 31 operates on low speed synchronous serial links at up to ··· 37 # The COSA/SRP driver has not been tested as non-modular yet. 38 config COSA 39 tristate "COSA/SRP sync serial boards support" 40 + depends on ISA && m && ISA_DMA_API && HDLC 41 ---help--- 42 Driver for COSA and SRP synchronous serial boards. 43 ··· 61 # 62 config LANMEDIA 63 tristate "LanMedia Corp. SSI/V.35, T1/E1, HSSI, T3 boards" 64 + depends on PCI && VIRT_TO_BUS && HDLC 65 ---help--- 66 Driver for the following Lan Media family of serial boards: 67 ··· 78 - LMC 5245 board connects directly to a T3 circuit saving the 79 additional external hardware. 80 81 + To change setting such as clock source you will need lmcctl. 82 + It is available at <ftp://ftp.lanmedia.com/> (broken link). 83 84 To compile this driver as a module, choose M here: the 85 module will be called lmc. ··· 88 # There is no way to detect a Sealevel board. Force it modular 89 config SEALEVEL_4021 90 tristate "Sealevel Systems 4021 support" 91 + depends on ISA && m && ISA_DMA_API && INET && HDLC 92 help 93 This is a driver for the Sealevel Systems ACB 56 serial I/O adapter. 94 ··· 153 depends on HDLC 154 help 155 Generic HDLC driver supporting PPP over WAN connections. 156 157 If unsure, say N. 158
+5 -6
drivers/net/wan/Makefile
··· 21 pc300-$(CONFIG_PC300_MLPPP) += pc300_tty.o 22 pc300-objs := $(pc300-y) 23 24 - obj-$(CONFIG_HOSTESS_SV11) += z85230.o syncppp.o hostess_sv11.o 25 - obj-$(CONFIG_SEALEVEL_4021) += z85230.o syncppp.o sealevel.o 26 - obj-$(CONFIG_COSA) += syncppp.o cosa.o 27 - obj-$(CONFIG_FARSYNC) += syncppp.o farsync.o 28 - obj-$(CONFIG_DSCC4) += dscc4.o 29 - obj-$(CONFIG_LANMEDIA) += syncppp.o 30 obj-$(CONFIG_X25_ASY) += x25_asy.o 31 32 obj-$(CONFIG_LANMEDIA) += lmc/
··· 21 pc300-$(CONFIG_PC300_MLPPP) += pc300_tty.o 22 pc300-objs := $(pc300-y) 23 24 + obj-$(CONFIG_HOSTESS_SV11) += z85230.o hostess_sv11.o 25 + obj-$(CONFIG_SEALEVEL_4021) += z85230.o sealevel.o 26 + obj-$(CONFIG_COSA) += cosa.o 27 + obj-$(CONFIG_FARSYNC) += farsync.o 28 + obj-$(CONFIG_DSCC4) += dscc4.o 29 obj-$(CONFIG_X25_ASY) += x25_asy.o 30 31 obj-$(CONFIG_LANMEDIA) += lmc/
+128 -165
drivers/net/wan/cosa.c
··· 2 3 /* 4 * Copyright (C) 1995-1997 Jan "Yenya" Kasprzak <kas@fi.muni.cz> 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by ··· 55 * 56 * The Linux driver (unlike the present *BSD drivers :-) can work even 57 * for the COSA and SRP in one computer and allows each channel to work 58 - * in one of the three modes (character device, Cisco HDLC, Sync PPP). 59 * 60 * AUTHOR 61 * ··· 73 * The Comtrol Hostess SV11 driver by Alan Cox 74 * The Sync PPP/Cisco HDLC layer (syncppp.c) ported to Linux by Alan Cox 75 */ 76 - /* 77 - * 5/25/1999 : Marcelo Tosatti <marcelo@conectiva.com.br> 78 - * fixed a deadlock in cosa_sppp_open 79 - */ 80 - 81 - /* ---------- Headers, macros, data structures ---------- */ 82 83 #include <linux/module.h> 84 #include <linux/kernel.h> ··· 81 #include <linux/fs.h> 82 #include <linux/interrupt.h> 83 #include <linux/delay.h> 84 #include <linux/errno.h> 85 #include <linux/ioport.h> 86 #include <linux/netdevice.h> ··· 89 #include <linux/mutex.h> 90 #include <linux/device.h> 91 #include <linux/smp_lock.h> 92 - 93 - #undef COSA_SLOW_IO /* for testing purposes only */ 94 - 95 #include <asm/io.h> 96 #include <asm/dma.h> 97 #include <asm/byteorder.h> 98 99 - #include <net/syncppp.h> 100 #include "cosa.h" 101 102 /* Maximum length of the identification string. */ ··· 106 /* Per-channel data structure */ 107 108 struct channel_data { 109 - void *if_ptr; /* General purpose pointer (used by SPPP) */ 110 int usage; /* Usage count; >0 for chrdev, -1 for netdev */ 111 int num; /* Number of the channel */ 112 struct cosa_data *cosa; /* Pointer to the per-card structure */ ··· 129 wait_queue_head_t txwaitq, rxwaitq; 130 int tx_status, rx_status; 131 132 - /* SPPP/HDLC device parts */ 133 - struct ppp_device pppdev; 134 struct sk_buff *rx_skb, *tx_skb; 135 - struct net_device_stats stats; 136 }; 137 138 /* cosa->firmware_status bits */ ··· 273 static void cosa_kick(struct cosa_data *cosa); 274 static int cosa_dma_able(struct channel_data *chan, char *buf, int data); 275 276 - /* SPPP/HDLC stuff */ 277 - static void sppp_channel_init(struct channel_data *chan); 278 - static void sppp_channel_delete(struct channel_data *chan); 279 - static int cosa_sppp_open(struct net_device *d); 280 - static int cosa_sppp_close(struct net_device *d); 281 - static void cosa_sppp_timeout(struct net_device *d); 282 - static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *d); 283 - static char *sppp_setup_rx(struct channel_data *channel, int size); 284 - static int sppp_rx_done(struct channel_data *channel); 285 - static int sppp_tx_done(struct channel_data *channel, int size); 286 - static int cosa_sppp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); 287 - static struct net_device_stats *cosa_net_stats(struct net_device *dev); 288 289 /* Character device */ 290 - static void chardev_channel_init(struct channel_data *chan); 291 static char *chrdev_setup_rx(struct channel_data *channel, int size); 292 static int chrdev_rx_done(struct channel_data *channel); 293 static int chrdev_tx_done(struct channel_data *channel, int size); ··· 347 static void debug_status_out(struct cosa_data *cosa, int status); 348 #endif 349 350 - 351 /* ---------- Initialization stuff ---------- */ 352 353 static int __init cosa_init(void) 354 { 355 int i, err = 0; 356 357 - printk(KERN_INFO "cosa v1.08 (c) 1997-2000 Jan Kasprzak <kas@fi.muni.cz>\n"); 358 - #ifdef CONFIG_SMP 359 - printk(KERN_INFO "cosa: SMP found. 
Please mail any success/failure reports to the author.\n"); 360 - #endif 361 if (cosa_major > 0) { 362 if (register_chrdev(cosa_major, "cosa", &cosa_fops)) { 363 printk(KERN_WARNING "cosa: unable to get major %d\n", ··· 392 NULL, "cosa%d", i); 393 err = 0; 394 goto out; 395 - 396 out_chrdev: 397 unregister_chrdev(cosa_major, "cosa"); 398 out: ··· 404 { 405 struct cosa_data *cosa; 406 int i; 407 - printk(KERN_INFO "Unloading the cosa module\n"); 408 409 - for (i=0; i<nr_cards; i++) 410 device_destroy(cosa_class, MKDEV(cosa_major, i)); 411 class_destroy(cosa_class); 412 - for (cosa=cosa_cards; nr_cards--; cosa++) { 413 /* Clean up the per-channel data */ 414 - for (i=0; i<cosa->nchannels; i++) { 415 /* Chardev driver has no alloc'd per-channel data */ 416 - sppp_channel_delete(cosa->chan+i); 417 } 418 /* Clean up the per-card data */ 419 kfree(cosa->chan); 420 kfree(cosa->bouncebuf); 421 free_irq(cosa->irq, cosa); 422 free_dma(cosa->dma); 423 - release_region(cosa->datareg,is_8bit(cosa)?2:4); 424 } 425 unregister_chrdev(cosa_major, "cosa"); 426 } 427 module_exit(cosa_exit); 428 429 - /* 430 - * This function should register all the net devices needed for the 431 - * single channel. 432 - */ 433 - static __inline__ void channel_init(struct channel_data *chan) 434 - { 435 - sprintf(chan->name, "cosa%dc%d", chan->cosa->num, chan->num); 436 - 437 - /* Initialize the chardev data structures */ 438 - chardev_channel_init(chan); 439 - 440 - /* Register the sppp interface */ 441 - sppp_channel_init(chan); 442 - } 443 - 444 static int cosa_probe(int base, int irq, int dma) 445 { 446 struct cosa_data *cosa = cosa_cards+nr_cards; ··· 552 /* Initialize the per-channel data */ 553 cosa->chan = kcalloc(cosa->nchannels, sizeof(struct channel_data), GFP_KERNEL); 554 if (!cosa->chan) { 555 - err = -ENOMEM; 556 goto err_out3; 557 } 558 - for (i=0; i<cosa->nchannels; i++) { 559 - cosa->chan[i].cosa = cosa; 560 - cosa->chan[i].num = i; 561 - channel_init(cosa->chan+i); 562 } 563 564 printk (KERN_INFO "cosa%d: %s (%s at 0x%x irq %d dma %d), %d channels\n", ··· 596 cosa->datareg, cosa->irq, cosa->dma, cosa->nchannels); 597 598 return nr_cards++; 599 err_out3: 600 kfree(cosa->bouncebuf); 601 err_out2: 602 free_dma(cosa->dma); 603 err_out1: 604 free_irq(cosa->irq, cosa); 605 - err_out: 606 release_region(cosa->datareg,is_8bit(cosa)?2:4); 607 printk(KERN_NOTICE "cosa%d: allocating resources failed\n", 608 cosa->num); ··· 617 } 618 619 620 - /*---------- SPPP/HDLC netdevice ---------- */ 621 622 - static void cosa_setup(struct net_device *d) 623 { 624 - d->open = cosa_sppp_open; 625 - d->stop = cosa_sppp_close; 626 - d->hard_start_xmit = cosa_sppp_tx; 627 - d->do_ioctl = cosa_sppp_ioctl; 628 - d->get_stats = cosa_net_stats; 629 - d->tx_timeout = cosa_sppp_timeout; 630 - d->watchdog_timeo = TX_TIMEOUT; 631 } 632 633 - static void sppp_channel_init(struct channel_data *chan) 634 { 635 - struct net_device *d; 636 - chan->if_ptr = &chan->pppdev; 637 - d = alloc_netdev(0, chan->name, cosa_setup); 638 - if (!d) { 639 - printk(KERN_WARNING "%s: alloc_netdev failed.\n", chan->name); 640 - return; 641 - } 642 - chan->pppdev.dev = d; 643 - d->base_addr = chan->cosa->datareg; 644 - d->irq = chan->cosa->irq; 645 - d->dma = chan->cosa->dma; 646 - d->ml_priv = chan; 647 - sppp_attach(&chan->pppdev); 648 - if (register_netdev(d)) { 649 - printk(KERN_WARNING "%s: register_netdev failed.\n", d->name); 650 - sppp_detach(d); 651 - free_netdev(d); 652 - chan->pppdev.dev = NULL; 653 - return; 654 - } 655 - } 656 - 657 - static void 
sppp_channel_delete(struct channel_data *chan) 658 - { 659 - unregister_netdev(chan->pppdev.dev); 660 - sppp_detach(chan->pppdev.dev); 661 - free_netdev(chan->pppdev.dev); 662 - chan->pppdev.dev = NULL; 663 - } 664 - 665 - static int cosa_sppp_open(struct net_device *d) 666 - { 667 - struct channel_data *chan = d->ml_priv; 668 int err; 669 unsigned long flags; 670 ··· 640 } 641 spin_lock_irqsave(&chan->cosa->lock, flags); 642 if (chan->usage != 0) { 643 - printk(KERN_WARNING "%s: sppp_open called with usage count %d\n", 644 - chan->name, chan->usage); 645 spin_unlock_irqrestore(&chan->cosa->lock, flags); 646 return -EBUSY; 647 } 648 - chan->setup_rx = sppp_setup_rx; 649 - chan->tx_done = sppp_tx_done; 650 - chan->rx_done = sppp_rx_done; 651 - chan->usage=-1; 652 chan->cosa->usage++; 653 spin_unlock_irqrestore(&chan->cosa->lock, flags); 654 655 - err = sppp_open(d); 656 if (err) { 657 spin_lock_irqsave(&chan->cosa->lock, flags); 658 - chan->usage=0; 659 chan->cosa->usage--; 660 - 661 spin_unlock_irqrestore(&chan->cosa->lock, flags); 662 return err; 663 } 664 665 - netif_start_queue(d); 666 cosa_enable_rx(chan); 667 return 0; 668 } 669 670 - static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *dev) 671 { 672 - struct channel_data *chan = dev->ml_priv; 673 674 netif_stop_queue(dev); 675 ··· 677 return 0; 678 } 679 680 - static void cosa_sppp_timeout(struct net_device *dev) 681 { 682 - struct channel_data *chan = dev->ml_priv; 683 684 if (test_bit(RXBIT, &chan->cosa->rxtx)) { 685 - chan->stats.rx_errors++; 686 - chan->stats.rx_missed_errors++; 687 } else { 688 - chan->stats.tx_errors++; 689 - chan->stats.tx_aborted_errors++; 690 } 691 cosa_kick(chan->cosa); 692 if (chan->tx_skb) { ··· 696 netif_wake_queue(dev); 697 } 698 699 - static int cosa_sppp_close(struct net_device *d) 700 { 701 - struct channel_data *chan = d->ml_priv; 702 unsigned long flags; 703 704 - netif_stop_queue(d); 705 - sppp_close(d); 706 cosa_disable_rx(chan); 707 spin_lock_irqsave(&chan->cosa->lock, flags); 708 if (chan->rx_skb) { ··· 713 kfree_skb(chan->tx_skb); 714 chan->tx_skb = NULL; 715 } 716 - chan->usage=0; 717 chan->cosa->usage--; 718 spin_unlock_irqrestore(&chan->cosa->lock, flags); 719 return 0; 720 } 721 722 - static char *sppp_setup_rx(struct channel_data *chan, int size) 723 { 724 /* 725 * We can safely fall back to non-dma-able memory, because we have ··· 731 if (chan->rx_skb == NULL) { 732 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet\n", 733 chan->name); 734 - chan->stats.rx_dropped++; 735 return NULL; 736 } 737 - chan->pppdev.dev->trans_start = jiffies; 738 return skb_put(chan->rx_skb, size); 739 } 740 741 - static int sppp_rx_done(struct channel_data *chan) 742 { 743 if (!chan->rx_skb) { 744 printk(KERN_WARNING "%s: rx_done with empty skb!\n", 745 chan->name); 746 - chan->stats.rx_errors++; 747 - chan->stats.rx_frame_errors++; 748 return 0; 749 } 750 - chan->rx_skb->protocol = htons(ETH_P_WAN_PPP); 751 - chan->rx_skb->dev = chan->pppdev.dev; 752 skb_reset_mac_header(chan->rx_skb); 753 - chan->stats.rx_packets++; 754 - chan->stats.rx_bytes += chan->cosa->rxsize; 755 netif_rx(chan->rx_skb); 756 chan->rx_skb = NULL; 757 - chan->pppdev.dev->last_rx = jiffies; 758 return 0; 759 } 760 761 /* ARGSUSED */ 762 - static int sppp_tx_done(struct channel_data *chan, int size) 763 { 764 if (!chan->tx_skb) { 765 printk(KERN_WARNING "%s: tx_done with empty skb!\n", 766 chan->name); 767 - chan->stats.tx_errors++; 768 - chan->stats.tx_aborted_errors++; 769 return 1; 770 } 771 
dev_kfree_skb_irq(chan->tx_skb); 772 chan->tx_skb = NULL; 773 - chan->stats.tx_packets++; 774 - chan->stats.tx_bytes += size; 775 - netif_wake_queue(chan->pppdev.dev); 776 return 1; 777 } 778 779 - static struct net_device_stats *cosa_net_stats(struct net_device *dev) 780 - { 781 - struct channel_data *chan = dev->ml_priv; 782 - return &chan->stats; 783 - } 784 - 785 - 786 /*---------- Character device ---------- */ 787 - 788 - static void chardev_channel_init(struct channel_data *chan) 789 - { 790 - mutex_init(&chan->rlock); 791 - init_MUTEX(&chan->wsem); 792 - } 793 794 static ssize_t cosa_read(struct file *file, 795 char __user *buf, size_t count, loff_t *ppos) ··· 1187 return -ENOIOCTLCMD; 1188 } 1189 1190 - static int cosa_sppp_ioctl(struct net_device *dev, struct ifreq *ifr, 1191 - int cmd) 1192 { 1193 int rv; 1194 - struct channel_data *chan = dev->ml_priv; 1195 - rv = cosa_ioctl_common(chan->cosa, chan, cmd, (unsigned long)ifr->ifr_data); 1196 - if (rv == -ENOIOCTLCMD) { 1197 - return sppp_do_ioctl(dev, ifr, cmd); 1198 - } 1199 - return rv; 1200 } 1201 1202 static int cosa_chardev_ioctl(struct inode *inode, struct file *file,
··· 2 3 /* 4 * Copyright (C) 1995-1997 Jan "Yenya" Kasprzak <kas@fi.muni.cz> 5 + * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl> 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License as published by ··· 54 * 55 * The Linux driver (unlike the present *BSD drivers :-) can work even 56 * for the COSA and SRP in one computer and allows each channel to work 57 + * in one of the two modes (character or network device). 58 * 59 * AUTHOR 60 * ··· 72 * The Comtrol Hostess SV11 driver by Alan Cox 73 * The Sync PPP/Cisco HDLC layer (syncppp.c) ported to Linux by Alan Cox 74 */ 75 76 #include <linux/module.h> 77 #include <linux/kernel.h> ··· 86 #include <linux/fs.h> 87 #include <linux/interrupt.h> 88 #include <linux/delay.h> 89 + #include <linux/hdlc.h> 90 #include <linux/errno.h> 91 #include <linux/ioport.h> 92 #include <linux/netdevice.h> ··· 93 #include <linux/mutex.h> 94 #include <linux/device.h> 95 #include <linux/smp_lock.h> 96 #include <asm/io.h> 97 #include <asm/dma.h> 98 #include <asm/byteorder.h> 99 100 + #undef COSA_SLOW_IO /* for testing purposes only */ 101 + 102 #include "cosa.h" 103 104 /* Maximum length of the identification string. */ ··· 112 /* Per-channel data structure */ 113 114 struct channel_data { 115 int usage; /* Usage count; >0 for chrdev, -1 for netdev */ 116 int num; /* Number of the channel */ 117 struct cosa_data *cosa; /* Pointer to the per-card structure */ ··· 136 wait_queue_head_t txwaitq, rxwaitq; 137 int tx_status, rx_status; 138 139 + /* generic HDLC device parts */ 140 + struct net_device *netdev; 141 struct sk_buff *rx_skb, *tx_skb; 142 }; 143 144 /* cosa->firmware_status bits */ ··· 281 static void cosa_kick(struct cosa_data *cosa); 282 static int cosa_dma_able(struct channel_data *chan, char *buf, int data); 283 284 + /* Network device stuff */ 285 + static int cosa_net_attach(struct net_device *dev, unsigned short encoding, 286 + unsigned short parity); 287 + static int cosa_net_open(struct net_device *d); 288 + static int cosa_net_close(struct net_device *d); 289 + static void cosa_net_timeout(struct net_device *d); 290 + static int cosa_net_tx(struct sk_buff *skb, struct net_device *d); 291 + static char *cosa_net_setup_rx(struct channel_data *channel, int size); 292 + static int cosa_net_rx_done(struct channel_data *channel); 293 + static int cosa_net_tx_done(struct channel_data *channel, int size); 294 + static int cosa_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); 295 296 /* Character device */ 297 static char *chrdev_setup_rx(struct channel_data *channel, int size); 298 static int chrdev_rx_done(struct channel_data *channel); 299 static int chrdev_tx_done(struct channel_data *channel, int size); ··· 357 static void debug_status_out(struct cosa_data *cosa, int status); 358 #endif 359 360 + static inline struct channel_data* dev_to_chan(struct net_device *dev) 361 + { 362 + return (struct channel_data *)dev_to_hdlc(dev)->priv; 363 + } 364 + 365 /* ---------- Initialization stuff ---------- */ 366 367 static int __init cosa_init(void) 368 { 369 int i, err = 0; 370 371 if (cosa_major > 0) { 372 if (register_chrdev(cosa_major, "cosa", &cosa_fops)) { 373 printk(KERN_WARNING "cosa: unable to get major %d\n", ··· 402 NULL, "cosa%d", i); 403 err = 0; 404 goto out; 405 + 406 out_chrdev: 407 unregister_chrdev(cosa_major, "cosa"); 408 out: ··· 414 { 415 struct cosa_data *cosa; 416 int i; 417 418 + for (i = 0; i < nr_cards; i++) 419 
device_destroy(cosa_class, MKDEV(cosa_major, i)); 420 class_destroy(cosa_class); 421 + 422 + for (cosa = cosa_cards; nr_cards--; cosa++) { 423 /* Clean up the per-channel data */ 424 + for (i = 0; i < cosa->nchannels; i++) { 425 /* Chardev driver has no alloc'd per-channel data */ 426 + unregister_hdlc_device(cosa->chan[i].netdev); 427 + free_netdev(cosa->chan[i].netdev); 428 } 429 /* Clean up the per-card data */ 430 kfree(cosa->chan); 431 kfree(cosa->bouncebuf); 432 free_irq(cosa->irq, cosa); 433 free_dma(cosa->dma); 434 + release_region(cosa->datareg, is_8bit(cosa) ? 2 : 4); 435 } 436 unregister_chrdev(cosa_major, "cosa"); 437 } 438 module_exit(cosa_exit); 439 440 static int cosa_probe(int base, int irq, int dma) 441 { 442 struct cosa_data *cosa = cosa_cards+nr_cards; ··· 576 /* Initialize the per-channel data */ 577 cosa->chan = kcalloc(cosa->nchannels, sizeof(struct channel_data), GFP_KERNEL); 578 if (!cosa->chan) { 579 + err = -ENOMEM; 580 goto err_out3; 581 } 582 + 583 + for (i = 0; i < cosa->nchannels; i++) { 584 + struct channel_data *chan = &cosa->chan[i]; 585 + 586 + chan->cosa = cosa; 587 + chan->num = i; 588 + sprintf(chan->name, "cosa%dc%d", chan->cosa->num, i); 589 + 590 + /* Initialize the chardev data structures */ 591 + mutex_init(&chan->rlock); 592 + init_MUTEX(&chan->wsem); 593 + 594 + /* Register the network interface */ 595 + if (!(chan->netdev = alloc_hdlcdev(chan))) { 596 + printk(KERN_WARNING "%s: alloc_hdlcdev failed.\n", 597 + chan->name); 598 + goto err_hdlcdev; 599 + } 600 + dev_to_hdlc(chan->netdev)->attach = cosa_net_attach; 601 + dev_to_hdlc(chan->netdev)->xmit = cosa_net_tx; 602 + chan->netdev->open = cosa_net_open; 603 + chan->netdev->stop = cosa_net_close; 604 + chan->netdev->do_ioctl = cosa_net_ioctl; 605 + chan->netdev->tx_timeout = cosa_net_timeout; 606 + chan->netdev->watchdog_timeo = TX_TIMEOUT; 607 + chan->netdev->base_addr = chan->cosa->datareg; 608 + chan->netdev->irq = chan->cosa->irq; 609 + chan->netdev->dma = chan->cosa->dma; 610 + if (register_hdlc_device(chan->netdev)) { 611 + printk(KERN_WARNING "%s: register_hdlc_device()" 612 + " failed.\n", chan->netdev->name); 613 + free_netdev(chan->netdev); 614 + goto err_hdlcdev; 615 + } 616 } 617 618 printk (KERN_INFO "cosa%d: %s (%s at 0x%x irq %d dma %d), %d channels\n", ··· 590 cosa->datareg, cosa->irq, cosa->dma, cosa->nchannels); 591 592 return nr_cards++; 593 + 594 + err_hdlcdev: 595 + while (i-- > 0) { 596 + unregister_hdlc_device(cosa->chan[i].netdev); 597 + free_netdev(cosa->chan[i].netdev); 598 + } 599 + kfree(cosa->chan); 600 err_out3: 601 kfree(cosa->bouncebuf); 602 err_out2: 603 free_dma(cosa->dma); 604 err_out1: 605 free_irq(cosa->irq, cosa); 606 + err_out: 607 release_region(cosa->datareg,is_8bit(cosa)?2:4); 608 printk(KERN_NOTICE "cosa%d: allocating resources failed\n", 609 cosa->num); ··· 604 } 605 606 607 + /*---------- network device ---------- */ 608 609 + static int cosa_net_attach(struct net_device *dev, unsigned short encoding, 610 + unsigned short parity) 611 { 612 + if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT) 613 + return 0; 614 + return -EINVAL; 615 } 616 617 + static int cosa_net_open(struct net_device *dev) 618 { 619 + struct channel_data *chan = dev_to_chan(dev); 620 int err; 621 unsigned long flags; 622 ··· 662 } 663 spin_lock_irqsave(&chan->cosa->lock, flags); 664 if (chan->usage != 0) { 665 + printk(KERN_WARNING "%s: cosa_net_open called with usage count" 666 + " %d\n", chan->name, chan->usage); 667 spin_unlock_irqrestore(&chan->cosa->lock, 
flags); 668 return -EBUSY; 669 } 670 + chan->setup_rx = cosa_net_setup_rx; 671 + chan->tx_done = cosa_net_tx_done; 672 + chan->rx_done = cosa_net_rx_done; 673 + chan->usage = -1; 674 chan->cosa->usage++; 675 spin_unlock_irqrestore(&chan->cosa->lock, flags); 676 677 + err = hdlc_open(dev); 678 if (err) { 679 spin_lock_irqsave(&chan->cosa->lock, flags); 680 + chan->usage = 0; 681 chan->cosa->usage--; 682 spin_unlock_irqrestore(&chan->cosa->lock, flags); 683 return err; 684 } 685 686 + netif_start_queue(dev); 687 cosa_enable_rx(chan); 688 return 0; 689 } 690 691 + static int cosa_net_tx(struct sk_buff *skb, struct net_device *dev) 692 { 693 + struct channel_data *chan = dev_to_chan(dev); 694 695 netif_stop_queue(dev); 696 ··· 700 return 0; 701 } 702 703 + static void cosa_net_timeout(struct net_device *dev) 704 { 705 + struct channel_data *chan = dev_to_chan(dev); 706 707 if (test_bit(RXBIT, &chan->cosa->rxtx)) { 708 + chan->netdev->stats.rx_errors++; 709 + chan->netdev->stats.rx_missed_errors++; 710 } else { 711 + chan->netdev->stats.tx_errors++; 712 + chan->netdev->stats.tx_aborted_errors++; 713 } 714 cosa_kick(chan->cosa); 715 if (chan->tx_skb) { ··· 719 netif_wake_queue(dev); 720 } 721 722 + static int cosa_net_close(struct net_device *dev) 723 { 724 + struct channel_data *chan = dev_to_chan(dev); 725 unsigned long flags; 726 727 + netif_stop_queue(dev); 728 + hdlc_close(dev); 729 cosa_disable_rx(chan); 730 spin_lock_irqsave(&chan->cosa->lock, flags); 731 if (chan->rx_skb) { ··· 736 kfree_skb(chan->tx_skb); 737 chan->tx_skb = NULL; 738 } 739 + chan->usage = 0; 740 chan->cosa->usage--; 741 spin_unlock_irqrestore(&chan->cosa->lock, flags); 742 return 0; 743 } 744 745 + static char *cosa_net_setup_rx(struct channel_data *chan, int size) 746 { 747 /* 748 * We can safely fall back to non-dma-able memory, because we have ··· 754 if (chan->rx_skb == NULL) { 755 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet\n", 756 chan->name); 757 + chan->netdev->stats.rx_dropped++; 758 return NULL; 759 } 760 + chan->netdev->trans_start = jiffies; 761 return skb_put(chan->rx_skb, size); 762 } 763 764 + static int cosa_net_rx_done(struct channel_data *chan) 765 { 766 if (!chan->rx_skb) { 767 printk(KERN_WARNING "%s: rx_done with empty skb!\n", 768 chan->name); 769 + chan->netdev->stats.rx_errors++; 770 + chan->netdev->stats.rx_frame_errors++; 771 return 0; 772 } 773 + chan->rx_skb->protocol = hdlc_type_trans(chan->rx_skb, chan->netdev); 774 + chan->rx_skb->dev = chan->netdev; 775 skb_reset_mac_header(chan->rx_skb); 776 + chan->netdev->stats.rx_packets++; 777 + chan->netdev->stats.rx_bytes += chan->cosa->rxsize; 778 netif_rx(chan->rx_skb); 779 chan->rx_skb = NULL; 780 + chan->netdev->last_rx = jiffies; 781 return 0; 782 } 783 784 /* ARGSUSED */ 785 + static int cosa_net_tx_done(struct channel_data *chan, int size) 786 { 787 if (!chan->tx_skb) { 788 printk(KERN_WARNING "%s: tx_done with empty skb!\n", 789 chan->name); 790 + chan->netdev->stats.tx_errors++; 791 + chan->netdev->stats.tx_aborted_errors++; 792 return 1; 793 } 794 dev_kfree_skb_irq(chan->tx_skb); 795 chan->tx_skb = NULL; 796 + chan->netdev->stats.tx_packets++; 797 + chan->netdev->stats.tx_bytes += size; 798 + netif_wake_queue(chan->netdev); 799 return 1; 800 } 801 802 /*---------- Character device ---------- */ 803 804 static ssize_t cosa_read(struct file *file, 805 char __user *buf, size_t count, loff_t *ppos) ··· 1223 return -ENOIOCTLCMD; 1224 } 1225 1226 + static int cosa_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 1227 { 
1228 int rv; 1229 + struct channel_data *chan = dev_to_chan(dev); 1230 + rv = cosa_ioctl_common(chan->cosa, chan, cmd, 1231 + (unsigned long)ifr->ifr_data); 1232 + if (rv != -ENOIOCTLCMD) 1233 + return rv; 1234 + return hdlc_ioctl(dev, ifr, cmd); 1235 } 1236 1237 static int cosa_chardev_ioctl(struct inode *inode, struct file *file,
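The cosa changes above are the template this series applies everywhere: allocate the net_device with alloc_hdlcdev(), point the attach and xmit hooks at the driver, register with register_hdlc_device(), and recover per-channel state through dev_to_hdlc(dev)->priv. A minimal sketch of that pattern follows; every mydrv_* name is hypothetical, and only the <linux/hdlc.h> calls are the real API used above.

/* Minimal sketch of the generic HDLC hookup used by cosa above.
 * All mydrv_* names are illustrative, not part of any real driver. */
#include <linux/kernel.h>
#include <linux/hdlc.h>
#include <linux/netdevice.h>

struct mydrv_chan {
	struct net_device *netdev;
	/* ... hardware state ... */
};

/* The protocol layer calls attach() to ask whether the hardware can
 * do the requested line encoding and parity; cosa_net_attach() above
 * accepts exactly this one combination. */
static int mydrv_attach(struct net_device *dev, unsigned short encoding,
			unsigned short parity)
{
	if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
		return 0;
	return -EINVAL;
}

static int mydrv_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mydrv_chan *chan = dev_to_hdlc(dev)->priv;

	/* A real driver queues skb on chan's hardware ring here; the
	 * sketch just logs and drops so it stays self-contained. */
	printk(KERN_DEBUG "%s: %u byte frame on channel %p\n",
	       dev->name, skb->len, chan);
	dev_kfree_skb(skb);
	return 0;
}

static int mydrv_register_channel(struct mydrv_chan *chan)
{
	struct net_device *dev = alloc_hdlcdev(chan);	/* sets hdlc->priv */

	if (!dev)
		return -ENOMEM;
	dev_to_hdlc(dev)->attach = mydrv_attach;
	dev_to_hdlc(dev)->xmit = mydrv_xmit;
	if (register_hdlc_device(dev)) {
		free_netdev(dev);
		return -EIO;
	}
	chan->netdev = dev;
	return 0;
}

The driver's own open and stop handlers must still bracket hdlc_open() and hdlc_close(), exactly as cosa_net_open() and cosa_net_close() do above.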
-1
drivers/net/wan/dscc4.c
··· 103 #include <linux/netdevice.h> 104 #include <linux/skbuff.h> 105 #include <linux/delay.h> 106 - #include <net/syncppp.h> 107 #include <linux/hdlc.h> 108 #include <linux/mutex.h> 109
··· 103 #include <linux/netdevice.h> 104 #include <linux/skbuff.h> 105 #include <linux/delay.h> 106 #include <linux/hdlc.h> 107 #include <linux/mutex.h> 108
+1 -4
drivers/net/wan/farsync.c
··· 47 /* Default parameters for the link 48 */ 49 #define FST_TX_QUEUE_LEN 100 /* At 8Mbps a longer queue length is 50 - * useful, the syncppp module forces 51 - * this down assuming a slower line I 52 - * guess. 53 - */ 54 #define FST_TXQ_DEPTH 16 /* This one is for the buffering 55 * of frames on the way down to the card 56 * so that we can keep the card busy
··· 47 /* Default parameters for the link 48 */ 49 #define FST_TX_QUEUE_LEN 100 /* At 8Mbps a longer queue length is 50 + * useful */ 51 #define FST_TXQ_DEPTH 16 /* This one is for the buffering 52 * of frames on the way down to the card 53 * so that we can keep the card busy
-6
drivers/net/wan/farsync.h
··· 54 55 56 /* Ioctl call command values 57 - * 58 - * The first three private ioctls are used by the sync-PPP module, 59 - * allowing a little room for expansion we start our numbering at 10. 60 */ 61 #define FSTWRITE (SIOCDEVPRIVATE+10) 62 #define FSTCPURESET (SIOCDEVPRIVATE+11) ··· 199 #define J1 7 200 201 /* "proto" */ 202 - #define FST_HDLC 1 /* Cisco compatible HDLC */ 203 - #define FST_PPP 2 /* Sync PPP */ 204 - #define FST_MONITOR 3 /* Monitor only (raw packet reception) */ 205 #define FST_RAW 4 /* Two way raw packets */ 206 #define FST_GEN_HDLC 5 /* Using "Generic HDLC" module */ 207
··· 54 55 56 /* Ioctl call command values 57 */ 58 #define FSTWRITE (SIOCDEVPRIVATE+10) 59 #define FSTCPURESET (SIOCDEVPRIVATE+11) ··· 202 #define J1 7 203 204 /* "proto" */ 205 #define FST_RAW 4 /* Two way raw packets */ 206 #define FST_GEN_HDLC 5 /* Using "Generic HDLC" module */ 207
+15 -16
drivers/net/wan/hdlc.c
··· 22 * - proto->start() and stop() are called with spin_lock_irq held.
23 */
24
25 - #include <linux/module.h>
26 - #include <linux/kernel.h>
27 - #include <linux/slab.h>
28 - #include <linux/poll.h>
29 #include <linux/errno.h>
30 - #include <linux/if_arp.h>
31 - #include <linux/init.h>
32 - #include <linux/skbuff.h>
33 - #include <linux/pkt_sched.h>
34 - #include <linux/inetdevice.h>
35 - #include <linux/lapb.h>
36 - #include <linux/rtnetlink.h>
37 - #include <linux/notifier.h>
38 #include <linux/hdlc.h>
39 #include <net/net_namespace.h>
40
41 ··· 108
109 if (dev->get_stats != hdlc_get_stats)
110 return NOTIFY_DONE; /* not an HDLC device */
111 -
112 if (event != NETDEV_CHANGE)
113 return NOTIFY_DONE; /* Only interested in carrier changes */
114 ··· 356
357
358 static struct notifier_block hdlc_notifier = {
359 - .notifier_call = hdlc_device_event,
360 };
361
362
··· 366
367 printk(KERN_INFO "%s\n", version);
368 if ((result = register_netdevice_notifier(&hdlc_notifier)) != 0)
369 - return result;
370 - dev_add_pack(&hdlc_packet_type);
371 return 0;
372 }
··· 22 * - proto->start() and stop() are called with spin_lock_irq held.
23 */
24
25 #include <linux/errno.h>
26 #include <linux/hdlc.h>
27 + #include <linux/if_arp.h>
28 + #include <linux/inetdevice.h>
29 + #include <linux/init.h>
30 + #include <linux/kernel.h>
31 + #include <linux/module.h>
32 + #include <linux/notifier.h>
33 + #include <linux/pkt_sched.h>
34 + #include <linux/poll.h>
35 + #include <linux/rtnetlink.h>
36 + #include <linux/skbuff.h>
37 + #include <linux/slab.h>
38 #include <net/net_namespace.h>
39
40 ··· 109
110 if (dev->get_stats != hdlc_get_stats)
111 return NOTIFY_DONE; /* not an HDLC device */
112 +
113 if (event != NETDEV_CHANGE)
114 return NOTIFY_DONE; /* Only interested in carrier changes */
115 ··· 357
358
359 static struct notifier_block hdlc_notifier = {
360 + .notifier_call = hdlc_device_event,
361 };
362
363
··· 367
368 printk(KERN_INFO "%s\n", version);
369 if ((result = register_netdevice_notifier(&hdlc_notifier)) != 0)
370 + return result;
371 + dev_add_pack(&hdlc_packet_type);
372 return 0;
373 }
374
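The hunk above also shows how the HDLC core watches every interface in the system: it registers a plain netdevice notifier, identifies its own devices by comparing dev->get_stats against hdlc_get_stats, and reacts only to NETDEV_CHANGE. A hedged sketch of the same notifier shape, with hypothetical my_* names; note that in kernels of this era the callback's ptr argument is the struct net_device itself.

/* Sketch of the netdevice-notifier pattern hdlc_device_event() uses.
 * my_* names are made up; in 2.6-era kernels ptr is the net_device. */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>

static int my_device_event(struct notifier_block *this,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event != NETDEV_CHANGE)
		return NOTIFY_DONE;	/* only carrier changes matter */
	printk(KERN_DEBUG "%s: carrier %s\n", dev->name,
	       netif_carrier_ok(dev) ? "up" : "down");
	return NOTIFY_DONE;
}

static struct notifier_block my_notifier = {
	.notifier_call = my_device_event,
};

/* From module init, mirroring hdlc_init() above:
 *	register_netdevice_notifier(&my_notifier);
 * with the matching unregister_netdevice_notifier() on module exit. */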
+17 -18
drivers/net/wan/hdlc_cisco.c
··· 9 * as published by the Free Software Foundation. 10 */ 11 12 - #include <linux/module.h> 13 - #include <linux/kernel.h> 14 - #include <linux/slab.h> 15 - #include <linux/poll.h> 16 #include <linux/errno.h> 17 - #include <linux/if_arp.h> 18 - #include <linux/init.h> 19 - #include <linux/skbuff.h> 20 - #include <linux/pkt_sched.h> 21 - #include <linux/inetdevice.h> 22 - #include <linux/lapb.h> 23 - #include <linux/rtnetlink.h> 24 #include <linux/hdlc.h> 25 26 #undef DEBUG_HARD_HEADER 27 ··· 67 static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr); 68 69 70 - static inline struct cisco_state * state(hdlc_device *hdlc) 71 { 72 - return(struct cisco_state *)(hdlc->state); 73 } 74 75 ··· 171 data->address != CISCO_UNICAST) 172 goto rx_error; 173 174 - switch(ntohs(data->protocol)) { 175 case CISCO_SYS_INFO: 176 /* Packet is not needed, drop it. */ 177 dev_kfree_skb_any(skb); ··· 335 static const struct header_ops cisco_header_ops = { 336 .create = cisco_hard_header, 337 }; 338 - 339 static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) 340 { 341 cisco_proto __user *cisco_s = ifr->ifr_settings.ifs_ifsu.cisco; ··· 358 return 0; 359 360 case IF_PROTO_CISCO: 361 - if(!capable(CAP_NET_ADMIN)) 362 return -EPERM; 363 364 - if(dev->flags & IFF_UP) 365 return -EBUSY; 366 367 if (copy_from_user(&new_settings, cisco_s, size)) ··· 371 new_settings.timeout < 2) 372 return -EINVAL; 373 374 - result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT); 375 if (result) 376 return result; 377
··· 9 * as published by the Free Software Foundation. 10 */ 11 12 #include <linux/errno.h> 13 #include <linux/hdlc.h> 14 + #include <linux/if_arp.h> 15 + #include <linux/inetdevice.h> 16 + #include <linux/init.h> 17 + #include <linux/kernel.h> 18 + #include <linux/module.h> 19 + #include <linux/pkt_sched.h> 20 + #include <linux/poll.h> 21 + #include <linux/rtnetlink.h> 22 + #include <linux/skbuff.h> 23 + #include <linux/slab.h> 24 25 #undef DEBUG_HARD_HEADER 26 ··· 68 static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr); 69 70 71 + static inline struct cisco_state* state(hdlc_device *hdlc) 72 { 73 + return (struct cisco_state *)hdlc->state; 74 } 75 76 ··· 172 data->address != CISCO_UNICAST) 173 goto rx_error; 174 175 + switch (ntohs(data->protocol)) { 176 case CISCO_SYS_INFO: 177 /* Packet is not needed, drop it. */ 178 dev_kfree_skb_any(skb); ··· 336 static const struct header_ops cisco_header_ops = { 337 .create = cisco_hard_header, 338 }; 339 + 340 static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) 341 { 342 cisco_proto __user *cisco_s = ifr->ifr_settings.ifs_ifsu.cisco; ··· 359 return 0; 360 361 case IF_PROTO_CISCO: 362 + if (!capable(CAP_NET_ADMIN)) 363 return -EPERM; 364 365 + if (dev->flags & IFF_UP) 366 return -EBUSY; 367 368 if (copy_from_user(&new_settings, cisco_s, size)) ··· 372 new_settings.timeout < 2) 373 return -EINVAL; 374 375 + result = hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT); 376 if (result) 377 return result; 378
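cisco_ioctl() above is the kernel half of the SIOCWANDEV handshake by which userspace selects a protocol; the sethdlc utility normally issues the call. A hedged userspace sketch of the same request, with the field usage inferred from the handler above:

/* Hedged userspace sketch: put an interface into Cisco HDLC mode via
 * the SIOCWANDEV ioctl handled by cisco_ioctl().  The interval and
 * timeout values are examples only. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/sockios.h>
#include <linux/hdlc.h>

int set_cisco(int sock, const char *ifname)
{
	struct ifreq ifr;
	cisco_proto cisco = { .interval = 10, .timeout = 25 };

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_settings.type = IF_PROTO_CISCO;
	ifr.ifr_settings.size = sizeof(cisco);
	ifr.ifr_settings.ifs_ifsu.cisco = &cisco;
	return ioctl(sock, SIOCWANDEV, &ifr);	/* 0 on success */
}

Any ordinary socket works as the ioctl target, e.g. socket(AF_INET, SOCK_DGRAM, 0), and the interface must be down while the protocol changes: the handler above returns -EBUSY when IFF_UP is set.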
+11 -12
drivers/net/wan/hdlc_fr.c
··· 33 34 */ 35 36 - #include <linux/module.h> 37 - #include <linux/kernel.h> 38 - #include <linux/slab.h> 39 - #include <linux/poll.h> 40 #include <linux/errno.h> 41 - #include <linux/if_arp.h> 42 - #include <linux/init.h> 43 - #include <linux/skbuff.h> 44 - #include <linux/pkt_sched.h> 45 - #include <linux/inetdevice.h> 46 - #include <linux/lapb.h> 47 - #include <linux/rtnetlink.h> 48 #include <linux/etherdevice.h> 49 #include <linux/hdlc.h> 50 51 #undef DEBUG_PKT 52 #undef DEBUG_ECN ··· 95 unsigned ea1: 1; 96 unsigned cr: 1; 97 unsigned dlcih: 6; 98 - 99 unsigned ea2: 1; 100 unsigned de: 1; 101 unsigned becn: 1;
··· 33 34 */ 35 36 #include <linux/errno.h> 37 #include <linux/etherdevice.h> 38 #include <linux/hdlc.h> 39 + #include <linux/if_arp.h> 40 + #include <linux/inetdevice.h> 41 + #include <linux/init.h> 42 + #include <linux/kernel.h> 43 + #include <linux/module.h> 44 + #include <linux/pkt_sched.h> 45 + #include <linux/poll.h> 46 + #include <linux/rtnetlink.h> 47 + #include <linux/skbuff.h> 48 + #include <linux/slab.h> 49 50 #undef DEBUG_PKT 51 #undef DEBUG_ECN ··· 96 unsigned ea1: 1; 97 unsigned cr: 1; 98 unsigned dlcih: 6; 99 + 100 unsigned ea2: 1; 101 unsigned de: 1; 102 unsigned becn: 1;
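The bitfield struct at the end of this hunk maps the two-byte Q.922 frame relay address. Because bitfield ordering is compiler- and endian-dependent (hence the per-endianness variants in the driver), the same fields are often extracted arithmetically; a sketch of that equivalent, following the standard Q.922 layout rather than quoting this driver:

/* Q.922 address: byte 0 holds DLCI bits 9..4 (in bits 7..2) plus C/R
 * and EA0; byte 1 holds DLCI bits 3..0 (in bits 7..4) plus FECN,
 * BECN, DE and EA1. */
static inline unsigned int q922_dlci(const unsigned char *hdr)
{
	return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4);
}

static inline void q922_set_dlci(unsigned char *hdr, unsigned int dlci)
{
	hdr[0] = (dlci >> 2) & 0xFC;		/* C/R and EA0 left clear */
	hdr[1] = ((dlci << 4) & 0xF0) | 0x01;	/* EA1 terminates the address */
}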
+10 -11
drivers/net/wan/hdlc_ppp.c
··· 9 * as published by the Free Software Foundation. 10 */ 11 12 - #include <linux/module.h> 13 - #include <linux/kernel.h> 14 - #include <linux/slab.h> 15 - #include <linux/poll.h> 16 #include <linux/errno.h> 17 - #include <linux/if_arp.h> 18 - #include <linux/init.h> 19 - #include <linux/skbuff.h> 20 - #include <linux/pkt_sched.h> 21 - #include <linux/inetdevice.h> 22 - #include <linux/lapb.h> 23 - #include <linux/rtnetlink.h> 24 #include <linux/hdlc.h> 25 #include <net/syncppp.h> 26 27 struct ppp_state {
··· 9 * as published by the Free Software Foundation. 10 */ 11 12 #include <linux/errno.h> 13 #include <linux/hdlc.h> 14 + #include <linux/if_arp.h> 15 + #include <linux/inetdevice.h> 16 + #include <linux/init.h> 17 + #include <linux/kernel.h> 18 + #include <linux/module.h> 19 + #include <linux/pkt_sched.h> 20 + #include <linux/poll.h> 21 + #include <linux/rtnetlink.h> 22 + #include <linux/skbuff.h> 23 + #include <linux/slab.h> 24 #include <net/syncppp.h> 25 26 struct ppp_state {
+10 -11
drivers/net/wan/hdlc_raw.c
··· 9 * as published by the Free Software Foundation. 10 */ 11 12 - #include <linux/module.h> 13 - #include <linux/kernel.h> 14 - #include <linux/slab.h> 15 - #include <linux/poll.h> 16 #include <linux/errno.h> 17 - #include <linux/if_arp.h> 18 - #include <linux/init.h> 19 - #include <linux/skbuff.h> 20 - #include <linux/pkt_sched.h> 21 - #include <linux/inetdevice.h> 22 - #include <linux/lapb.h> 23 - #include <linux/rtnetlink.h> 24 #include <linux/hdlc.h> 25 26 27 static int raw_ioctl(struct net_device *dev, struct ifreq *ifr);
··· 9 * as published by the Free Software Foundation. 10 */ 11 12 #include <linux/errno.h> 13 #include <linux/hdlc.h> 14 + #include <linux/if_arp.h> 15 + #include <linux/inetdevice.h> 16 + #include <linux/init.h> 17 + #include <linux/kernel.h> 18 + #include <linux/module.h> 19 + #include <linux/pkt_sched.h> 20 + #include <linux/poll.h> 21 + #include <linux/rtnetlink.h> 22 + #include <linux/skbuff.h> 23 + #include <linux/slab.h> 24 25 26 static int raw_ioctl(struct net_device *dev, struct ifreq *ifr);
+10 -11
drivers/net/wan/hdlc_raw_eth.c
··· 9 * as published by the Free Software Foundation. 10 */ 11 12 - #include <linux/module.h> 13 - #include <linux/kernel.h> 14 - #include <linux/slab.h> 15 - #include <linux/poll.h> 16 #include <linux/errno.h> 17 - #include <linux/if_arp.h> 18 - #include <linux/init.h> 19 - #include <linux/skbuff.h> 20 - #include <linux/pkt_sched.h> 21 - #include <linux/inetdevice.h> 22 - #include <linux/lapb.h> 23 - #include <linux/rtnetlink.h> 24 #include <linux/etherdevice.h> 25 #include <linux/hdlc.h> 26 27 static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr); 28
··· 9 * as published by the Free Software Foundation. 10 */ 11 12 #include <linux/errno.h> 13 #include <linux/etherdevice.h> 14 #include <linux/hdlc.h> 15 + #include <linux/if_arp.h> 16 + #include <linux/inetdevice.h> 17 + #include <linux/init.h> 18 + #include <linux/kernel.h> 19 + #include <linux/module.h> 20 + #include <linux/pkt_sched.h> 21 + #include <linux/poll.h> 22 + #include <linux/rtnetlink.h> 23 + #include <linux/skbuff.h> 24 + #include <linux/slab.h> 25 26 static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr); 27
+11 -12
drivers/net/wan/hdlc_x25.c
··· 9 * as published by the Free Software Foundation. 10 */ 11 12 - #include <linux/module.h> 13 - #include <linux/kernel.h> 14 - #include <linux/slab.h> 15 - #include <linux/poll.h> 16 #include <linux/errno.h> 17 - #include <linux/if_arp.h> 18 - #include <linux/init.h> 19 - #include <linux/skbuff.h> 20 - #include <linux/pkt_sched.h> 21 - #include <linux/inetdevice.h> 22 - #include <linux/lapb.h> 23 - #include <linux/rtnetlink.h> 24 #include <linux/hdlc.h> 25 - 26 #include <net/x25device.h> 27 28 static int x25_ioctl(struct net_device *dev, struct ifreq *ifr);
··· 9 * as published by the Free Software Foundation. 10 */ 11 12 #include <linux/errno.h> 13 #include <linux/hdlc.h> 14 + #include <linux/if_arp.h> 15 + #include <linux/inetdevice.h> 16 + #include <linux/init.h> 17 + #include <linux/kernel.h> 18 + #include <linux/lapb.h> 19 + #include <linux/module.h> 20 + #include <linux/pkt_sched.h> 21 + #include <linux/poll.h> 22 + #include <linux/rtnetlink.h> 23 + #include <linux/skbuff.h> 24 + #include <linux/slab.h> 25 #include <net/x25device.h> 26 27 static int x25_ioctl(struct net_device *dev, struct ifreq *ifr);
+153 -227
drivers/net/wan/hostess_sv11.c
··· 16 * touching control registers. 17 * 18 * Port B isnt wired (why - beats me) 19 */ 20 21 #include <linux/module.h> ··· 28 #include <linux/netdevice.h> 29 #include <linux/if_arp.h> 30 #include <linux/delay.h> 31 #include <linux/ioport.h> 32 #include <net/arp.h> 33 ··· 36 #include <asm/io.h> 37 #include <asm/dma.h> 38 #include <asm/byteorder.h> 39 - #include <net/syncppp.h> 40 #include "z85230.h" 41 42 static int dma; 43 - 44 - struct sv11_device 45 - { 46 - void *if_ptr; /* General purpose pointer (used by SPPP) */ 47 - struct z8530_dev sync; 48 - struct ppp_device netdev; 49 - }; 50 51 /* 52 * Network driver support routines 53 */ 54 55 /* 56 - * Frame receive. Simple for our card as we do sync ppp and there 57 * is no funny garbage involved 58 */ 59 - 60 static void hostess_input(struct z8530_channel *c, struct sk_buff *skb) 61 { 62 /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ 63 - skb_trim(skb, skb->len-2); 64 - skb->protocol=__constant_htons(ETH_P_WAN_PPP); 65 skb_reset_mac_header(skb); 66 - skb->dev=c->netdevice; 67 /* 68 * Send it to the PPP layer. We don't have time to process 69 * it right now. ··· 68 netif_rx(skb); 69 c->netdevice->last_rx = jiffies; 70 } 71 - 72 /* 73 * We've been placed in the UP state 74 - */ 75 - 76 static int hostess_open(struct net_device *d) 77 { 78 - struct sv11_device *sv11=d->ml_priv; 79 int err = -1; 80 - 81 /* 82 * Link layer up 83 */ 84 - switch(dma) 85 - { 86 case 0: 87 - err=z8530_sync_open(d, &sv11->sync.chanA); 88 break; 89 case 1: 90 - err=z8530_sync_dma_open(d, &sv11->sync.chanA); 91 break; 92 case 2: 93 - err=z8530_sync_txdma_open(d, &sv11->sync.chanA); 94 break; 95 } 96 - 97 - if(err) 98 return err; 99 - /* 100 - * Begin PPP 101 - */ 102 - err=sppp_open(d); 103 - if(err) 104 - { 105 - switch(dma) 106 - { 107 case 0: 108 - z8530_sync_close(d, &sv11->sync.chanA); 109 break; 110 case 1: 111 - z8530_sync_dma_close(d, &sv11->sync.chanA); 112 break; 113 case 2: 114 - z8530_sync_txdma_close(d, &sv11->sync.chanA); 115 break; 116 - } 117 return err; 118 } 119 - sv11->sync.chanA.rx_function=hostess_input; 120 - 121 /* 122 * Go go go 123 */ ··· 123 124 static int hostess_close(struct net_device *d) 125 { 126 - struct sv11_device *sv11=d->ml_priv; 127 /* 128 * Discard new frames 129 */ 130 - sv11->sync.chanA.rx_function=z8530_null_rx; 131 - /* 132 - * PPP off 133 - */ 134 - sppp_close(d); 135 - /* 136 - * Link layer down 137 - */ 138 netif_stop_queue(d); 139 - 140 - switch(dma) 141 - { 142 case 0: 143 - z8530_sync_close(d, &sv11->sync.chanA); 144 break; 145 case 1: 146 - z8530_sync_dma_close(d, &sv11->sync.chanA); 147 break; 148 case 2: 149 - z8530_sync_txdma_close(d, &sv11->sync.chanA); 150 break; 151 } 152 return 0; ··· 148 149 static int hostess_ioctl(struct net_device *d, struct ifreq *ifr, int cmd) 150 { 151 - /* struct sv11_device *sv11=d->ml_priv; 152 - z8530_ioctl(d,&sv11->sync.chanA,ifr,cmd) */ 153 - return sppp_do_ioctl(d, ifr,cmd); 154 - } 155 - 156 - static struct net_device_stats *hostess_get_stats(struct net_device *d) 157 - { 158 - struct sv11_device *sv11=d->ml_priv; 159 - if(sv11) 160 - return z8530_get_stats(&sv11->sync.chanA); 161 - else 162 - return NULL; 163 } 164 165 /* 166 - * Passed PPP frames, fire them downwind. 
167 */ 168 - 169 static int hostess_queue_xmit(struct sk_buff *skb, struct net_device *d) 170 { 171 - struct sv11_device *sv11=d->ml_priv; 172 - return z8530_queue_xmit(&sv11->sync.chanA, skb); 173 } 174 175 - static int hostess_neigh_setup(struct neighbour *n) 176 { 177 - if (n->nud_state == NUD_NONE) { 178 - n->ops = &arp_broken_ops; 179 - n->output = n->ops->output; 180 - } 181 - return 0; 182 - } 183 - 184 - static int hostess_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p) 185 - { 186 - if (p->tbl->family == AF_INET) { 187 - p->neigh_setup = hostess_neigh_setup; 188 - p->ucast_probes = 0; 189 - p->mcast_probes = 0; 190 - } 191 - return 0; 192 - } 193 - 194 - static void sv11_setup(struct net_device *dev) 195 - { 196 - dev->open = hostess_open; 197 - dev->stop = hostess_close; 198 - dev->hard_start_xmit = hostess_queue_xmit; 199 - dev->get_stats = hostess_get_stats; 200 - dev->do_ioctl = hostess_ioctl; 201 - dev->neigh_setup = hostess_neigh_setup_dev; 202 } 203 204 /* 205 * Description block for a Comtrol Hostess SV11 card 206 */ 207 - 208 - static struct sv11_device *sv11_init(int iobase, int irq) 209 { 210 - struct z8530_dev *dev; 211 - struct sv11_device *sv; 212 - 213 /* 214 * Get the needed I/O space 215 */ 216 - 217 - if(!request_region(iobase, 8, "Comtrol SV11")) 218 - { 219 - printk(KERN_WARNING "hostess: I/O 0x%X already in use.\n", iobase); 220 return NULL; 221 } 222 - 223 - sv = kzalloc(sizeof(struct sv11_device), GFP_KERNEL); 224 - if(!sv) 225 - goto fail3; 226 - 227 - sv->if_ptr=&sv->netdev; 228 - 229 - sv->netdev.dev = alloc_netdev(0, "hdlc%d", sv11_setup); 230 - if(!sv->netdev.dev) 231 - goto fail2; 232 233 - dev=&sv->sync; 234 - 235 /* 236 * Stuff in the I/O addressing 237 */ 238 - 239 - dev->active = 0; 240 - 241 - dev->chanA.ctrlio=iobase+1; 242 - dev->chanA.dataio=iobase+3; 243 - dev->chanB.ctrlio=-1; 244 - dev->chanB.dataio=-1; 245 - dev->chanA.irqs=&z8530_nop; 246 - dev->chanB.irqs=&z8530_nop; 247 - 248 - outb(0, iobase+4); /* DMA off */ 249 - 250 /* We want a fast IRQ for this device. Actually we'd like an even faster 251 IRQ ;) - This is one driver RtLinux is made for */ 252 - 253 - if(request_irq(irq, &z8530_interrupt, IRQF_DISABLED, "Hostess SV11", dev)<0) 254 - { 255 printk(KERN_WARNING "hostess: IRQ %d already in use.\n", irq); 256 - goto fail1; 257 } 258 - 259 - dev->irq=irq; 260 - dev->chanA.private=sv; 261 - dev->chanA.netdevice=sv->netdev.dev; 262 - dev->chanA.dev=dev; 263 - dev->chanB.dev=dev; 264 - 265 - if(dma) 266 - { 267 /* 268 * You can have DMA off or 1 and 3 thats the lot 269 * on the Comtrol. 
270 */ 271 - dev->chanA.txdma=3; 272 - dev->chanA.rxdma=1; 273 - outb(0x03|0x08, iobase+4); /* DMA on */ 274 - if(request_dma(dev->chanA.txdma, "Hostess SV/11 (TX)")!=0) 275 - goto fail; 276 - 277 - if(dma==1) 278 - { 279 - if(request_dma(dev->chanA.rxdma, "Hostess SV/11 (RX)")!=0) 280 - goto dmafail; 281 - } 282 } 283 284 /* Kill our private IRQ line the hostess can end up chattering 285 until the configuration is set */ 286 disable_irq(irq); 287 - 288 /* 289 * Begin normal initialise 290 */ 291 - 292 - if(z8530_init(dev)!=0) 293 - { 294 printk(KERN_ERR "Z8530 series device not found.\n"); 295 enable_irq(irq); 296 - goto dmafail2; 297 } 298 - z8530_channel_load(&dev->chanB, z8530_dead_port); 299 - if(dev->type==Z85C30) 300 - z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream); 301 else 302 - z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230); 303 - 304 enable_irq(irq); 305 - 306 307 /* 308 * Now we can take the IRQ 309 */ 310 - if(dev_alloc_name(dev->chanA.netdevice,"hdlc%d")>=0) 311 - { 312 - struct net_device *d=dev->chanA.netdevice; 313 314 - /* 315 - * Initialise the PPP components 316 - */ 317 - d->ml_priv = sv; 318 - sppp_attach(&sv->netdev); 319 - 320 - /* 321 - * Local fields 322 - */ 323 - 324 - d->base_addr = iobase; 325 - d->irq = irq; 326 - 327 - if(register_netdev(d)) 328 - { 329 - printk(KERN_ERR "%s: unable to register device.\n", 330 - d->name); 331 - sppp_detach(d); 332 - goto dmafail2; 333 - } 334 335 - z8530_describe(dev, "I/O", iobase); 336 - dev->active=1; 337 - return sv; 338 } 339 - dmafail2: 340 - if(dma==1) 341 - free_dma(dev->chanA.rxdma); 342 - dmafail: 343 - if(dma) 344 - free_dma(dev->chanA.txdma); 345 - fail: 346 - free_irq(irq, dev); 347 - fail1: 348 - free_netdev(sv->netdev.dev); 349 - fail2: 350 kfree(sv); 351 - fail3: 352 - release_region(iobase,8); 353 return NULL; 354 } 355 356 - static void sv11_shutdown(struct sv11_device *dev) 357 { 358 - sppp_detach(dev->netdev.dev); 359 - unregister_netdev(dev->netdev.dev); 360 - z8530_shutdown(&dev->sync); 361 - free_irq(dev->sync.irq, dev); 362 - if(dma) 363 - { 364 - if(dma==1) 365 - free_dma(dev->sync.chanA.rxdma); 366 - free_dma(dev->sync.chanA.txdma); 367 } 368 - release_region(dev->sync.chanA.ctrlio-1, 8); 369 - free_netdev(dev->netdev.dev); 370 kfree(dev); 371 } 372 373 - #ifdef MODULE 374 - 375 - static int io=0x200; 376 - static int irq=9; 377 378 module_param(io, int, 0); 379 MODULE_PARM_DESC(io, "The I/O base of the Comtrol Hostess SV11 card"); ··· 328 MODULE_LICENSE("GPL"); 329 MODULE_DESCRIPTION("Modular driver for the Comtrol Hostess SV11"); 330 331 - static struct sv11_device *sv11_unit; 332 333 int init_module(void) 334 { 335 - printk(KERN_INFO "SV-11 Z85230 Synchronous Driver v 0.03.\n"); 336 - printk(KERN_INFO "(c) Copyright 2001, Red Hat Inc.\n"); 337 - if((sv11_unit=sv11_init(io,irq))==NULL) 338 return -ENODEV; 339 return 0; 340 } 341 342 void cleanup_module(void) 343 { 344 - if(sv11_unit) 345 sv11_shutdown(sv11_unit); 346 } 347 - 348 - #endif 349 -
··· 16 * touching control registers. 17 * 18 * Port B isnt wired (why - beats me) 19 + * 20 + * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl> 21 */ 22 23 #include <linux/module.h> ··· 26 #include <linux/netdevice.h> 27 #include <linux/if_arp.h> 28 #include <linux/delay.h> 29 + #include <linux/hdlc.h> 30 #include <linux/ioport.h> 31 #include <net/arp.h> 32 ··· 33 #include <asm/io.h> 34 #include <asm/dma.h> 35 #include <asm/byteorder.h> 36 #include "z85230.h" 37 38 static int dma; 39 40 /* 41 * Network driver support routines 42 */ 43 44 + static inline struct z8530_dev* dev_to_sv(struct net_device *dev) 45 + { 46 + return (struct z8530_dev *)dev_to_hdlc(dev)->priv; 47 + } 48 + 49 /* 50 + * Frame receive. Simple for our card as we do HDLC and there 51 * is no funny garbage involved 52 */ 53 + 54 static void hostess_input(struct z8530_channel *c, struct sk_buff *skb) 55 { 56 /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ 57 + skb_trim(skb, skb->len - 2); 58 + skb->protocol = hdlc_type_trans(skb, c->netdevice); 59 skb_reset_mac_header(skb); 60 + skb->dev = c->netdevice; 61 /* 62 * Send it to the PPP layer. We don't have time to process 63 * it right now. ··· 68 netif_rx(skb); 69 c->netdevice->last_rx = jiffies; 70 } 71 + 72 /* 73 * We've been placed in the UP state 74 + */ 75 + 76 static int hostess_open(struct net_device *d) 77 { 78 + struct z8530_dev *sv11 = dev_to_sv(d); 79 int err = -1; 80 + 81 /* 82 * Link layer up 83 */ 84 + switch (dma) { 85 case 0: 86 + err = z8530_sync_open(d, &sv11->chanA); 87 break; 88 case 1: 89 + err = z8530_sync_dma_open(d, &sv11->chanA); 90 break; 91 case 2: 92 + err = z8530_sync_txdma_open(d, &sv11->chanA); 93 break; 94 } 95 + 96 + if (err) 97 return err; 98 + 99 + err = hdlc_open(d); 100 + if (err) { 101 + switch (dma) { 102 case 0: 103 + z8530_sync_close(d, &sv11->chanA); 104 break; 105 case 1: 106 + z8530_sync_dma_close(d, &sv11->chanA); 107 break; 108 case 2: 109 + z8530_sync_txdma_close(d, &sv11->chanA); 110 break; 111 + } 112 return err; 113 } 114 + sv11->chanA.rx_function = hostess_input; 115 + 116 /* 117 * Go go go 118 */ ··· 128 129 static int hostess_close(struct net_device *d) 130 { 131 + struct z8530_dev *sv11 = dev_to_sv(d); 132 /* 133 * Discard new frames 134 */ 135 + sv11->chanA.rx_function = z8530_null_rx; 136 + 137 + hdlc_close(d); 138 netif_stop_queue(d); 139 + 140 + switch (dma) { 141 case 0: 142 + z8530_sync_close(d, &sv11->chanA); 143 break; 144 case 1: 145 + z8530_sync_dma_close(d, &sv11->chanA); 146 break; 147 case 2: 148 + z8530_sync_txdma_close(d, &sv11->chanA); 149 break; 150 } 151 return 0; ··· 159 160 static int hostess_ioctl(struct net_device *d, struct ifreq *ifr, int cmd) 161 { 162 + /* struct z8530_dev *sv11=dev_to_sv(d); 163 + z8530_ioctl(d,&sv11->chanA,ifr,cmd) */ 164 + return hdlc_ioctl(d, ifr, cmd); 165 } 166 167 /* 168 + * Passed network frames, fire them downwind. 
169 */ 170 + 171 static int hostess_queue_xmit(struct sk_buff *skb, struct net_device *d) 172 { 173 + return z8530_queue_xmit(&dev_to_sv(d)->chanA, skb); 174 } 175 176 + static int hostess_attach(struct net_device *dev, unsigned short encoding, 177 + unsigned short parity) 178 { 179 + if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT) 180 + return 0; 181 + return -EINVAL; 182 } 183 184 /* 185 * Description block for a Comtrol Hostess SV11 card 186 */ 187 + 188 + static struct z8530_dev *sv11_init(int iobase, int irq) 189 { 190 + struct z8530_dev *sv; 191 + struct net_device *netdev; 192 /* 193 * Get the needed I/O space 194 */ 195 + 196 + if (!request_region(iobase, 8, "Comtrol SV11")) { 197 + printk(KERN_WARNING "hostess: I/O 0x%X already in use.\n", 198 + iobase); 199 return NULL; 200 } 201 202 + sv = kzalloc(sizeof(struct z8530_dev), GFP_KERNEL); 203 + if (!sv) 204 + goto err_kzalloc; 205 + 206 /* 207 * Stuff in the I/O addressing 208 */ 209 + 210 + sv->active = 0; 211 + 212 + sv->chanA.ctrlio = iobase + 1; 213 + sv->chanA.dataio = iobase + 3; 214 + sv->chanB.ctrlio = -1; 215 + sv->chanB.dataio = -1; 216 + sv->chanA.irqs = &z8530_nop; 217 + sv->chanB.irqs = &z8530_nop; 218 + 219 + outb(0, iobase + 4); /* DMA off */ 220 + 221 /* We want a fast IRQ for this device. Actually we'd like an even faster 222 IRQ ;) - This is one driver RtLinux is made for */ 223 + 224 + if (request_irq(irq, &z8530_interrupt, IRQF_DISABLED, 225 + "Hostess SV11", sv) < 0) { 226 printk(KERN_WARNING "hostess: IRQ %d already in use.\n", irq); 227 + goto err_irq; 228 } 229 + 230 + sv->irq = irq; 231 + sv->chanA.private = sv; 232 + sv->chanA.dev = sv; 233 + sv->chanB.dev = sv; 234 + 235 + if (dma) { 236 /* 237 * You can have DMA off or 1 and 3 thats the lot 238 * on the Comtrol. 
239 */ 240 + sv->chanA.txdma = 3; 241 + sv->chanA.rxdma = 1; 242 + outb(0x03 | 0x08, iobase + 4); /* DMA on */ 243 + if (request_dma(sv->chanA.txdma, "Hostess SV/11 (TX)")) 244 + goto err_txdma; 245 + 246 + if (dma == 1) 247 + if (request_dma(sv->chanA.rxdma, "Hostess SV/11 (RX)")) 248 + goto err_rxdma; 249 } 250 251 /* Kill our private IRQ line the hostess can end up chattering 252 until the configuration is set */ 253 disable_irq(irq); 254 + 255 /* 256 * Begin normal initialise 257 */ 258 + 259 + if (z8530_init(sv)) { 260 printk(KERN_ERR "Z8530 series device not found.\n"); 261 enable_irq(irq); 262 + goto free_dma; 263 } 264 + z8530_channel_load(&sv->chanB, z8530_dead_port); 265 + if (sv->type == Z85C30) 266 + z8530_channel_load(&sv->chanA, z8530_hdlc_kilostream); 267 else 268 + z8530_channel_load(&sv->chanA, z8530_hdlc_kilostream_85230); 269 + 270 enable_irq(irq); 271 272 /* 273 * Now we can take the IRQ 274 */ 275 276 + sv->chanA.netdevice = netdev = alloc_hdlcdev(sv); 277 + if (!netdev) 278 + goto free_dma; 279 280 + dev_to_hdlc(netdev)->attach = hostess_attach; 281 + dev_to_hdlc(netdev)->xmit = hostess_queue_xmit; 282 + netdev->open = hostess_open; 283 + netdev->stop = hostess_close; 284 + netdev->do_ioctl = hostess_ioctl; 285 + netdev->base_addr = iobase; 286 + netdev->irq = irq; 287 + 288 + if (register_hdlc_device(netdev)) { 289 + printk(KERN_ERR "hostess: unable to register HDLC device.\n"); 290 + free_netdev(netdev); 291 + goto free_dma; 292 } 293 + 294 + z8530_describe(sv, "I/O", iobase); 295 + sv->active = 1; 296 + return sv; 297 + 298 + free_dma: 299 + if (dma == 1) 300 + free_dma(sv->chanA.rxdma); 301 + err_rxdma: 302 + if (dma) 303 + free_dma(sv->chanA.txdma); 304 + err_txdma: 305 + free_irq(irq, sv); 306 + err_irq: 307 kfree(sv); 308 + err_kzalloc: 309 + release_region(iobase, 8); 310 return NULL; 311 } 312 313 + static void sv11_shutdown(struct z8530_dev *dev) 314 { 315 + unregister_hdlc_device(dev->chanA.netdevice); 316 + z8530_shutdown(dev); 317 + free_irq(dev->irq, dev); 318 + if (dma) { 319 + if (dma == 1) 320 + free_dma(dev->chanA.rxdma); 321 + free_dma(dev->chanA.txdma); 322 } 323 + release_region(dev->chanA.ctrlio - 1, 8); 324 + free_netdev(dev->chanA.netdevice); 325 kfree(dev); 326 } 327 328 + static int io = 0x200; 329 + static int irq = 9; 330 331 module_param(io, int, 0); 332 MODULE_PARM_DESC(io, "The I/O base of the Comtrol Hostess SV11 card"); ··· 397 MODULE_LICENSE("GPL"); 398 MODULE_DESCRIPTION("Modular driver for the Comtrol Hostess SV11"); 399 400 + static struct z8530_dev *sv11_unit; 401 402 int init_module(void) 403 { 404 + if ((sv11_unit = sv11_init(io, irq)) == NULL) 405 return -ENODEV; 406 return 0; 407 } 408 409 void cleanup_module(void) 410 { 411 + if (sv11_unit) 412 sv11_shutdown(sv11_unit); 413 }
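Besides swapping syncppp for the generic HDLC layer, the rewritten sv11_init() above straightens its error handling into the kernel's customary goto ladder: acquire resources in order, unwind in exact reverse order, one label per acquisition. A skeleton of the idiom, with all EX_*/example_* names hypothetical:

/* Skeleton of the unwind ladder sv11_init() now follows. */
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <asm/dma.h>

#define EX_IO	0x200	/* hypothetical resources */
#define EX_IRQ	9
#define EX_DMA	3

static irqreturn_t example_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_probe(void)
{
	if (!request_region(EX_IO, 8, "example"))
		return -EBUSY;
	if (request_irq(EX_IRQ, example_isr, 0, "example", NULL))
		goto err_region;
	if (request_dma(EX_DMA, "example"))
		goto err_irq;
	return 0;			/* everything acquired */

err_irq:				/* labels unwind newest-first */
	free_irq(EX_IRQ, NULL);
err_region:
	release_region(EX_IO, 8);
	return -ENODEV;
}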
+5 -6
drivers/net/wan/lmc/lmc.h
··· 11 devaddr, unsigned regno); 12 void lmc_mii_writereg(lmc_softc_t * const sc, unsigned devaddr, 13 unsigned regno, unsigned data); 14 - void lmc_led_on(lmc_softc_t * const, u_int32_t); 15 - void lmc_led_off(lmc_softc_t * const, u_int32_t); 16 unsigned lmc_mii_readreg(lmc_softc_t * const, unsigned, unsigned); 17 void lmc_mii_writereg(lmc_softc_t * const, unsigned, unsigned, unsigned); 18 - void lmc_gpio_mkinput(lmc_softc_t * const sc, u_int32_t bits); 19 - void lmc_gpio_mkoutput(lmc_softc_t * const sc, u_int32_t bits); 20 21 int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); 22 ··· 26 extern lmc_media_t lmc_hssi_media; 27 28 #ifdef _DBG_EVENTLOG 29 - static void lmcEventLog( u_int32_t EventNum, u_int32_t arg2, u_int32_t arg3 ); 30 #endif 31 32 #endif 33 -
··· 11 devaddr, unsigned regno); 12 void lmc_mii_writereg(lmc_softc_t * const sc, unsigned devaddr, 13 unsigned regno, unsigned data); 14 + void lmc_led_on(lmc_softc_t * const, u32); 15 + void lmc_led_off(lmc_softc_t * const, u32); 16 unsigned lmc_mii_readreg(lmc_softc_t * const, unsigned, unsigned); 17 void lmc_mii_writereg(lmc_softc_t * const, unsigned, unsigned, unsigned); 18 + void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits); 19 + void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits); 20 21 int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); 22 ··· 26 extern lmc_media_t lmc_hssi_media; 27 28 #ifdef _DBG_EVENTLOG 29 + static void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3); 30 #endif 31 32 #endif
+3 -4
drivers/net/wan/lmc/lmc_debug.c
··· 1 - 2 #include <linux/types.h> 3 #include <linux/netdevice.h> 4 #include <linux/interrupt.h> ··· 47 #endif 48 49 #ifdef DEBUG 50 - u_int32_t lmcEventLogIndex = 0; 51 - u_int32_t lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS]; 52 53 - void lmcEventLog (u_int32_t EventNum, u_int32_t arg2, u_int32_t arg3) 54 { 55 lmcEventLogBuf[lmcEventLogIndex++] = EventNum; 56 lmcEventLogBuf[lmcEventLogIndex++] = arg2;
··· 1 #include <linux/types.h> 2 #include <linux/netdevice.h> 3 #include <linux/interrupt.h> ··· 48 #endif 49 50 #ifdef DEBUG 51 + u32 lmcEventLogIndex; 52 + u32 lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS]; 53 54 + void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3) 55 { 56 lmcEventLogBuf[lmcEventLogIndex++] = EventNum; 57 lmcEventLogBuf[lmcEventLogIndex++] = arg2;
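lmcEventLog() above appends three 32-bit words per event to a flat buffer; the wrap-around lands in the part of the function elided by this hunk. A hedged, self-contained sketch of that kind of ring logger, using made-up EXLOG_*/exlog names rather than the driver's own:

/* Hedged sketch of a three-words-per-event ring log like the one
 * lmc_debug.c keeps; the wrap policy here is an assumption. */
#include <linux/types.h>

#define EXLOG_EVENTS	1024			/* hypothetical capacity */
#define EXLOG_WORDS	3			/* words stored per event */

static u32 exlog_buf[EXLOG_EVENTS * EXLOG_WORDS];
static unsigned int exlog_idx;

static void exlog(u32 event, u32 arg2, u32 arg3)
{
	exlog_buf[exlog_idx++] = event;
	exlog_buf[exlog_idx++] = arg2;
	exlog_buf[exlog_idx++] = arg3;
	if (exlog_idx >= EXLOG_EVENTS * EXLOG_WORDS)
		exlog_idx = 0;			/* oldest entries overwritten */
}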
+3 -3
drivers/net/wan/lmc/lmc_debug.h
··· 38 39 40 #ifdef DEBUG 41 - extern u_int32_t lmcEventLogIndex; 42 - extern u_int32_t lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS]; 43 #define LMC_EVENT_LOG(x, y, z) lmcEventLog((x), (y), (z)) 44 #else 45 #define LMC_EVENT_LOG(x,y,z) 46 #endif /* end ifdef _DBG_EVENTLOG */ 47 48 void lmcConsoleLog(char *type, unsigned char *ucData, int iLen); 49 - void lmcEventLog (u_int32_t EventNum, u_int32_t arg2, u_int32_t arg3); 50 void lmc_trace(struct net_device *dev, char *msg); 51 52 #endif
··· 38 39 40 #ifdef DEBUG 41 + extern u32 lmcEventLogIndex; 42 + extern u32 lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS]; 43 #define LMC_EVENT_LOG(x, y, z) lmcEventLog((x), (y), (z)) 44 #else 45 #define LMC_EVENT_LOG(x,y,z) 46 #endif /* end ifdef _DBG_EVENTLOG */ 47 48 void lmcConsoleLog(char *type, unsigned char *ucData, int iLen); 49 + void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3); 50 void lmc_trace(struct net_device *dev, char *msg); 51 52 #endif
+1 -1
drivers/net/wan/lmc/lmc_ioctl.h
··· 61 /* 62 * IFTYPE defines 63 */ 64 - #define LMC_PPP 1 /* use sppp interface */ 65 #define LMC_NET 2 /* use direct net interface */ 66 #define LMC_RAW 3 /* use direct net interface */ 67
··· 61 /* 62 * IFTYPE defines 63 */ 64 + #define LMC_PPP 1 /* use generic HDLC interface */ 65 #define LMC_NET 2 /* use direct net interface */ 66 #define LMC_RAW 3 /* use direct net interface */ 67
+292 -376
drivers/net/wan/lmc/lmc_main.c
··· 1 /* 2 * Copyright (c) 1997-2000 LAN Media Corporation (LMC) 3 * All rights reserved. www.lanmedia.com 4 * 5 * This code is written by: 6 * Andrew Stanley-Jones (asj@cban.com) ··· 37 * 38 */ 39 40 - /* $Id: lmc_main.c,v 1.36 2000/04/11 05:25:25 asj Exp $ */ 41 - 42 #include <linux/kernel.h> 43 #include <linux/module.h> 44 #include <linux/string.h> ··· 48 #include <linux/interrupt.h> 49 #include <linux/pci.h> 50 #include <linux/delay.h> 51 #include <linux/init.h> 52 #include <linux/in.h> 53 #include <linux/if_arp.h> ··· 57 #include <linux/skbuff.h> 58 #include <linux/inet.h> 59 #include <linux/bitops.h> 60 - 61 - #include <net/syncppp.h> 62 - 63 #include <asm/processor.h> /* Processor type for cache alignment. */ 64 #include <asm/io.h> 65 #include <asm/dma.h> ··· 75 #include "lmc_debug.h" 76 #include "lmc_proto.h" 77 78 - static int lmc_first_load = 0; 79 - 80 static int LMC_PKT_BUF_SZ = 1542; 81 82 static struct pci_device_id lmc_pci_tbl[] = { ··· 86 }; 87 88 MODULE_DEVICE_TABLE(pci, lmc_pci_tbl); 89 - MODULE_LICENSE("GPL"); 90 91 92 - static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev); 93 static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev); 94 static int lmc_rx (struct net_device *dev); 95 static int lmc_open(struct net_device *dev); ··· 108 * linux reserves 16 device specific IOCTLs. We call them 109 * LMCIOC* to control various bits of our world. 110 */ 111 - int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ 112 { 113 - lmc_softc_t *sc; 114 lmc_ctl_t ctl; 115 - int ret; 116 - u_int16_t regVal; 117 unsigned long flags; 118 - 119 - struct sppp *sp; 120 - 121 - ret = -EOPNOTSUPP; 122 - 123 - sc = dev->priv; 124 125 lmc_trace(dev, "lmc_ioctl in"); 126 ··· 137 break; 138 139 case LMCIOCSINFO: /*fold01*/ 140 - sp = &((struct ppp_device *) dev)->sppp; 141 if (!capable(CAP_NET_ADMIN)) { 142 ret = -EPERM; 143 break; ··· 162 sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE; 163 } 164 165 - if (ctl.keepalive_onoff == LMC_CTL_OFF) 166 - sp->pp_flags &= ~PP_KEEPALIVE; /* Turn off */ 167 - else 168 - sp->pp_flags |= PP_KEEPALIVE; /* Turn on */ 169 - 170 ret = 0; 171 break; 172 173 case LMCIOCIFTYPE: /*fold01*/ 174 { 175 - u_int16_t old_type = sc->if_type; 176 - u_int16_t new_type; 177 178 if (!capable(CAP_NET_ADMIN)) { 179 ret = -EPERM; 180 break; 181 } 182 183 - if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u_int16_t))) { 184 ret = -EFAULT; 185 break; 186 } ··· 188 } 189 190 lmc_proto_close(sc); 191 - lmc_proto_detach(sc); 192 193 sc->if_type = new_type; 194 - // lmc_proto_init(sc); 195 lmc_proto_attach(sc); 196 - lmc_proto_open(sc); 197 - 198 - ret = 0 ; 199 - break ; 200 } 201 202 case LMCIOCGETXINFO: /*fold01*/ ··· 219 220 break; 221 222 - case LMCIOCGETLMCSTATS: /*fold01*/ 223 - if (sc->lmc_cardtype == LMC_CARDTYPE_T1){ 224 - lmc_mii_writereg (sc, 0, 17, T1FRAMER_FERR_LSB); 225 - sc->stats.framingBitErrorCount += 226 - lmc_mii_readreg (sc, 0, 18) & 0xff; 227 - lmc_mii_writereg (sc, 0, 17, T1FRAMER_FERR_MSB); 228 - sc->stats.framingBitErrorCount += 229 - (lmc_mii_readreg (sc, 0, 18) & 0xff) << 8; 230 - lmc_mii_writereg (sc, 0, 17, T1FRAMER_LCV_LSB); 231 - sc->stats.lineCodeViolationCount += 232 - lmc_mii_readreg (sc, 0, 18) & 0xff; 233 - lmc_mii_writereg (sc, 0, 17, T1FRAMER_LCV_MSB); 234 - sc->stats.lineCodeViolationCount += 235 - (lmc_mii_readreg (sc, 0, 18) & 0xff) << 8; 236 - lmc_mii_writereg (sc, 0, 17, T1FRAMER_AERR); 237 - regVal = lmc_mii_readreg (sc, 0, 18) & 0xff; 238 239 - sc->stats.lossOfFrameCount += 
240 - (regVal & T1FRAMER_LOF_MASK) >> 4; 241 - sc->stats.changeOfFrameAlignmentCount += 242 - (regVal & T1FRAMER_COFA_MASK) >> 2; 243 - sc->stats.severelyErroredFrameCount += 244 - regVal & T1FRAMER_SEF_MASK; 245 - } 246 247 - if (copy_to_user(ifr->ifr_data, &sc->stats, 248 - sizeof (struct lmc_statistics))) 249 - ret = -EFAULT; 250 - else 251 - ret = 0; 252 - break; 253 254 - case LMCIOCCLEARLMCSTATS: /*fold01*/ 255 - if (!capable(CAP_NET_ADMIN)){ 256 - ret = -EPERM; 257 - break; 258 - } 259 - 260 - memset (&sc->stats, 0, sizeof (struct lmc_statistics)); 261 - sc->stats.check = STATCHECK; 262 - sc->stats.version_size = (DRIVER_VERSION << 16) + 263 - sizeof (struct lmc_statistics); 264 - sc->stats.lmc_cardtype = sc->lmc_cardtype; 265 - ret = 0; 266 - break; 267 268 case LMCIOCSETCIRCUIT: /*fold01*/ 269 if (!capable(CAP_NET_ADMIN)){ ··· 310 ret = -EFAULT; 311 break; 312 } 313 - if (copy_to_user(ifr->ifr_data + sizeof (u32), lmcEventLogBuf, sizeof (lmcEventLogBuf))) 314 ret = -EFAULT; 315 else 316 ret = 0; ··· 622 /* the watchdog process that cruises around */ 623 static void lmc_watchdog (unsigned long data) /*fold00*/ 624 { 625 - struct net_device *dev = (struct net_device *) data; 626 - lmc_softc_t *sc; 627 int link_status; 628 - u_int32_t ticks; 629 unsigned long flags; 630 - 631 - sc = dev->priv; 632 633 lmc_trace(dev, "lmc_watchdog in"); 634 ··· 656 * check for a transmit interrupt timeout 657 * Has the packet xmt vs xmt serviced threshold been exceeded */ 658 if (sc->lmc_taint_tx == sc->lastlmc_taint_tx && 659 - sc->stats.tx_packets > sc->lasttx_packets && 660 - sc->tx_TimeoutInd == 0) 661 { 662 663 /* wait for the watchdog to come around again */ 664 sc->tx_TimeoutInd = 1; 665 } 666 else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx && 667 - sc->stats.tx_packets > sc->lasttx_packets && 668 - sc->tx_TimeoutInd) 669 { 670 671 LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ (sc, csr_status), 0); 672 673 sc->tx_TimeoutDisplay = 1; 674 - sc->stats.tx_TimeoutCnt++; 675 676 /* DEC chip is stuck, hit it with a RESET!!!! */ 677 lmc_running_reset (dev); ··· 691 /* reset the transmit timeout detection flag */ 692 sc->tx_TimeoutInd = 0; 693 sc->lastlmc_taint_tx = sc->lmc_taint_tx; 694 - sc->lasttx_packets = sc->stats.tx_packets; 695 - } 696 - else 697 - { 698 sc->tx_TimeoutInd = 0; 699 sc->lastlmc_taint_tx = sc->lmc_taint_tx; 700 - sc->lasttx_packets = sc->stats.tx_packets; 701 } 702 703 /* --- end time out check ----------------------------------- */ ··· 725 sc->last_link_status = 1; 726 /* lmc_reset (sc); Again why reset??? */ 727 728 - /* Inform the world that link protocol is back up. */ 729 netif_carrier_on(dev); 730 - 731 - /* Now we have to tell the syncppp that we had an outage 732 - * and that it should deal. Calling sppp_reopen here 733 - * should do the trick, but we may have to call sppp_close 734 - * when the link goes down, and call sppp_open here. 735 - * Subject to more testing. 
736 - * --bbraun 737 - */ 738 - 739 - lmc_proto_reopen(sc); 740 - 741 } 742 743 /* Call media specific watchdog functions */ ··· 781 782 } 783 784 - static void lmc_setup(struct net_device * const dev) /*fold00*/ 785 { 786 - lmc_trace(dev, "lmc_setup in"); 787 - 788 - dev->type = ARPHRD_HDLC; 789 - dev->hard_start_xmit = lmc_start_xmit; 790 - dev->open = lmc_open; 791 - dev->stop = lmc_close; 792 - dev->get_stats = lmc_get_stats; 793 - dev->do_ioctl = lmc_ioctl; 794 - dev->tx_timeout = lmc_driver_timeout; 795 - dev->watchdog_timeo = (HZ); /* 1 second */ 796 - 797 - lmc_trace(dev, "lmc_setup out"); 798 } 799 - 800 801 static int __devinit lmc_init_one(struct pci_dev *pdev, 802 const struct pci_device_id *ent) 803 { 804 - struct net_device *dev; 805 - lmc_softc_t *sc; 806 - u16 subdevice; 807 - u_int16_t AdapModelNum; 808 - int err = -ENOMEM; 809 - static int cards_found; 810 - #ifndef GCOM 811 - /* We name by type not by vendor */ 812 - static const char lmcname[] = "hdlc%d"; 813 - #else 814 - /* 815 - * GCOM uses LMC vendor name so that clients can know which card 816 - * to attach to. 817 - */ 818 - static const char lmcname[] = "lmc%d"; 819 - #endif 820 821 822 - /* 823 - * Allocate our own device structure 824 - */ 825 - dev = alloc_netdev(sizeof(lmc_softc_t), lmcname, lmc_setup); 826 - if (!dev) { 827 - printk (KERN_ERR "lmc:alloc_netdev for device failed\n"); 828 - goto out1; 829 - } 830 - 831 - lmc_trace(dev, "lmc_init_one in"); 832 833 - err = pci_enable_device(pdev); 834 - if (err) { 835 - printk(KERN_ERR "lmc: pci enable failed:%d\n", err); 836 - goto out2; 837 - } 838 - 839 - if (pci_request_regions(pdev, "lmc")) { 840 - printk(KERN_ERR "lmc: pci_request_region failed\n"); 841 - err = -EIO; 842 - goto out3; 843 - } 844 845 - pci_set_drvdata(pdev, dev); 846 847 - if(lmc_first_load == 0){ 848 - printk(KERN_INFO "Lan Media Corporation WAN Driver Version %d.%d.%d\n", 849 - DRIVER_MAJOR_VERSION, DRIVER_MINOR_VERSION,DRIVER_SUB_VERSION); 850 - lmc_first_load = 1; 851 - } 852 - 853 - sc = dev->priv; 854 - sc->lmc_device = dev; 855 - sc->name = dev->name; 856 857 - /* Initialize the sppp layer */ 858 - /* An ioctl can cause a subsequent detach for raw frame interface */ 859 - dev->ml_priv = sc; 860 - sc->if_type = LMC_PPP; 861 - sc->check = 0xBEAFCAFE; 862 - dev->base_addr = pci_resource_start(pdev, 0); 863 - dev->irq = pdev->irq; 864 865 - SET_NETDEV_DEV(dev, &pdev->dev); 866 - 867 - /* 868 - * This will get the protocol layer ready and do any 1 time init's 869 - * Must have a valid sc and dev structure 870 - */ 871 - lmc_proto_init(sc); 872 - 873 - lmc_proto_attach(sc); 874 - 875 - /* 876 - * Why were we changing this??? 
877 - dev->tx_queue_len = 100; 878 - */ 879 - 880 - /* Init the spin lock so can call it latter */ 881 - 882 - spin_lock_init(&sc->lmc_lock); 883 - pci_set_master(pdev); 884 - 885 - printk ("%s: detected at %lx, irq %d\n", dev->name, 886 - dev->base_addr, dev->irq); 887 - 888 - if (register_netdev (dev) != 0) { 889 - printk (KERN_ERR "%s: register_netdev failed.\n", dev->name); 890 - goto out4; 891 - } 892 893 sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN; 894 sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT; ··· 883 884 switch (subdevice) { 885 case PCI_DEVICE_ID_LMC_HSSI: 886 - printk ("%s: LMC HSSI\n", dev->name); 887 sc->lmc_cardtype = LMC_CARDTYPE_HSSI; 888 sc->lmc_media = &lmc_hssi_media; 889 break; 890 case PCI_DEVICE_ID_LMC_DS3: 891 - printk ("%s: LMC DS3\n", dev->name); 892 sc->lmc_cardtype = LMC_CARDTYPE_DS3; 893 sc->lmc_media = &lmc_ds3_media; 894 break; 895 case PCI_DEVICE_ID_LMC_SSI: 896 - printk ("%s: LMC SSI\n", dev->name); 897 sc->lmc_cardtype = LMC_CARDTYPE_SSI; 898 sc->lmc_media = &lmc_ssi_media; 899 break; 900 case PCI_DEVICE_ID_LMC_T1: 901 - printk ("%s: LMC T1\n", dev->name); 902 sc->lmc_cardtype = LMC_CARDTYPE_T1; 903 sc->lmc_media = &lmc_t1_media; 904 break; 905 default: 906 - printk (KERN_WARNING "%s: LMC UNKOWN CARD!\n", dev->name); 907 break; 908 } 909 ··· 921 */ 922 AdapModelNum = (lmc_mii_readreg (sc, 0, 3) & 0x3f0) >> 4; 923 924 - if ((AdapModelNum == LMC_ADAP_T1 925 - && subdevice == PCI_DEVICE_ID_LMC_T1) || /* detect LMC1200 */ 926 - (AdapModelNum == LMC_ADAP_SSI 927 - && subdevice == PCI_DEVICE_ID_LMC_SSI) || /* detect LMC1000 */ 928 - (AdapModelNum == LMC_ADAP_DS3 929 - && subdevice == PCI_DEVICE_ID_LMC_DS3) || /* detect LMC5245 */ 930 - (AdapModelNum == LMC_ADAP_HSSI 931 - && subdevice == PCI_DEVICE_ID_LMC_HSSI)) 932 - { /* detect LMC5200 */ 933 934 - } 935 - else { 936 - printk ("%s: Model number (%d) miscompare for PCI Subsystem ID = 0x%04x\n", 937 - dev->name, AdapModelNum, subdevice); 938 - // return (NULL); 939 - } 940 /* 941 * reset clock 942 */ 943 LMC_CSR_WRITE (sc, csr_gp_timer, 0xFFFFFFFFUL); 944 945 sc->board_idx = cards_found++; 946 - sc->stats.check = STATCHECK; 947 - sc->stats.version_size = (DRIVER_VERSION << 16) + 948 - sizeof (struct lmc_statistics); 949 - sc->stats.lmc_cardtype = sc->lmc_cardtype; 950 951 sc->lmc_ok = 0; 952 sc->last_link_status = 0; ··· 950 lmc_trace(dev, "lmc_init_one out"); 951 return 0; 952 953 - out4: 954 - lmc_proto_detach(sc); 955 - out3: 956 - if (pdev) { 957 - pci_release_regions(pdev); 958 - pci_set_drvdata(pdev, NULL); 959 - } 960 - out2: 961 - free_netdev(dev); 962 - out1: 963 - return err; 964 } 965 966 /* 967 * Called from pci when removing module. 968 */ 969 - static void __devexit lmc_remove_one (struct pci_dev *pdev) 970 { 971 - struct net_device *dev = pci_get_drvdata(pdev); 972 - 973 - if (dev) { 974 - lmc_softc_t *sc = dev->priv; 975 - 976 - printk("%s: removing...\n", dev->name); 977 - lmc_proto_detach(sc); 978 - unregister_netdev(dev); 979 - free_netdev(dev); 980 - pci_release_regions(pdev); 981 - pci_disable_device(pdev); 982 - pci_set_drvdata(pdev, NULL); 983 - } 984 } 985 986 /* After this is called, packets can be sent. 
987 * Does not initialize the addresses 988 */ 989 - static int lmc_open (struct net_device *dev) /*fold00*/ 990 { 991 - lmc_softc_t *sc = dev->priv; 992 993 lmc_trace(dev, "lmc_open in"); 994 995 lmc_led_on(sc, LMC_DS3_LED0); 996 997 - lmc_dec_reset (sc); 998 - lmc_reset (sc); 999 1000 - LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0); 1001 - LMC_EVENT_LOG(LMC_EVENT_RESET2, 1002 - lmc_mii_readreg (sc, 0, 16), 1003 - lmc_mii_readreg (sc, 0, 17)); 1004 - 1005 1006 if (sc->lmc_ok){ 1007 lmc_trace(dev, "lmc_open lmc_ok out"); ··· 1039 1040 /* dev->flags |= IFF_UP; */ 1041 1042 - lmc_proto_open(sc); 1043 1044 dev->do_ioctl = lmc_ioctl; 1045 1046 1047 netif_start_queue(dev); 1048 - 1049 - sc->stats.tx_tbusy0++ ; 1050 1051 /* 1052 * select what interrupts we want to get ··· 1098 1099 static void lmc_running_reset (struct net_device *dev) /*fold00*/ 1100 { 1101 - 1102 - lmc_softc_t *sc = (lmc_softc_t *) dev->priv; 1103 1104 lmc_trace(dev, "lmc_runnig_reset in"); 1105 ··· 1116 netif_wake_queue(dev); 1117 1118 sc->lmc_txfull = 0; 1119 - sc->stats.tx_tbusy0++ ; 1120 1121 sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK; 1122 LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask); ··· 1132 * This disables the timer for the watchdog and keepalives, 1133 * and disables the irq for dev. 1134 */ 1135 - static int lmc_close (struct net_device *dev) /*fold00*/ 1136 { 1137 /* not calling release_region() as we should */ 1138 - lmc_softc_t *sc; 1139 1140 lmc_trace(dev, "lmc_close in"); 1141 - 1142 - sc = dev->priv; 1143 sc->lmc_ok = 0; 1144 sc->lmc_media->set_link_status (sc, 0); 1145 del_timer (&sc->timer); ··· 1146 lmc_ifdown (dev); 1147 1148 lmc_trace(dev, "lmc_close out"); 1149 - 1150 return 0; 1151 } 1152 ··· 1154 /* When the interface goes down, this is called */ 1155 static int lmc_ifdown (struct net_device *dev) /*fold00*/ 1156 { 1157 - lmc_softc_t *sc = dev->priv; 1158 u32 csr6; 1159 int i; 1160 1161 lmc_trace(dev, "lmc_ifdown in"); 1162 - 1163 /* Don't let anything else go on right now */ 1164 // dev->start = 0; 1165 netif_stop_queue(dev); 1166 - sc->stats.tx_tbusy1++ ; 1167 1168 /* stop interrupts */ 1169 /* Clear the interrupt mask */ ··· 1175 csr6 &= ~LMC_DEC_SR; /* Turn off the Receive bit */ 1176 LMC_CSR_WRITE (sc, csr_command, csr6); 1177 1178 - sc->stats.rx_missed_errors += 1179 - LMC_CSR_READ (sc, csr_missed_frames) & 0xffff; 1180 1181 /* release the interrupt */ 1182 if(sc->got_irq == 1){ ··· 1207 lmc_led_off (sc, LMC_MII16_LED_ALL); 1208 1209 netif_wake_queue(dev); 1210 - sc->stats.tx_tbusy0++ ; 1211 1212 lmc_trace(dev, "lmc_ifdown out"); 1213 ··· 1220 static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/ 1221 { 1222 struct net_device *dev = (struct net_device *) dev_instance; 1223 - lmc_softc_t *sc; 1224 u32 csr; 1225 int i; 1226 s32 stat; ··· 1231 1232 lmc_trace(dev, "lmc_interrupt in"); 1233 1234 - sc = dev->priv; 1235 - 1236 spin_lock(&sc->lmc_lock); 1237 1238 /* ··· 1283 1284 int n_compl = 0 ; 1285 /* reset the transmit timeout detection flag -baz */ 1286 - sc->stats.tx_NoCompleteCnt = 0; 1287 1288 badtx = sc->lmc_taint_tx; 1289 i = badtx % LMC_TXDESCS; ··· 1307 if (sc->lmc_txq[i] == NULL) 1308 continue; 1309 1310 - /* 1311 - * Check the total error summary to look for any errors 1312 - */ 1313 - if (stat & 0x8000) { 1314 - sc->stats.tx_errors++; 1315 - if (stat & 0x4104) 1316 - sc->stats.tx_aborted_errors++; 1317 - if (stat & 0x0C00) 1318 - sc->stats.tx_carrier_errors++; 1319 - if (stat & 0x0200) 1320 - sc->stats.tx_window_errors++; 1321 - if (stat & 0x0002) 
1322 - sc->stats.tx_fifo_errors++; 1323 } 1324 - else { 1325 - 1326 - sc->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff; 1327 - 1328 - sc->stats.tx_packets++; 1329 - } 1330 - 1331 // dev_kfree_skb(sc->lmc_txq[i]); 1332 dev_kfree_skb_irq(sc->lmc_txq[i]); 1333 sc->lmc_txq[i] = NULL; ··· 1342 LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0); 1343 sc->lmc_txfull = 0; 1344 netif_wake_queue(dev); 1345 - sc->stats.tx_tbusy0++ ; 1346 1347 1348 #ifdef DEBUG 1349 - sc->stats.dirtyTx = badtx; 1350 - sc->stats.lmc_next_tx = sc->lmc_next_tx; 1351 - sc->stats.lmc_txfull = sc->lmc_txfull; 1352 #endif 1353 sc->lmc_taint_tx = badtx; 1354 ··· 1403 return IRQ_RETVAL(handled); 1404 } 1405 1406 - static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00*/ 1407 { 1408 - lmc_softc_t *sc; 1409 u32 flag; 1410 int entry; 1411 int ret = 0; 1412 unsigned long flags; 1413 1414 lmc_trace(dev, "lmc_start_xmit in"); 1415 - 1416 - sc = dev->priv; 1417 1418 spin_lock_irqsave(&sc->lmc_lock, flags); 1419 ··· 1457 if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1) 1458 { /* ring full, go busy */ 1459 sc->lmc_txfull = 1; 1460 - netif_stop_queue(dev); 1461 - sc->stats.tx_tbusy1++ ; 1462 LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0); 1463 } 1464 #endif ··· 1475 * the watchdog timer handler. -baz 1476 */ 1477 1478 - sc->stats.tx_NoCompleteCnt++; 1479 sc->lmc_next_tx++; 1480 1481 /* give ownership to the chip */ ··· 1494 } 1495 1496 1497 - static int lmc_rx (struct net_device *dev) /*fold00*/ 1498 { 1499 - lmc_softc_t *sc; 1500 int i; 1501 int rx_work_limit = LMC_RXDESCS; 1502 unsigned int next_rx; ··· 1507 u16 len; 1508 1509 lmc_trace(dev, "lmc_rx in"); 1510 - 1511 - sc = dev->priv; 1512 1513 lmc_led_on(sc, LMC_DS3_LED3); 1514 ··· 1520 rxIntLoopCnt++; /* debug -baz */ 1521 len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER); 1522 if ((stat & 0x0300) != 0x0300) { /* Check first segment and last segment */ 1523 - if ((stat & 0x0000ffff) != 0x7fff) { 1524 - /* Oversized frame */ 1525 - sc->stats.rx_length_errors++; 1526 - goto skip_packet; 1527 - } 1528 - } 1529 1530 - if(stat & 0x00000008){ /* Catch a dribbling bit error */ 1531 - sc->stats.rx_errors++; 1532 - sc->stats.rx_frame_errors++; 1533 - goto skip_packet; 1534 - } 1535 - 1536 - 1537 - if(stat & 0x00000004){ /* Catch a CRC error by the Xilinx */ 1538 - sc->stats.rx_errors++; 1539 - sc->stats.rx_crc_errors++; 1540 - goto skip_packet; 1541 - } 1542 1543 1544 - if (len > LMC_PKT_BUF_SZ){ 1545 - sc->stats.rx_length_errors++; 1546 - localLengthErrCnt++; 1547 - goto skip_packet; 1548 - } 1549 1550 - if (len < sc->lmc_crcSize + 2) { 1551 - sc->stats.rx_length_errors++; 1552 - sc->stats.rx_SmallPktCnt++; 1553 - localLengthErrCnt++; 1554 - goto skip_packet; 1555 - } 1556 1557 if(stat & 0x00004000){ 1558 printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name); ··· 1578 } 1579 1580 dev->last_rx = jiffies; 1581 - sc->stats.rx_packets++; 1582 - sc->stats.rx_bytes += len; 1583 1584 LMC_CONSOLE_LOG("recv", skb->data, len); 1585 ··· 1601 1602 skb_put (skb, len); 1603 skb->protocol = lmc_proto_type(sc, skb); 1604 - skb->protocol = htons(ETH_P_WAN_PPP); 1605 skb_reset_mac_header(skb); 1606 /* skb_reset_network_header(skb); */ 1607 skb->dev = dev; ··· 1625 * in which care we'll try to allocate the buffer 1626 * again. 
(once a second) 1627 */ 1628 - sc->stats.rx_BuffAllocErr++; 1629 LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len); 1630 sc->failed_recv_alloc = 1; 1631 goto skip_out_of_mem; ··· 1660 * descriptors with bogus packets 1661 * 1662 if (localLengthErrCnt > LMC_RXDESCS - 3) { 1663 - sc->stats.rx_BadPktSurgeCnt++; 1664 - LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, 1665 - localLengthErrCnt, 1666 - sc->stats.rx_BadPktSurgeCnt); 1667 } */ 1668 1669 /* save max count of receive descriptors serviced */ 1670 - if (rxIntLoopCnt > sc->stats.rxIntLoopCnt) { 1671 - sc->stats.rxIntLoopCnt = rxIntLoopCnt; /* debug -baz */ 1672 - } 1673 1674 #ifdef DEBUG 1675 if (rxIntLoopCnt == 0) ··· 1694 return 0; 1695 } 1696 1697 - static struct net_device_stats *lmc_get_stats (struct net_device *dev) /*fold00*/ 1698 { 1699 - lmc_softc_t *sc = dev->priv; 1700 unsigned long flags; 1701 1702 lmc_trace(dev, "lmc_get_stats in"); 1703 1704 - 1705 spin_lock_irqsave(&sc->lmc_lock, flags); 1706 1707 - sc->stats.rx_missed_errors += LMC_CSR_READ (sc, csr_missed_frames) & 0xffff; 1708 1709 spin_unlock_irqrestore(&sc->lmc_lock, flags); 1710 1711 lmc_trace(dev, "lmc_get_stats out"); 1712 1713 - return (struct net_device_stats *) &sc->stats; 1714 } 1715 1716 static struct pci_driver lmc_driver = { ··· 1888 { 1889 if (sc->lmc_txq[i] != NULL){ /* have buffer */ 1890 dev_kfree_skb(sc->lmc_txq[i]); /* free it */ 1891 - sc->stats.tx_dropped++; /* We just dropped a packet */ 1892 } 1893 sc->lmc_txq[i] = NULL; 1894 sc->lmc_txring[i].status = 0x00000000; ··· 1900 lmc_trace(sc->lmc_device, "lmc_softreset out"); 1901 } 1902 1903 - void lmc_gpio_mkinput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/ 1904 { 1905 lmc_trace(sc->lmc_device, "lmc_gpio_mkinput in"); 1906 sc->lmc_gpio_io &= ~bits; ··· 1908 lmc_trace(sc->lmc_device, "lmc_gpio_mkinput out"); 1909 } 1910 1911 - void lmc_gpio_mkoutput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/ 1912 { 1913 lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput in"); 1914 sc->lmc_gpio_io |= bits; ··· 1916 lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput out"); 1917 } 1918 1919 - void lmc_led_on(lmc_softc_t * const sc, u_int32_t led) /*fold00*/ 1920 { 1921 lmc_trace(sc->lmc_device, "lmc_led_on in"); 1922 if((~sc->lmc_miireg16) & led){ /* Already on! 
*/ ··· 1929 lmc_trace(sc->lmc_device, "lmc_led_on out"); 1930 } 1931 1932 - void lmc_led_off(lmc_softc_t * const sc, u_int32_t led) /*fold00*/ 1933 { 1934 lmc_trace(sc->lmc_device, "lmc_led_off in"); 1935 if(sc->lmc_miireg16 & led){ /* Already set don't do anything */ ··· 1979 */ 1980 sc->lmc_media->init(sc); 1981 1982 - sc->stats.resetCount++; 1983 lmc_trace(sc->lmc_device, "lmc_reset out"); 1984 } 1985 1986 static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/ 1987 { 1988 - u_int32_t val; 1989 lmc_trace(sc->lmc_device, "lmc_dec_reset in"); 1990 1991 /* ··· 2069 lmc_trace(sc->lmc_device, "lmc_initcsrs out"); 2070 } 2071 2072 - static void lmc_driver_timeout(struct net_device *dev) { /*fold00*/ 2073 - lmc_softc_t *sc; 2074 u32 csr6; 2075 unsigned long flags; 2076 2077 lmc_trace(dev, "lmc_driver_timeout in"); 2078 2079 - sc = dev->priv; 2080 - 2081 spin_lock_irqsave(&sc->lmc_lock, flags); 2082 2083 printk("%s: Xmitter busy|\n", dev->name); 2084 2085 - sc->stats.tx_tbusy_calls++ ; 2086 - if (jiffies - dev->trans_start < TX_TIMEOUT) { 2087 - goto bug_out; 2088 - } 2089 2090 /* 2091 * Chip seems to have locked up ··· 2094 2095 LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO, 2096 LMC_CSR_READ (sc, csr_status), 2097 - sc->stats.tx_ProcTimeout); 2098 2099 lmc_running_reset (dev); 2100 ··· 2111 /* immediate transmit */ 2112 LMC_CSR_WRITE (sc, csr_txpoll, 0); 2113 2114 - sc->stats.tx_errors++; 2115 - sc->stats.tx_ProcTimeout++; /* -baz */ 2116 2117 dev->trans_start = jiffies; 2118
··· 1 /* 2 * Copyright (c) 1997-2000 LAN Media Corporation (LMC) 3 * All rights reserved. www.lanmedia.com 4 + * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl> 5 * 6 * This code is written by: 7 * Andrew Stanley-Jones (asj@cban.com) ··· 36 * 37 */ 38 39 #include <linux/kernel.h> 40 #include <linux/module.h> 41 #include <linux/string.h> ··· 49 #include <linux/interrupt.h> 50 #include <linux/pci.h> 51 #include <linux/delay.h> 52 + #include <linux/hdlc.h> 53 #include <linux/init.h> 54 #include <linux/in.h> 55 #include <linux/if_arp.h> ··· 57 #include <linux/skbuff.h> 58 #include <linux/inet.h> 59 #include <linux/bitops.h> 60 #include <asm/processor.h> /* Processor type for cache alignment. */ 61 #include <asm/io.h> 62 #include <asm/dma.h> ··· 78 #include "lmc_debug.h" 79 #include "lmc_proto.h" 80 81 static int LMC_PKT_BUF_SZ = 1542; 82 83 static struct pci_device_id lmc_pci_tbl[] = { ··· 91 }; 92 93 MODULE_DEVICE_TABLE(pci, lmc_pci_tbl); 94 + MODULE_LICENSE("GPL v2"); 95 96 97 static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev); 98 static int lmc_rx (struct net_device *dev); 99 static int lmc_open(struct net_device *dev); ··· 114 * linux reserves 16 device specific IOCTLs. We call them 115 * LMCIOC* to control various bits of our world. 116 */ 117 + int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ 118 { 119 + lmc_softc_t *sc = dev_to_sc(dev); 120 lmc_ctl_t ctl; 121 + int ret = -EOPNOTSUPP; 122 + u16 regVal; 123 unsigned long flags; 124 125 lmc_trace(dev, "lmc_ioctl in"); 126 ··· 149 break; 150 151 case LMCIOCSINFO: /*fold01*/ 152 if (!capable(CAP_NET_ADMIN)) { 153 ret = -EPERM; 154 break; ··· 175 sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE; 176 } 177 178 ret = 0; 179 break; 180 181 case LMCIOCIFTYPE: /*fold01*/ 182 { 183 + u16 old_type = sc->if_type; 184 + u16 new_type; 185 186 if (!capable(CAP_NET_ADMIN)) { 187 ret = -EPERM; 188 break; 189 } 190 191 + if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u16))) { 192 ret = -EFAULT; 193 break; 194 } ··· 206 } 207 208 lmc_proto_close(sc); 209 210 sc->if_type = new_type; 211 lmc_proto_attach(sc); 212 + ret = lmc_proto_open(sc); 213 + break; 214 } 215 216 case LMCIOCGETXINFO: /*fold01*/ ··· 241 242 break; 243 244 + case LMCIOCGETLMCSTATS: 245 + if (sc->lmc_cardtype == LMC_CARDTYPE_T1) { 246 + lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_LSB); 247 + sc->extra_stats.framingBitErrorCount += 248 + lmc_mii_readreg(sc, 0, 18) & 0xff; 249 + lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_MSB); 250 + sc->extra_stats.framingBitErrorCount += 251 + (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8; 252 + lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_LSB); 253 + sc->extra_stats.lineCodeViolationCount += 254 + lmc_mii_readreg(sc, 0, 18) & 0xff; 255 + lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_MSB); 256 + sc->extra_stats.lineCodeViolationCount += 257 + (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8; 258 + lmc_mii_writereg(sc, 0, 17, T1FRAMER_AERR); 259 + regVal = lmc_mii_readreg(sc, 0, 18) & 0xff; 260 261 + sc->extra_stats.lossOfFrameCount += 262 + (regVal & T1FRAMER_LOF_MASK) >> 4; 263 + sc->extra_stats.changeOfFrameAlignmentCount += 264 + (regVal & T1FRAMER_COFA_MASK) >> 2; 265 + sc->extra_stats.severelyErroredFrameCount += 266 + regVal & T1FRAMER_SEF_MASK; 267 + } 268 + if (copy_to_user(ifr->ifr_data, &sc->lmc_device->stats, 269 + sizeof(sc->lmc_device->stats)) || 270 + copy_to_user(ifr->ifr_data + sizeof(sc->lmc_device->stats), 271 + &sc->extra_stats, sizeof(sc->extra_stats))) 272 + ret = -EFAULT; 273 + else 
274 + ret = 0; 275 + break; 276 277 + case LMCIOCCLEARLMCSTATS: 278 + if (!capable(CAP_NET_ADMIN)) { 279 + ret = -EPERM; 280 + break; 281 + } 282 283 + memset(&sc->lmc_device->stats, 0, sizeof(sc->lmc_device->stats)); 284 + memset(&sc->extra_stats, 0, sizeof(sc->extra_stats)); 285 + sc->extra_stats.check = STATCHECK; 286 + sc->extra_stats.version_size = (DRIVER_VERSION << 16) + 287 + sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats); 288 + sc->extra_stats.lmc_cardtype = sc->lmc_cardtype; 289 + ret = 0; 290 + break; 291 292 case LMCIOCSETCIRCUIT: /*fold01*/ 293 if (!capable(CAP_NET_ADMIN)){ ··· 330 ret = -EFAULT; 331 break; 332 } 333 + if (copy_to_user(ifr->ifr_data + sizeof(u32), lmcEventLogBuf, 334 + sizeof(lmcEventLogBuf))) 335 ret = -EFAULT; 336 else 337 ret = 0; ··· 641 /* the watchdog process that cruises around */ 642 static void lmc_watchdog (unsigned long data) /*fold00*/ 643 { 644 + struct net_device *dev = (struct net_device *)data; 645 + lmc_softc_t *sc = dev_to_sc(dev); 646 int link_status; 647 + u32 ticks; 648 unsigned long flags; 649 650 lmc_trace(dev, "lmc_watchdog in"); 651 ··· 677 * check for a transmit interrupt timeout 678 * Has the packet xmt vs xmt serviced threshold been exceeded */ 679 if (sc->lmc_taint_tx == sc->lastlmc_taint_tx && 680 + sc->lmc_device->stats.tx_packets > sc->lasttx_packets && 681 + sc->tx_TimeoutInd == 0) 682 { 683 684 /* wait for the watchdog to come around again */ 685 sc->tx_TimeoutInd = 1; 686 } 687 else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx && 688 + sc->lmc_device->stats.tx_packets > sc->lasttx_packets && 689 + sc->tx_TimeoutInd) 690 { 691 692 LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ (sc, csr_status), 0); 693 694 sc->tx_TimeoutDisplay = 1; 695 + sc->extra_stats.tx_TimeoutCnt++; 696 697 /* DEC chip is stuck, hit it with a RESET!!!! */ 698 lmc_running_reset (dev); ··· 712 /* reset the transmit timeout detection flag */ 713 sc->tx_TimeoutInd = 0; 714 sc->lastlmc_taint_tx = sc->lmc_taint_tx; 715 + sc->lasttx_packets = sc->lmc_device->stats.tx_packets; 716 + } else { 717 sc->tx_TimeoutInd = 0; 718 sc->lastlmc_taint_tx = sc->lmc_taint_tx; 719 + sc->lasttx_packets = sc->lmc_device->stats.tx_packets; 720 } 721 722 /* --- end time out check ----------------------------------- */ ··· 748 sc->last_link_status = 1; 749 /* lmc_reset (sc); Again why reset??? 
*/ 750
 751 netif_carrier_on(dev);
 752 }
 753
 754 /* Call media specific watchdog functions */
··· 816
 817 }
 818
 819 + static int lmc_attach(struct net_device *dev, unsigned short encoding,
 820 + unsigned short parity)
 821 {
 822 + if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
 823 + return 0;
 824 + return -EINVAL;
 825 }
 826
 827 static int __devinit lmc_init_one(struct pci_dev *pdev,
 828 const struct pci_device_id *ent)
 829 {
 830 + lmc_softc_t *sc;
 831 + struct net_device *dev;
 832 + u16 subdevice;
 833 + u16 AdapModelNum;
 834 + int err;
 835 + static int cards_found;
 836 +
 837 + /* lmc_trace(dev, "lmc_init_one in"); */
 838 +
 839 + err = pci_enable_device(pdev);
 840 + if (err) {
 841 + printk(KERN_ERR "lmc: pci enable failed: %d\n", err);
 842 + return err;
 843 + }
 844 +
 845 + err = pci_request_regions(pdev, "lmc");
 846 + if (err) {
 847 + printk(KERN_ERR "lmc: pci_request_regions failed\n");
 848 + goto err_req_io;
 849 + }
 850 +
 851 + /*
 852 + * Allocate our own device structure
 853 + */
 854 + sc = kzalloc(sizeof(lmc_softc_t), GFP_KERNEL);
 855 + if (!sc) {
 856 + err = -ENOMEM;
 857 + goto err_kzalloc;
 858 + }
 859 +
 860 + dev = alloc_hdlcdev(sc);
 861 + if (!dev) {
 862 + printk(KERN_ERR "lmc: alloc_hdlcdev for device failed\n");
 863 + err = -ENOMEM;
 864 + goto err_hdlcdev;
 865 + }
 866
 867 + dev->type = ARPHRD_HDLC;
 868 + dev_to_hdlc(dev)->xmit = lmc_start_xmit;
 869 + dev_to_hdlc(dev)->attach = lmc_attach;
 870 + dev->open = lmc_open;
 871 + dev->stop = lmc_close;
 872 + dev->get_stats = lmc_get_stats;
 873 + dev->do_ioctl = lmc_ioctl;
 874 + dev->tx_timeout = lmc_driver_timeout;
 875 + dev->watchdog_timeo = HZ; /* 1 second */
 876 + dev->tx_queue_len = 100;
 877 + sc->lmc_device = dev;
 878 + sc->name = dev->name;
 879 + sc->if_type = LMC_PPP;
 880 + sc->check = 0xBEAFCAFE;
 881 + dev->base_addr = pci_resource_start(pdev, 0);
 882 + dev->irq = pdev->irq;
 883 + pci_set_drvdata(pdev, dev);
 884 + SET_NETDEV_DEV(dev, &pdev->dev);
 885
 886 + /*
 887 + * This will get the protocol layer ready and do any one-time init's
 888 + * Must have a valid sc and dev structure
 889 + */
 890 + lmc_proto_attach(sc);
 891
 892 + /* Init the spin lock so we can call it later */
 893
 894 + spin_lock_init(&sc->lmc_lock);
 895 + pci_set_master(pdev);
 896
 897 + printk(KERN_INFO "%s: detected at %lx, irq %d\n", dev->name,
 898 + dev->base_addr, dev->irq);
 899
 900 + err = register_hdlc_device(dev);
 901 + if (err) {
 902 + printk(KERN_ERR "%s: register_hdlc_device failed.\n", dev->name);
 903 + free_netdev(dev);
 904 + goto err_hdlcdev;
 905 + }
 906
 907 sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN;
 908 sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;
··· 939
 940 switch (subdevice) {
 941 case PCI_DEVICE_ID_LMC_HSSI:
 942 + printk(KERN_INFO "%s: LMC HSSI\n", dev->name);
 943 sc->lmc_cardtype = LMC_CARDTYPE_HSSI;
 944 sc->lmc_media = &lmc_hssi_media;
 945 break;
 946 case PCI_DEVICE_ID_LMC_DS3:
 947 + printk(KERN_INFO "%s: LMC DS3\n", dev->name);
 948 sc->lmc_cardtype = LMC_CARDTYPE_DS3;
 949 sc->lmc_media = &lmc_ds3_media;
 950 break;
 951 case PCI_DEVICE_ID_LMC_SSI:
 952 + printk(KERN_INFO "%s: LMC SSI\n", dev->name);
 953 sc->lmc_cardtype = LMC_CARDTYPE_SSI;
 954 sc->lmc_media = &lmc_ssi_media;
 955 break;
 956 case PCI_DEVICE_ID_LMC_T1:
 957 + printk(KERN_INFO "%s: LMC T1\n", dev->name);
 958 sc->lmc_cardtype = LMC_CARDTYPE_T1;
 959 sc->lmc_media = &lmc_t1_media;
 960 break;
 961 default:
 962 + printk(KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name);
 963 break;
 964 }
 965
··· 977 */
 978 AdapModelNum = (lmc_mii_readreg (sc, 0, 3) & 0x3f0) >> 4;
 979
 980 + if ((AdapModelNum != LMC_ADAP_T1 || /*
detect LMC1200 */
 981 + subdevice != PCI_DEVICE_ID_LMC_T1) &&
 982 + (AdapModelNum != LMC_ADAP_SSI || /* detect LMC1000 */
 983 + subdevice != PCI_DEVICE_ID_LMC_SSI) &&
 984 + (AdapModelNum != LMC_ADAP_DS3 || /* detect LMC5245 */
 985 + subdevice != PCI_DEVICE_ID_LMC_DS3) &&
 986 + (AdapModelNum != LMC_ADAP_HSSI || /* detect LMC5200 */
 987 + subdevice != PCI_DEVICE_ID_LMC_HSSI))
 988 + printk(KERN_WARNING "%s: Model number (%d) miscompare for PCI"
 989 + " Subsystem ID = 0x%04x\n",
 990 + dev->name, AdapModelNum, subdevice);
 991
 992 /*
 993 * reset clock
 994 */
 995 LMC_CSR_WRITE (sc, csr_gp_timer, 0xFFFFFFFFUL);
 996
 997 sc->board_idx = cards_found++;
 998 + sc->extra_stats.check = STATCHECK;
 999 + sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
 1000 + sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
 1001 + sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
 1002
 1003 sc->lmc_ok = 0;
 1004 sc->last_link_status = 0;
··· 1010 lmc_trace(dev, "lmc_init_one out");
 1011 return 0;
 1012
 1013 + err_hdlcdev:
 1014 + pci_set_drvdata(pdev, NULL);
 1015 + kfree(sc);
 1016 + err_kzalloc:
 1017 + pci_release_regions(pdev);
 1018 + err_req_io:
 1019 + pci_disable_device(pdev);
 1020 + return err;
 1021 }
 1022
 1023 /*
 1024 * Called from pci when removing module.
 1025 */
 1026 + static void __devexit lmc_remove_one(struct pci_dev *pdev)
 1027 {
 1028 + struct net_device *dev = pci_get_drvdata(pdev);
 1029 +
 1030 + if (dev) {
 1031 + printk(KERN_DEBUG "%s: removing...\n", dev->name);
 1032 + unregister_hdlc_device(dev);
 1033 + free_netdev(dev);
 1034 + pci_release_regions(pdev);
 1035 + pci_disable_device(pdev);
 1036 + pci_set_drvdata(pdev, NULL);
 1037 + }
 1038 }
 1039
 1040 /* After this is called, packets can be sent.
 1041 * Does not initialize the addresses
 1042 */
 1043 + static int lmc_open(struct net_device *dev)
 1044 {
 1045 + lmc_softc_t *sc = dev_to_sc(dev);
 1046 + int err;
 1047
 1048 lmc_trace(dev, "lmc_open in");
 1049
 1050 lmc_led_on(sc, LMC_DS3_LED0);
 1051
 1052 + lmc_dec_reset(sc);
 1053 + lmc_reset(sc);
 1054
 1055 + LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);
 1056 + LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg(sc, 0, 16),
 1057 + lmc_mii_readreg(sc, 0, 17));
 1058
 1059 if (sc->lmc_ok){
 1060 lmc_trace(dev, "lmc_open lmc_ok out");
··· 1106
 1107 /* dev->flags |= IFF_UP; */
 1108
 1109 + if ((err = lmc_proto_open(sc)) != 0)
 1110 + return err;
 1111
 1112 dev->do_ioctl = lmc_ioctl;
 1113
 1114
 1115 netif_start_queue(dev);
 1116 + sc->extra_stats.tx_tbusy0++;
 1117
 1118 /*
 1119 * select what interrupts we want to get
··· 1165
 1166 static void lmc_running_reset (struct net_device *dev) /*fold00*/
 1167 {
 1168 + lmc_softc_t *sc = dev_to_sc(dev);
 1169
 1170 lmc_trace(dev, "lmc_running_reset in");
 1171
··· 1184 netif_wake_queue(dev);
 1185
 1186 sc->lmc_txfull = 0;
 1187 + sc->extra_stats.tx_tbusy0++;
 1188
 1189 sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK;
 1190 LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);
··· 1200 * This disables the timer for the watchdog and keepalives,
 1201 * and disables the irq for dev.
1202 */ 1203 + static int lmc_close(struct net_device *dev) 1204 { 1205 /* not calling release_region() as we should */ 1206 + lmc_softc_t *sc = dev_to_sc(dev); 1207 1208 lmc_trace(dev, "lmc_close in"); 1209 + 1210 sc->lmc_ok = 0; 1211 sc->lmc_media->set_link_status (sc, 0); 1212 del_timer (&sc->timer); ··· 1215 lmc_ifdown (dev); 1216 1217 lmc_trace(dev, "lmc_close out"); 1218 + 1219 return 0; 1220 } 1221 ··· 1223 /* When the interface goes down, this is called */ 1224 static int lmc_ifdown (struct net_device *dev) /*fold00*/ 1225 { 1226 + lmc_softc_t *sc = dev_to_sc(dev); 1227 u32 csr6; 1228 int i; 1229 1230 lmc_trace(dev, "lmc_ifdown in"); 1231 + 1232 /* Don't let anything else go on right now */ 1233 // dev->start = 0; 1234 netif_stop_queue(dev); 1235 + sc->extra_stats.tx_tbusy1++; 1236 1237 /* stop interrupts */ 1238 /* Clear the interrupt mask */ ··· 1244 csr6 &= ~LMC_DEC_SR; /* Turn off the Receive bit */ 1245 LMC_CSR_WRITE (sc, csr_command, csr6); 1246 1247 + sc->lmc_device->stats.rx_missed_errors += 1248 + LMC_CSR_READ(sc, csr_missed_frames) & 0xffff; 1249 1250 /* release the interrupt */ 1251 if(sc->got_irq == 1){ ··· 1276 lmc_led_off (sc, LMC_MII16_LED_ALL); 1277 1278 netif_wake_queue(dev); 1279 + sc->extra_stats.tx_tbusy0++; 1280 1281 lmc_trace(dev, "lmc_ifdown out"); 1282 ··· 1289 static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/ 1290 { 1291 struct net_device *dev = (struct net_device *) dev_instance; 1292 + lmc_softc_t *sc = dev_to_sc(dev); 1293 u32 csr; 1294 int i; 1295 s32 stat; ··· 1300 1301 lmc_trace(dev, "lmc_interrupt in"); 1302 1303 spin_lock(&sc->lmc_lock); 1304 1305 /* ··· 1354 1355 int n_compl = 0 ; 1356 /* reset the transmit timeout detection flag -baz */ 1357 + sc->extra_stats.tx_NoCompleteCnt = 0; 1358 1359 badtx = sc->lmc_taint_tx; 1360 i = badtx % LMC_TXDESCS; ··· 1378 if (sc->lmc_txq[i] == NULL) 1379 continue; 1380 1381 + /* 1382 + * Check the total error summary to look for any errors 1383 + */ 1384 + if (stat & 0x8000) { 1385 + sc->lmc_device->stats.tx_errors++; 1386 + if (stat & 0x4104) 1387 + sc->lmc_device->stats.tx_aborted_errors++; 1388 + if (stat & 0x0C00) 1389 + sc->lmc_device->stats.tx_carrier_errors++; 1390 + if (stat & 0x0200) 1391 + sc->lmc_device->stats.tx_window_errors++; 1392 + if (stat & 0x0002) 1393 + sc->lmc_device->stats.tx_fifo_errors++; 1394 + } else { 1395 + sc->lmc_device->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff; 1396 + 1397 + sc->lmc_device->stats.tx_packets++; 1398 } 1399 + 1400 // dev_kfree_skb(sc->lmc_txq[i]); 1401 dev_kfree_skb_irq(sc->lmc_txq[i]); 1402 sc->lmc_txq[i] = NULL; ··· 1415 LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0); 1416 sc->lmc_txfull = 0; 1417 netif_wake_queue(dev); 1418 + sc->extra_stats.tx_tbusy0++; 1419 1420 1421 #ifdef DEBUG 1422 + sc->extra_stats.dirtyTx = badtx; 1423 + sc->extra_stats.lmc_next_tx = sc->lmc_next_tx; 1424 + sc->extra_stats.lmc_txfull = sc->lmc_txfull; 1425 #endif 1426 sc->lmc_taint_tx = badtx; 1427 ··· 1476 return IRQ_RETVAL(handled); 1477 } 1478 1479 + static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev) 1480 { 1481 + lmc_softc_t *sc = dev_to_sc(dev); 1482 u32 flag; 1483 int entry; 1484 int ret = 0; 1485 unsigned long flags; 1486 1487 lmc_trace(dev, "lmc_start_xmit in"); 1488 1489 spin_lock_irqsave(&sc->lmc_lock, flags); 1490 ··· 1532 if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1) 1533 { /* ring full, go busy */ 1534 sc->lmc_txfull = 1; 1535 + netif_stop_queue(dev); 1536 + sc->extra_stats.tx_tbusy1++; 1537 
LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0); 1538 } 1539 #endif ··· 1550 * the watchdog timer handler. -baz 1551 */ 1552 1553 + sc->extra_stats.tx_NoCompleteCnt++; 1554 sc->lmc_next_tx++; 1555 1556 /* give ownership to the chip */ ··· 1569 } 1570 1571 1572 + static int lmc_rx(struct net_device *dev) 1573 { 1574 + lmc_softc_t *sc = dev_to_sc(dev); 1575 int i; 1576 int rx_work_limit = LMC_RXDESCS; 1577 unsigned int next_rx; ··· 1582 u16 len; 1583 1584 lmc_trace(dev, "lmc_rx in"); 1585 1586 lmc_led_on(sc, LMC_DS3_LED3); 1587 ··· 1597 rxIntLoopCnt++; /* debug -baz */ 1598 len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER); 1599 if ((stat & 0x0300) != 0x0300) { /* Check first segment and last segment */ 1600 + if ((stat & 0x0000ffff) != 0x7fff) { 1601 + /* Oversized frame */ 1602 + sc->lmc_device->stats.rx_length_errors++; 1603 + goto skip_packet; 1604 + } 1605 + } 1606 1607 + if (stat & 0x00000008) { /* Catch a dribbling bit error */ 1608 + sc->lmc_device->stats.rx_errors++; 1609 + sc->lmc_device->stats.rx_frame_errors++; 1610 + goto skip_packet; 1611 + } 1612 1613 1614 + if (stat & 0x00000004) { /* Catch a CRC error by the Xilinx */ 1615 + sc->lmc_device->stats.rx_errors++; 1616 + sc->lmc_device->stats.rx_crc_errors++; 1617 + goto skip_packet; 1618 + } 1619 1620 + if (len > LMC_PKT_BUF_SZ) { 1621 + sc->lmc_device->stats.rx_length_errors++; 1622 + localLengthErrCnt++; 1623 + goto skip_packet; 1624 + } 1625 + 1626 + if (len < sc->lmc_crcSize + 2) { 1627 + sc->lmc_device->stats.rx_length_errors++; 1628 + sc->extra_stats.rx_SmallPktCnt++; 1629 + localLengthErrCnt++; 1630 + goto skip_packet; 1631 + } 1632 1633 if(stat & 0x00004000){ 1634 printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name); ··· 1656 } 1657 1658 dev->last_rx = jiffies; 1659 + sc->lmc_device->stats.rx_packets++; 1660 + sc->lmc_device->stats.rx_bytes += len; 1661 1662 LMC_CONSOLE_LOG("recv", skb->data, len); 1663 ··· 1679 1680 skb_put (skb, len); 1681 skb->protocol = lmc_proto_type(sc, skb); 1682 skb_reset_mac_header(skb); 1683 /* skb_reset_network_header(skb); */ 1684 skb->dev = dev; ··· 1704 * in which care we'll try to allocate the buffer 1705 * again. 
(once a second) 1706 */ 1707 + sc->extra_stats.rx_BuffAllocErr++; 1708 LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len); 1709 sc->failed_recv_alloc = 1; 1710 goto skip_out_of_mem; ··· 1739 * descriptors with bogus packets 1740 * 1741 if (localLengthErrCnt > LMC_RXDESCS - 3) { 1742 + sc->extra_stats.rx_BadPktSurgeCnt++; 1743 + LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, localLengthErrCnt, 1744 + sc->extra_stats.rx_BadPktSurgeCnt); 1745 } */ 1746 1747 /* save max count of receive descriptors serviced */ 1748 + if (rxIntLoopCnt > sc->extra_stats.rxIntLoopCnt) 1749 + sc->extra_stats.rxIntLoopCnt = rxIntLoopCnt; /* debug -baz */ 1750 1751 #ifdef DEBUG 1752 if (rxIntLoopCnt == 0) ··· 1775 return 0; 1776 } 1777 1778 + static struct net_device_stats *lmc_get_stats(struct net_device *dev) 1779 { 1780 + lmc_softc_t *sc = dev_to_sc(dev); 1781 unsigned long flags; 1782 1783 lmc_trace(dev, "lmc_get_stats in"); 1784 1785 spin_lock_irqsave(&sc->lmc_lock, flags); 1786 1787 + sc->lmc_device->stats.rx_missed_errors += LMC_CSR_READ(sc, csr_missed_frames) & 0xffff; 1788 1789 spin_unlock_irqrestore(&sc->lmc_lock, flags); 1790 1791 lmc_trace(dev, "lmc_get_stats out"); 1792 1793 + return &sc->lmc_device->stats; 1794 } 1795 1796 static struct pci_driver lmc_driver = { ··· 1970 { 1971 if (sc->lmc_txq[i] != NULL){ /* have buffer */ 1972 dev_kfree_skb(sc->lmc_txq[i]); /* free it */ 1973 + sc->lmc_device->stats.tx_dropped++; /* We just dropped a packet */ 1974 } 1975 sc->lmc_txq[i] = NULL; 1976 sc->lmc_txring[i].status = 0x00000000; ··· 1982 lmc_trace(sc->lmc_device, "lmc_softreset out"); 1983 } 1984 1985 + void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits) /*fold00*/ 1986 { 1987 lmc_trace(sc->lmc_device, "lmc_gpio_mkinput in"); 1988 sc->lmc_gpio_io &= ~bits; ··· 1990 lmc_trace(sc->lmc_device, "lmc_gpio_mkinput out"); 1991 } 1992 1993 + void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits) /*fold00*/ 1994 { 1995 lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput in"); 1996 sc->lmc_gpio_io |= bits; ··· 1998 lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput out"); 1999 } 2000 2001 + void lmc_led_on(lmc_softc_t * const sc, u32 led) /*fold00*/ 2002 { 2003 lmc_trace(sc->lmc_device, "lmc_led_on in"); 2004 if((~sc->lmc_miireg16) & led){ /* Already on! 
*/ ··· 2011 lmc_trace(sc->lmc_device, "lmc_led_on out"); 2012 } 2013 2014 + void lmc_led_off(lmc_softc_t * const sc, u32 led) /*fold00*/ 2015 { 2016 lmc_trace(sc->lmc_device, "lmc_led_off in"); 2017 if(sc->lmc_miireg16 & led){ /* Already set don't do anything */ ··· 2061 */ 2062 sc->lmc_media->init(sc); 2063 2064 + sc->extra_stats.resetCount++; 2065 lmc_trace(sc->lmc_device, "lmc_reset out"); 2066 } 2067 2068 static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/ 2069 { 2070 + u32 val; 2071 lmc_trace(sc->lmc_device, "lmc_dec_reset in"); 2072 2073 /* ··· 2151 lmc_trace(sc->lmc_device, "lmc_initcsrs out"); 2152 } 2153 2154 + static void lmc_driver_timeout(struct net_device *dev) 2155 + { 2156 + lmc_softc_t *sc = dev_to_sc(dev); 2157 u32 csr6; 2158 unsigned long flags; 2159 2160 lmc_trace(dev, "lmc_driver_timeout in"); 2161 2162 spin_lock_irqsave(&sc->lmc_lock, flags); 2163 2164 printk("%s: Xmitter busy|\n", dev->name); 2165 2166 + sc->extra_stats.tx_tbusy_calls++; 2167 + if (jiffies - dev->trans_start < TX_TIMEOUT) 2168 + goto bug_out; 2169 2170 /* 2171 * Chip seems to have locked up ··· 2178 2179 LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO, 2180 LMC_CSR_READ (sc, csr_status), 2181 + sc->extra_stats.tx_ProcTimeout); 2182 2183 lmc_running_reset (dev); 2184 ··· 2195 /* immediate transmit */ 2196 LMC_CSR_WRITE (sc, csr_txpoll, 0); 2197 2198 + sc->lmc_device->stats.tx_errors++; 2199 + sc->extra_stats.tx_ProcTimeout++; /* -baz */ 2200 2201 dev->trans_start = jiffies; 2202
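
The heart of the lmc_main.c conversion is the probe path: instead of pointing dev->priv at the softc and attaching syncppp by hand, the driver hands its private structure to the generic HDLC layer and wires the callbacks before registering. A minimal sketch of that registration sequence, reduced from the new lmc_init_one() above; the foo_* names are hypothetical placeholders, everything else is the generic HDLC API the patch itself uses:

	#include <linux/hdlc.h>
	#include <linux/if_arp.h>
	#include <linux/slab.h>

	struct foo_softc {
		struct net_device *dev;	/* back-pointer, like sc->lmc_device */
	};

	static int foo_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		/* hardware DMA setup elided in this sketch */
		dev_kfree_skb(skb);
		return 0;
	}

	static int foo_attach(struct net_device *dev, unsigned short encoding,
			      unsigned short parity)
	{
		/* accept only what the hardware can do, as lmc_attach() does */
		if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
			return 0;
		return -EINVAL;
	}

	static int foo_probe(void)
	{
		struct foo_softc *sc = kzalloc(sizeof(*sc), GFP_KERNEL);
		struct net_device *dev;

		if (!sc)
			return -ENOMEM;
		dev = alloc_hdlcdev(sc);	/* sc lands in dev_to_hdlc(dev)->priv */
		if (!dev) {
			kfree(sc);
			return -ENOMEM;
		}
		dev->type = ARPHRD_HDLC;
		dev_to_hdlc(dev)->xmit = foo_xmit;
		dev_to_hdlc(dev)->attach = foo_attach;
		sc->dev = dev;
		return register_hdlc_device(dev);  /* registers the netdev too */
	}

register_hdlc_device() registers the underlying net_device as well, which is why lmc_init_one() has no separate register_netdev() call and why teardown pairs unregister_hdlc_device() with free_netdev().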
+27 -39
drivers/net/wan/lmc/lmc_media.c
··· 16 #include <linux/inet.h> 17 #include <linux/bitops.h> 18 19 - #include <net/syncppp.h> 20 - 21 #include <asm/processor.h> /* Processor type for cache alignment. */ 22 #include <asm/io.h> 23 #include <asm/dma.h> ··· 93 static void lmc_dummy_set2_1 (lmc_softc_t * const, lmc_ctl_t *); 94 95 static inline void write_av9110_bit (lmc_softc_t *, int); 96 - static void write_av9110 (lmc_softc_t *, u_int32_t, u_int32_t, u_int32_t, 97 - u_int32_t, u_int32_t); 98 99 lmc_media_t lmc_ds3_media = { 100 lmc_ds3_init, /* special media init stuff */ ··· 424 static int 425 lmc_ds3_get_link_status (lmc_softc_t * const sc) 426 { 427 - u_int16_t link_status, link_status_11; 428 int ret = 1; 429 430 lmc_mii_writereg (sc, 0, 17, 7); ··· 446 (link_status & LMC_FRAMER_REG0_OOFS)){ 447 ret = 0; 448 if(sc->last_led_err[3] != 1){ 449 - u16 r1; 450 lmc_mii_writereg (sc, 0, 17, 01); /* Turn on Xbit error as our cisco does */ 451 r1 = lmc_mii_readreg (sc, 0, 18); 452 r1 &= 0xfe; ··· 459 else { 460 lmc_led_off(sc, LMC_DS3_LED3); /* turn on red LED */ 461 if(sc->last_led_err[3] == 1){ 462 - u16 r1; 463 lmc_mii_writereg (sc, 0, 17, 01); /* Turn off Xbit error */ 464 r1 = lmc_mii_readreg (sc, 0, 18); 465 r1 |= 0x01; ··· 537 * SSI methods 538 */ 539 540 - static void 541 - lmc_ssi_init (lmc_softc_t * const sc) 542 { 543 - u_int16_t mii17; 544 - int cable; 545 546 - sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1000; 547 548 - mii17 = lmc_mii_readreg (sc, 0, 17); 549 550 - cable = (mii17 & LMC_MII17_SSI_CABLE_MASK) >> LMC_MII17_SSI_CABLE_SHIFT; 551 - sc->ictl.cable_type = cable; 552 553 - lmc_gpio_mkoutput (sc, LMC_GEP_SSI_TXCLOCK); 554 } 555 556 static void ··· 677 static int 678 lmc_ssi_get_link_status (lmc_softc_t * const sc) 679 { 680 - u_int16_t link_status; 681 - u_int32_t ticks; 682 int ret = 1; 683 int hw_hdsk = 1; 684 - 685 /* 686 * missing CTS? Hmm. If we require CTS on, we may never get the 687 * link to come up, so omit it in this test. ··· 716 } 717 else if (ticks == 0 ) { /* no clock found ? */ 718 ret = 0; 719 - if(sc->last_led_err[3] != 1){ 720 - sc->stats.tx_lossOfClockCnt++; 721 - printk(KERN_WARNING "%s: Lost Clock, Link Down\n", sc->name); 722 } 723 sc->last_led_err[3] = 1; 724 lmc_led_on (sc, LMC_MII16_LED3); /* turn ON red LED */ ··· 834 LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); 835 } 836 837 - static void 838 - write_av9110 (lmc_softc_t * sc, u_int32_t n, u_int32_t m, u_int32_t v, 839 - u_int32_t x, u_int32_t r) 840 { 841 int i; 842 ··· 881 | LMC_GEP_SSI_GENERATOR)); 882 } 883 884 - static void 885 - lmc_ssi_watchdog (lmc_softc_t * const sc) 886 { 887 - u_int16_t mii17 = lmc_mii_readreg (sc, 0, 17); 888 - if (((mii17 >> 3) & 7) == 7) 889 - { 890 - lmc_led_off (sc, LMC_MII16_LED2); 891 - } 892 - else 893 - { 894 - lmc_led_on (sc, LMC_MII16_LED2); 895 - } 896 - 897 } 898 899 /* ··· 917 static void 918 lmc_t1_init (lmc_softc_t * const sc) 919 { 920 - u_int16_t mii16; 921 int i; 922 923 sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1200; ··· 1016 */ static int 1017 lmc_t1_get_link_status (lmc_softc_t * const sc) 1018 { 1019 - u_int16_t link_status; 1020 int ret = 1; 1021 1022 /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions
··· 16 #include <linux/inet.h> 17 #include <linux/bitops.h> 18 19 #include <asm/processor.h> /* Processor type for cache alignment. */ 20 #include <asm/io.h> 21 #include <asm/dma.h> ··· 95 static void lmc_dummy_set2_1 (lmc_softc_t * const, lmc_ctl_t *); 96 97 static inline void write_av9110_bit (lmc_softc_t *, int); 98 + static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32); 99 100 lmc_media_t lmc_ds3_media = { 101 lmc_ds3_init, /* special media init stuff */ ··· 427 static int 428 lmc_ds3_get_link_status (lmc_softc_t * const sc) 429 { 430 + u16 link_status, link_status_11; 431 int ret = 1; 432 433 lmc_mii_writereg (sc, 0, 17, 7); ··· 449 (link_status & LMC_FRAMER_REG0_OOFS)){ 450 ret = 0; 451 if(sc->last_led_err[3] != 1){ 452 + u16 r1; 453 lmc_mii_writereg (sc, 0, 17, 01); /* Turn on Xbit error as our cisco does */ 454 r1 = lmc_mii_readreg (sc, 0, 18); 455 r1 &= 0xfe; ··· 462 else { 463 lmc_led_off(sc, LMC_DS3_LED3); /* turn on red LED */ 464 if(sc->last_led_err[3] == 1){ 465 + u16 r1; 466 lmc_mii_writereg (sc, 0, 17, 01); /* Turn off Xbit error */ 467 r1 = lmc_mii_readreg (sc, 0, 18); 468 r1 |= 0x01; ··· 540 * SSI methods 541 */ 542 543 + static void lmc_ssi_init(lmc_softc_t * const sc) 544 { 545 + u16 mii17; 546 + int cable; 547 548 + sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1000; 549 550 + mii17 = lmc_mii_readreg(sc, 0, 17); 551 552 + cable = (mii17 & LMC_MII17_SSI_CABLE_MASK) >> LMC_MII17_SSI_CABLE_SHIFT; 553 + sc->ictl.cable_type = cable; 554 555 + lmc_gpio_mkoutput(sc, LMC_GEP_SSI_TXCLOCK); 556 } 557 558 static void ··· 681 static int 682 lmc_ssi_get_link_status (lmc_softc_t * const sc) 683 { 684 + u16 link_status; 685 + u32 ticks; 686 int ret = 1; 687 int hw_hdsk = 1; 688 + 689 /* 690 * missing CTS? Hmm. If we require CTS on, we may never get the 691 * link to come up, so omit it in this test. ··· 720 } 721 else if (ticks == 0 ) { /* no clock found ? */ 722 ret = 0; 723 + if (sc->last_led_err[3] != 1) { 724 + sc->extra_stats.tx_lossOfClockCnt++; 725 + printk(KERN_WARNING "%s: Lost Clock, Link Down\n", sc->name); 726 } 727 sc->last_led_err[3] = 1; 728 lmc_led_on (sc, LMC_MII16_LED3); /* turn ON red LED */ ··· 838 LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); 839 } 840 841 + static void write_av9110(lmc_softc_t *sc, u32 n, u32 m, u32 v, u32 x, u32 r) 842 { 843 int i; 844 ··· 887 | LMC_GEP_SSI_GENERATOR)); 888 } 889 890 + static void lmc_ssi_watchdog(lmc_softc_t * const sc) 891 { 892 + u16 mii17 = lmc_mii_readreg(sc, 0, 17); 893 + if (((mii17 >> 3) & 7) == 7) 894 + lmc_led_off(sc, LMC_MII16_LED2); 895 + else 896 + lmc_led_on(sc, LMC_MII16_LED2); 897 } 898 899 /* ··· 929 static void 930 lmc_t1_init (lmc_softc_t * const sc) 931 { 932 + u16 mii16; 933 int i; 934 935 sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1200; ··· 1028 */ static int 1029 lmc_t1_get_link_status (lmc_softc_t * const sc) 1030 { 1031 + u16 link_status; 1032 int ret = 1; 1033 1034 /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions
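
Most of the lmc_media.c churn is mechanical (u_int16_t/u_int32_t become u16/u32, sc->stats becomes sc->extra_stats), but the surviving code also shows the framer access idiom used throughout this file: the target framer register number is written to MII register 17, and the data then moves through MII register 18. A sketch of that idiom as a read-modify-write helper; frm_update_bits() is a hypothetical name, and the write-back through register 18 is an assumption extrapolated from the reads shown above:

	/* hypothetical helper, not part of the driver */
	static void frm_update_bits(lmc_softc_t * const sc, int reg,
				    u16 clear, u16 set)
	{
		u16 val;

		lmc_mii_writereg(sc, 0, 17, reg);  /* select the framer register */
		val = lmc_mii_readreg(sc, 0, 18);  /* data port: current value */
		val = (val & ~clear) | set;
		lmc_mii_writereg(sc, 0, 18, val);  /* data port: write back (assumed) */
	}

This is the same two-step pattern lmc_ds3_get_link_status() uses to toggle the Xbit error reporting bit, and the one LMCIOCGETLMCSTATS uses to harvest the T1 framer counters.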
+27 -123
drivers/net/wan/lmc/lmc_proto.c
··· 36 #include <linux/workqueue.h> 37 #include <linux/proc_fs.h> 38 #include <linux/bitops.h> 39 - 40 - #include <net/syncppp.h> 41 - 42 #include <asm/processor.h> /* Processor type for cache alignment. */ 43 #include <asm/io.h> 44 #include <asm/dma.h> ··· 47 #include "lmc_ioctl.h" 48 #include "lmc_proto.h" 49 50 - /* 51 - * The compile-time variable SPPPSTUP causes the module to be 52 - * compiled without referencing any of the sync ppp routines. 53 - */ 54 - #ifdef SPPPSTUB 55 - #define SPPP_detach(d) (void)0 56 - #define SPPP_open(d) 0 57 - #define SPPP_reopen(d) (void)0 58 - #define SPPP_close(d) (void)0 59 - #define SPPP_attach(d) (void)0 60 - #define SPPP_do_ioctl(d,i,c) -EOPNOTSUPP 61 - #else 62 - #define SPPP_attach(x) sppp_attach((x)->pd) 63 - #define SPPP_detach(x) sppp_detach((x)->pd->dev) 64 - #define SPPP_open(x) sppp_open((x)->pd->dev) 65 - #define SPPP_reopen(x) sppp_reopen((x)->pd->dev) 66 - #define SPPP_close(x) sppp_close((x)->pd->dev) 67 - #define SPPP_do_ioctl(x, y, z) sppp_do_ioctl((x)->pd->dev, (y), (z)) 68 - #endif 69 - 70 - // init 71 - void lmc_proto_init(lmc_softc_t *sc) /*FOLD00*/ 72 - { 73 - lmc_trace(sc->lmc_device, "lmc_proto_init in"); 74 - switch(sc->if_type){ 75 - case LMC_PPP: 76 - sc->pd = kmalloc(sizeof(struct ppp_device), GFP_KERNEL); 77 - if (!sc->pd) { 78 - printk("lmc_proto_init(): kmalloc failure!\n"); 79 - return; 80 - } 81 - sc->pd->dev = sc->lmc_device; 82 - sc->if_ptr = sc->pd; 83 - break; 84 - case LMC_RAW: 85 - break; 86 - default: 87 - break; 88 - } 89 - lmc_trace(sc->lmc_device, "lmc_proto_init out"); 90 - } 91 - 92 // attach 93 void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/ 94 { ··· 55 case LMC_PPP: 56 { 57 struct net_device *dev = sc->lmc_device; 58 - SPPP_attach(sc); 59 dev->do_ioctl = lmc_ioctl; 60 } 61 break; ··· 62 { 63 struct net_device *dev = sc->lmc_device; 64 /* 65 - * They set a few basics because they don't use sync_ppp 66 */ 67 dev->flags |= IFF_POINTOPOINT; 68 ··· 78 lmc_trace(sc->lmc_device, "lmc_proto_attach out"); 79 } 80 81 - // detach 82 - void lmc_proto_detach(lmc_softc_t *sc) /*FOLD00*/ 83 { 84 - switch(sc->if_type){ 85 - case LMC_PPP: 86 - SPPP_detach(sc); 87 - break; 88 - case LMC_RAW: /* Tell someone we're detaching? */ 89 - break; 90 - default: 91 - break; 92 - } 93 - 94 } 95 96 - // reopen 97 - void lmc_proto_reopen(lmc_softc_t *sc) /*FOLD00*/ 98 { 99 - lmc_trace(sc->lmc_device, "lmc_proto_reopen in"); 100 - switch(sc->if_type){ 101 - case LMC_PPP: 102 - SPPP_reopen(sc); 103 - break; 104 - case LMC_RAW: /* Reset the interface after being down, prerape to receive packets again */ 105 - break; 106 - default: 107 - break; 108 - } 109 - lmc_trace(sc->lmc_device, "lmc_proto_reopen out"); 110 } 111 112 - 113 - // ioctl 114 - int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd) /*FOLD00*/ 115 { 116 - lmc_trace(sc->lmc_device, "lmc_proto_ioctl out"); 117 - switch(sc->if_type){ 118 - case LMC_PPP: 119 - return SPPP_do_ioctl (sc, ifr, cmd); 120 - break; 121 - default: 122 - return -EOPNOTSUPP; 123 - break; 124 - } 125 - lmc_trace(sc->lmc_device, "lmc_proto_ioctl out"); 126 - } 127 128 - // open 129 - void lmc_proto_open(lmc_softc_t *sc) /*FOLD00*/ 130 - { 131 - int ret; 132 133 - lmc_trace(sc->lmc_device, "lmc_proto_open in"); 134 - switch(sc->if_type){ 135 - case LMC_PPP: 136 - ret = SPPP_open(sc); 137 - if(ret < 0) 138 - printk("%s: syncPPP open failed: %d\n", sc->name, ret); 139 - break; 140 - case LMC_RAW: /* We're about to start getting packets! 
*/ 141 - break; 142 - default: 143 - break; 144 - } 145 - lmc_trace(sc->lmc_device, "lmc_proto_open out"); 146 - } 147 - 148 - // close 149 - 150 - void lmc_proto_close(lmc_softc_t *sc) /*FOLD00*/ 151 - { 152 - lmc_trace(sc->lmc_device, "lmc_proto_close in"); 153 - switch(sc->if_type){ 154 - case LMC_PPP: 155 - SPPP_close(sc); 156 - break; 157 - case LMC_RAW: /* Interface going down */ 158 - break; 159 - default: 160 - break; 161 - } 162 - lmc_trace(sc->lmc_device, "lmc_proto_close out"); 163 } 164 165 __be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/ ··· 118 lmc_trace(sc->lmc_device, "lmc_proto_type in"); 119 switch(sc->if_type){ 120 case LMC_PPP: 121 - return htons(ETH_P_WAN_PPP); 122 - break; 123 case LMC_NET: 124 return htons(ETH_P_802_2); 125 break; ··· 150 } 151 lmc_trace(sc->lmc_device, "lmc_proto_netif out"); 152 } 153 -
··· 36 #include <linux/workqueue.h> 37 #include <linux/proc_fs.h> 38 #include <linux/bitops.h> 39 #include <asm/processor.h> /* Processor type for cache alignment. */ 40 #include <asm/io.h> 41 #include <asm/dma.h> ··· 50 #include "lmc_ioctl.h" 51 #include "lmc_proto.h" 52 53 // attach 54 void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/ 55 { ··· 100 case LMC_PPP: 101 { 102 struct net_device *dev = sc->lmc_device; 103 dev->do_ioctl = lmc_ioctl; 104 } 105 break; ··· 108 { 109 struct net_device *dev = sc->lmc_device; 110 /* 111 + * They set a few basics because they don't use HDLC 112 */ 113 dev->flags |= IFF_POINTOPOINT; 114 ··· 124 lmc_trace(sc->lmc_device, "lmc_proto_attach out"); 125 } 126 127 + int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd) 128 { 129 + lmc_trace(sc->lmc_device, "lmc_proto_ioctl"); 130 + if (sc->if_type == LMC_PPP) 131 + return hdlc_ioctl(sc->lmc_device, ifr, cmd); 132 + return -EOPNOTSUPP; 133 } 134 135 + int lmc_proto_open(lmc_softc_t *sc) 136 { 137 + int ret = 0; 138 + 139 + lmc_trace(sc->lmc_device, "lmc_proto_open in"); 140 + 141 + if (sc->if_type == LMC_PPP) { 142 + ret = hdlc_open(sc->lmc_device); 143 + if (ret < 0) 144 + printk(KERN_WARNING "%s: HDLC open failed: %d\n", 145 + sc->name, ret); 146 + } 147 + 148 + lmc_trace(sc->lmc_device, "lmc_proto_open out"); 149 + return ret; 150 } 151 152 + void lmc_proto_close(lmc_softc_t *sc) 153 { 154 + lmc_trace(sc->lmc_device, "lmc_proto_close in"); 155 156 + if (sc->if_type == LMC_PPP) 157 + hdlc_close(sc->lmc_device); 158 159 + lmc_trace(sc->lmc_device, "lmc_proto_close out"); 160 } 161 162 __be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/ ··· 213 lmc_trace(sc->lmc_device, "lmc_proto_type in"); 214 switch(sc->if_type){ 215 case LMC_PPP: 216 + return hdlc_type_trans(skb, sc->lmc_device); 217 + break; 218 case LMC_NET: 219 return htons(ETH_P_802_2); 220 break; ··· 245 } 246 lmc_trace(sc->lmc_device, "lmc_proto_netif out"); 247 }
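
With this change the per-protocol switch statements in lmc_proto.c collapse into direct calls into the generic HDLC layer: hdlc_open()/hdlc_close() replace the syncppp open/close pair, hdlc_ioctl() handles protocol configuration, and hdlc_type_trans() stamps received frames. A sketch of what the receive hand-off amounts to once lmc_proto_type() resolves to hdlc_type_trans(); foo_deliver() is illustrative only, and the real driver routes this through lmc_proto_netif():

	static void foo_deliver(struct net_device *dev, struct sk_buff *skb,
				u16 len)
	{
		skb_put(skb, len);                         /* payload already DMA'd in */
		skb->protocol = hdlc_type_trans(skb, dev); /* also sets skb->dev */
		skb_reset_mac_header(skb);
		netif_rx(skb);                             /* queue for the stack */
	}

The attached HDLC protocol (raw, Cisco, PPP, frame relay) decides the actual protocol value, which is why the hard-coded htons(ETH_P_WAN_PPP) in lmc_rx() could be dropped.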
+8 -6
drivers/net/wan/lmc/lmc_proto.h
··· 1 #ifndef _LMC_PROTO_H_ 2 #define _LMC_PROTO_H_ 3 4 - void lmc_proto_init(lmc_softc_t *sc); 5 void lmc_proto_attach(lmc_softc_t *sc); 6 - void lmc_proto_detach(lmc_softc_t *sc); 7 - void lmc_proto_reopen(lmc_softc_t *sc); 8 int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd); 9 - void lmc_proto_open(lmc_softc_t *sc); 10 void lmc_proto_close(lmc_softc_t *sc); 11 __be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb); 12 void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb); 13 - int lmc_skb_rawpackets(char *buf, char **start, off_t offset, int len, int unused); 14 15 #endif 16 -
··· 1 #ifndef _LMC_PROTO_H_ 2 #define _LMC_PROTO_H_ 3 4 + #include <linux/hdlc.h> 5 + 6 void lmc_proto_attach(lmc_softc_t *sc); 7 int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd); 8 + int lmc_proto_open(lmc_softc_t *sc); 9 void lmc_proto_close(lmc_softc_t *sc); 10 __be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb); 11 void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb); 12 + 13 + static inline lmc_softc_t* dev_to_sc(struct net_device *dev) 14 + { 15 + return (lmc_softc_t *)dev_to_hdlc(dev)->priv; 16 + } 17 18 #endif
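
The new dev_to_sc() inline is the pivot the whole conversion turns on: lmc_init_one() passes the softc to alloc_hdlcdev(), which stores that pointer in dev_to_hdlc(dev)->priv, so every callback that receives only the net_device can climb back to the private state. A sketch of the pattern from the callback side, with a hypothetical foo_open():

	static int foo_open(struct net_device *dev)
	{
		lmc_softc_t *sc = dev_to_sc(dev); /* dev_to_hdlc(dev)->priv, set at probe */

		sc->lmc_ok = 0;                   /* private state is reachable */
		return hdlc_open(dev);            /* then chain into generic HDLC */
	}

This removes the old convention that dev->priv had to begin with a void pointer to the syncppp structures; the private layout is now entirely the driver's business.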
+125 -225
drivers/net/wan/lmc/lmc_var.h
··· 1 #ifndef _LMC_VAR_H_ 2 #define _LMC_VAR_H_ 3 4 - /* $Id: lmc_var.h,v 1.17 2000/04/06 12:16:47 asj Exp $ */ 5 - 6 /* 7 * Copyright (c) 1997-2000 LAN Media Corporation (LMC) 8 * All rights reserved. www.lanmedia.com ··· 17 18 #include <linux/timer.h> 19 20 - #ifndef __KERNEL__ 21 - typedef signed char s8; 22 - typedef unsigned char u8; 23 - 24 - typedef signed short s16; 25 - typedef unsigned short u16; 26 - 27 - typedef signed int s32; 28 - typedef unsigned int u32; 29 - 30 - typedef signed long long s64; 31 - typedef unsigned long long u64; 32 - 33 - #define BITS_PER_LONG 32 34 - 35 - #endif 36 - 37 /* 38 * basic definitions used in lmc include files 39 */ ··· 26 typedef struct lmc___ctl lmc_ctl_t; 27 28 #define lmc_csrptr_t unsigned long 29 - #define u_int16_t u16 30 - #define u_int8_t u8 31 - #define tulip_uint32_t u32 32 33 #define LMC_REG_RANGE 0x80 34 ··· 100 * used to define bits in the second tulip_desc_t field (length) 101 * for the transmit descriptor -baz */ 102 103 - #define LMC_TDES_FIRST_BUFFER_SIZE ((u_int32_t)(0x000007FF)) 104 - #define LMC_TDES_SECOND_BUFFER_SIZE ((u_int32_t)(0x003FF800)) 105 - #define LMC_TDES_HASH_FILTERING ((u_int32_t)(0x00400000)) 106 - #define LMC_TDES_DISABLE_PADDING ((u_int32_t)(0x00800000)) 107 - #define LMC_TDES_SECOND_ADDR_CHAINED ((u_int32_t)(0x01000000)) 108 - #define LMC_TDES_END_OF_RING ((u_int32_t)(0x02000000)) 109 - #define LMC_TDES_ADD_CRC_DISABLE ((u_int32_t)(0x04000000)) 110 - #define LMC_TDES_SETUP_PACKET ((u_int32_t)(0x08000000)) 111 - #define LMC_TDES_INVERSE_FILTERING ((u_int32_t)(0x10000000)) 112 - #define LMC_TDES_FIRST_SEGMENT ((u_int32_t)(0x20000000)) 113 - #define LMC_TDES_LAST_SEGMENT ((u_int32_t)(0x40000000)) 114 - #define LMC_TDES_INTERRUPT_ON_COMPLETION ((u_int32_t)(0x80000000)) 115 116 #define TDES_SECOND_BUFFER_SIZE_BIT_NUMBER 11 117 #define TDES_COLLISION_COUNT_BIT_NUMBER 3 118 119 /* Constants for the RCV descriptor RDES */ 120 121 - #define LMC_RDES_OVERFLOW ((u_int32_t)(0x00000001)) 122 - #define LMC_RDES_CRC_ERROR ((u_int32_t)(0x00000002)) 123 - #define LMC_RDES_DRIBBLING_BIT ((u_int32_t)(0x00000004)) 124 - #define LMC_RDES_REPORT_ON_MII_ERR ((u_int32_t)(0x00000008)) 125 - #define LMC_RDES_RCV_WATCHDOG_TIMEOUT ((u_int32_t)(0x00000010)) 126 - #define LMC_RDES_FRAME_TYPE ((u_int32_t)(0x00000020)) 127 - #define LMC_RDES_COLLISION_SEEN ((u_int32_t)(0x00000040)) 128 - #define LMC_RDES_FRAME_TOO_LONG ((u_int32_t)(0x00000080)) 129 - #define LMC_RDES_LAST_DESCRIPTOR ((u_int32_t)(0x00000100)) 130 - #define LMC_RDES_FIRST_DESCRIPTOR ((u_int32_t)(0x00000200)) 131 - #define LMC_RDES_MULTICAST_FRAME ((u_int32_t)(0x00000400)) 132 - #define LMC_RDES_RUNT_FRAME ((u_int32_t)(0x00000800)) 133 - #define LMC_RDES_DATA_TYPE ((u_int32_t)(0x00003000)) 134 - #define LMC_RDES_LENGTH_ERROR ((u_int32_t)(0x00004000)) 135 - #define LMC_RDES_ERROR_SUMMARY ((u_int32_t)(0x00008000)) 136 - #define LMC_RDES_FRAME_LENGTH ((u_int32_t)(0x3FFF0000)) 137 - #define LMC_RDES_OWN_BIT ((u_int32_t)(0x80000000)) 138 139 #define RDES_FRAME_LENGTH_BIT_NUMBER 16 140 141 - #define LMC_RDES_ERROR_MASK ( (u_int32_t)( \ 142 LMC_RDES_OVERFLOW \ 143 | LMC_RDES_DRIBBLING_BIT \ 144 | LMC_RDES_REPORT_ON_MII_ERR \ ··· 150 */ 151 152 typedef struct { 153 - u_int32_t n; 154 - u_int32_t m; 155 - u_int32_t v; 156 - u_int32_t x; 157 - u_int32_t r; 158 - u_int32_t f; 159 - u_int32_t exact; 160 } lmc_av9110_t; 161 162 /* 163 * Common structure passed to the ioctl code. 
164 */ 165 struct lmc___ctl { 166 - u_int32_t cardtype; 167 - u_int32_t clock_source; /* HSSI, T1 */ 168 - u_int32_t clock_rate; /* T1 */ 169 - u_int32_t crc_length; 170 - u_int32_t cable_length; /* DS3 */ 171 - u_int32_t scrambler_onoff; /* DS3 */ 172 - u_int32_t cable_type; /* T1 */ 173 - u_int32_t keepalive_onoff; /* protocol */ 174 - u_int32_t ticks; /* ticks/sec */ 175 union { 176 lmc_av9110_t ssi; 177 } cardspec; 178 - u_int32_t circuit_type; /* T1 or E1 */ 179 }; 180 181 ··· 222 223 #define STATCHECK 0xBEEFCAFE 224 225 - /* Included in this structure are first 226 - * - standard net_device_stats 227 - * - some other counters used for debug and driver performance 228 - * evaluation -baz 229 - */ 230 - struct lmc_statistics 231 { 232 - unsigned long rx_packets; /* total packets received */ 233 - unsigned long tx_packets; /* total packets transmitted */ 234 - unsigned long rx_bytes; 235 - unsigned long tx_bytes; 236 - 237 - unsigned long rx_errors; /* bad packets received */ 238 - unsigned long tx_errors; /* packet transmit problems */ 239 - unsigned long rx_dropped; /* no space in linux buffers */ 240 - unsigned long tx_dropped; /* no space available in linux */ 241 - unsigned long multicast; /* multicast packets received */ 242 - unsigned long collisions; 243 244 - /* detailed rx_errors: */ 245 - unsigned long rx_length_errors; 246 - unsigned long rx_over_errors; /* receiver ring buff overflow */ 247 - unsigned long rx_crc_errors; /* recved pkt with crc error */ 248 - unsigned long rx_frame_errors; /* recv'd frame alignment error */ 249 - unsigned long rx_fifo_errors; /* recv'r fifo overrun */ 250 - unsigned long rx_missed_errors; /* receiver missed packet */ 251 252 - /* detailed tx_errors */ 253 - unsigned long tx_aborted_errors; 254 - unsigned long tx_carrier_errors; 255 - unsigned long tx_fifo_errors; 256 - unsigned long tx_heartbeat_errors; 257 - unsigned long tx_window_errors; 258 259 - /* for cslip etc */ 260 - unsigned long rx_compressed; 261 - unsigned long tx_compressed; 262 263 - /* ------------------------------------- 264 - * Custom stats & counters follow -baz */ 265 - u_int32_t version_size; 266 - u_int32_t lmc_cardtype; 267 268 - u_int32_t tx_ProcTimeout; 269 - u_int32_t tx_IntTimeout; 270 - u_int32_t tx_NoCompleteCnt; 271 - u_int32_t tx_MaxXmtsB4Int; 272 - u_int32_t tx_TimeoutCnt; 273 - u_int32_t tx_OutOfSyncPtr; 274 - u_int32_t tx_tbusy0; 275 - u_int32_t tx_tbusy1; 276 - u_int32_t tx_tbusy_calls; 277 - u_int32_t resetCount; 278 - u_int32_t lmc_txfull; 279 - u_int32_t tbusy; 280 - u_int32_t dirtyTx; 281 - u_int32_t lmc_next_tx; 282 - u_int32_t otherTypeCnt; 283 - u_int32_t lastType; 284 - u_int32_t lastTypeOK; 285 - u_int32_t txLoopCnt; 286 - u_int32_t usedXmtDescripCnt; 287 - u_int32_t txIndexCnt; 288 - u_int32_t rxIntLoopCnt; 289 - 290 - u_int32_t rx_SmallPktCnt; 291 - u_int32_t rx_BadPktSurgeCnt; 292 - u_int32_t rx_BuffAllocErr; 293 - u_int32_t tx_lossOfClockCnt; 294 - 295 - /* T1 error counters */ 296 - u_int32_t framingBitErrorCount; 297 - u_int32_t lineCodeViolationCount; 298 - 299 - u_int32_t lossOfFrameCount; 300 - u_int32_t changeOfFrameAlignmentCount; 301 - u_int32_t severelyErroredFrameCount; 302 - 303 - u_int32_t check; 304 }; 305 306 - 307 typedef struct lmc_xinfo { 308 - u_int32_t Magic0; /* BEEFCAFE */ 309 310 - u_int32_t PciCardType; 311 - u_int32_t PciSlotNumber; /* PCI slot number */ 312 313 - u_int16_t DriverMajorVersion; 314 - u_int16_t DriverMinorVersion; 315 - u_int16_t DriverSubVersion; 316 317 - u_int16_t XilinxRevisionNumber; 318 - u_int16_t 
MaxFrameSize; 319 320 - u_int16_t t1_alarm1_status; 321 - u_int16_t t1_alarm2_status; 322 323 - int link_status; 324 - u_int32_t mii_reg16; 325 326 - u_int32_t Magic1; /* DEADBEEF */ 327 } LMC_XINFO; 328 329 ··· 292 * forward decl 293 */ 294 struct lmc___softc { 295 - void *if_ptr; /* General purpose pointer (used by SPPP) */ 296 char *name; 297 u8 board_idx; 298 - struct lmc_statistics stats; 299 - struct net_device *lmc_device; 300 301 int hang, rxdesc, bad_packet, some_counter; 302 - u_int32_t txgo; 303 struct lmc_regfile_t lmc_csrs; 304 - volatile u_int32_t lmc_txtick; 305 - volatile u_int32_t lmc_rxtick; 306 - u_int32_t lmc_flags; 307 - u_int32_t lmc_intrmask; /* our copy of csr_intr */ 308 - u_int32_t lmc_cmdmode; /* our copy of csr_cmdmode */ 309 - u_int32_t lmc_busmode; /* our copy of csr_busmode */ 310 - u_int32_t lmc_gpio_io; /* state of in/out settings */ 311 - u_int32_t lmc_gpio; /* state of outputs */ 312 struct sk_buff* lmc_txq[LMC_TXDESCS]; 313 struct sk_buff* lmc_rxq[LMC_RXDESCS]; 314 volatile ··· 319 unsigned int lmc_taint_tx, lmc_taint_rx; 320 int lmc_tx_start, lmc_txfull; 321 int lmc_txbusy; 322 - u_int16_t lmc_miireg16; 323 int lmc_ok; 324 int last_link_status; 325 int lmc_cardtype; 326 - u_int32_t last_frameerr; 327 lmc_media_t *lmc_media; 328 struct timer_list timer; 329 lmc_ctl_t ictl; 330 - u_int32_t TxDescriptControlInit; 331 332 int tx_TimeoutInd; /* additional driver state */ 333 int tx_TimeoutDisplay; 334 unsigned int lastlmc_taint_tx; 335 int lasttx_packets; 336 - u_int32_t tx_clockState; 337 - u_int32_t lmc_crcSize; 338 - LMC_XINFO lmc_xinfo; 339 char lmc_yel, lmc_blue, lmc_red; /* for T1 and DS3 */ 340 - char lmc_timing; /* for HSSI and SSI */ 341 - int got_irq; 342 343 - char last_led_err[4]; 344 345 - u32 last_int; 346 - u32 num_int; 347 348 spinlock_t lmc_lock; 349 - u_int16_t if_type; /* PPP or NET */ 350 - struct ppp_device *pd; 351 352 - /* Failure cases */ 353 - u8 failed_ring; 354 - u8 failed_recv_alloc; 355 356 - /* Structure check */ 357 - u32 check; 358 }; 359 360 #define LMC_PCI_TIME 1 ··· 449 | TULIP_STS_TXUNDERFLOW\ 450 | TULIP_STS_RXSTOPPED ) 451 452 - #define DESC_OWNED_BY_SYSTEM ((u_int32_t)(0x00000000)) 453 - #define DESC_OWNED_BY_DC21X4 ((u_int32_t)(0x80000000)) 454 455 #ifndef TULIP_CMD_RECEIVEALL 456 #define TULIP_CMD_RECEIVEALL 0x40000000L ··· 462 #define LMC_ADAP_SSI 4 463 #define LMC_ADAP_T1 5 464 465 - #define HDLC_HDR_LEN 4 466 - #define HDLC_ADDR_LEN 1 467 - #define HDLC_SLARP 0x8035 468 #define LMC_MTU 1500 469 - #define SLARP_LINECHECK 2 470 471 #define LMC_CRC_LEN_16 2 /* 16-bit CRC */ 472 #define LMC_CRC_LEN_32 4 473 - 474 - #ifdef LMC_HDLC 475 - /* definition of an hdlc header. */ 476 - struct hdlc_hdr 477 - { 478 - u8 address; 479 - u8 control; 480 - u16 type; 481 - }; 482 - 483 - /* definition of a slarp header. */ 484 - struct slarp 485 - { 486 - long code; 487 - union sl 488 - { 489 - struct 490 - { 491 - ulong address; 492 - ulong mask; 493 - ushort unused; 494 - } add; 495 - struct 496 - { 497 - ulong mysequence; 498 - ulong yoursequence; 499 - ushort reliability; 500 - ulong time; 501 - } chk; 502 - } t; 503 - }; 504 - #endif /* LMC_HDLC */ 505 - 506 507 #endif /* _LMC_VAR_H_ */
··· 1 #ifndef _LMC_VAR_H_ 2 #define _LMC_VAR_H_ 3 4 /* 5 * Copyright (c) 1997-2000 LAN Media Corporation (LMC) 6 * All rights reserved. www.lanmedia.com ··· 19 20 #include <linux/timer.h> 21 22 /* 23 * basic definitions used in lmc include files 24 */ ··· 45 typedef struct lmc___ctl lmc_ctl_t; 46 47 #define lmc_csrptr_t unsigned long 48 49 #define LMC_REG_RANGE 0x80 50 ··· 122 * used to define bits in the second tulip_desc_t field (length) 123 * for the transmit descriptor -baz */ 124 125 + #define LMC_TDES_FIRST_BUFFER_SIZE ((u32)(0x000007FF)) 126 + #define LMC_TDES_SECOND_BUFFER_SIZE ((u32)(0x003FF800)) 127 + #define LMC_TDES_HASH_FILTERING ((u32)(0x00400000)) 128 + #define LMC_TDES_DISABLE_PADDING ((u32)(0x00800000)) 129 + #define LMC_TDES_SECOND_ADDR_CHAINED ((u32)(0x01000000)) 130 + #define LMC_TDES_END_OF_RING ((u32)(0x02000000)) 131 + #define LMC_TDES_ADD_CRC_DISABLE ((u32)(0x04000000)) 132 + #define LMC_TDES_SETUP_PACKET ((u32)(0x08000000)) 133 + #define LMC_TDES_INVERSE_FILTERING ((u32)(0x10000000)) 134 + #define LMC_TDES_FIRST_SEGMENT ((u32)(0x20000000)) 135 + #define LMC_TDES_LAST_SEGMENT ((u32)(0x40000000)) 136 + #define LMC_TDES_INTERRUPT_ON_COMPLETION ((u32)(0x80000000)) 137 138 #define TDES_SECOND_BUFFER_SIZE_BIT_NUMBER 11 139 #define TDES_COLLISION_COUNT_BIT_NUMBER 3 140 141 /* Constants for the RCV descriptor RDES */ 142 143 + #define LMC_RDES_OVERFLOW ((u32)(0x00000001)) 144 + #define LMC_RDES_CRC_ERROR ((u32)(0x00000002)) 145 + #define LMC_RDES_DRIBBLING_BIT ((u32)(0x00000004)) 146 + #define LMC_RDES_REPORT_ON_MII_ERR ((u32)(0x00000008)) 147 + #define LMC_RDES_RCV_WATCHDOG_TIMEOUT ((u32)(0x00000010)) 148 + #define LMC_RDES_FRAME_TYPE ((u32)(0x00000020)) 149 + #define LMC_RDES_COLLISION_SEEN ((u32)(0x00000040)) 150 + #define LMC_RDES_FRAME_TOO_LONG ((u32)(0x00000080)) 151 + #define LMC_RDES_LAST_DESCRIPTOR ((u32)(0x00000100)) 152 + #define LMC_RDES_FIRST_DESCRIPTOR ((u32)(0x00000200)) 153 + #define LMC_RDES_MULTICAST_FRAME ((u32)(0x00000400)) 154 + #define LMC_RDES_RUNT_FRAME ((u32)(0x00000800)) 155 + #define LMC_RDES_DATA_TYPE ((u32)(0x00003000)) 156 + #define LMC_RDES_LENGTH_ERROR ((u32)(0x00004000)) 157 + #define LMC_RDES_ERROR_SUMMARY ((u32)(0x00008000)) 158 + #define LMC_RDES_FRAME_LENGTH ((u32)(0x3FFF0000)) 159 + #define LMC_RDES_OWN_BIT ((u32)(0x80000000)) 160 161 #define RDES_FRAME_LENGTH_BIT_NUMBER 16 162 163 + #define LMC_RDES_ERROR_MASK ( (u32)( \ 164 LMC_RDES_OVERFLOW \ 165 | LMC_RDES_DRIBBLING_BIT \ 166 | LMC_RDES_REPORT_ON_MII_ERR \ ··· 172 */ 173 174 typedef struct { 175 + u32 n; 176 + u32 m; 177 + u32 v; 178 + u32 x; 179 + u32 r; 180 + u32 f; 181 + u32 exact; 182 } lmc_av9110_t; 183 184 /* 185 * Common structure passed to the ioctl code. 
186 */ 187 struct lmc___ctl { 188 + u32 cardtype; 189 + u32 clock_source; /* HSSI, T1 */ 190 + u32 clock_rate; /* T1 */ 191 + u32 crc_length; 192 + u32 cable_length; /* DS3 */ 193 + u32 scrambler_onoff; /* DS3 */ 194 + u32 cable_type; /* T1 */ 195 + u32 keepalive_onoff; /* protocol */ 196 + u32 ticks; /* ticks/sec */ 197 union { 198 lmc_av9110_t ssi; 199 } cardspec; 200 + u32 circuit_type; /* T1 or E1 */ 201 }; 202 203 ··· 244 245 #define STATCHECK 0xBEEFCAFE 246 247 + struct lmc_extra_statistics 248 { 249 + u32 version_size; 250 + u32 lmc_cardtype; 251 252 + u32 tx_ProcTimeout; 253 + u32 tx_IntTimeout; 254 + u32 tx_NoCompleteCnt; 255 + u32 tx_MaxXmtsB4Int; 256 + u32 tx_TimeoutCnt; 257 + u32 tx_OutOfSyncPtr; 258 + u32 tx_tbusy0; 259 + u32 tx_tbusy1; 260 + u32 tx_tbusy_calls; 261 + u32 resetCount; 262 + u32 lmc_txfull; 263 + u32 tbusy; 264 + u32 dirtyTx; 265 + u32 lmc_next_tx; 266 + u32 otherTypeCnt; 267 + u32 lastType; 268 + u32 lastTypeOK; 269 + u32 txLoopCnt; 270 + u32 usedXmtDescripCnt; 271 + u32 txIndexCnt; 272 + u32 rxIntLoopCnt; 273 274 + u32 rx_SmallPktCnt; 275 + u32 rx_BadPktSurgeCnt; 276 + u32 rx_BuffAllocErr; 277 + u32 tx_lossOfClockCnt; 278 279 + /* T1 error counters */ 280 + u32 framingBitErrorCount; 281 + u32 lineCodeViolationCount; 282 283 + u32 lossOfFrameCount; 284 + u32 changeOfFrameAlignmentCount; 285 + u32 severelyErroredFrameCount; 286 287 + u32 check; 288 }; 289 290 typedef struct lmc_xinfo { 291 + u32 Magic0; /* BEEFCAFE */ 292 293 + u32 PciCardType; 294 + u32 PciSlotNumber; /* PCI slot number */ 295 296 + u16 DriverMajorVersion; 297 + u16 DriverMinorVersion; 298 + u16 DriverSubVersion; 299 300 + u16 XilinxRevisionNumber; 301 + u16 MaxFrameSize; 302 303 + u16 t1_alarm1_status; 304 + u16 t1_alarm2_status; 305 306 + int link_status; 307 + u32 mii_reg16; 308 309 + u32 Magic1; /* DEADBEEF */ 310 } LMC_XINFO; 311 312 ··· 353 * forward decl 354 */ 355 struct lmc___softc { 356 char *name; 357 u8 board_idx; 358 + struct lmc_extra_statistics extra_stats; 359 + struct net_device *lmc_device; 360 361 int hang, rxdesc, bad_packet, some_counter; 362 + u32 txgo; 363 struct lmc_regfile_t lmc_csrs; 364 + volatile u32 lmc_txtick; 365 + volatile u32 lmc_rxtick; 366 + u32 lmc_flags; 367 + u32 lmc_intrmask; /* our copy of csr_intr */ 368 + u32 lmc_cmdmode; /* our copy of csr_cmdmode */ 369 + u32 lmc_busmode; /* our copy of csr_busmode */ 370 + u32 lmc_gpio_io; /* state of in/out settings */ 371 + u32 lmc_gpio; /* state of outputs */ 372 struct sk_buff* lmc_txq[LMC_TXDESCS]; 373 struct sk_buff* lmc_rxq[LMC_RXDESCS]; 374 volatile ··· 381 unsigned int lmc_taint_tx, lmc_taint_rx; 382 int lmc_tx_start, lmc_txfull; 383 int lmc_txbusy; 384 + u16 lmc_miireg16; 385 int lmc_ok; 386 int last_link_status; 387 int lmc_cardtype; 388 + u32 last_frameerr; 389 lmc_media_t *lmc_media; 390 struct timer_list timer; 391 lmc_ctl_t ictl; 392 + u32 TxDescriptControlInit; 393 394 int tx_TimeoutInd; /* additional driver state */ 395 int tx_TimeoutDisplay; 396 unsigned int lastlmc_taint_tx; 397 int lasttx_packets; 398 + u32 tx_clockState; 399 + u32 lmc_crcSize; 400 + LMC_XINFO lmc_xinfo; 401 char lmc_yel, lmc_blue, lmc_red; /* for T1 and DS3 */ 402 + char lmc_timing; /* for HSSI and SSI */ 403 + int got_irq; 404 405 + char last_led_err[4]; 406 407 + u32 last_int; 408 + u32 num_int; 409 410 spinlock_t lmc_lock; 411 + u16 if_type; /* HDLC/PPP or NET */ 412 413 + /* Failure cases */ 414 + u8 failed_ring; 415 + u8 failed_recv_alloc; 416 417 + /* Structure check */ 418 + u32 check; 419 }; 420 421 #define LMC_PCI_TIME 1 
··· 512 | TULIP_STS_TXUNDERFLOW\ 513 | TULIP_STS_RXSTOPPED ) 514 515 + #define DESC_OWNED_BY_SYSTEM ((u32)(0x00000000)) 516 + #define DESC_OWNED_BY_DC21X4 ((u32)(0x80000000)) 517 518 #ifndef TULIP_CMD_RECEIVEALL 519 #define TULIP_CMD_RECEIVEALL 0x40000000L ··· 525 #define LMC_ADAP_SSI 4 526 #define LMC_ADAP_T1 5 527 528 #define LMC_MTU 1500 529 530 #define LMC_CRC_LEN_16 2 /* 16-bit CRC */ 531 #define LMC_CRC_LEN_32 4 532 533 #endif /* _LMC_VAR_H_ */
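
The lmc_var.h rework is the other half of the lmc_main.c statistics changes: the old struct lmc_statistics, which duplicated every net_device_stats field, is gone, and only the driver-specific counters survive in struct lmc_extra_statistics. LMCIOCGETLMCSTATS therefore copies two structures back to back into user memory. A hedged userspace sketch of a consumer, assuming the driver's ioctl header exposes these layouts to applications as it did for the old combined struct; lmc_read_stats() and struct lmc_all_stats are hypothetical:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>

	struct lmc_all_stats {
		struct net_device_stats gen;       /* generic counters come first */
		struct lmc_extra_statistics extra; /* driver extras directly after */
	};

	int lmc_read_stats(int sock, const char *ifname, struct lmc_all_stats *st)
	{
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (char *)st;         /* driver fills gen, then extra */
		if (ioctl(sock, LMCIOCGETLMCSTATS, &ifr) < 0)
			return -1;
		/* layout guard plus packed version/size word */
		if (st->extra.check != STATCHECK ||
		    (st->extra.version_size & 0xffff) !=
		     sizeof(st->gen) + sizeof(st->extra))
			return -1;
		return 0;
	}

The version_size word keeps its old encoding, DRIVER_VERSION in the top 16 bits and the combined size of both structures in the bottom 16, so an old tool immediately notices that the layout has changed.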
+91 -137
drivers/net/wan/pc300.h
··· 100 #define _PC300_H 101 102 #include <linux/hdlc.h> 103 - #include <net/syncppp.h> 104 #include "hd64572.h" 105 #include "pc300-falc-lh.h" 106 107 - #ifndef CY_TYPES 108 - #define CY_TYPES 109 - typedef __u64 ucdouble; /* 64 bits, unsigned */ 110 - typedef __u32 uclong; /* 32 bits, unsigned */ 111 - typedef __u16 ucshort; /* 16 bits, unsigned */ 112 - typedef __u8 ucchar; /* 8 bits, unsigned */ 113 - #endif /* CY_TYPES */ 114 115 - #define PC300_PROTO_MLPPP 1 116 - 117 - #define PC300_KERNEL "2.4.x" /* Kernel supported by this driver */ 118 - 119 - #define PC300_DEVNAME "hdlc" /* Dev. name base (for hdlc0, hdlc1, etc.) */ 120 - #define PC300_MAXINDEX 100 /* Max dev. name index (the '0' in hdlc0) */ 121 - 122 - #define PC300_MAXCARDS 4 /* Max number of cards per system */ 123 #define PC300_MAXCHAN 2 /* Number of channels per card */ 124 125 - #define PC300_PLX_WIN 0x80 /* PLX control window size (128b) */ 126 #define PC300_RAMSIZE 0x40000 /* RAM window size (256Kb) */ 127 - #define PC300_SCASIZE 0x400 /* SCA window size (1Kb) */ 128 #define PC300_FALCSIZE 0x400 /* FALC window size (1Kb) */ 129 130 #define PC300_OSC_CLOCK 24576000 ··· 143 * Memory access functions/macros * 144 * (required to support Alpha systems) * 145 ***************************************/ 146 - #ifdef __KERNEL__ 147 - #define cpc_writeb(port,val) {writeb((ucchar)(val),(port)); mb();} 148 #define cpc_writew(port,val) {writew((ushort)(val),(port)); mb();} 149 - #define cpc_writel(port,val) {writel((uclong)(val),(port)); mb();} 150 151 #define cpc_readb(port) readb(port) 152 #define cpc_readw(port) readw(port) 153 #define cpc_readl(port) readl(port) 154 - 155 - #else /* __KERNEL__ */ 156 - #define cpc_writeb(port,val) (*(volatile ucchar *)(port) = (ucchar)(val)) 157 - #define cpc_writew(port,val) (*(volatile ucshort *)(port) = (ucshort)(val)) 158 - #define cpc_writel(port,val) (*(volatile uclong *)(port) = (uclong)(val)) 159 - 160 - #define cpc_readb(port) (*(volatile ucchar *)(port)) 161 - #define cpc_readw(port) (*(volatile ucshort *)(port)) 162 - #define cpc_readl(port) (*(volatile uclong *)(port)) 163 - 164 - #endif /* __KERNEL__ */ 165 166 /****** Data Structures *****************************************************/ 167 ··· 159 * (memory mapped). 
160 */ 161 struct RUNTIME_9050 { 162 - uclong loc_addr_range[4]; /* 00-0Ch : Local Address Ranges */ 163 - uclong loc_rom_range; /* 10h : Local ROM Range */ 164 - uclong loc_addr_base[4]; /* 14-20h : Local Address Base Addrs */ 165 - uclong loc_rom_base; /* 24h : Local ROM Base */ 166 - uclong loc_bus_descr[4]; /* 28-34h : Local Bus Descriptors */ 167 - uclong rom_bus_descr; /* 38h : ROM Bus Descriptor */ 168 - uclong cs_base[4]; /* 3C-48h : Chip Select Base Addrs */ 169 - uclong intr_ctrl_stat; /* 4Ch : Interrupt Control/Status */ 170 - uclong init_ctrl; /* 50h : EEPROM ctrl, Init Ctrl, etc */ 171 }; 172 173 #define PLX_9050_LINT1_ENABLE 0x01 ··· 211 #define PC300_FALC_MAXLOOP 0x0000ffff /* for falc_issue_cmd() */ 212 213 typedef struct falc { 214 - ucchar sync; /* If true FALC is synchronized */ 215 - ucchar active; /* if TRUE then already active */ 216 - ucchar loop_active; /* if TRUE a line loopback UP was received */ 217 - ucchar loop_gen; /* if TRUE a line loopback UP was issued */ 218 219 - ucchar num_channels; 220 - ucchar offset; /* 1 for T1, 0 for E1 */ 221 - ucchar full_bandwidth; 222 223 - ucchar xmb_cause; 224 - ucchar multiframe_mode; 225 226 /* Statistics */ 227 - ucshort pden; /* Pulse Density violation count */ 228 - ucshort los; /* Loss of Signal count */ 229 - ucshort losr; /* Loss of Signal recovery count */ 230 - ucshort lfa; /* Loss of frame alignment count */ 231 - ucshort farec; /* Frame Alignment Recovery count */ 232 - ucshort lmfa; /* Loss of multiframe alignment count */ 233 - ucshort ais; /* Remote Alarm indication Signal count */ 234 - ucshort sec; /* One-second timer */ 235 - ucshort es; /* Errored second */ 236 - ucshort rai; /* remote alarm received */ 237 - ucshort bec; 238 - ucshort fec; 239 - ucshort cvc; 240 - ucshort cec; 241 - ucshort ebc; 242 243 /* Status */ 244 - ucchar red_alarm; 245 - ucchar blue_alarm; 246 - ucchar loss_fa; 247 - ucchar yellow_alarm; 248 - ucchar loss_mfa; 249 - ucchar prbs; 250 } falc_t; 251 252 typedef struct falc_status { 253 - ucchar sync; /* If true FALC is synchronized */ 254 - ucchar red_alarm; 255 - ucchar blue_alarm; 256 - ucchar loss_fa; 257 - ucchar yellow_alarm; 258 - ucchar loss_mfa; 259 - ucchar prbs; 260 } falc_status_t; 261 262 typedef struct rsv_x21_status { 263 - ucchar dcd; 264 - ucchar dsr; 265 - ucchar cts; 266 - ucchar rts; 267 - ucchar dtr; 268 } rsv_x21_status_t; 269 270 typedef struct pc300stats { 271 int hw_type; 272 - uclong line_on; 273 - uclong line_off; 274 struct net_device_stats gen_stats; 275 falc_t te_stats; 276 } pc300stats_t; ··· 288 289 typedef struct pc300patterntst { 290 char patrntst_on; /* 0 - off; 1 - on; 2 - read num_errors */ 291 - ucshort num_errors; 292 } pc300patterntst_t; 293 294 typedef struct pc300dev { 295 - void *if_ptr; /* General purpose pointer */ 296 struct pc300ch *chan; 297 - ucchar trace_on; 298 - uclong line_on; /* DCD(X.21, RSV) / sync(TE) change counters */ 299 - uclong line_off; 300 - #ifdef __KERNEL__ 301 char name[16]; 302 struct net_device *dev; 303 - 304 - void *private; 305 - struct sk_buff *tx_skb; 306 - union { /* This union has all the protocol-specific structures */ 307 - struct ppp_device pppdev; 308 - }ifu; 309 #ifdef CONFIG_PC300_MLPPP 310 void *cpc_tty; /* information to PC300 TTY driver */ 311 #endif 312 - #endif /* __KERNEL__ */ 313 }pc300dev_t; 314 315 typedef struct pc300hw { ··· 308 int bus; /* Bus (PCI, PMC, etc.) 
*/ 309 int nchan; /* number of channels */ 310 int irq; /* interrupt request level */ 311 - uclong clock; /* Board clock */ 312 - ucchar cpld_id; /* CPLD ID (TE only) */ 313 - ucshort cpld_reg1; /* CPLD reg 1 (TE only) */ 314 - ucshort cpld_reg2; /* CPLD reg 2 (TE only) */ 315 - ucshort gpioc_reg; /* PLX GPIOC reg */ 316 - ucshort intctl_reg; /* PLX Int Ctrl/Status reg */ 317 - uclong iophys; /* PLX registers I/O base */ 318 - uclong iosize; /* PLX registers I/O size */ 319 - uclong plxphys; /* PLX registers MMIO base (physical) */ 320 void __iomem * plxbase; /* PLX registers MMIO base (virtual) */ 321 - uclong plxsize; /* PLX registers MMIO size */ 322 - uclong scaphys; /* SCA registers MMIO base (physical) */ 323 void __iomem * scabase; /* SCA registers MMIO base (virtual) */ 324 - uclong scasize; /* SCA registers MMIO size */ 325 - uclong ramphys; /* On-board RAM MMIO base (physical) */ 326 void __iomem * rambase; /* On-board RAM MMIO base (virtual) */ 327 - uclong alloc_ramsize; /* RAM MMIO size allocated by the PCI bridge */ 328 - uclong ramsize; /* On-board RAM MMIO size */ 329 - uclong falcphys; /* FALC registers MMIO base (physical) */ 330 void __iomem * falcbase;/* FALC registers MMIO base (virtual) */ 331 - uclong falcsize; /* FALC registers MMIO size */ 332 } pc300hw_t; 333 334 typedef struct pc300chconf { 335 - sync_serial_settings phys_settings; /* Clock type/rate (in bps), 336 loopback mode */ 337 raw_hdlc_proto proto_settings; /* Encoding, parity (CRC) */ 338 - uclong media; /* HW media (RS232, V.35, etc.) */ 339 - uclong proto; /* Protocol (PPP, X.25, etc.) */ 340 - ucchar monitor; /* Monitor mode (0 = off, !0 = on) */ 341 342 /* TE-specific parameters */ 343 - ucchar lcode; /* Line Code (AMI, B8ZS, etc.) */ 344 - ucchar fr_mode; /* Frame Mode (ESF, D4, etc.) */ 345 - ucchar lbo; /* Line Build Out */ 346 - ucchar rx_sens; /* Rx Sensitivity (long- or short-haul) */ 347 - uclong tslot_bitmap; /* bit[i]=1 => timeslot _i_ is active */ 348 } pc300chconf_t; 349 350 typedef struct pc300ch { ··· 351 int channel; 352 pc300dev_t d; 353 pc300chconf_t conf; 354 - ucchar tx_first_bd; /* First TX DMA block descr. w/ data */ 355 - ucchar tx_next_bd; /* Next free TX DMA block descriptor */ 356 - ucchar rx_first_bd; /* First free RX DMA block descriptor */ 357 - ucchar rx_last_bd; /* Last free RX DMA block descriptor */ 358 - ucchar nfree_tx_bd; /* Number of free TX DMA block descriptors */ 359 - falc_t falc; /* FALC structure (TE only) */ 360 } pc300ch_t; 361 362 typedef struct pc300 { 363 pc300hw_t hw; /* hardware config. */ 364 pc300ch_t chan[PC300_MAXCHAN]; 365 - #ifdef __KERNEL__ 366 spinlock_t card_lock; 367 - #endif /* __KERNEL__ */ 368 } pc300_t; 369 370 typedef struct pc300conf { ··· 430 #define PC300_TX_QUEUE_LEN 100 431 #define PC300_DEF_MTU 1600 432 433 - #ifdef __KERNEL__ 434 /* Function Prototypes */ 435 - void tx_dma_start(pc300_t *, int); 436 int cpc_open(struct net_device *dev); 437 - int cpc_set_media(hdlc_device *, int); 438 - #endif /* __KERNEL__ */ 439 440 #endif /* _PC300_H */ 441 -
··· 100 #define _PC300_H 101 102 #include <linux/hdlc.h> 103 #include "hd64572.h" 104 #include "pc300-falc-lh.h" 105 106 + #define PC300_PROTO_MLPPP 1 107 108 #define PC300_MAXCHAN 2 /* Number of channels per card */ 109 110 #define PC300_RAMSIZE 0x40000 /* RAM window size (256Kb) */ 111 #define PC300_FALCSIZE 0x400 /* FALC window size (1Kb) */ 112 113 #define PC300_OSC_CLOCK 24576000 ··· 160 * Memory access functions/macros * 161 * (required to support Alpha systems) * 162 ***************************************/ 163 + #define cpc_writeb(port,val) {writeb((u8)(val),(port)); mb();} 164 #define cpc_writew(port,val) {writew((ushort)(val),(port)); mb();} 165 + #define cpc_writel(port,val) {writel((u32)(val),(port)); mb();} 166 167 #define cpc_readb(port) readb(port) 168 #define cpc_readw(port) readw(port) 169 #define cpc_readl(port) readl(port) 170 171 /****** Data Structures *****************************************************/ 172 ··· 188 * (memory mapped). 189 */ 190 struct RUNTIME_9050 { 191 + u32 loc_addr_range[4]; /* 00-0Ch : Local Address Ranges */ 192 + u32 loc_rom_range; /* 10h : Local ROM Range */ 193 + u32 loc_addr_base[4]; /* 14-20h : Local Address Base Addrs */ 194 + u32 loc_rom_base; /* 24h : Local ROM Base */ 195 + u32 loc_bus_descr[4]; /* 28-34h : Local Bus Descriptors */ 196 + u32 rom_bus_descr; /* 38h : ROM Bus Descriptor */ 197 + u32 cs_base[4]; /* 3C-48h : Chip Select Base Addrs */ 198 + u32 intr_ctrl_stat; /* 4Ch : Interrupt Control/Status */ 199 + u32 init_ctrl; /* 50h : EEPROM ctrl, Init Ctrl, etc */ 200 }; 201 202 #define PLX_9050_LINT1_ENABLE 0x01 ··· 240 #define PC300_FALC_MAXLOOP 0x0000ffff /* for falc_issue_cmd() */ 241 242 typedef struct falc { 243 + u8 sync; /* If true FALC is synchronized */ 244 + u8 active; /* if TRUE then already active */ 245 + u8 loop_active; /* if TRUE a line loopback UP was received */ 246 + u8 loop_gen; /* if TRUE a line loopback UP was issued */ 247 248 + u8 num_channels; 249 + u8 offset; /* 1 for T1, 0 for E1 */ 250 + u8 full_bandwidth; 251 252 + u8 xmb_cause; 253 + u8 multiframe_mode; 254 255 /* Statistics */ 256 + u16 pden; /* Pulse Density violation count */ 257 + u16 los; /* Loss of Signal count */ 258 + u16 losr; /* Loss of Signal recovery count */ 259 + u16 lfa; /* Loss of frame alignment count */ 260 + u16 farec; /* Frame Alignment Recovery count */ 261 + u16 lmfa; /* Loss of multiframe alignment count */ 262 + u16 ais; /* Remote Alarm indication Signal count */ 263 + u16 sec; /* One-second timer */ 264 + u16 es; /* Errored second */ 265 + u16 rai; /* remote alarm received */ 266 + u16 bec; 267 + u16 fec; 268 + u16 cvc; 269 + u16 cec; 270 + u16 ebc; 271 272 /* Status */ 273 + u8 red_alarm; 274 + u8 blue_alarm; 275 + u8 loss_fa; 276 + u8 yellow_alarm; 277 + u8 loss_mfa; 278 + u8 prbs; 279 } falc_t; 280 281 typedef struct falc_status { 282 + u8 sync; /* If true FALC is synchronized */ 283 + u8 red_alarm; 284 + u8 blue_alarm; 285 + u8 loss_fa; 286 + u8 yellow_alarm; 287 + u8 loss_mfa; 288 + u8 prbs; 289 } falc_status_t; 290 291 typedef struct rsv_x21_status { 292 + u8 dcd; 293 + u8 dsr; 294 + u8 cts; 295 + u8 rts; 296 + u8 dtr; 297 } rsv_x21_status_t; 298 299 typedef struct pc300stats { 300 int hw_type; 301 + u32 line_on; 302 + u32 line_off; 303 struct net_device_stats gen_stats; 304 falc_t te_stats; 305 } pc300stats_t; ··· 317 318 typedef struct pc300patterntst { 319 char patrntst_on; /* 0 - off; 1 - on; 2 - read num_errors */ 320 + u16 num_errors; 321 } pc300patterntst_t; 322 323 typedef struct pc300dev { 324 struct pc300ch 
*chan; 325 + u8 trace_on; 326 + u32 line_on; /* DCD(X.21, RSV) / sync(TE) change counters */ 327 + u32 line_off; 328 char name[16]; 329 struct net_device *dev; 330 #ifdef CONFIG_PC300_MLPPP 331 void *cpc_tty; /* information to PC300 TTY driver */ 332 #endif 333 }pc300dev_t; 334 335 typedef struct pc300hw { ··· 346 int bus; /* Bus (PCI, PMC, etc.) */ 347 int nchan; /* number of channels */ 348 int irq; /* interrupt request level */ 349 + u32 clock; /* Board clock */ 350 + u8 cpld_id; /* CPLD ID (TE only) */ 351 + u16 cpld_reg1; /* CPLD reg 1 (TE only) */ 352 + u16 cpld_reg2; /* CPLD reg 2 (TE only) */ 353 + u16 gpioc_reg; /* PLX GPIOC reg */ 354 + u16 intctl_reg; /* PLX Int Ctrl/Status reg */ 355 + u32 iophys; /* PLX registers I/O base */ 356 + u32 iosize; /* PLX registers I/O size */ 357 + u32 plxphys; /* PLX registers MMIO base (physical) */ 358 void __iomem * plxbase; /* PLX registers MMIO base (virtual) */ 359 + u32 plxsize; /* PLX registers MMIO size */ 360 + u32 scaphys; /* SCA registers MMIO base (physical) */ 361 void __iomem * scabase; /* SCA registers MMIO base (virtual) */ 362 + u32 scasize; /* SCA registers MMIO size */ 363 + u32 ramphys; /* On-board RAM MMIO base (physical) */ 364 void __iomem * rambase; /* On-board RAM MMIO base (virtual) */ 365 + u32 alloc_ramsize; /* RAM MMIO size allocated by the PCI bridge */ 366 + u32 ramsize; /* On-board RAM MMIO size */ 367 + u32 falcphys; /* FALC registers MMIO base (physical) */ 368 void __iomem * falcbase;/* FALC registers MMIO base (virtual) */ 369 + u32 falcsize; /* FALC registers MMIO size */ 370 } pc300hw_t; 371 372 typedef struct pc300chconf { 373 + sync_serial_settings phys_settings; /* Clock type/rate (in bps), 374 loopback mode */ 375 raw_hdlc_proto proto_settings; /* Encoding, parity (CRC) */ 376 + u32 media; /* HW media (RS232, V.35, etc.) */ 377 + u32 proto; /* Protocol (PPP, X.25, etc.) */ 378 379 /* TE-specific parameters */ 380 + u8 lcode; /* Line Code (AMI, B8ZS, etc.) */ 381 + u8 fr_mode; /* Frame Mode (ESF, D4, etc.) */ 382 + u8 lbo; /* Line Build Out */ 383 + u8 rx_sens; /* Rx Sensitivity (long- or short-haul) */ 384 + u32 tslot_bitmap; /* bit[i]=1 => timeslot _i_ is active */ 385 } pc300chconf_t; 386 387 typedef struct pc300ch { ··· 390 int channel; 391 pc300dev_t d; 392 pc300chconf_t conf; 393 + u8 tx_first_bd; /* First TX DMA block descr. w/ data */ 394 + u8 tx_next_bd; /* Next free TX DMA block descriptor */ 395 + u8 rx_first_bd; /* First free RX DMA block descriptor */ 396 + u8 rx_last_bd; /* Last free RX DMA block descriptor */ 397 + u8 nfree_tx_bd; /* Number of free TX DMA block descriptors */ 398 + falc_t falc; /* FALC structure (TE only) */ 399 } pc300ch_t; 400 401 typedef struct pc300 { 402 pc300hw_t hw; /* hardware config. */ 403 pc300ch_t chan[PC300_MAXCHAN]; 404 spinlock_t card_lock; 405 } pc300_t; 406 407 typedef struct pc300conf { ··· 471 #define PC300_TX_QUEUE_LEN 100 472 #define PC300_DEF_MTU 1600 473 474 /* Function Prototypes */ 475 int cpc_open(struct net_device *dev); 476 477 #endif /* _PC300_H */
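The pc300.h hunks above reduce to two mechanical patterns: the card-private ucchar/ucshort/uclong typedefs are replaced by the kernel's fixed-width u8/u16/u32 types, and the userspace half of the __KERNEL__ guards is dropped, leaving only the barrier-protected MMIO accessors. A minimal sketch of the surviving accessor idiom (the macros are taken from the header; the helper function and register offsets are purely illustrative):

    /* The cpc_* write macros pair each MMIO store with a full memory
     * barrier - the "required to support Alpha systems" comment refers
     * to keeping posted writes ordered against later register accesses.
     */
    #define cpc_writeb(port, val)  { writeb((u8)(val), (port)); mb(); }
    #define cpc_readb(port)        readb(port)

    /* Hypothetical helper: write a control register, then read back a
     * status register; the mb() keeps the two accesses ordered.
     */
    static u8 example_poke(void __iomem *falcbase)
    {
            cpc_writeb(falcbase + 0x10, 0x01);  /* offsets are made up */
            return cpc_readb(falcbase + 0x14);
    }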
+64 -82
drivers/net/wan/pc300_drv.c
··· 227 #include <linux/netdevice.h> 228 #include <linux/spinlock.h> 229 #include <linux/if.h> 230 - 231 - #include <net/syncppp.h> 232 #include <net/arp.h> 233 234 #include <asm/io.h> ··· 283 static void tx_dma_buf_check(pc300_t *, int); 284 static void rx_dma_buf_check(pc300_t *, int); 285 static irqreturn_t cpc_intr(int, void *); 286 - static int clock_rate_calc(uclong, uclong, int *); 287 - static uclong detect_ram(pc300_t *); 288 static void plx_init(pc300_t *); 289 static void cpc_trace(struct net_device *, struct sk_buff *, char); 290 static int cpc_attach(struct net_device *, unsigned short, unsigned short); ··· 309 + DMA_TX_BD_BASE + ch_factor * sizeof(pcsca_bd_t)); 310 311 for (i = 0; i < N_DMA_TX_BUF; i++, ptdescr++) { 312 - cpc_writel(&ptdescr->next, (uclong) (DMA_TX_BD_BASE + 313 (ch_factor + ((i + 1) & (N_DMA_TX_BUF - 1))) * sizeof(pcsca_bd_t))); 314 - cpc_writel(&ptdescr->ptbuf, 315 - (uclong) (DMA_TX_BASE + (ch_factor + i) * BD_DEF_LEN)); 316 } 317 } 318 ··· 339 + DMA_RX_BD_BASE + ch_factor * sizeof(pcsca_bd_t)); 340 341 for (i = 0; i < N_DMA_RX_BUF; i++, ptdescr++) { 342 - cpc_writel(&ptdescr->next, (uclong) (DMA_RX_BD_BASE + 343 - (ch_factor + ((i + 1) & (N_DMA_RX_BUF - 1))) * sizeof(pcsca_bd_t))); 344 cpc_writel(&ptdescr->ptbuf, 345 - (uclong) (DMA_RX_BASE + (ch_factor + i) * BD_DEF_LEN)); 346 } 347 } 348 ··· 365 { 366 volatile pcsca_bd_t __iomem *ptdescr; 367 int i; 368 - ucshort first_bd = card->chan[ch].tx_first_bd; 369 - ucshort next_bd = card->chan[ch].tx_next_bd; 370 371 printk("#CH%d: f_bd = %d(0x%08zx), n_bd = %d(0x%08zx)\n", ch, 372 first_bd, TX_BD_ADDR(ch, first_bd), ··· 390 { 391 volatile pcsca_bd_t __iomem *ptdescr; 392 int i; 393 - ucshort first_bd = card->chan[ch].tx_first_bd; 394 - ucshort next_bd = card->chan[ch].tx_next_bd; 395 - uclong scabase = card->hw.scabase; 396 397 printk ("\nnfree_tx_bd = %d \n", card->chan[ch].nfree_tx_bd); 398 printk("#CH%d: f_bd = %d(0x%08x), n_bd = %d(0x%08x)\n", ch, ··· 411 printk("\n"); 412 } 413 #endif 414 - 415 static void rx_dma_buf_check(pc300_t * card, int ch) 416 { 417 volatile pcsca_bd_t __iomem *ptdescr; 418 int i; 419 - ucshort first_bd = card->chan[ch].rx_first_bd; 420 - ucshort last_bd = card->chan[ch].rx_last_bd; 421 int ch_factor; 422 423 ch_factor = ch * N_DMA_RX_BUF; ··· 438 static int dma_get_rx_frame_size(pc300_t * card, int ch) 439 { 440 volatile pcsca_bd_t __iomem *ptdescr; 441 - ucshort first_bd = card->chan[ch].rx_first_bd; 442 int rcvd = 0; 443 - volatile ucchar status; 444 445 ptdescr = (card->hw.rambase + RX_BD_ADDR(ch, first_bd)); 446 while ((status = cpc_readb(&ptdescr->status)) & DST_OSB) { ··· 460 * dma_buf_write: writes a frame to the Tx DMA buffers 461 * NOTE: this function writes one frame at a time. 
462 */ 463 - static int dma_buf_write(pc300_t * card, int ch, ucchar * ptdata, int len) 464 { 465 int i, nchar; 466 volatile pcsca_bd_t __iomem *ptdescr; 467 int tosend = len; 468 - ucchar nbuf = ((len - 1) / BD_DEF_LEN) + 1; 469 470 if (nbuf >= card->chan[ch].nfree_tx_bd) { 471 return -ENOMEM; ··· 507 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 508 volatile pcsca_bd_t __iomem *ptdescr; 509 int rcvd = 0; 510 - volatile ucchar status; 511 512 ptdescr = (card->hw.rambase + 513 RX_BD_ADDR(ch, chan->rx_first_bd)); ··· 561 static void tx_dma_stop(pc300_t * card, int ch) 562 { 563 void __iomem *scabase = card->hw.scabase; 564 - ucchar drr_ena_bit = 1 << (5 + 2 * ch); 565 - ucchar drr_rst_bit = 1 << (1 + 2 * ch); 566 567 /* Disable DMA */ 568 cpc_writeb(scabase + DRR, drr_ena_bit); ··· 572 static void rx_dma_stop(pc300_t * card, int ch) 573 { 574 void __iomem *scabase = card->hw.scabase; 575 - ucchar drr_ena_bit = 1 << (4 + 2 * ch); 576 - ucchar drr_rst_bit = 1 << (2 * ch); 577 578 /* Disable DMA */ 579 cpc_writeb(scabase + DRR, drr_ena_bit); ··· 605 /*************************/ 606 /*** FALC Routines ***/ 607 /*************************/ 608 - static void falc_issue_cmd(pc300_t * card, int ch, ucchar cmd) 609 { 610 void __iomem *falcbase = card->hw.falcbase; 611 unsigned long i = 0; ··· 673 static void falc_open_timeslot(pc300_t * card, int ch, int timeslot) 674 { 675 void __iomem *falcbase = card->hw.falcbase; 676 - ucchar tshf = card->chan[ch].falc.offset; 677 678 cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch), 679 cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) & ··· 689 static void falc_close_timeslot(pc300_t * card, int ch, int timeslot) 690 { 691 void __iomem *falcbase = card->hw.falcbase; 692 - ucchar tshf = card->chan[ch].falc.offset; 693 694 cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch), 695 cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) | ··· 810 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 811 falc_t *pfalc = (falc_t *) & chan->falc; 812 void __iomem *falcbase = card->hw.falcbase; 813 - ucchar dja = (ch ? (LIM2_DJA2 | LIM2_DJA1) : 0); 814 815 /* Switch to T1 mode (PCM 24) */ 816 cpc_writeb(falcbase + F_REG(FMR1, ch), FMR1_PMOD); ··· 979 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 980 falc_t *pfalc = (falc_t *) & chan->falc; 981 void __iomem *falcbase = card->hw.falcbase; 982 - ucchar dja = (ch ? 
(LIM2_DJA2 | LIM2_DJA1) : 0); 983 984 /* Switch to E1 mode (PCM 30) */ 985 cpc_writeb(falcbase + F_REG(FMR1, ch), ··· 1185 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 1186 falc_t *pfalc = (falc_t *) & chan->falc; 1187 void __iomem *falcbase = card->hw.falcbase; 1188 - ucchar dummy; 1189 unsigned long flags; 1190 1191 memset(pfalc, 0, sizeof(falc_t)); ··· 1401 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 1402 falc_t *pfalc = (falc_t *) & chan->falc; 1403 void __iomem *falcbase = card->hw.falcbase; 1404 - ucshort counter; 1405 1406 counter = cpc_readb(falcbase + F_REG(FECL, ch)); 1407 counter |= cpc_readb(falcbase + F_REG(FECH, ch)) << 8; ··· 1727 * Description: This routine returns the bit error counter value 1728 *---------------------------------------------------------------------------- 1729 */ 1730 - static ucshort falc_pattern_test_error(pc300_t * card, int ch) 1731 { 1732 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 1733 falc_t *pfalc = (falc_t *) & chan->falc; ··· 1774 pc300_t *card = (pc300_t *) chan->card; 1775 int ch = chan->channel; 1776 unsigned long flags; 1777 - ucchar ilar; 1778 1779 dev->stats.tx_errors++; 1780 dev->stats.tx_aborted_errors++; ··· 1805 int i; 1806 #endif 1807 1808 - if (chan->conf.monitor) { 1809 - /* In monitor mode no Tx is done: ignore packet */ 1810 - dev_kfree_skb(skb); 1811 - return 0; 1812 - } else if (!netif_carrier_ok(dev)) { 1813 /* DCD must be OFF: drop packet */ 1814 dev_kfree_skb(skb); 1815 dev->stats.tx_errors++; ··· 1830 } 1831 1832 /* Write buffer to DMA buffers */ 1833 - if (dma_buf_write(card, ch, (ucchar *) skb->data, skb->len) != 0) { 1834 // printk("%s: write error. Dropping TX packet.\n", dev->name); 1835 netif_stop_queue(dev); 1836 dev_kfree_skb(skb); ··· 1995 static void sca_intr(pc300_t * card) 1996 { 1997 void __iomem *scabase = card->hw.scabase; 1998 - volatile uclong status; 1999 int ch; 2000 int intr_count = 0; 2001 unsigned char dsr_rx; ··· 2010 2011 /**** Reception ****/ 2012 if (status & IR0_DRX((IR0_DMIA | IR0_DMIB), ch)) { 2013 - ucchar drx_stat = cpc_readb(scabase + DSR_RX(ch)); 2014 2015 /* Clear RX interrupts */ 2016 cpc_writeb(scabase + DSR_RX(ch), drx_stat | DSR_DWE); ··· 2084 2085 /**** Transmission ****/ 2086 if (status & IR0_DTX((IR0_EFT | IR0_DMIA | IR0_DMIB), ch)) { 2087 - ucchar dtx_stat = cpc_readb(scabase + DSR_TX(ch)); 2088 2089 /* Clear TX interrupts */ 2090 cpc_writeb(scabase + DSR_TX(ch), dtx_stat | DSR_DWE); ··· 2128 2129 /**** MSCI ****/ 2130 if (status & IR0_M(IR0_RXINTA, ch)) { 2131 - ucchar st1 = cpc_readb(scabase + M_REG(ST1, ch)); 2132 2133 /* Clear MSCI interrupts */ 2134 cpc_writeb(scabase + M_REG(ST1, ch), st1); ··· 2170 } 2171 } 2172 2173 - static void falc_t1_loop_detection(pc300_t * card, int ch, ucchar frs1) 2174 { 2175 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 2176 falc_t *pfalc = (falc_t *) & chan->falc; ··· 2195 } 2196 } 2197 2198 - static void falc_e1_loop_detection(pc300_t * card, int ch, ucchar rsp) 2199 { 2200 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 2201 falc_t *pfalc = (falc_t *) & chan->falc; ··· 2225 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 2226 falc_t *pfalc = (falc_t *) & chan->falc; 2227 void __iomem *falcbase = card->hw.falcbase; 2228 - ucchar isr0, isr3, gis; 2229 - ucchar dummy; 2230 2231 while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) { 2232 if (gis & GIS_ISR0) { ··· 2272 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 2273 falc_t *pfalc = (falc_t *) & chan->falc; 2274 void __iomem *falcbase = card->hw.falcbase; 
2275 - ucchar isr1, isr2, isr3, gis, rsp; 2276 - ucchar dummy; 2277 2278 while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) { 2279 rsp = cpc_readb(falcbase + F_REG(RSP, ch)); ··· 2355 static irqreturn_t cpc_intr(int irq, void *dev_id) 2356 { 2357 pc300_t *card = dev_id; 2358 - volatile ucchar plx_status; 2359 2360 if (!card) { 2361 #ifdef PC300_DEBUG_INTR ··· 2394 2395 static void cpc_sca_status(pc300_t * card, int ch) 2396 { 2397 - ucchar ilar; 2398 void __iomem *scabase = card->hw.scabase; 2399 unsigned long flags; 2400 ··· 2812 } 2813 } 2814 2815 - static int clock_rate_calc(uclong rate, uclong clock, int *br_io) 2816 { 2817 int br, tc; 2818 int br_pwr, error; ··· 2849 void __iomem *scabase = card->hw.scabase; 2850 void __iomem *plxbase = card->hw.plxbase; 2851 int ch = chan->channel; 2852 - uclong clkrate = chan->conf.phys_settings.clock_rate; 2853 - uclong clktype = chan->conf.phys_settings.clock_type; 2854 - ucshort encoding = chan->conf.proto_settings.encoding; 2855 - ucshort parity = chan->conf.proto_settings.parity; 2856 - ucchar md0, md2; 2857 - 2858 /* Reset the channel */ 2859 cpc_writeb(scabase + M_REG(CMD, ch), CMD_CH_RST); 2860 ··· 3146 printk("pc300: cpc_open"); 3147 #endif 3148 3149 - #ifdef FIXME 3150 - if (hdlc->proto.id == IF_PROTO_PPP) { 3151 - d->if_ptr = &hdlc->state.ppp.pppdev; 3152 - } 3153 - #endif 3154 - 3155 result = hdlc_open(dev); 3156 - if (/* FIXME hdlc->proto.id == IF_PROTO_PPP*/ 0) { 3157 - dev->priv = d; 3158 - } 3159 - if (result) { 3160 return result; 3161 - } 3162 3163 sprintf(ifr.ifr_name, "%s", dev->name); 3164 result = cpc_opench(d); ··· 3182 CPC_UNLOCK(card, flags); 3183 3184 hdlc_close(dev); 3185 - if (/* FIXME hdlc->proto.id == IF_PROTO_PPP*/ 0) { 3186 - d->if_ptr = NULL; 3187 - } 3188 #ifdef CONFIG_PC300_MLPPP 3189 if (chan->conf.proto == PC300_PROTO_MLPPP) { 3190 cpc_tty_unregister_service(d); ··· 3193 return 0; 3194 } 3195 3196 - static uclong detect_ram(pc300_t * card) 3197 { 3198 - uclong i; 3199 - ucchar data; 3200 void __iomem *rambase = card->hw.rambase; 3201 3202 card->hw.ramsize = PC300_RAMSIZE; 3203 /* Let's find out how much RAM is present on this board */ 3204 for (i = 0; i < card->hw.ramsize; i++) { 3205 - data = (ucchar) (i & 0xff); 3206 cpc_writeb(rambase + i, data); 3207 if (cpc_readb(rambase + i) != data) { 3208 break; ··· 3279 cpc_writeb(card->hw.scabase + DMER, 0x80); 3280 3281 if (card->hw.type == PC300_TE) { 3282 - ucchar reg1; 3283 3284 /* Check CPLD version */ 3285 reg1 = cpc_readb(card->hw.falcbase + CPLD_REG1); ··· 3343 chan->nfree_tx_bd = N_DMA_TX_BUF; 3344 3345 d->chan = chan; 3346 - d->tx_skb = NULL; 3347 d->trace_on = 0; 3348 d->line_on = 0; 3349 d->line_off = 0; ··· 3413 { 3414 static int first_time = 1; 3415 int err, eeprom_outdated = 0; 3416 - ucshort device_id; 3417 pc300_t *card; 3418 3419 if (first_time) {
··· 227 #include <linux/netdevice.h> 228 #include <linux/spinlock.h> 229 #include <linux/if.h> 230 #include <net/arp.h> 231 232 #include <asm/io.h> ··· 285 static void tx_dma_buf_check(pc300_t *, int); 286 static void rx_dma_buf_check(pc300_t *, int); 287 static irqreturn_t cpc_intr(int, void *); 288 + static int clock_rate_calc(u32, u32, int *); 289 + static u32 detect_ram(pc300_t *); 290 static void plx_init(pc300_t *); 291 static void cpc_trace(struct net_device *, struct sk_buff *, char); 292 static int cpc_attach(struct net_device *, unsigned short, unsigned short); ··· 311 + DMA_TX_BD_BASE + ch_factor * sizeof(pcsca_bd_t)); 312 313 for (i = 0; i < N_DMA_TX_BUF; i++, ptdescr++) { 314 + cpc_writel(&ptdescr->next, (u32)(DMA_TX_BD_BASE + 315 (ch_factor + ((i + 1) & (N_DMA_TX_BUF - 1))) * sizeof(pcsca_bd_t))); 316 + cpc_writel(&ptdescr->ptbuf, 317 + (u32)(DMA_TX_BASE + (ch_factor + i) * BD_DEF_LEN)); 318 } 319 } 320 ··· 341 + DMA_RX_BD_BASE + ch_factor * sizeof(pcsca_bd_t)); 342 343 for (i = 0; i < N_DMA_RX_BUF; i++, ptdescr++) { 344 + cpc_writel(&ptdescr->next, (u32)(DMA_RX_BD_BASE + 345 + (ch_factor + ((i + 1) & (N_DMA_RX_BUF - 1))) * sizeof(pcsca_bd_t))); 346 cpc_writel(&ptdescr->ptbuf, 347 + (u32)(DMA_RX_BASE + (ch_factor + i) * BD_DEF_LEN)); 348 } 349 } 350 ··· 367 { 368 volatile pcsca_bd_t __iomem *ptdescr; 369 int i; 370 + u16 first_bd = card->chan[ch].tx_first_bd; 371 + u16 next_bd = card->chan[ch].tx_next_bd; 372 373 printk("#CH%d: f_bd = %d(0x%08zx), n_bd = %d(0x%08zx)\n", ch, 374 first_bd, TX_BD_ADDR(ch, first_bd), ··· 392 { 393 volatile pcsca_bd_t __iomem *ptdescr; 394 int i; 395 + u16 first_bd = card->chan[ch].tx_first_bd; 396 + u16 next_bd = card->chan[ch].tx_next_bd; 397 + u32 scabase = card->hw.scabase; 398 399 printk ("\nnfree_tx_bd = %d \n", card->chan[ch].nfree_tx_bd); 400 printk("#CH%d: f_bd = %d(0x%08x), n_bd = %d(0x%08x)\n", ch, ··· 413 printk("\n"); 414 } 415 #endif 416 + 417 static void rx_dma_buf_check(pc300_t * card, int ch) 418 { 419 volatile pcsca_bd_t __iomem *ptdescr; 420 int i; 421 + u16 first_bd = card->chan[ch].rx_first_bd; 422 + u16 last_bd = card->chan[ch].rx_last_bd; 423 int ch_factor; 424 425 ch_factor = ch * N_DMA_RX_BUF; ··· 440 static int dma_get_rx_frame_size(pc300_t * card, int ch) 441 { 442 volatile pcsca_bd_t __iomem *ptdescr; 443 + u16 first_bd = card->chan[ch].rx_first_bd; 444 int rcvd = 0; 445 + volatile u8 status; 446 447 ptdescr = (card->hw.rambase + RX_BD_ADDR(ch, first_bd)); 448 while ((status = cpc_readb(&ptdescr->status)) & DST_OSB) { ··· 462 * dma_buf_write: writes a frame to the Tx DMA buffers 463 * NOTE: this function writes one frame at a time. 
464 */ 465 + static int dma_buf_write(pc300_t *card, int ch, u8 *ptdata, int len) 466 { 467 int i, nchar; 468 volatile pcsca_bd_t __iomem *ptdescr; 469 int tosend = len; 470 + u8 nbuf = ((len - 1) / BD_DEF_LEN) + 1; 471 472 if (nbuf >= card->chan[ch].nfree_tx_bd) { 473 return -ENOMEM; ··· 509 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 510 volatile pcsca_bd_t __iomem *ptdescr; 511 int rcvd = 0; 512 + volatile u8 status; 513 514 ptdescr = (card->hw.rambase + 515 RX_BD_ADDR(ch, chan->rx_first_bd)); ··· 563 static void tx_dma_stop(pc300_t * card, int ch) 564 { 565 void __iomem *scabase = card->hw.scabase; 566 + u8 drr_ena_bit = 1 << (5 + 2 * ch); 567 + u8 drr_rst_bit = 1 << (1 + 2 * ch); 568 569 /* Disable DMA */ 570 cpc_writeb(scabase + DRR, drr_ena_bit); ··· 574 static void rx_dma_stop(pc300_t * card, int ch) 575 { 576 void __iomem *scabase = card->hw.scabase; 577 + u8 drr_ena_bit = 1 << (4 + 2 * ch); 578 + u8 drr_rst_bit = 1 << (2 * ch); 579 580 /* Disable DMA */ 581 cpc_writeb(scabase + DRR, drr_ena_bit); ··· 607 /*************************/ 608 /*** FALC Routines ***/ 609 /*************************/ 610 + static void falc_issue_cmd(pc300_t *card, int ch, u8 cmd) 611 { 612 void __iomem *falcbase = card->hw.falcbase; 613 unsigned long i = 0; ··· 675 static void falc_open_timeslot(pc300_t * card, int ch, int timeslot) 676 { 677 void __iomem *falcbase = card->hw.falcbase; 678 + u8 tshf = card->chan[ch].falc.offset; 679 680 cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch), 681 cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) & ··· 691 static void falc_close_timeslot(pc300_t * card, int ch, int timeslot) 692 { 693 void __iomem *falcbase = card->hw.falcbase; 694 + u8 tshf = card->chan[ch].falc.offset; 695 696 cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch), 697 cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) | ··· 812 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 813 falc_t *pfalc = (falc_t *) & chan->falc; 814 void __iomem *falcbase = card->hw.falcbase; 815 + u8 dja = (ch ? (LIM2_DJA2 | LIM2_DJA1) : 0); 816 817 /* Switch to T1 mode (PCM 24) */ 818 cpc_writeb(falcbase + F_REG(FMR1, ch), FMR1_PMOD); ··· 981 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 982 falc_t *pfalc = (falc_t *) & chan->falc; 983 void __iomem *falcbase = card->hw.falcbase; 984 + u8 dja = (ch ? 
(LIM2_DJA2 | LIM2_DJA1) : 0); 985 986 /* Switch to E1 mode (PCM 30) */ 987 cpc_writeb(falcbase + F_REG(FMR1, ch), ··· 1187 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 1188 falc_t *pfalc = (falc_t *) & chan->falc; 1189 void __iomem *falcbase = card->hw.falcbase; 1190 + u8 dummy; 1191 unsigned long flags; 1192 1193 memset(pfalc, 0, sizeof(falc_t)); ··· 1403 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 1404 falc_t *pfalc = (falc_t *) & chan->falc; 1405 void __iomem *falcbase = card->hw.falcbase; 1406 + u16 counter; 1407 1408 counter = cpc_readb(falcbase + F_REG(FECL, ch)); 1409 counter |= cpc_readb(falcbase + F_REG(FECH, ch)) << 8; ··· 1729 * Description: This routine returns the bit error counter value 1730 *---------------------------------------------------------------------------- 1731 */ 1732 + static u16 falc_pattern_test_error(pc300_t * card, int ch) 1733 { 1734 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 1735 falc_t *pfalc = (falc_t *) & chan->falc; ··· 1776 pc300_t *card = (pc300_t *) chan->card; 1777 int ch = chan->channel; 1778 unsigned long flags; 1779 + u8 ilar; 1780 1781 dev->stats.tx_errors++; 1782 dev->stats.tx_aborted_errors++; ··· 1807 int i; 1808 #endif 1809 1810 + if (!netif_carrier_ok(dev)) { 1811 /* DCD must be OFF: drop packet */ 1812 dev_kfree_skb(skb); 1813 dev->stats.tx_errors++; ··· 1836 } 1837 1838 /* Write buffer to DMA buffers */ 1839 + if (dma_buf_write(card, ch, (u8 *)skb->data, skb->len) != 0) { 1840 // printk("%s: write error. Dropping TX packet.\n", dev->name); 1841 netif_stop_queue(dev); 1842 dev_kfree_skb(skb); ··· 2001 static void sca_intr(pc300_t * card) 2002 { 2003 void __iomem *scabase = card->hw.scabase; 2004 + volatile u32 status; 2005 int ch; 2006 int intr_count = 0; 2007 unsigned char dsr_rx; ··· 2016 2017 /**** Reception ****/ 2018 if (status & IR0_DRX((IR0_DMIA | IR0_DMIB), ch)) { 2019 + u8 drx_stat = cpc_readb(scabase + DSR_RX(ch)); 2020 2021 /* Clear RX interrupts */ 2022 cpc_writeb(scabase + DSR_RX(ch), drx_stat | DSR_DWE); ··· 2090 2091 /**** Transmission ****/ 2092 if (status & IR0_DTX((IR0_EFT | IR0_DMIA | IR0_DMIB), ch)) { 2093 + u8 dtx_stat = cpc_readb(scabase + DSR_TX(ch)); 2094 2095 /* Clear TX interrupts */ 2096 cpc_writeb(scabase + DSR_TX(ch), dtx_stat | DSR_DWE); ··· 2134 2135 /**** MSCI ****/ 2136 if (status & IR0_M(IR0_RXINTA, ch)) { 2137 + u8 st1 = cpc_readb(scabase + M_REG(ST1, ch)); 2138 2139 /* Clear MSCI interrupts */ 2140 cpc_writeb(scabase + M_REG(ST1, ch), st1); ··· 2176 } 2177 } 2178 2179 + static void falc_t1_loop_detection(pc300_t *card, int ch, u8 frs1) 2180 { 2181 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 2182 falc_t *pfalc = (falc_t *) & chan->falc; ··· 2201 } 2202 } 2203 2204 + static void falc_e1_loop_detection(pc300_t *card, int ch, u8 rsp) 2205 { 2206 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 2207 falc_t *pfalc = (falc_t *) & chan->falc; ··· 2231 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 2232 falc_t *pfalc = (falc_t *) & chan->falc; 2233 void __iomem *falcbase = card->hw.falcbase; 2234 + u8 isr0, isr3, gis; 2235 + u8 dummy; 2236 2237 while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) { 2238 if (gis & GIS_ISR0) { ··· 2278 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 2279 falc_t *pfalc = (falc_t *) & chan->falc; 2280 void __iomem *falcbase = card->hw.falcbase; 2281 + u8 isr1, isr2, isr3, gis, rsp; 2282 + u8 dummy; 2283 2284 while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) { 2285 rsp = cpc_readb(falcbase + F_REG(RSP, ch)); ··· 2361 static 
irqreturn_t cpc_intr(int irq, void *dev_id) 2362 { 2363 pc300_t *card = dev_id; 2364 + volatile u8 plx_status; 2365 2366 if (!card) { 2367 #ifdef PC300_DEBUG_INTR ··· 2400 2401 static void cpc_sca_status(pc300_t * card, int ch) 2402 { 2403 + u8 ilar; 2404 void __iomem *scabase = card->hw.scabase; 2405 unsigned long flags; 2406 ··· 2818 } 2819 } 2820 2821 + static int clock_rate_calc(u32 rate, u32 clock, int *br_io) 2822 { 2823 int br, tc; 2824 int br_pwr, error; ··· 2855 void __iomem *scabase = card->hw.scabase; 2856 void __iomem *plxbase = card->hw.plxbase; 2857 int ch = chan->channel; 2858 + u32 clkrate = chan->conf.phys_settings.clock_rate; 2859 + u32 clktype = chan->conf.phys_settings.clock_type; 2860 + u16 encoding = chan->conf.proto_settings.encoding; 2861 + u16 parity = chan->conf.proto_settings.parity; 2862 + u8 md0, md2; 2863 + 2864 /* Reset the channel */ 2865 cpc_writeb(scabase + M_REG(CMD, ch), CMD_CH_RST); 2866 ··· 3152 printk("pc300: cpc_open"); 3153 #endif 3154 3155 result = hdlc_open(dev); 3156 + 3157 + if (result) 3158 return result; 3159 3160 sprintf(ifr.ifr_name, "%s", dev->name); 3161 result = cpc_opench(d); ··· 3197 CPC_UNLOCK(card, flags); 3198 3199 hdlc_close(dev); 3200 + 3201 #ifdef CONFIG_PC300_MLPPP 3202 if (chan->conf.proto == PC300_PROTO_MLPPP) { 3203 cpc_tty_unregister_service(d); ··· 3210 return 0; 3211 } 3212 3213 + static u32 detect_ram(pc300_t * card) 3214 { 3215 + u32 i; 3216 + u8 data; 3217 void __iomem *rambase = card->hw.rambase; 3218 3219 card->hw.ramsize = PC300_RAMSIZE; 3220 /* Let's find out how much RAM is present on this board */ 3221 for (i = 0; i < card->hw.ramsize; i++) { 3222 + data = (u8)(i & 0xff); 3223 cpc_writeb(rambase + i, data); 3224 if (cpc_readb(rambase + i) != data) { 3225 break; ··· 3296 cpc_writeb(card->hw.scabase + DMER, 0x80); 3297 3298 if (card->hw.type == PC300_TE) { 3299 + u8 reg1; 3300 3301 /* Check CPLD version */ 3302 reg1 = cpc_readb(card->hw.falcbase + CPLD_REG1); ··· 3360 chan->nfree_tx_bd = N_DMA_TX_BUF; 3361 3362 d->chan = chan; 3363 d->trace_on = 0; 3364 d->line_on = 0; 3365 d->line_off = 0; ··· 3431 { 3432 static int first_time = 1; 3433 int err, eeprom_outdated = 0; 3434 + u16 device_id; 3435 pc300_t *card; 3436 3437 if (first_time) {
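Beyond the type substitutions, the interesting pc300_drv.c change is in cpc_open()/cpc_close(): the FIXME'd syncppp plumbing (the if_ptr/pppdev dance and the dead IF_PROTO_PPP branches) disappears, and the open path becomes hdlc_open() followed by the driver's own channel bring-up. The matching header change removed the monitor field from pc300chconf_t, which is why the transmit path loses its monitor-mode early return. A condensed sketch of the open flow under stated assumptions - how the pc300dev_t is recovered from the net_device and the exact unwind order are abbreviated here, not copied from the driver:

    static int cpc_open_sketch(struct net_device *dev, pc300dev_t *d)
    {
            int result;

            result = hdlc_open(dev);   /* generic HDLC bookkeeping first */
            if (result)
                    return result;     /* nothing of ours to unwind yet */

            result = cpc_opench(d);    /* driver channel bring-up */
            if (result) {
                    hdlc_close(dev);   /* illustrative unwind */
                    return result;
            }

            netif_start_queue(dev);
            return 0;
    }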
+147 -224
drivers/net/wan/sealevel.c
··· 8 * 9 * (c) Copyright 1999, 2001 Alan Cox 10 * (c) Copyright 2001 Red Hat Inc. 11 * 12 */ 13 ··· 20 #include <linux/netdevice.h> 21 #include <linux/if_arp.h> 22 #include <linux/delay.h> 23 #include <linux/ioport.h> 24 #include <linux/init.h> 25 #include <net/arp.h> ··· 29 #include <asm/io.h> 30 #include <asm/dma.h> 31 #include <asm/byteorder.h> 32 - #include <net/syncppp.h> 33 #include "z85230.h" 34 35 36 struct slvl_device 37 { 38 - void *if_ptr; /* General purpose pointer (used by SPPP) */ 39 struct z8530_channel *chan; 40 - struct ppp_device pppdev; 41 int channel; 42 }; 43 44 45 struct slvl_board 46 { 47 - struct slvl_device *dev[2]; 48 struct z8530_dev board; 49 int iobase; 50 }; ··· 50 * Network driver support routines 51 */ 52 53 /* 54 - * Frame receive. Simple for our card as we do sync ppp and there 55 * is no funny garbage involved 56 */ 57 - 58 static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb) 59 { 60 /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ 61 - skb_trim(skb, skb->len-2); 62 - skb->protocol=htons(ETH_P_WAN_PPP); 63 skb_reset_mac_header(skb); 64 - skb->dev=c->netdevice; 65 - /* 66 - * Send it to the PPP layer. We don't have time to process 67 - * it right now. 68 - */ 69 netif_rx(skb); 70 c->netdevice->last_rx = jiffies; 71 } 72 - 73 /* 74 * We've been placed in the UP state 75 - */ 76 - 77 static int sealevel_open(struct net_device *d) 78 { 79 - struct slvl_device *slvl=d->priv; 80 int err = -1; 81 int unit = slvl->channel; 82 - 83 /* 84 - * Link layer up. 85 */ 86 87 - switch(unit) 88 { 89 case 0: 90 - err=z8530_sync_dma_open(d, slvl->chan); 91 break; 92 case 1: 93 - err=z8530_sync_open(d, slvl->chan); 94 break; 95 } 96 - 97 - if(err) 98 return err; 99 - /* 100 - * Begin PPP 101 - */ 102 - err=sppp_open(d); 103 - if(err) 104 - { 105 - switch(unit) 106 - { 107 case 0: 108 z8530_sync_dma_close(d, slvl->chan); 109 break; 110 case 1: 111 z8530_sync_close(d, slvl->chan); 112 break; 113 - } 114 return err; 115 } 116 - 117 - slvl->chan->rx_function=sealevel_input; 118 - 119 /* 120 * Go go go 121 */ ··· 122 123 static int sealevel_close(struct net_device *d) 124 { 125 - struct slvl_device *slvl=d->priv; 126 int unit = slvl->channel; 127 - 128 /* 129 * Discard new frames 130 */ 131 - 132 - slvl->chan->rx_function=z8530_null_rx; 133 - 134 - /* 135 - * PPP off 136 - */ 137 - sppp_close(d); 138 - /* 139 - * Link layer down 140 - */ 141 142 netif_stop_queue(d); 143 - 144 - switch(unit) 145 { 146 case 0: 147 z8530_sync_dma_close(d, slvl->chan); ··· 148 149 static int sealevel_ioctl(struct net_device *d, struct ifreq *ifr, int cmd) 150 { 151 - /* struct slvl_device *slvl=d->priv; 152 z8530_ioctl(d,&slvl->sync.chanA,ifr,cmd) */ 153 - return sppp_do_ioctl(d, ifr,cmd); 154 - } 155 - 156 - static struct net_device_stats *sealevel_get_stats(struct net_device *d) 157 - { 158 - struct slvl_device *slvl=d->priv; 159 - if(slvl) 160 - return z8530_get_stats(slvl->chan); 161 - else 162 - return NULL; 163 } 164 165 /* 166 - * Passed PPP frames, fire them downwind. 
167 */ 168 - 169 static int sealevel_queue_xmit(struct sk_buff *skb, struct net_device *d) 170 { 171 - struct slvl_device *slvl=d->priv; 172 - return z8530_queue_xmit(slvl->chan, skb); 173 } 174 175 - static int sealevel_neigh_setup(struct neighbour *n) 176 { 177 - if (n->nud_state == NUD_NONE) { 178 - n->ops = &arp_broken_ops; 179 - n->output = n->ops->output; 180 } 181 return 0; 182 - } 183 - 184 - static int sealevel_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p) 185 - { 186 - if (p->tbl->family == AF_INET) { 187 - p->neigh_setup = sealevel_neigh_setup; 188 - p->ucast_probes = 0; 189 - p->mcast_probes = 0; 190 - } 191 - return 0; 192 - } 193 - 194 - static int sealevel_attach(struct net_device *dev) 195 - { 196 - struct slvl_device *sv = dev->priv; 197 - sppp_attach(&sv->pppdev); 198 - return 0; 199 - } 200 - 201 - static void sealevel_detach(struct net_device *dev) 202 - { 203 - sppp_detach(dev); 204 - } 205 - 206 - static void slvl_setup(struct net_device *d) 207 - { 208 - d->open = sealevel_open; 209 - d->stop = sealevel_close; 210 - d->init = sealevel_attach; 211 - d->uninit = sealevel_detach; 212 - d->hard_start_xmit = sealevel_queue_xmit; 213 - d->get_stats = sealevel_get_stats; 214 - d->set_multicast_list = NULL; 215 - d->do_ioctl = sealevel_ioctl; 216 - d->neigh_setup = sealevel_neigh_setup_dev; 217 - d->set_mac_address = NULL; 218 - 219 - } 220 - 221 - static inline struct slvl_device *slvl_alloc(int iobase, int irq) 222 - { 223 - struct net_device *d; 224 - struct slvl_device *sv; 225 - 226 - d = alloc_netdev(sizeof(struct slvl_device), "hdlc%d", 227 - slvl_setup); 228 - 229 - if (!d) 230 - return NULL; 231 - 232 - sv = d->priv; 233 - d->ml_priv = sv; 234 - sv->if_ptr = &sv->pppdev; 235 - sv->pppdev.dev = d; 236 - d->base_addr = iobase; 237 - d->irq = irq; 238 - 239 - return sv; 240 } 241 242 243 /* 244 * Allocate and setup Sealevel board. 245 */ 246 - 247 - static __init struct slvl_board *slvl_init(int iobase, int irq, 248 int txdma, int rxdma, int slow) 249 { 250 struct z8530_dev *dev; 251 struct slvl_board *b; 252 - 253 /* 254 * Get the needed I/O space 255 */ 256 257 - if(!request_region(iobase, 8, "Sealevel 4021")) 258 - { 259 - printk(KERN_WARNING "sealevel: I/O 0x%X already in use.\n", iobase); 260 return NULL; 261 } 262 - 263 b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL); 264 - if(!b) 265 - goto fail3; 266 267 - if (!(b->dev[0]= slvl_alloc(iobase, irq))) 268 - goto fail2; 269 270 - b->dev[0]->chan = &b->board.chanA; 271 - b->dev[0]->channel = 0; 272 - 273 - if (!(b->dev[1] = slvl_alloc(iobase, irq))) 274 - goto fail1_0; 275 - 276 - b->dev[1]->chan = &b->board.chanB; 277 - b->dev[1]->channel = 1; 278 279 dev = &b->board; 280 - 281 /* 282 * Stuff in the I/O addressing 283 */ 284 - 285 dev->active = 0; 286 287 b->iobase = iobase; 288 - 289 /* 290 * Select 8530 delays for the old board 291 */ 292 - 293 - if(slow) 294 iobase |= Z8530_PORT_SLEEP; 295 - 296 - dev->chanA.ctrlio=iobase+1; 297 - dev->chanA.dataio=iobase; 298 - dev->chanB.ctrlio=iobase+3; 299 - dev->chanB.dataio=iobase+2; 300 - 301 - dev->chanA.irqs=&z8530_nop; 302 - dev->chanB.irqs=&z8530_nop; 303 - 304 /* 305 * Assert DTR enable DMA 306 */ 307 - 308 - outb(3|(1<<7), b->iobase+4); 309 - 310 311 /* We want a fast IRQ for this device. 
Actually we'd like an even faster 312 IRQ ;) - This is one driver RtLinux is made for */ 313 - 314 - if(request_irq(irq, &z8530_interrupt, IRQF_DISABLED, "SeaLevel", dev)<0) 315 - { 316 - printk(KERN_WARNING "sealevel: IRQ %d already in use.\n", irq); 317 - goto fail1_1; 318 - } 319 - 320 - dev->irq=irq; 321 - dev->chanA.private=&b->dev[0]; 322 - dev->chanB.private=&b->dev[1]; 323 - dev->chanA.netdevice=b->dev[0]->pppdev.dev; 324 - dev->chanB.netdevice=b->dev[1]->pppdev.dev; 325 - dev->chanA.dev=dev; 326 - dev->chanB.dev=dev; 327 328 - dev->chanA.txdma=3; 329 - dev->chanA.rxdma=1; 330 - if(request_dma(dev->chanA.txdma, "SeaLevel (TX)")!=0) 331 - goto fail; 332 - 333 - if(request_dma(dev->chanA.rxdma, "SeaLevel (RX)")!=0) 334 - goto dmafail; 335 - 336 disable_irq(irq); 337 - 338 /* 339 * Begin normal initialise 340 */ 341 - 342 - if(z8530_init(dev)!=0) 343 - { 344 printk(KERN_ERR "Z8530 series device not found.\n"); 345 enable_irq(irq); 346 - goto dmafail2; 347 } 348 - if(dev->type==Z85C30) 349 - { 350 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream); 351 z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream); 352 - } 353 - else 354 - { 355 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230); 356 z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230); 357 } ··· 302 /* 303 * Now we can take the IRQ 304 */ 305 - 306 enable_irq(irq); 307 308 - if (register_netdev(b->dev[0]->pppdev.dev)) 309 - goto dmafail2; 310 - 311 - if (register_netdev(b->dev[1]->pppdev.dev)) 312 - goto fail_unit; 313 314 z8530_describe(dev, "I/O", iobase); 315 - dev->active=1; 316 return b; 317 318 - fail_unit: 319 - unregister_netdev(b->dev[0]->pppdev.dev); 320 - 321 - dmafail2: 322 free_dma(dev->chanA.rxdma); 323 - dmafail: 324 free_dma(dev->chanA.txdma); 325 - fail: 326 free_irq(irq, dev); 327 - fail1_1: 328 - free_netdev(b->dev[1]->pppdev.dev); 329 - fail1_0: 330 - free_netdev(b->dev[0]->pppdev.dev); 331 - fail2: 332 kfree(b); 333 - fail3: 334 - release_region(iobase,8); 335 return NULL; 336 } 337 ··· 335 int u; 336 337 z8530_shutdown(&b->board); 338 - 339 - for(u=0; u<2; u++) 340 { 341 - struct net_device *d = b->dev[u]->pppdev.dev; 342 - unregister_netdev(d); 343 free_netdev(d); 344 } 345 - 346 free_irq(b->board.irq, &b->board); 347 free_dma(b->board.chanA.rxdma); 348 free_dma(b->board.chanA.txdma); ··· 378 379 static int __init slvl_init_module(void) 380 { 381 - #ifdef MODULE 382 - printk(KERN_INFO "SeaLevel Z85230 Synchronous Driver v 0.02.\n"); 383 - printk(KERN_INFO "(c) Copyright 1998, Building Number Three Ltd.\n"); 384 - #endif 385 slvl_unit = slvl_init(io, irq, txdma, rxdma, slow); 386 387 return slvl_unit ? 0 : -ENODEV;
··· 8 * 9 * (c) Copyright 1999, 2001 Alan Cox 10 * (c) Copyright 2001 Red Hat Inc. 11 + * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl> 12 * 13 */ 14 ··· 19 #include <linux/netdevice.h> 20 #include <linux/if_arp.h> 21 #include <linux/delay.h> 22 + #include <linux/hdlc.h> 23 #include <linux/ioport.h> 24 #include <linux/init.h> 25 #include <net/arp.h> ··· 27 #include <asm/io.h> 28 #include <asm/dma.h> 29 #include <asm/byteorder.h> 30 #include "z85230.h" 31 32 33 struct slvl_device 34 { 35 struct z8530_channel *chan; 36 int channel; 37 }; 38 39 40 struct slvl_board 41 { 42 + struct slvl_device dev[2]; 43 struct z8530_dev board; 44 int iobase; 45 }; ··· 51 * Network driver support routines 52 */ 53 54 + static inline struct slvl_device* dev_to_chan(struct net_device *dev) 55 + { 56 + return (struct slvl_device *)dev_to_hdlc(dev)->priv; 57 + } 58 + 59 /* 60 + * Frame receive. Simple for our card as we do HDLC and there 61 * is no funny garbage involved 62 */ 63 + 64 static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb) 65 { 66 /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ 67 + skb_trim(skb, skb->len - 2); 68 + skb->protocol = hdlc_type_trans(skb, c->netdevice); 69 skb_reset_mac_header(skb); 70 + skb->dev = c->netdevice; 71 netif_rx(skb); 72 c->netdevice->last_rx = jiffies; 73 } 74 + 75 /* 76 * We've been placed in the UP state 77 + */ 78 + 79 static int sealevel_open(struct net_device *d) 80 { 81 + struct slvl_device *slvl = dev_to_chan(d); 82 int err = -1; 83 int unit = slvl->channel; 84 + 85 /* 86 + * Link layer up. 87 */ 88 89 + switch (unit) 90 { 91 case 0: 92 + err = z8530_sync_dma_open(d, slvl->chan); 93 break; 94 case 1: 95 + err = z8530_sync_open(d, slvl->chan); 96 break; 97 } 98 + 99 + if (err) 100 return err; 101 + 102 + err = hdlc_open(d); 103 + if (err) { 104 + switch (unit) { 105 case 0: 106 z8530_sync_dma_close(d, slvl->chan); 107 break; 108 case 1: 109 z8530_sync_close(d, slvl->chan); 110 break; 111 + } 112 return err; 113 } 114 + 115 + slvl->chan->rx_function = sealevel_input; 116 + 117 /* 118 * Go go go 119 */ ··· 126 127 static int sealevel_close(struct net_device *d) 128 { 129 + struct slvl_device *slvl = dev_to_chan(d); 130 int unit = slvl->channel; 131 + 132 /* 133 * Discard new frames 134 */ 135 136 + slvl->chan->rx_function = z8530_null_rx; 137 + 138 + hdlc_close(d); 139 netif_stop_queue(d); 140 + 141 + switch (unit) 142 { 143 case 0: 144 z8530_sync_dma_close(d, slvl->chan); ··· 159 160 static int sealevel_ioctl(struct net_device *d, struct ifreq *ifr, int cmd) 161 { 162 + /* struct slvl_device *slvl=dev_to_chan(d); 163 z8530_ioctl(d,&slvl->sync.chanA,ifr,cmd) */ 164 + return hdlc_ioctl(d, ifr, cmd); 165 } 166 167 /* 168 + * Passed network frames, fire them downwind. 
169 */ 170 + 171 static int sealevel_queue_xmit(struct sk_buff *skb, struct net_device *d) 172 { 173 + return z8530_queue_xmit(dev_to_chan(d)->chan, skb); 174 } 175 176 + static int sealevel_attach(struct net_device *dev, unsigned short encoding, 177 + unsigned short parity) 178 { 179 + if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT) 180 + return 0; 181 + return -EINVAL; 182 + } 183 + 184 + static int slvl_setup(struct slvl_device *sv, int iobase, int irq) 185 + { 186 + struct net_device *dev = alloc_hdlcdev(sv); 187 + if (!dev) 188 + return -1; 189 + 190 + dev_to_hdlc(dev)->attach = sealevel_attach; 191 + dev_to_hdlc(dev)->xmit = sealevel_queue_xmit; 192 + dev->open = sealevel_open; 193 + dev->stop = sealevel_close; 194 + dev->do_ioctl = sealevel_ioctl; 195 + dev->base_addr = iobase; 196 + dev->irq = irq; 197 + 198 + if (register_hdlc_device(dev)) { 199 + printk(KERN_ERR "sealevel: unable to register HDLC device\n"); 200 + free_netdev(dev); 201 + return -1; 202 } 203 + 204 + sv->chan->netdevice = dev; 205 return 0; 206 } 207 208 209 /* 210 * Allocate and setup Sealevel board. 211 */ 212 + 213 + static __init struct slvl_board *slvl_init(int iobase, int irq, 214 int txdma, int rxdma, int slow) 215 { 216 struct z8530_dev *dev; 217 struct slvl_board *b; 218 + 219 /* 220 * Get the needed I/O space 221 */ 222 223 + if (!request_region(iobase, 8, "Sealevel 4021")) { 224 + printk(KERN_WARNING "sealevel: I/O 0x%X already in use.\n", 225 + iobase); 226 return NULL; 227 } 228 + 229 b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL); 230 + if (!b) 231 + goto err_kzalloc; 232 233 + b->dev[0].chan = &b->board.chanA; 234 + b->dev[0].channel = 0; 235 236 + b->dev[1].chan = &b->board.chanB; 237 + b->dev[1].channel = 1; 238 239 dev = &b->board; 240 + 241 /* 242 * Stuff in the I/O addressing 243 */ 244 + 245 dev->active = 0; 246 247 b->iobase = iobase; 248 + 249 /* 250 * Select 8530 delays for the old board 251 */ 252 + 253 + if (slow) 254 iobase |= Z8530_PORT_SLEEP; 255 + 256 + dev->chanA.ctrlio = iobase + 1; 257 + dev->chanA.dataio = iobase; 258 + dev->chanB.ctrlio = iobase + 3; 259 + dev->chanB.dataio = iobase + 2; 260 + 261 + dev->chanA.irqs = &z8530_nop; 262 + dev->chanB.irqs = &z8530_nop; 263 + 264 /* 265 * Assert DTR enable DMA 266 */ 267 + 268 + outb(3 | (1 << 7), b->iobase + 4); 269 + 270 271 /* We want a fast IRQ for this device. 
Actually we'd like an even faster 272 IRQ ;) - This is one driver RtLinux is made for */ 273 274 + if (request_irq(irq, &z8530_interrupt, IRQF_DISABLED, 275 + "SeaLevel", dev) < 0) { 276 + printk(KERN_WARNING "sealevel: IRQ %d already in use.\n", irq); 277 + goto err_request_irq; 278 + } 279 + 280 + dev->irq = irq; 281 + dev->chanA.private = &b->dev[0]; 282 + dev->chanB.private = &b->dev[1]; 283 + dev->chanA.dev = dev; 284 + dev->chanB.dev = dev; 285 + 286 + dev->chanA.txdma = 3; 287 + dev->chanA.rxdma = 1; 288 + if (request_dma(dev->chanA.txdma, "SeaLevel (TX)")) 289 + goto err_dma_tx; 290 + 291 + if (request_dma(dev->chanA.rxdma, "SeaLevel (RX)")) 292 + goto err_dma_rx; 293 + 294 disable_irq(irq); 295 + 296 /* 297 * Begin normal initialise 298 */ 299 + 300 + if (z8530_init(dev) != 0) { 301 printk(KERN_ERR "Z8530 series device not found.\n"); 302 enable_irq(irq); 303 + goto free_hw; 304 } 305 + if (dev->type == Z85C30) { 306 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream); 307 z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream); 308 + } else { 309 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230); 310 z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230); 311 } ··· 370 /* 371 * Now we can take the IRQ 372 */ 373 + 374 enable_irq(irq); 375 376 + if (slvl_setup(&b->dev[0], iobase, irq)) 377 + goto free_hw; 378 + if (slvl_setup(&b->dev[1], iobase, irq)) 379 + goto free_netdev0; 380 381 z8530_describe(dev, "I/O", iobase); 382 + dev->active = 1; 383 return b; 384 385 + free_netdev0: 386 + unregister_hdlc_device(b->dev[0].chan->netdevice); 387 + free_netdev(b->dev[0].chan->netdevice); 388 + free_hw: 389 free_dma(dev->chanA.rxdma); 390 + err_dma_rx: 391 free_dma(dev->chanA.txdma); 392 + err_dma_tx: 393 free_irq(irq, dev); 394 + err_request_irq: 395 kfree(b); 396 + err_kzalloc: 397 + release_region(iobase, 8); 398 return NULL; 399 } 400 ··· 408 int u; 409 410 z8530_shutdown(&b->board); 411 + 412 + for (u = 0; u < 2; u++) 413 { 414 + struct net_device *d = b->dev[u].chan->netdevice; 415 + unregister_hdlc_device(d); 416 free_netdev(d); 417 } 418 + 419 free_irq(b->board.irq, &b->board); 420 free_dma(b->board.chanA.rxdma); 421 free_dma(b->board.chanA.txdma); ··· 451 452 static int __init slvl_init_module(void) 453 { 454 slvl_unit = slvl_init(io, irq, txdma, rxdma, slow); 455 456 return slvl_unit ? 0 : -ENODEV;
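The rewritten sealevel.c is the clearest template in this series for porting a z85230 user to generic HDLC: the per-channel ppp_device and if_ptr go away, the net_device is allocated by the HDLC core, and open/close bracket the hardware calls with hdlc_open()/hdlc_close(). Distilling the slvl_setup() hunk into the bare registration pattern (hook names mirror the driver; treat this as a template rather than a second implementation):

    static struct net_device *hdlc_port_setup(struct slvl_device *sv,
                                              int iobase, int irq)
    {
            struct net_device *dev = alloc_hdlcdev(sv); /* sv becomes hdlc priv */

            if (!dev)
                    return NULL;

            dev_to_hdlc(dev)->attach = sealevel_attach; /* encoding/parity check */
            dev_to_hdlc(dev)->xmit = sealevel_queue_xmit;
            dev->open = sealevel_open;
            dev->stop = sealevel_close;
            dev->do_ioctl = sealevel_ioctl;
            dev->base_addr = iobase;
            dev->irq = irq;

            if (register_hdlc_device(dev)) {
                    free_netdev(dev);
                    return NULL;
            }
            return dev;
    }

On the receive side, hdlc_type_trans() replaces the hard-wired htons(ETH_P_WAN_PPP), so the protocol field now follows whatever mode is attached to the HDLC device.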
-9
drivers/net/wan/syncppp.c
··· 230 skb->dev=dev; 231 skb_reset_mac_header(skb); 232 233 - if (dev->flags & IFF_RUNNING) 234 - { 235 - /* Count received bytes, add FCS and one flag */ 236 - sp->ibytes+= skb->len + 3; 237 - sp->ipkts++; 238 - } 239 - 240 if (!pskb_may_pull(skb, PPP_HEADER_LEN)) { 241 /* Too small packet, drop it. */ 242 if (sp->pp_flags & PP_DEBUG) ··· 825 sppp_print_bytes ((u8*) (lh+1), len); 826 printk (">\n"); 827 } 828 - sp->obytes += skb->len; 829 /* Control is high priority so it doesn't get queued behind data */ 830 skb->priority=TC_PRIO_CONTROL; 831 skb->dev = dev; ··· 867 printk (KERN_WARNING "%s: cisco output: <%xh %xh %xh %xh %xh-%xh>\n", 868 dev->name, ntohl (ch->type), ch->par1, 869 ch->par2, ch->rel, ch->time0, ch->time1); 870 - sp->obytes += skb->len; 871 skb->priority=TC_PRIO_CONTROL; 872 skb->dev = dev; 873 skb_queue_tail(&tx_queue, skb);
··· 230 skb->dev=dev; 231 skb_reset_mac_header(skb); 232 233 if (!pskb_may_pull(skb, PPP_HEADER_LEN)) { 234 /* Too small packet, drop it. */ 235 if (sp->pp_flags & PP_DEBUG) ··· 832 sppp_print_bytes ((u8*) (lh+1), len); 833 printk (">\n"); 834 } 835 /* Control is high priority so it doesn't get queued behind data */ 836 skb->priority=TC_PRIO_CONTROL; 837 skb->dev = dev; ··· 875 printk (KERN_WARNING "%s: cisco output: <%xh %xh %xh %xh %xh-%xh>\n", 876 dev->name, ntohl (ch->type), ch->par1, 877 ch->par2, ch->rel, ch->time0, ch->time1); 878 skb->priority=TC_PRIO_CONTROL; 879 skb->dev = dev; 880 skb_queue_tail(&tx_queue, skb);
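All three syncppp.c hunks delete soft statistics kept inside struct sppp (ibytes/ipkts on receive, obytes on the two control-frame transmit paths, PPP and Cisco keepalive). With the drivers now accounting through the net_device, those counters duplicated what the driver already records; the driver-side equivalent, visible in the z85230.c hunks below, is simply:

    /* in the driver's rx completion path (see z85230.c) */
    c->netdevice->stats.rx_packets++;
    c->netdevice->stats.rx_bytes += ct;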
+77 -116
drivers/net/wan/z85230.c
··· 43 #include <linux/netdevice.h> 44 #include <linux/if_arp.h> 45 #include <linux/delay.h> 46 #include <linux/ioport.h> 47 #include <linux/init.h> 48 #include <asm/dma.h> ··· 52 #define RT_UNLOCK 53 #include <linux/spinlock.h> 54 55 - #include <net/syncppp.h> 56 #include "z85230.h" 57 58 ··· 440 * A status event occurred in PIO synchronous mode. There are several 441 * reasons the chip will bother us here. A transmit underrun means we 442 * failed to feed the chip fast enough and just broke a packet. A DCD 443 - * change is a line up or down. We communicate that back to the protocol 444 - * layer for synchronous PPP to renegotiate. 445 */ 446 447 static void z8530_status(struct z8530_channel *chan) 448 { 449 u8 status, altered; 450 451 - status=read_zsreg(chan, R0); 452 - altered=chan->status^status; 453 - 454 - chan->status=status; 455 - 456 - if(status&TxEOM) 457 - { 458 /* printk("%s: Tx underrun.\n", chan->dev->name); */ 459 - chan->stats.tx_fifo_errors++; 460 write_zsctrl(chan, ERR_RES); 461 z8530_tx_done(chan); 462 } 463 - 464 - if(altered&chan->dcdcheck) 465 { 466 - if(status&chan->dcdcheck) 467 - { 468 printk(KERN_INFO "%s: DCD raised\n", chan->dev->name); 469 - write_zsreg(chan, R3, chan->regs[3]|RxENABLE); 470 - if(chan->netdevice && 471 - ((chan->netdevice->type == ARPHRD_HDLC) || 472 - (chan->netdevice->type == ARPHRD_PPP))) 473 - sppp_reopen(chan->netdevice); 474 - } 475 - else 476 - { 477 printk(KERN_INFO "%s: DCD lost\n", chan->dev->name); 478 - write_zsreg(chan, R3, chan->regs[3]&~RxENABLE); 479 z8530_flush_fifo(chan); 480 } 481 - 482 - } 483 write_zsctrl(chan, RES_EXT_INT); 484 write_zsctrl(chan, RES_H_IUS); 485 } 486 487 - struct z8530_irqhandler z8530_sync= 488 { 489 z8530_rx, 490 z8530_tx, ··· 551 * 552 * A status event occurred on the Z8530. We receive these for two reasons 553 * when in DMA mode. Firstly if we finished a packet transfer we get one 554 - * and kick the next packet out. Secondly we may see a DCD change and 555 - * have to poke the protocol layer. 556 * 557 */ 558 ··· 580 } 581 } 582 583 - if(altered&chan->dcdcheck) 584 { 585 - if(status&chan->dcdcheck) 586 - { 587 printk(KERN_INFO "%s: DCD raised\n", chan->dev->name); 588 - write_zsreg(chan, R3, chan->regs[3]|RxENABLE); 589 - if(chan->netdevice && 590 - ((chan->netdevice->type == ARPHRD_HDLC) || 591 - (chan->netdevice->type == ARPHRD_PPP))) 592 - sppp_reopen(chan->netdevice); 593 - } 594 - else 595 - { 596 printk(KERN_INFO "%s:DCD lost\n", chan->dev->name); 597 - write_zsreg(chan, R3, chan->regs[3]&~RxENABLE); 598 z8530_flush_fifo(chan); 599 } 600 - } 601 602 write_zsctrl(chan, RES_EXT_INT); 603 write_zsctrl(chan, RES_H_IUS); ··· 1450 /* 1451 * Check if we crapped out. 1452 */ 1453 - if(get_dma_residue(c->txdma)) 1454 { 1455 - c->stats.tx_dropped++; 1456 - c->stats.tx_fifo_errors++; 1457 } 1458 release_dma_lock(flags); 1459 } ··· 1525 * packet. This code is fairly timing sensitive. 1526 * 1527 * Called with the register lock held. 1528 - */ 1529 - 1530 static void z8530_tx_done(struct z8530_channel *c) 1531 { 1532 struct sk_buff *skb; 1533 1534 /* Actually this can happen.*/ 1535 - if(c->tx_skb==NULL) 1536 return; 1537 1538 - skb=c->tx_skb; 1539 - c->tx_skb=NULL; 1540 z8530_tx_begin(c); 1541 - c->stats.tx_packets++; 1542 - c->stats.tx_bytes+=skb->len; 1543 dev_kfree_skb_irq(skb); 1544 } 1545 ··· 1549 * @skb: The buffer 1550 * 1551 * We point the receive handler at this function when idle. Instead 1552 - * of syncppp processing the frames we get to throw them away. 
1553 */ 1554 1555 void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb) ··· 1626 else 1627 /* Can't occur as we dont reenable the DMA irq until 1628 after the flip is done */ 1629 - printk(KERN_WARNING "%s: DMA flip overrun!\n", c->netdevice->name); 1630 - 1631 release_dma_lock(flags); 1632 - 1633 /* 1634 * Shove the old buffer into an sk_buff. We can't DMA 1635 * directly into one on a PC - it might be above the 16Mb ··· 1638 * can avoid the copy. Optimisation 2 - make the memcpy 1639 * a copychecksum. 1640 */ 1641 - 1642 - skb=dev_alloc_skb(ct); 1643 - if(skb==NULL) 1644 - { 1645 - c->stats.rx_dropped++; 1646 - printk(KERN_WARNING "%s: Memory squeeze.\n", c->netdevice->name); 1647 - } 1648 - else 1649 - { 1650 skb_put(skb, ct); 1651 skb_copy_to_linear_data(skb, rxb, ct); 1652 - c->stats.rx_packets++; 1653 - c->stats.rx_bytes+=ct; 1654 } 1655 - c->dma_ready=1; 1656 - } 1657 - else 1658 - { 1659 - RT_LOCK; 1660 - skb=c->skb; 1661 - 1662 /* 1663 * The game we play for non DMA is similar. We want to 1664 * get the controller set up for the next packet as fast ··· 1665 * if you build a system where the sync irq isnt blocked 1666 * by the kernel IRQ disable then you need only block the 1667 * sync IRQ for the RT_LOCK area. 1668 - * 1669 */ 1670 ct=c->count; 1671 - 1672 c->skb = c->skb2; 1673 c->count = 0; 1674 c->max = c->mtu; 1675 - if(c->skb) 1676 - { 1677 c->dptr = c->skb->data; 1678 c->max = c->mtu; 1679 - } 1680 - else 1681 - { 1682 - c->count= 0; 1683 c->max = 0; 1684 } 1685 RT_UNLOCK; 1686 1687 c->skb2 = dev_alloc_skb(c->mtu); 1688 - if(c->skb2==NULL) 1689 printk(KERN_WARNING "%s: memory squeeze.\n", 1690 - c->netdevice->name); 1691 else 1692 - { 1693 - skb_put(c->skb2,c->mtu); 1694 - } 1695 - c->stats.rx_packets++; 1696 - c->stats.rx_bytes+=ct; 1697 - 1698 } 1699 /* 1700 * If we received a frame we must now process it. 1701 */ 1702 - if(skb) 1703 - { 1704 skb_trim(skb, ct); 1705 - c->rx_function(c,skb); 1706 - } 1707 - else 1708 - { 1709 - c->stats.rx_dropped++; 1710 printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name); 1711 } 1712 } ··· 1709 * Returns true if the buffer cross a DMA boundary on a PC. The poor 1710 * thing can only DMA within a 64K block not across the edges of it. 1711 */ 1712 - 1713 static inline int spans_boundary(struct sk_buff *skb) 1714 { 1715 unsigned long a=(unsigned long)skb->data; ··· 1777 } 1778 1779 EXPORT_SYMBOL(z8530_queue_xmit); 1780 - 1781 - /** 1782 - * z8530_get_stats - Get network statistics 1783 - * @c: The channel to use 1784 - * 1785 - * Get the statistics block. We keep the statistics in software as 1786 - * the chip doesn't do it for us. 1787 - * 1788 - * Locking is ignored here - we could lock for a copy but its 1789 - * not likely to be that big an issue 1790 - */ 1791 - 1792 - struct net_device_stats *z8530_get_stats(struct z8530_channel *c) 1793 - { 1794 - return &c->stats; 1795 - } 1796 - 1797 - EXPORT_SYMBOL(z8530_get_stats); 1798 1799 /* 1800 * Module support
··· 43 #include <linux/netdevice.h> 44 #include <linux/if_arp.h> 45 #include <linux/delay.h> 46 + #include <linux/hdlc.h> 47 #include <linux/ioport.h> 48 #include <linux/init.h> 49 #include <asm/dma.h> ··· 51 #define RT_UNLOCK 52 #include <linux/spinlock.h> 53 54 #include "z85230.h" 55 56 ··· 440 * A status event occurred in PIO synchronous mode. There are several 441 * reasons the chip will bother us here. A transmit underrun means we 442 * failed to feed the chip fast enough and just broke a packet. A DCD 443 + * change is a line up or down. 444 */ 445 446 static void z8530_status(struct z8530_channel *chan) 447 { 448 u8 status, altered; 449 450 + status = read_zsreg(chan, R0); 451 + altered = chan->status ^ status; 452 + 453 + chan->status = status; 454 + 455 + if (status & TxEOM) { 456 /* printk("%s: Tx underrun.\n", chan->dev->name); */ 457 + chan->netdevice->stats.tx_fifo_errors++; 458 write_zsctrl(chan, ERR_RES); 459 z8530_tx_done(chan); 460 } 461 + 462 + if (altered & chan->dcdcheck) 463 { 464 + if (status & chan->dcdcheck) { 465 printk(KERN_INFO "%s: DCD raised\n", chan->dev->name); 466 + write_zsreg(chan, R3, chan->regs[3] | RxENABLE); 467 + if (chan->netdevice) 468 + netif_carrier_on(chan->netdevice); 469 + } else { 470 printk(KERN_INFO "%s: DCD lost\n", chan->dev->name); 471 + write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE); 472 z8530_flush_fifo(chan); 473 + if (chan->netdevice) 474 + netif_carrier_off(chan->netdevice); 475 } 476 + 477 + } 478 write_zsctrl(chan, RES_EXT_INT); 479 write_zsctrl(chan, RES_H_IUS); 480 } 481 482 + struct z8530_irqhandler z8530_sync = 483 { 484 z8530_rx, 485 z8530_tx, ··· 556 * 557 * A status event occurred on the Z8530. We receive these for two reasons 558 * when in DMA mode. Firstly if we finished a packet transfer we get one 559 + * and kick the next packet out. Secondly we may see a DCD change. 560 * 561 */ 562 ··· 586 } 587 } 588 589 + if (altered & chan->dcdcheck) 590 { 591 + if (status & chan->dcdcheck) { 592 printk(KERN_INFO "%s: DCD raised\n", chan->dev->name); 593 + write_zsreg(chan, R3, chan->regs[3] | RxENABLE); 594 + if (chan->netdevice) 595 + netif_carrier_on(chan->netdevice); 596 + } else { 597 printk(KERN_INFO "%s:DCD lost\n", chan->dev->name); 598 + write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE); 599 z8530_flush_fifo(chan); 600 + if (chan->netdevice) 601 + netif_carrier_off(chan->netdevice); 602 } 603 + } 604 605 write_zsctrl(chan, RES_EXT_INT); 606 write_zsctrl(chan, RES_H_IUS); ··· 1459 /* 1460 * Check if we crapped out. 1461 */ 1462 + if (get_dma_residue(c->txdma)) 1463 { 1464 + c->netdevice->stats.tx_dropped++; 1465 + c->netdevice->stats.tx_fifo_errors++; 1466 } 1467 release_dma_lock(flags); 1468 } ··· 1534 * packet. This code is fairly timing sensitive. 1535 * 1536 * Called with the register lock held. 1537 + */ 1538 + 1539 static void z8530_tx_done(struct z8530_channel *c) 1540 { 1541 struct sk_buff *skb; 1542 1543 /* Actually this can happen.*/ 1544 + if (c->tx_skb == NULL) 1545 return; 1546 1547 + skb = c->tx_skb; 1548 + c->tx_skb = NULL; 1549 z8530_tx_begin(c); 1550 + c->netdevice->stats.tx_packets++; 1551 + c->netdevice->stats.tx_bytes += skb->len; 1552 dev_kfree_skb_irq(skb); 1553 } 1554 ··· 1558 * @skb: The buffer 1559 * 1560 * We point the receive handler at this function when idle. Instead 1561 + * of processing the frames we get to throw them away. 
1562 */ 1563 1564 void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb) ··· 1635 else 1636 /* Can't occur as we dont reenable the DMA irq until 1637 after the flip is done */ 1638 + printk(KERN_WARNING "%s: DMA flip overrun!\n", 1639 + c->netdevice->name); 1640 + 1641 release_dma_lock(flags); 1642 + 1643 /* 1644 * Shove the old buffer into an sk_buff. We can't DMA 1645 * directly into one on a PC - it might be above the 16Mb ··· 1646 * can avoid the copy. Optimisation 2 - make the memcpy 1647 * a copychecksum. 1648 */ 1649 + 1650 + skb = dev_alloc_skb(ct); 1651 + if (skb == NULL) { 1652 + c->netdevice->stats.rx_dropped++; 1653 + printk(KERN_WARNING "%s: Memory squeeze.\n", 1654 + c->netdevice->name); 1655 + } else { 1656 skb_put(skb, ct); 1657 skb_copy_to_linear_data(skb, rxb, ct); 1658 + c->netdevice->stats.rx_packets++; 1659 + c->netdevice->stats.rx_bytes += ct; 1660 } 1661 + c->dma_ready = 1; 1662 + } else { 1663 + RT_LOCK; 1664 + skb = c->skb; 1665 + 1666 /* 1667 * The game we play for non DMA is similar. We want to 1668 * get the controller set up for the next packet as fast ··· 1677 * if you build a system where the sync irq isnt blocked 1678 * by the kernel IRQ disable then you need only block the 1679 * sync IRQ for the RT_LOCK area. 1680 + * 1681 */ 1682 ct=c->count; 1683 + 1684 c->skb = c->skb2; 1685 c->count = 0; 1686 c->max = c->mtu; 1687 + if (c->skb) { 1688 c->dptr = c->skb->data; 1689 c->max = c->mtu; 1690 + } else { 1691 + c->count = 0; 1692 c->max = 0; 1693 } 1694 RT_UNLOCK; 1695 1696 c->skb2 = dev_alloc_skb(c->mtu); 1697 + if (c->skb2 == NULL) 1698 printk(KERN_WARNING "%s: memory squeeze.\n", 1699 + c->netdevice->name); 1700 else 1701 + skb_put(c->skb2, c->mtu); 1702 + c->netdevice->stats.rx_packets++; 1703 + c->netdevice->stats.rx_bytes += ct; 1704 } 1705 /* 1706 * If we received a frame we must now process it. 1707 */ 1708 + if (skb) { 1709 skb_trim(skb, ct); 1710 + c->rx_function(c, skb); 1711 + } else { 1712 + c->netdevice->stats.rx_dropped++; 1713 printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name); 1714 } 1715 } ··· 1730 * Returns true if the buffer cross a DMA boundary on a PC. The poor 1731 * thing can only DMA within a 64K block not across the edges of it. 1732 */ 1733 + 1734 static inline int spans_boundary(struct sk_buff *skb) 1735 { 1736 unsigned long a=(unsigned long)skb->data; ··· 1798 } 1799 1800 EXPORT_SYMBOL(z8530_queue_xmit); 1801 1802 /* 1803 * Module support
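The z85230 hunks above fold the driver-private statistics block into the counters embedded in struct net_device (c->netdevice->stats) and raise carrier events with netif_carrier_on()/netif_carrier_off() on DCD transitions, which the generic HDLC layer uses for link-state handling. A port driver's receive hook correspondingly shrinks to classifying the frame for generic HDLC. A minimal sketch of that shape follows; example_rx is an illustrative name, not a symbol from this patch:

#include <linux/hdlc.h>
#include <linux/netdevice.h>
#include "z85230.h"

/* Minimal sketch of an rx_function for a converted port driver.
 * Packet and byte counters are already accounted to
 * c->netdevice->stats by z85230 itself, so the hook only has to
 * hand the frame to the generic HDLC layer. Real drivers usually
 * trim the trailing CRC bytes before doing so. */
static void example_rx(struct z8530_channel *c, struct sk_buff *skb)
{
        skb->protocol = hdlc_type_trans(skb, c->netdevice);
        netif_rx(skb);
}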
+4 -6
drivers/net/wan/z85230.h
··· 325 326 void *private; /* For our owner */ 327 struct net_device *netdevice; /* Network layer device */ 328 - struct net_device_stats stats; /* Network layer statistics */ 329 330 /* 331 * Async features ··· 365 unsigned char tx_active; /* character is being xmitted */ 366 unsigned char tx_stopped; /* output is suspended */ 367 368 - spinlock_t *lock; /* Devicr lock */ 369 - }; 370 371 /* 372 * Each Z853x0 device. 373 - */ 374 - 375 struct z8530_dev 376 { 377 char *name; /* Device instance name */ ··· 407 extern int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *); 408 extern int z8530_channel_load(struct z8530_channel *, u8 *); 409 extern int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb); 410 - extern struct net_device_stats *z8530_get_stats(struct z8530_channel *c); 411 extern void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb); 412 413
··· 325 326 void *private; /* For our owner */ 327 struct net_device *netdevice; /* Network layer device */ 328 329 /* 330 * Async features ··· 366 unsigned char tx_active; /* character is being xmitted */ 367 unsigned char tx_stopped; /* output is suspended */ 368 369 + spinlock_t *lock; /* Device lock */ 370 + }; 371 372 /* 373 * Each Z853x0 device. 374 + */ 375 + 376 struct z8530_dev 377 { 378 char *name; /* Device instance name */ ··· 408 extern int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *); 409 extern int z8530_channel_load(struct z8530_channel *, u8 *); 410 extern int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb); 411 extern void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb); 412 413
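With both the embedded struct net_device_stats and the z8530_get_stats() prototype gone from the header, a card driver needs no statistics hook at all: the core installs a default get_stats handler that returns &dev->stats, and z85230 now updates exactly those counters. A before/after sketch, with the helper names illustrative:

/* Before: wrap the Z8530 layer's private counters.
 * example_channel_of() is an assumed lookup helper. */
static struct net_device_stats *example_get_stats(struct net_device *dev)
{
        return z8530_get_stats(example_channel_of(dev)); /* API removed above */
}

/* After: nothing to register - the core's default get_stats
 * already returns &dev->stats, which z85230 keeps current. */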
-7
drivers/net/wireless/orinoco.c
··· 1998 else 1999 priv->mc_count = mc_count; 2000 } 2001 - 2002 - /* Since we can set the promiscuous flag when it wasn't asked 2003 - for, make sure the net_device knows about it. */ 2004 - if (priv->promiscuous) 2005 - dev->flags |= IFF_PROMISC; 2006 - else 2007 - dev->flags &= ~IFF_PROMISC; 2008 } 2009 2010 /* This must be called from user context, without locks held - use
··· 1998 else 1999 priv->mc_count = mc_count; 2000 } 2001 } 2002 2003 /* This must be called from user context, without locks held - use
-3
drivers/net/wireless/wavelan.c
··· 1409 lp->mc_count = 0; 1410 1411 wv_82586_reconfig(dev); 1412 - 1413 - /* Tell the kernel that we are doing a really bad job. */ 1414 - dev->flags |= IFF_PROMISC; 1415 } 1416 } else 1417 /* Are there multicast addresses to send? */
··· 1409 lp->mc_count = 0; 1410 1411 wv_82586_reconfig(dev); 1412 } 1413 } else 1414 /* Are there multicast addresses to send? */
-6
drivers/net/wireless/wavelan_cs.c
··· 1412 lp->mc_count = 0; 1413 1414 wv_82593_reconfig(dev); 1415 - 1416 - /* Tell the kernel that we are doing a really bad job... */ 1417 - dev->flags |= IFF_PROMISC; 1418 } 1419 } 1420 else ··· 1430 lp->mc_count = 0; 1431 1432 wv_82593_reconfig(dev); 1433 - 1434 - /* Tell the kernel that we are doing a really bad job... */ 1435 - dev->flags |= IFF_ALLMULTI; 1436 } 1437 } 1438 else
··· 1412 lp->mc_count = 0; 1413 1414 wv_82593_reconfig(dev); 1415 } 1416 } 1417 else ··· 1433 lp->mc_count = 0; 1434 1435 wv_82593_reconfig(dev); 1436 } 1437 } 1438 else
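The orinoco, wavelan and wavelan_cs deletions above all remove the same hack: writing IFF_PROMISC or IFF_ALLMULTI back into dev->flags from inside the driver's set_multicast_list callback when the hardware filter overflows. Those flags belong to the networking core; a driver that must fall back to a wider receive mode should record that only in its own state. A minimal sketch of the resulting convention, with all names illustrative:

#define EXAMPLE_MAX_MC  16      /* assumed hardware filter depth */

struct example_priv {
        int hw_promisc;         /* wider mode, tracked privately */
};

static void example_program_rx_filter(struct example_priv *priv);

static void example_set_multicast_list(struct net_device *dev)
{
        struct example_priv *priv = netdev_priv(dev);

        /* Read the core's intent; never write dev->flags back. */
        priv->hw_promisc = (dev->flags & IFF_PROMISC) ||
                           dev->mc_count > EXAMPLE_MAX_MC;

        example_program_rx_filter(priv);
}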
+1 -1
drivers/net/xen-netfront.c
··· 329 } 330 spin_unlock_bh(&np->rx_lock); 331 332 - xennet_maybe_wake_tx(dev); 333 334 return 0; 335 }
··· 329 } 330 spin_unlock_bh(&np->rx_lock); 331 332 + netif_start_queue(dev); 333 334 return 0; 335 }
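The xen-netfront change swaps the driver's conditional wake helper for a plain netif_start_queue() in what appears to be the open path (note the return 0 after the rx_lock is dropped). The two calls are not interchangeable: netif_start_queue() simply marks the queue running and is the right call when bringing an interface up, while wake-style helpers such as netif_wake_queue(), or a driver wrapper like xennet_maybe_wake_tx() here, reschedule a queue stopped mid-operation and may consult transmit-ring state that is not yet meaningful at open time. An illustrative split (generic driver, not xen-netfront; example_tx_space() is an assumed ring-occupancy check):

static int example_open(struct net_device *dev)
{
        /* ... rings allocated, interrupt hooked up ... */
        netif_start_queue(dev); /* first start, no ring state read */
        return 0;
}

static void example_tx_complete(struct net_device *dev)
{
        if (netif_queue_stopped(dev) && example_tx_space(dev))
                netif_wake_queue(dev);  /* restart a stopped queue */
}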
+1
include/linux/dm9000.h
··· 27 28 struct dm9000_plat_data { 29 unsigned int flags; 30 31 /* allow replacement IO routines */ 32
··· 27 28 struct dm9000_plat_data { 29 unsigned int flags; 30 + unsigned char dev_addr[6]; 31 32 /* allow replacement IO routines */ 33
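The new dev_addr field gives board code a way to hand the DM9000 driver a fixed MAC address through platform data, presumably for boards whose chip has no usable EEPROM. A sketch of board-support usage; the flag choice and the locally administered address are illustrative, and a real board file would also supply the chip's memory and IRQ resources:

#include <linux/platform_device.h>
#include <linux/dm9000.h>

static struct dm9000_plat_data example_dm9000_pdata = {
        .flags    = DM9000_PLATF_16BITONLY,
        .dev_addr = { 0x02, 0x00, 0x00, 0x12, 0x34, 0x56 },
};

static struct platform_device example_dm9000_device = {
        .name = "dm9000",
        .id   = 0,
        .dev  = {
                .platform_data = &example_dm9000_pdata,
        },
};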
+16 -1
include/linux/ethtool.h
··· 27 __u8 autoneg; /* Enable or disable autonegotiation */ 28 __u32 maxtxpkt; /* Tx pkts before generating tx int */ 29 __u32 maxrxpkt; /* Rx pkts before generating rx int */ 30 - __u32 reserved[4]; 31 }; 32 33 #define ETHTOOL_BUSINFO_LEN 32 34 /* these strings are set to whatever the driver author decides... */
··· 27 __u8 autoneg; /* Enable or disable autonegotiation */ 28 __u32 maxtxpkt; /* Tx pkts before generating tx int */ 29 __u32 maxrxpkt; /* Rx pkts before generating rx int */ 30 + __u16 speed_hi; 31 + __u16 reserved2; 32 + __u32 reserved[3]; 33 }; 34 + 35 + static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep, 36 + __u32 speed) 37 + { 38 + 39 + ep->speed = (__u16)speed; 40 + ep->speed_hi = (__u16)(speed >> 16); 41 + } 42 + 43 + static inline __u32 ethtool_cmd_speed(struct ethtool_cmd *ep) 44 + { 45 + return (ep->speed_hi << 16) | ep->speed; 46 + } 47 48 #define ETHTOOL_BUSINFO_LEN 32 49 /* these strings are set to whatever the driver author decides... */
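speed_hi takes 16 bits from the reserved area so that link speeds above 65535 Mb/s fit without disturbing the legacy 16-bit speed field, and the two inline helpers keep callers from open-coding the split. Worked example: 100000 Mb/s is 0x186a0, so ethtool_cmd_speed_set() stores speed = 0x86a0 and speed_hi = 0x0001, and ethtool_cmd_speed() reassembles (0x0001 << 16) | 0x86a0 = 100000. A hypothetical driver hook using the helpers from the hunk above:

static int example_get_settings(struct net_device *dev,
                                struct ethtool_cmd *cmd)
{
        ethtool_cmd_speed_set(cmd, 100000);     /* cmd->speed = 0x86a0,
                                                   cmd->speed_hi = 0x0001 */
        cmd->duplex = DUPLEX_FULL;
        return 0;
}

Anything reading the result back should use ethtool_cmd_speed(cmd) rather than the raw speed field, which now holds only the low half.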
-2
include/net/syncppp.h
··· 43 u32 pp_rseq; /* remote sequence number */ 44 struct slcp lcp; /* LCP params */ 45 struct sipcp ipcp; /* IPCP params */ 46 - u32 ibytes,obytes; /* Bytes in/out */ 47 - u32 ipkts,opkts; /* Packets in/out */ 48 struct timer_list pp_timer; 49 struct net_device *pp_if; 50 char pp_link_state; /* Link status */
··· 43 u32 pp_rseq; /* remote sequence number */ 44 struct slcp lcp; /* LCP params */ 45 struct sipcp ipcp; /* IPCP params */ 46 struct timer_list pp_timer; 47 struct net_device *pp_if; 48 char pp_link_state; /* Link status */
+13 -22
net/core/dev.c
··· 1939 1940 EXPORT_SYMBOL(netif_rx_ni); 1941 1942 - static inline struct net_device *skb_bond(struct sk_buff *skb) 1943 - { 1944 - struct net_device *dev = skb->dev; 1945 - 1946 - if (dev->master) { 1947 - if (skb_bond_should_drop(skb)) { 1948 - kfree_skb(skb); 1949 - return NULL; 1950 - } 1951 - skb->dev = dev->master; 1952 - } 1953 - 1954 - return dev; 1955 - } 1956 - 1957 - 1958 static void net_tx_action(struct softirq_action *h) 1959 { 1960 struct softnet_data *sd = &__get_cpu_var(softnet_data); ··· 2165 { 2166 struct packet_type *ptype, *pt_prev; 2167 struct net_device *orig_dev; 2168 int ret = NET_RX_DROP; 2169 __be16 type; 2170 ··· 2179 if (!skb->iif) 2180 skb->iif = skb->dev->ifindex; 2181 2182 - orig_dev = skb_bond(skb); 2183 - 2184 - if (!orig_dev) 2185 - return NET_RX_DROP; 2186 2187 __get_cpu_var(netdev_rx_stat).total++; 2188 ··· 2210 #endif 2211 2212 list_for_each_entry_rcu(ptype, &ptype_all, list) { 2213 - if (!ptype->dev || ptype->dev == skb->dev) { 2214 if (pt_prev) 2215 ret = deliver_skb(skb, pt_prev, orig_dev); 2216 pt_prev = ptype; ··· 2236 list_for_each_entry_rcu(ptype, 2237 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { 2238 if (ptype->type == type && 2239 - (!ptype->dev || ptype->dev == skb->dev)) { 2240 if (pt_prev) 2241 ret = deliver_skb(skb, pt_prev, orig_dev); 2242 pt_prev = ptype;
··· 1939 1940 EXPORT_SYMBOL(netif_rx_ni); 1941 1942 static void net_tx_action(struct softirq_action *h) 1943 { 1944 struct softnet_data *sd = &__get_cpu_var(softnet_data); ··· 2181 { 2182 struct packet_type *ptype, *pt_prev; 2183 struct net_device *orig_dev; 2184 + struct net_device *null_or_orig; 2185 int ret = NET_RX_DROP; 2186 __be16 type; 2187 ··· 2194 if (!skb->iif) 2195 skb->iif = skb->dev->ifindex; 2196 2197 + null_or_orig = NULL; 2198 + orig_dev = skb->dev; 2199 + if (orig_dev->master) { 2200 + if (skb_bond_should_drop(skb)) 2201 + null_or_orig = orig_dev; /* deliver only exact match */ 2202 + else 2203 + skb->dev = orig_dev->master; 2204 + } 2205 2206 __get_cpu_var(netdev_rx_stat).total++; 2207 ··· 2221 #endif 2222 2223 list_for_each_entry_rcu(ptype, &ptype_all, list) { 2224 + if (ptype->dev == null_or_orig || ptype->dev == skb->dev || 2225 + ptype->dev == orig_dev) { 2226 if (pt_prev) 2227 ret = deliver_skb(skb, pt_prev, orig_dev); 2228 pt_prev = ptype; ··· 2246 list_for_each_entry_rcu(ptype, 2247 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { 2248 if (ptype->type == type && 2249 + (ptype->dev == null_or_orig || ptype->dev == skb->dev || 2250 + ptype->dev == orig_dev)) { 2251 if (pt_prev) 2252 ret = deliver_skb(skb, pt_prev, orig_dev); 2253 pt_prev = ptype;
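The receive-path rewrite above replaces "free the frame if it arrived on an inactive bond slave" with "deliver it only to exact matches": when skb_bond_should_drop() fires, null_or_orig is set to the arriving slave and skb->dev is left pointing at it, so every delivery test collapses to ptype->dev == orig_dev. Device-wildcard handlers (ptype->dev == NULL) no longer match, but a handler bound to that exact slave still sees the frame. A sketch of such a device-bound tap; the names are illustrative and the .dev field would be set to the slave's net_device before registration:

static int example_rcv(struct sk_buff *skb, struct net_device *dev,
                       struct packet_type *pt, struct net_device *orig_dev)
{
        /* orig_dev is the device the frame physically arrived on. */
        kfree_skb(skb);
        return NET_RX_SUCCESS;
}

static struct packet_type example_tap = {
        .type = __constant_htons(ETH_P_ALL),
        .func = example_rcv,
        /* .dev = slave_dev, then dev_add_pack(&example_tap); */
};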
-27
net/wanrouter/wanmain.c
··· 57 #include <linux/vmalloc.h> /* vmalloc, vfree */ 58 #include <asm/uaccess.h> /* copy_to/from_user */ 59 #include <linux/init.h> /* __initfunc et al. */ 60 - #include <net/syncppp.h> 61 62 #define KMEM_SAFETYZONE 8 63 ··· 566 { 567 wanif_conf_t *cnf; 568 struct net_device *dev = NULL; 569 - #ifdef CONFIG_WANPIPE_MULTPPP 570 - struct ppp_device *pppdev=NULL; 571 - #endif 572 int err; 573 574 if ((wandev->state == WAN_UNCONFIGURED) || (wandev->new_if == NULL)) ··· 584 goto out; 585 586 if (cnf->config_id == WANCONFIG_MPPP) { 587 - #ifdef CONFIG_WANPIPE_MULTPPP 588 - pppdev = kzalloc(sizeof(struct ppp_device), GFP_KERNEL); 589 - err = -ENOBUFS; 590 - if (pppdev == NULL) 591 - goto out; 592 - pppdev->dev = kzalloc(sizeof(struct net_device), GFP_KERNEL); 593 - if (pppdev->dev == NULL) { 594 - kfree(pppdev); 595 - err = -ENOBUFS; 596 - goto out; 597 - } 598 - err = wandev->new_if(wandev, (struct net_device *)pppdev, cnf); 599 - dev = pppdev->dev; 600 - #else 601 printk(KERN_INFO "%s: Wanpipe Multi-Port PPP support has not been compiled in!\n", 602 wandev->name); 603 err = -EPROTONOSUPPORT; 604 goto out; 605 - #endif 606 } else { 607 dev = kzalloc(sizeof(struct net_device), GFP_KERNEL); 608 err = -ENOBUFS; ··· 642 kfree(dev->priv); 643 dev->priv = NULL; 644 645 - #ifdef CONFIG_WANPIPE_MULTPPP 646 - if (cnf->config_id == WANCONFIG_MPPP) 647 - kfree(pppdev); 648 - else 649 - kfree(dev); 650 - #else 651 /* Sync PPP is disabled */ 652 if (cnf->config_id != WANCONFIG_MPPP) 653 kfree(dev); 654 - #endif 655 - 656 out: 657 kfree(cnf); 658 return err;
··· 57 #include <linux/vmalloc.h> /* vmalloc, vfree */ 58 #include <asm/uaccess.h> /* copy_to/from_user */ 59 #include <linux/init.h> /* __initfunc et al. */ 60 61 #define KMEM_SAFETYZONE 8 62 ··· 567 { 568 wanif_conf_t *cnf; 569 struct net_device *dev = NULL; 570 int err; 571 572 if ((wandev->state == WAN_UNCONFIGURED) || (wandev->new_if == NULL)) ··· 588 goto out; 589 590 if (cnf->config_id == WANCONFIG_MPPP) { 591 printk(KERN_INFO "%s: Wanpipe Multi-Port PPP support has not been compiled in!\n", 592 wandev->name); 593 err = -EPROTONOSUPPORT; 594 goto out; 595 } else { 596 dev = kzalloc(sizeof(struct net_device), GFP_KERNEL); 597 err = -ENOBUFS; ··· 661 kfree(dev->priv); 662 dev->priv = NULL; 663 664 /* Sync PPP is disabled */ 665 if (cnf->config_id != WANCONFIG_MPPP) 666 kfree(dev); 667 out: 668 kfree(cnf); 669 return err;