Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (77 commits)
[IPV6]: Reorg struct ifmcaddr6 to save some bytes
[INET_TIMEWAIT_SOCK]: Reorganize struct inet_timewait_sock to save some bytes
[DCCP]: Reorganize struct dccp_sock to save 8 bytes
[INET6]: Reorganize struct inet6_dev to save 8 bytes
[SOCK] proto: Add hashinfo member to struct proto
EMAC driver: Fix bug: The clock divisor is set to all ones at reset.
EMAC driver: fix bug - invalidate data cache of new_skb->data range when cache is WB
EMAC driver: add power down mode
EMAC driver: ADSP-BF52x arch/mach support
EMAC driver: use simpler comment headers and strip out information that is maintained in the scm's log
EMAC driver: bf537 MAC multicast hash filtering patch
EMAC driver: define MDC_CLK=2.5MHz and calculate mdc_div according to SCLK.
EMAC driver: shorten the mdelay value to solve netperf performance issue
[netdrvr] sis190: build fix
sky2: fix Wake On Lan interaction with BIOS
sky2: restore multicast addresses after recovery
pci-skeleton: Misc fixes to build neatly
phylib: Add Realtek 821x eth PHY support
natsemi: Update locking documentation
PHYLIB: Locking fixes for PHY I/O potentially sleeping
...

+1293 -1138
-25
MAINTAINERS
··· 84 84 it has been replaced by a better system and you 85 85 should be using that. 86 86 87 - 3C359 NETWORK DRIVER 88 - P: Mike Phillips 89 - M: mikep@linuxtr.net 90 - L: netdev@vger.kernel.org 91 - W: http://www.linuxtr.net 92 - S: Maintained 93 - 94 87 3C505 NETWORK DRIVER 95 88 P: Philip Blundell 96 89 M: philb@gnu.org ··· 932 939 S: Maintained 933 940 934 941 BONDING DRIVER 935 - P: Chad Tindel 936 - M: ctindel@users.sourceforge.net 937 942 P: Jay Vosburgh 938 943 M: fubar@us.ibm.com 939 944 L: bonding-devel@lists.sourceforge.net ··· 2855 2864 W: http://oss.oracle.com/projects/ocfs2/ 2856 2865 S: Supported 2857 2866 2858 - OLYMPIC NETWORK DRIVER 2859 - P: Peter De Shrijver 2860 - M: p2@ace.ulyssis.student.kuleuven.ac.be 2861 - P: Mike Phillips 2862 - M: mikep@linuxtr.net 2863 - L: netdev@vger.kernel.org 2864 - W: http://www.linuxtr.net 2865 - S: Maintained 2866 - 2867 2867 OMNIKEY CARDMAN 4000 DRIVER 2868 2868 P: Harald Welte 2869 2869 M: laforge@gnumonks.org ··· 3768 3786 M: chessman@tux.org 3769 3787 L: tlan-devel@lists.sourceforge.net (subscribers-only) 3770 3788 W: http://sourceforge.net/projects/tlan/ 3771 - S: Maintained 3772 - 3773 - TOKEN-RING NETWORK DRIVER 3774 - P: Mike Phillips 3775 - M: mikep@linuxtr.net 3776 - L: netdev@vger.kernel.org 3777 - W: http://www.linuxtr.net 3778 3789 S: Maintained 3779 3790 3780 3791 TOSHIBA ACPI EXTRAS DRIVER
+6 -5
drivers/net/Kconfig
··· 814 814 will be called smc-ultra32. 815 815 816 816 config BFIN_MAC 817 - tristate "Blackfin 536/537 on-chip mac support" 818 - depends on NET_ETHERNET && (BF537 || BF536) && (!BF537_PORT_H) 817 + tristate "Blackfin 527/536/537 on-chip mac support" 818 + depends on NET_ETHERNET && (BF527 || BF537 || BF536) && (!BF537_PORT_H) 819 819 select CRC32 820 820 select MII 821 821 select PHYLIB ··· 828 828 829 829 config BFIN_MAC_USE_L1 830 830 bool "Use L1 memory for rx/tx packets" 831 - depends on BFIN_MAC && BF537 831 + depends on BFIN_MAC && (BF527 || BF537) 832 832 default y 833 833 help 834 834 To get maximum network performance, you should use L1 memory as rx/tx buffers. ··· 855 855 config BFIN_MAC_RMII 856 856 bool "RMII PHY Interface (EXPERIMENTAL)" 857 857 depends on BFIN_MAC && EXPERIMENTAL 858 - default n 858 + default y if BFIN527_EZKIT 859 + default n if BFIN537_STAMP 859 860 help 860 861 Use Reduced PHY MII Interface 861 862 ··· 1200 1199 1201 1200 config IBMLANA 1202 1201 tristate "IBM LAN Adapter/A support" 1203 - depends on MCA && MCA_LEGACY 1202 + depends on MCA 1204 1203 ---help--- 1205 1204 This is a Micro Channel Ethernet adapter. You need to set 1206 1205 CONFIG_MCA to use this driver. It is both available as an in-kernel
+1 -1
drivers/net/arm/at91_ether.c
··· 384 384 /* Wait until PHY reset is complete */ 385 385 do { 386 386 read_phy(lp->phy_address, MII_BMCR, &bmcr); 387 - } while (!(bmcr && BMCR_RESET)); 387 + } while (!(bmcr & BMCR_RESET)); 388 388 389 389 disable_mdi(); 390 390 spin_unlock_irq(&lp->lock);
+23 -17
drivers/net/ax88796.c
··· 137 137 static void ax_reset_8390(struct net_device *dev) 138 138 { 139 139 struct ei_device *ei_local = netdev_priv(dev); 140 + struct ax_device *ax = to_ax_dev(dev); 140 141 unsigned long reset_start_time = jiffies; 141 142 void __iomem *addr = (void __iomem *)dev->base_addr; 142 143 143 144 if (ei_debug > 1) 144 - printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies); 145 + dev_dbg(&ax->dev->dev, "resetting the 8390 t=%ld\n", jiffies); 145 146 146 147 ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET); 147 148 ··· 152 151 /* This check _should_not_ be necessary, omit eventually. */ 153 152 while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) { 154 153 if (jiffies - reset_start_time > 2*HZ/100) { 155 - printk(KERN_WARNING "%s: %s did not complete.\n", 154 + dev_warn(&ax->dev->dev, "%s: %s did not complete.\n", 156 155 __FUNCTION__, dev->name); 157 156 break; 158 157 } ··· 166 165 int ring_page) 167 166 { 168 167 struct ei_device *ei_local = netdev_priv(dev); 168 + struct ax_device *ax = to_ax_dev(dev); 169 169 void __iomem *nic_base = ei_local->mem; 170 170 171 171 /* This *shouldn't* happen. If it does, it's the last thing you'll see */ 172 172 if (ei_status.dmaing) { 173 - printk(KERN_EMERG "%s: DMAing conflict in %s [DMAstat:%d][irqlock:%d].\n", 173 + dev_err(&ax->dev->dev, "%s: DMAing conflict in %s " 174 + "[DMAstat:%d][irqlock:%d].\n", 174 175 dev->name, __FUNCTION__, 175 - ei_status.dmaing, ei_status.irqlock); 176 + ei_status.dmaing, ei_status.irqlock); 176 177 return; 177 178 } 178 179 ··· 207 204 struct sk_buff *skb, int ring_offset) 208 205 { 209 206 struct ei_device *ei_local = netdev_priv(dev); 207 + struct ax_device *ax = to_ax_dev(dev); 210 208 void __iomem *nic_base = ei_local->mem; 211 209 char *buf = skb->data; 212 210 213 211 if (ei_status.dmaing) { 214 - printk(KERN_EMERG "%s: DMAing conflict in ax_block_input " 212 + dev_err(&ax->dev->dev, 213 + "%s: DMAing conflict in %s " 215 214 "[DMAstat:%d][irqlock:%d].\n", 216 - dev->name, ei_status.dmaing, ei_status.irqlock); 215 + dev->name, __FUNCTION__, 216 + ei_status.dmaing, ei_status.irqlock); 217 217 return; 218 218 } 219 219 ··· 245 239 const unsigned char *buf, const int start_page) 246 240 { 247 241 struct ei_device *ei_local = netdev_priv(dev); 242 + struct ax_device *ax = to_ax_dev(dev); 248 243 void __iomem *nic_base = ei_local->mem; 249 244 unsigned long dma_start; 250 245 ··· 258 251 259 252 /* This *shouldn't* happen. If it does, it's the last thing you'll see */ 260 253 if (ei_status.dmaing) { 261 - printk(KERN_EMERG "%s: DMAing conflict in %s." 254 + dev_err(&ax->dev->dev, "%s: DMAing conflict in %s." 
262 255 "[DMAstat:%d][irqlock:%d]\n", 263 256 dev->name, __FUNCTION__, 264 257 ei_status.dmaing, ei_status.irqlock); ··· 288 281 289 282 while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) { 290 283 if (jiffies - dma_start > 2*HZ/100) { /* 20ms */ 291 - printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name); 284 + dev_warn(&ax->dev->dev, 285 + "%s: timeout waiting for Tx RDC.\n", dev->name); 292 286 ax_reset_8390(dev); 293 287 ax_NS8390_init(dev,1); 294 288 break; ··· 432 424 ax_phy_write(struct net_device *dev, int phy_addr, int reg, int value) 433 425 { 434 426 struct ei_device *ei = (struct ei_device *) netdev_priv(dev); 427 + struct ax_device *ax = to_ax_dev(dev); 435 428 unsigned long flags; 436 429 437 - printk(KERN_DEBUG "%s: %p, %04x, %04x %04x\n", 438 - __FUNCTION__, dev, phy_addr, reg, value); 430 + dev_dbg(&ax->dev->dev, "%s: %p, %04x, %04x %04x\n", 431 + __FUNCTION__, dev, phy_addr, reg, value); 439 432 440 433 spin_lock_irqsave(&ei->page_lock, flags); 441 434 ··· 759 750 ax_NS8390_init(dev, 0); 760 751 761 752 if (first_init) { 762 - printk("AX88796: %dbit, irq %d, %lx, MAC: ", 763 - ei_status.word16 ? 16:8, dev->irq, dev->base_addr); 753 + DECLARE_MAC_BUF(mac); 764 754 765 - for (i = 0; i < ETHER_ADDR_LEN; i++) 766 - printk("%2.2x%c", dev->dev_addr[i], 767 - (i < (ETHER_ADDR_LEN-1) ? ':' : ' ')); 768 - 769 - printk("\n"); 755 + dev_info(&ax->dev->dev, "%dbit, irq %d, %lx, MAC: %s\n", 756 + ei_status.word16 ? 16:8, dev->irq, dev->base_addr, 757 + print_mac(mac, dev->dev_addr)); 770 758 } 771 759 772 760 ret = register_netdev(dev);
+69 -38
drivers/net/bfin_mac.c
··· 1 1 /* 2 - * File: drivers/net/bfin_mac.c 3 - * Based on: 4 - * Maintainer: 5 - * Bryan Wu <bryan.wu@analog.com> 2 + * Blackfin On-Chip MAC Driver 6 3 * 7 - * Original author: 8 - * Luke Yang <luke.yang@analog.com> 4 + * Copyright 2004-2007 Analog Devices Inc. 9 5 * 10 - * Created: 11 - * Description: 6 + * Enter bugs at http://blackfin.uclinux.org/ 12 7 * 13 - * Modified: 14 - * Copyright 2004-2006 Analog Devices Inc. 15 - * 16 - * Bugs: Enter bugs at http://blackfin.uclinux.org/ 17 - * 18 - * This program is free software ; you can redistribute it and/or modify 19 - * it under the terms of the GNU General Public License as published by 20 - * the Free Software Foundation ; either version 2, or (at your option) 21 - * any later version. 22 - * 23 - * This program is distributed in the hope that it will be useful, 24 - * but WITHOUT ANY WARRANTY ; without even the implied warranty of 25 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 26 - * GNU General Public License for more details. 27 - * 28 - * You should have received a copy of the GNU General Public License 29 - * along with this program ; see the file COPYING. 30 - * If not, write to the Free Software Foundation, 31 - * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 8 + * Licensed under the GPL-2 or later. 32 9 */ 33 10 34 11 #include <linux/init.h> ··· 42 65 #define DRV_NAME "bfin_mac" 43 66 #define DRV_VERSION "1.1" 44 67 #define DRV_AUTHOR "Bryan Wu, Luke Yang" 45 - #define DRV_DESC "Blackfin BF53[67] on-chip Ethernet MAC driver" 68 + #define DRV_DESC "Blackfin BF53[67] BF527 on-chip Ethernet MAC driver" 46 69 47 70 MODULE_AUTHOR(DRV_AUTHOR); 48 71 MODULE_LICENSE("GPL"); ··· 273 296 274 297 /* poll the STABUSY bit */ 275 298 while ((bfin_read_EMAC_STAADD()) & STABUSY) { 276 - mdelay(10); 299 + udelay(1); 277 300 if (timeout_cnt-- < 0) { 278 301 printk(KERN_ERR DRV_NAME 279 302 ": wait MDC/MDIO transaction to complete timeout\n"); ··· 389 412 spin_unlock_irqrestore(&lp->lock, flags); 390 413 } 391 414 415 + /* MDC = 2.5 MHz */ 416 + #define MDC_CLK 2500000 417 + 392 418 static int mii_probe(struct net_device *dev) 393 419 { 394 420 struct bf537mac_local *lp = netdev_priv(dev); 395 421 struct phy_device *phydev = NULL; 396 422 unsigned short sysctl; 397 423 int i; 424 + u32 sclk, mdc_div; 398 425 399 426 /* Enable PHY output early */ 400 427 if (!(bfin_read_VR_CTL() & PHYCLKOE)) 401 428 bfin_write_VR_CTL(bfin_read_VR_CTL() | PHYCLKOE); 402 429 403 - /* MDC = 2.5 MHz */ 430 + sclk = get_sclk(); 431 + mdc_div = ((sclk / MDC_CLK) / 2) - 1; 432 + 404 433 sysctl = bfin_read_EMAC_SYSCTL(); 405 - sysctl |= SET_MDCDIV(24); 434 + sysctl = (sysctl & ~MDCDIV) | SET_MDCDIV(mdc_div); 406 435 bfin_write_EMAC_SYSCTL(sysctl); 407 436 408 437 /* search for connect PHY device */ ··· 460 477 lp->phydev = phydev; 461 478 462 479 printk(KERN_INFO "%s: attached PHY driver [%s] " 463 - "(mii_bus:phy_addr=%s, irq=%d)\n", 464 - DRV_NAME, phydev->drv->name, phydev->dev.bus_id, phydev->irq); 480 + "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)" 481 + "@sclk=%dMHz)\n", 482 + DRV_NAME, phydev->drv->name, phydev->dev.bus_id, phydev->irq, 483 + MDC_CLK, mdc_div, sclk/1000000); 465 484 466 485 return 0; 467 486 } ··· 536 551 */ 537 552 if (current_tx_ptr->next->next == tx_list_head) { 538 553 while (tx_list_head->status.status_word == 0) { 539 - mdelay(10); 554 + mdelay(1); 540 555 if (tx_list_head->status.status_word != 0 541 556 || !(bfin_read_DMA2_IRQ_STATUS() & 0x08)) { 542 557 goto adjust_head; ··· 651 666 
current_rx_ptr->skb = new_skb; 652 667 current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2; 653 668 669 + /* Invidate the data cache of skb->data range when it is write back 670 + * cache. It will prevent overwritting the new data from DMA 671 + */ 672 + blackfin_dcache_invalidate_range((unsigned long)new_skb->head, 673 + (unsigned long)new_skb->end); 674 + 654 675 len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN); 655 676 skb_put(skb, len); 656 677 blackfin_dcache_invalidate_range((unsigned long)skb->head, ··· 758 767 759 768 #if defined(CONFIG_BFIN_MAC_RMII) 760 769 opmode |= RMII; /* For Now only 100MBit are supported */ 761 - #ifdef CONFIG_BF_REV_0_2 770 + #if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) && CONFIG_BF_REV_0_2 762 771 opmode |= TE; 763 772 #endif 764 773 #endif ··· 783 792 netif_wake_queue(dev); 784 793 } 785 794 795 + static void bf537mac_multicast_hash(struct net_device *dev) 796 + { 797 + u32 emac_hashhi, emac_hashlo; 798 + struct dev_mc_list *dmi = dev->mc_list; 799 + char *addrs; 800 + int i; 801 + u32 crc; 802 + 803 + emac_hashhi = emac_hashlo = 0; 804 + 805 + for (i = 0; i < dev->mc_count; i++) { 806 + addrs = dmi->dmi_addr; 807 + dmi = dmi->next; 808 + 809 + /* skip non-multicast addresses */ 810 + if (!(*addrs & 1)) 811 + continue; 812 + 813 + crc = ether_crc(ETH_ALEN, addrs); 814 + crc >>= 26; 815 + 816 + if (crc & 0x20) 817 + emac_hashhi |= 1 << (crc & 0x1f); 818 + else 819 + emac_hashlo |= 1 << (crc & 0x1f); 820 + } 821 + 822 + bfin_write_EMAC_HASHHI(emac_hashhi); 823 + bfin_write_EMAC_HASHLO(emac_hashlo); 824 + 825 + return; 826 + } 827 + 786 828 /* 787 829 * This routine will, depending on the values passed to it, 788 830 * either make it accept multicast packets, go into ··· 831 807 sysctl = bfin_read_EMAC_OPMODE(); 832 808 sysctl |= RAF; 833 809 bfin_write_EMAC_OPMODE(sysctl); 834 - } else if (dev->flags & IFF_ALLMULTI || dev->mc_count) { 810 + } else if (dev->flags & IFF_ALLMULTI) { 835 811 /* accept all multicast */ 836 812 sysctl = bfin_read_EMAC_OPMODE(); 837 813 sysctl |= PAM; 838 814 bfin_write_EMAC_OPMODE(sysctl); 815 + } else if (dev->mc_count) { 816 + /* set up multicast hash table */ 817 + sysctl = bfin_read_EMAC_OPMODE(); 818 + sysctl |= HM; 819 + bfin_write_EMAC_OPMODE(sysctl); 820 + bf537mac_multicast_hash(dev); 839 821 } else { 840 822 /* clear promisc or multicast mode */ 841 823 sysctl = bfin_read_EMAC_OPMODE(); ··· 890 860 return retval; 891 861 892 862 phy_start(lp->phydev); 863 + phy_write(lp->phydev, MII_BMCR, BMCR_RESET); 893 864 setup_system_regs(dev); 894 865 bf537mac_disable(); 895 866 bf537mac_enable(); 896 - 897 867 pr_debug("hardware init finished\n"); 898 868 netif_start_queue(dev); 899 869 netif_carrier_on(dev); ··· 916 886 netif_carrier_off(dev); 917 887 918 888 phy_stop(lp->phydev); 889 + phy_write(lp->phydev, MII_BMCR, BMCR_PDOWN); 919 890 920 891 /* clear everything */ 921 892 bf537mac_shutdown(dev); ··· 1001 970 /* register irq handler */ 1002 971 if (request_irq 1003 972 (IRQ_MAC_RX, bf537mac_interrupt, IRQF_DISABLED | IRQF_SHARED, 1004 - "BFIN537_MAC_RX", dev)) { 973 + "EMAC_RX", dev)) { 1005 974 printk(KERN_WARNING DRV_NAME 1006 975 ": Unable to attach BlackFin MAC RX interrupt\n"); 1007 976 return -EBUSY;
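A minimal sketch of the MDC divisor arithmetic introduced above, assuming the Blackfin EMAC relation MDC = SCLK / (2 * (mdc_div + 1)); the 125 MHz SCLK figure is only an illustrative assumption, not something stated in the patch:

	#include <stdio.h>

	#define MDC_CLK 2500000	/* target MDC frequency, 2.5 MHz as in the patch */

	int main(void)
	{
		unsigned int sclk = 125000000;	/* assumed SCLK; the driver reads it via get_sclk() */

		/* same arithmetic as the patch: mdc_div = (SCLK / MDC_CLK) / 2 - 1 */
		unsigned int mdc_div = ((sclk / MDC_CLK) / 2) - 1;

		/* back-compute the resulting MDC frequency */
		printf("mdc_div=%u -> MDC=%u Hz\n", mdc_div, sclk / (2 * (mdc_div + 1)));
		return 0;
	}

With these numbers mdc_div works out to 24, matching the value the driver previously hard-coded with SET_MDCDIV(24).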
+4 -27
drivers/net/bfin_mac.h
··· 1 1 /* 2 - * File: drivers/net/bfin_mac.c 3 - * Based on: 4 - * Maintainer: 5 - * Bryan Wu <bryan.wu@analog.com> 2 + * Blackfin On-Chip MAC Driver 6 3 * 7 - * Original author: 8 - * Luke Yang <luke.yang@analog.com> 4 + * Copyright 2004-2007 Analog Devices Inc. 9 5 * 10 - * Created: 11 - * Description: 6 + * Enter bugs at http://blackfin.uclinux.org/ 12 7 * 13 - * Modified: 14 - * Copyright 2004-2006 Analog Devices Inc. 15 - * 16 - * Bugs: Enter bugs at http://blackfin.uclinux.org/ 17 - * 18 - * This program is free software ; you can redistribute it and/or modify 19 - * it under the terms of the GNU General Public License as published by 20 - * the Free Software Foundation ; either version 2, or (at your option) 21 - * any later version. 22 - * 23 - * This program is distributed in the hope that it will be useful, 24 - * but WITHOUT ANY WARRANTY ; without even the implied warranty of 25 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 26 - * GNU General Public License for more details. 27 - * 28 - * You should have received a copy of the GNU General Public License 29 - * along with this program ; see the file COPYING. 30 - * If not, write to the Free Software Foundation, 31 - * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 8 + * Licensed under the GPL-2 or later. 32 9 */ 33 10 34 11 #define BFIN_MAC_CSUM_OFFLOAD
+56 -50
drivers/net/bonding/bond_main.c
··· 1464 1464 dev_set_allmulti(slave_dev, 1); 1465 1465 } 1466 1466 1467 + netif_tx_lock_bh(bond_dev); 1467 1468 /* upload master's mc_list to new slave */ 1468 1469 for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) { 1469 1470 dev_mc_add (slave_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0); 1470 1471 } 1472 + netif_tx_unlock_bh(bond_dev); 1471 1473 } 1472 1474 1473 1475 if (bond->params.mode == BOND_MODE_8023AD) { ··· 1823 1821 } 1824 1822 1825 1823 /* flush master's mc_list from slave */ 1824 + netif_tx_lock_bh(bond_dev); 1826 1825 bond_mc_list_flush(bond_dev, slave_dev); 1826 + netif_tx_unlock_bh(bond_dev); 1827 1827 } 1828 1828 1829 1829 netdev_set_master(slave_dev, NULL); ··· 1946 1942 } 1947 1943 1948 1944 /* flush master's mc_list from slave */ 1945 + netif_tx_lock_bh(bond_dev); 1949 1946 bond_mc_list_flush(bond_dev, slave_dev); 1947 + netif_tx_unlock_bh(bond_dev); 1950 1948 } 1951 1949 1952 1950 netdev_set_master(slave_dev, NULL); ··· 2801 2795 } 2802 2796 2803 2797 if (do_failover) { 2804 - rtnl_lock(); 2805 2798 write_lock_bh(&bond->curr_slave_lock); 2806 2799 2807 2800 bond_select_active_slave(bond); 2808 2801 2809 2802 write_unlock_bh(&bond->curr_slave_lock); 2810 - rtnl_unlock(); 2811 - 2812 2803 } 2813 2804 2814 2805 re_arm: ··· 2862 2859 2863 2860 slave->link = BOND_LINK_UP; 2864 2861 2865 - rtnl_lock(); 2866 - 2867 2862 write_lock_bh(&bond->curr_slave_lock); 2868 2863 2869 2864 if ((!bond->curr_active_slave) && ··· 2897 2896 } 2898 2897 2899 2898 write_unlock_bh(&bond->curr_slave_lock); 2900 - rtnl_unlock(); 2901 2899 } 2902 2900 } else { 2903 2901 read_lock(&bond->curr_slave_lock); ··· 2966 2966 bond->dev->name, 2967 2967 slave->dev->name); 2968 2968 2969 - rtnl_lock(); 2970 2969 write_lock_bh(&bond->curr_slave_lock); 2971 2970 2972 2971 bond_select_active_slave(bond); 2973 2972 slave = bond->curr_active_slave; 2974 2973 2975 2974 write_unlock_bh(&bond->curr_slave_lock); 2976 - 2977 - rtnl_unlock(); 2978 2975 2979 2976 bond->current_arp_slave = slave; 2980 2977 ··· 2990 2993 bond->primary_slave->dev->name); 2991 2994 2992 2995 /* primary is up so switch to it */ 2993 - rtnl_lock(); 2994 2996 write_lock_bh(&bond->curr_slave_lock); 2995 2997 bond_change_active_slave(bond, bond->primary_slave); 2996 2998 write_unlock_bh(&bond->curr_slave_lock); 2997 - 2998 - rtnl_unlock(); 2999 2999 3000 3000 slave = bond->primary_slave; 3001 3001 slave->jiffies = jiffies; ··· 3763 3769 { 3764 3770 struct bonding *bond = bond_dev->priv; 3765 3771 struct net_device_stats *stats = &(bond->stats), *sstats; 3772 + struct net_device_stats local_stats; 3766 3773 struct slave *slave; 3767 3774 int i; 3768 3775 3769 - memset(stats, 0, sizeof(struct net_device_stats)); 3776 + memset(&local_stats, 0, sizeof(struct net_device_stats)); 3770 3777 3771 3778 read_lock_bh(&bond->lock); 3772 3779 3773 3780 bond_for_each_slave(bond, slave, i) { 3774 3781 sstats = slave->dev->get_stats(slave->dev); 3775 - stats->rx_packets += sstats->rx_packets; 3776 - stats->rx_bytes += sstats->rx_bytes; 3777 - stats->rx_errors += sstats->rx_errors; 3778 - stats->rx_dropped += sstats->rx_dropped; 3782 + local_stats.rx_packets += sstats->rx_packets; 3783 + local_stats.rx_bytes += sstats->rx_bytes; 3784 + local_stats.rx_errors += sstats->rx_errors; 3785 + local_stats.rx_dropped += sstats->rx_dropped; 3779 3786 3780 - stats->tx_packets += sstats->tx_packets; 3781 - stats->tx_bytes += sstats->tx_bytes; 3782 - stats->tx_errors += sstats->tx_errors; 3783 - stats->tx_dropped += sstats->tx_dropped; 3787 + local_stats.tx_packets += 
sstats->tx_packets; 3788 + local_stats.tx_bytes += sstats->tx_bytes; 3789 + local_stats.tx_errors += sstats->tx_errors; 3790 + local_stats.tx_dropped += sstats->tx_dropped; 3784 3791 3785 - stats->multicast += sstats->multicast; 3786 - stats->collisions += sstats->collisions; 3792 + local_stats.multicast += sstats->multicast; 3793 + local_stats.collisions += sstats->collisions; 3787 3794 3788 - stats->rx_length_errors += sstats->rx_length_errors; 3789 - stats->rx_over_errors += sstats->rx_over_errors; 3790 - stats->rx_crc_errors += sstats->rx_crc_errors; 3791 - stats->rx_frame_errors += sstats->rx_frame_errors; 3792 - stats->rx_fifo_errors += sstats->rx_fifo_errors; 3793 - stats->rx_missed_errors += sstats->rx_missed_errors; 3795 + local_stats.rx_length_errors += sstats->rx_length_errors; 3796 + local_stats.rx_over_errors += sstats->rx_over_errors; 3797 + local_stats.rx_crc_errors += sstats->rx_crc_errors; 3798 + local_stats.rx_frame_errors += sstats->rx_frame_errors; 3799 + local_stats.rx_fifo_errors += sstats->rx_fifo_errors; 3800 + local_stats.rx_missed_errors += sstats->rx_missed_errors; 3794 3801 3795 - stats->tx_aborted_errors += sstats->tx_aborted_errors; 3796 - stats->tx_carrier_errors += sstats->tx_carrier_errors; 3797 - stats->tx_fifo_errors += sstats->tx_fifo_errors; 3798 - stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors; 3799 - stats->tx_window_errors += sstats->tx_window_errors; 3802 + local_stats.tx_aborted_errors += sstats->tx_aborted_errors; 3803 + local_stats.tx_carrier_errors += sstats->tx_carrier_errors; 3804 + local_stats.tx_fifo_errors += sstats->tx_fifo_errors; 3805 + local_stats.tx_heartbeat_errors += sstats->tx_heartbeat_errors; 3806 + local_stats.tx_window_errors += sstats->tx_window_errors; 3800 3807 } 3808 + 3809 + memcpy(stats, &local_stats, sizeof(struct net_device_stats)); 3801 3810 3802 3811 read_unlock_bh(&bond->lock); 3803 3812 ··· 3934 3937 struct bonding *bond = bond_dev->priv; 3935 3938 struct dev_mc_list *dmi; 3936 3939 3937 - write_lock_bh(&bond->lock); 3938 - 3939 3940 /* 3940 3941 * Do promisc before checking multicast_mode 3941 3942 */ ··· 3953 3958 if (!(bond_dev->flags & IFF_ALLMULTI) && (bond->flags & IFF_ALLMULTI)) { 3954 3959 bond_set_allmulti(bond, -1); 3955 3960 } 3961 + 3962 + read_lock(&bond->lock); 3956 3963 3957 3964 bond->flags = bond_dev->flags; 3958 3965 ··· 3976 3979 bond_mc_list_destroy(bond); 3977 3980 bond_mc_list_copy(bond_dev->mc_list, bond, GFP_ATOMIC); 3978 3981 3979 - write_unlock_bh(&bond->lock); 3982 + read_unlock(&bond->lock); 3980 3983 } 3981 3984 3982 3985 /* ··· 4523 4526 struct net_device *bond_dev = bond->dev; 4524 4527 4525 4528 bond_work_cancel_all(bond); 4529 + netif_tx_lock_bh(bond_dev); 4526 4530 bond_mc_list_destroy(bond); 4531 + netif_tx_unlock_bh(bond_dev); 4527 4532 /* Release the bonded slaves */ 4528 4533 bond_release_all(bond_dev); 4529 4534 bond_deinit(bond_dev); ··· 4548 4549 int bond_parse_parm(const char *buf, struct bond_parm_tbl *tbl) 4549 4550 { 4550 4551 int mode = -1, i, rv; 4551 - char modestr[BOND_MAX_MODENAME_LEN + 1] = { 0, }; 4552 + char *p, modestr[BOND_MAX_MODENAME_LEN + 1] = { 0, }; 4552 4553 4553 - rv = sscanf(buf, "%d", &mode); 4554 - if (!rv) { 4554 + for (p = (char *)buf; *p; p++) 4555 + if (!(isdigit(*p) || isspace(*p))) 4556 + break; 4557 + 4558 + if (*p) 4555 4559 rv = sscanf(buf, "%20s", modestr); 4556 - if (!rv) 4557 - return -1; 4558 - } 4560 + else 4561 + rv = sscanf(buf, "%d", &mode); 4562 + 4563 + if (!rv) 4564 + return -1; 4559 4565 4560 4566 for (i = 0; 
tbl[i].modename; i++) { 4561 4567 if (mode == tbl[i].mode) ··· 4887 4883 down_write(&bonding_rwsem); 4888 4884 4889 4885 /* Check to see if the bond already exists. */ 4890 - list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list) 4891 - if (strnicmp(bond->dev->name, name, IFNAMSIZ) == 0) { 4892 - printk(KERN_ERR DRV_NAME 4886 + if (name) { 4887 + list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list) 4888 + if (strnicmp(bond->dev->name, name, IFNAMSIZ) == 0) { 4889 + printk(KERN_ERR DRV_NAME 4893 4890 ": cannot add bond %s; it already exists\n", 4894 - name); 4895 - res = -EPERM; 4896 - goto out_rtnl; 4897 - } 4891 + name); 4892 + res = -EPERM; 4893 + goto out_rtnl; 4894 + } 4895 + } 4898 4896 4899 4897 bond_dev = alloc_netdev(sizeof(struct bonding), name ? name : "", 4900 4898 ether_setup);
+2 -2
drivers/net/bonding/bonding.h
··· 22 22 #include "bond_3ad.h" 23 23 #include "bond_alb.h" 24 24 25 - #define DRV_VERSION "3.2.3" 26 - #define DRV_RELDATE "December 6, 2007" 25 + #define DRV_VERSION "3.2.4" 26 + #define DRV_RELDATE "January 28, 2008" 27 27 #define DRV_NAME "bonding" 28 28 #define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" 29 29
+1 -1
drivers/net/cxgb3/mc5.c
··· 452 452 t3_write_reg(adap, A_MC5_DB_INT_CAUSE, cause); 453 453 } 454 454 455 - void __devinit t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode) 455 + void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode) 456 456 { 457 457 #define K * 1024 458 458
+1 -1
drivers/net/cxgb3/sge.c
··· 2836 2836 * defaults for the assorted SGE parameters, which admins can change until 2837 2837 * they are used to initialize the SGE. 2838 2838 */ 2839 - void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p) 2839 + void t3_sge_prep(struct adapter *adap, struct sge_params *p) 2840 2840 { 2841 2841 int i; 2842 2842
+10 -12
drivers/net/cxgb3/t3_hw.c
··· 2675 2675 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size)); 2676 2676 } 2677 2677 2678 - static void __devinit init_mtus(unsigned short mtus[]) 2678 + static void init_mtus(unsigned short mtus[]) 2679 2679 { 2680 2680 /* 2681 2681 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so ··· 2703 2703 /* 2704 2704 * Initial congestion control parameters. 2705 2705 */ 2706 - static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b) 2706 + static void init_cong_ctrl(unsigned short *a, unsigned short *b) 2707 2707 { 2708 2708 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1; 2709 2709 a[9] = 2; ··· 3354 3354 * Determines a card's PCI mode and associated parameters, such as speed 3355 3355 * and width. 3356 3356 */ 3357 - static void __devinit get_pci_mode(struct adapter *adapter, 3358 - struct pci_params *p) 3357 + static void get_pci_mode(struct adapter *adapter, struct pci_params *p) 3359 3358 { 3360 3359 static unsigned short speed_map[] = { 33, 66, 100, 133 }; 3361 3360 u32 pci_mode, pcie_cap; ··· 3394 3395 * capabilities and default speed/duplex/flow-control/autonegotiation 3395 3396 * settings. 3396 3397 */ 3397 - static void __devinit init_link_config(struct link_config *lc, 3398 - unsigned int caps) 3398 + static void init_link_config(struct link_config *lc, unsigned int caps) 3399 3399 { 3400 3400 lc->supported = caps; 3401 3401 lc->requested_speed = lc->speed = SPEED_INVALID; ··· 3417 3419 * Calculates the size of an MC7 memory in bytes from the value of its 3418 3420 * configuration register. 3419 3421 */ 3420 - static unsigned int __devinit mc7_calc_size(u32 cfg) 3422 + static unsigned int mc7_calc_size(u32 cfg) 3421 3423 { 3422 3424 unsigned int width = G_WIDTH(cfg); 3423 3425 unsigned int banks = !!(cfg & F_BKS) + 1; ··· 3428 3430 return MBs << 20; 3429 3431 } 3430 3432 3431 - static void __devinit mc7_prep(struct adapter *adapter, struct mc7 *mc7, 3432 - unsigned int base_addr, const char *name) 3433 + static void mc7_prep(struct adapter *adapter, struct mc7 *mc7, 3434 + unsigned int base_addr, const char *name) 3433 3435 { 3434 3436 u32 cfg; 3435 3437 ··· 3515 3517 return 0; 3516 3518 } 3517 3519 3518 - static int __devinit init_parity(struct adapter *adap) 3520 + static int init_parity(struct adapter *adap) 3519 3521 { 3520 3522 int i, err, addr; 3521 3523 ··· 3550 3552 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO 3551 3553 * interface. 3552 3554 */ 3553 - int __devinit t3_prep_adapter(struct adapter *adapter, 3554 - const struct adapter_info *ai, int reset) 3555 + int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai, 3556 + int reset) 3555 3557 { 3556 3558 int ret; 3557 3559 unsigned int i, j = 0;
+9 -9
drivers/net/e100.c
··· 94 94 * enabled. 82557 pads with 7Eh, while the later controllers pad 95 95 * with 00h. 96 96 * 97 - * IV. Recieve 97 + * IV. Receive 98 98 * 99 99 * The Receive Frame Area (RFA) comprises a ring of Receive Frame 100 100 * Descriptors (RFD) + data buffer, thus forming the simplified mode ··· 120 120 * and Rx indication and re-allocation happen in the same context, 121 121 * therefore no locking is required. A software-generated interrupt 122 122 * is generated from the watchdog to recover from a failed allocation 123 - * senario where all Rx resources have been indicated and none re- 123 + * scenario where all Rx resources have been indicated and none re- 124 124 * placed. 125 125 * 126 126 * V. Miscellaneous ··· 954 954 /* Quadwords to DMA into FIFO before starting frame transmit */ 955 955 nic->tx_threshold = 0xE0; 956 956 957 - /* no interrupt for every tx completion, delay = 256us if not 557*/ 957 + /* no interrupt for every tx completion, delay = 256us if not 557 */ 958 958 nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf | 959 959 ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i)); 960 960 ··· 1497 1497 &s->complete; 1498 1498 1499 1499 /* Device's stats reporting may take several microseconds to 1500 - * complete, so where always waiting for results of the 1500 + * complete, so we're always waiting for results of the 1501 1501 * previous command. */ 1502 1502 1503 1503 if(*complete == cpu_to_le32(cuc_dump_reset_complete)) { ··· 1958 1958 1959 1959 if(restart_required) { 1960 1960 // ack the rnr? 1961 - writeb(stat_ack_rnr, &nic->csr->scb.stat_ack); 1961 + iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack); 1962 1962 e100_start_receiver(nic, nic->rx_to_clean); 1963 1963 if(work_done) 1964 1964 (*work_done)++; ··· 2774 2774 struct nic *nic = netdev_priv(netdev); 2775 2775 unregister_netdev(netdev); 2776 2776 e100_free(nic); 2777 - iounmap(nic->csr); 2777 + pci_iounmap(pdev, nic->csr); 2778 2778 free_netdev(netdev); 2779 2779 pci_release_regions(pdev); 2780 2780 pci_disable_device(pdev); ··· 2858 2858 /** 2859 2859 * e100_io_error_detected - called when PCI error is detected. 2860 2860 * @pdev: Pointer to PCI device 2861 - * @state: The current pci conneection state 2861 + * @state: The current pci connection state 2862 2862 */ 2863 2863 static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 2864 2864 { 2865 2865 struct net_device *netdev = pci_get_drvdata(pdev); 2866 2866 struct nic *nic = netdev_priv(netdev); 2867 2867 2868 - /* Similar to calling e100_down(), but avoids adpater I/O. */ 2868 + /* Similar to calling e100_down(), but avoids adapter I/O. */ 2869 2869 netdev->stop(netdev); 2870 2870 2871 - /* Detach; put netif into state similar to hotplug unplug. */ 2871 + /* Detach; put netif into a state similar to hotplug unplug. */ 2872 2872 napi_enable(&nic->napi); 2873 2873 netif_device_detach(netdev); 2874 2874 pci_disable_device(pdev);
+1 -1
drivers/net/e1000/e1000_main.c
··· 853 853 /** 854 854 * Dump the eeprom for users having checksum issues 855 855 **/ 856 - void e1000_dump_eeprom(struct e1000_adapter *adapter) 856 + static void e1000_dump_eeprom(struct e1000_adapter *adapter) 857 857 { 858 858 struct net_device *netdev = adapter->netdev; 859 859 struct ethtool_eeprom eeprom;
+1
drivers/net/e1000e/defines.h
··· 63 63 #define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ 64 64 #define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ 65 65 #define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ 66 + #define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ 66 67 67 68 /* Extended Device Control */ 68 69 #define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */
+13 -4
drivers/net/e1000e/ethtool.c
··· 690 690 return err; 691 691 } 692 692 693 - bool reg_pattern_test_array(struct e1000_adapter *adapter, u64 *data, 694 - int reg, int offset, u32 mask, u32 write) 693 + static bool reg_pattern_test_array(struct e1000_adapter *adapter, u64 *data, 694 + int reg, int offset, u32 mask, u32 write) 695 695 { 696 696 int i; 697 697 u32 read; ··· 1632 1632 return; 1633 1633 1634 1634 wol->supported = WAKE_UCAST | WAKE_MCAST | 1635 - WAKE_BCAST | WAKE_MAGIC; 1635 + WAKE_BCAST | WAKE_MAGIC | 1636 + WAKE_PHY | WAKE_ARP; 1636 1637 1637 1638 /* apply any specific unsupported masks here */ 1638 1639 if (adapter->flags & FLAG_NO_WAKE_UCAST) { ··· 1652 1651 wol->wolopts |= WAKE_BCAST; 1653 1652 if (adapter->wol & E1000_WUFC_MAG) 1654 1653 wol->wolopts |= WAKE_MAGIC; 1654 + if (adapter->wol & E1000_WUFC_LNKC) 1655 + wol->wolopts |= WAKE_PHY; 1656 + if (adapter->wol & E1000_WUFC_ARP) 1657 + wol->wolopts |= WAKE_ARP; 1655 1658 } 1656 1659 1657 1660 static int e1000_set_wol(struct net_device *netdev, ··· 1663 1658 { 1664 1659 struct e1000_adapter *adapter = netdev_priv(netdev); 1665 1660 1666 - if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) 1661 + if (wol->wolopts & WAKE_MAGICSECURE) 1667 1662 return -EOPNOTSUPP; 1668 1663 1669 1664 if (!(adapter->flags & FLAG_HAS_WOL)) ··· 1680 1675 adapter->wol |= E1000_WUFC_BC; 1681 1676 if (wol->wolopts & WAKE_MAGIC) 1682 1677 adapter->wol |= E1000_WUFC_MAG; 1678 + if (wol->wolopts & WAKE_PHY) 1679 + adapter->wol |= E1000_WUFC_LNKC; 1680 + if (wol->wolopts & WAKE_ARP) 1681 + adapter->wol |= E1000_WUFC_ARP; 1683 1682 1684 1683 return 0; 1685 1684 }
+5 -7
drivers/net/e1000e/netdev.c
··· 945 945 int irq_flags = IRQF_SHARED; 946 946 int err; 947 947 948 - err = pci_enable_msi(adapter->pdev); 949 - if (err) { 950 - ndev_warn(netdev, 951 - "Unable to allocate MSI interrupt Error: %d\n", err); 952 - } else { 948 + if (!pci_enable_msi(adapter->pdev)) { 953 949 adapter->flags |= FLAG_MSI_ENABLED; 954 950 handler = e1000_intr_msi; 955 951 irq_flags = 0; ··· 954 958 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, 955 959 netdev); 956 960 if (err) { 961 + ndev_err(netdev, 962 + "Unable to allocate %s interrupt (return: %d)\n", 963 + adapter->flags & FLAG_MSI_ENABLED ? "MSI":"INTx", 964 + err); 957 965 if (adapter->flags & FLAG_MSI_ENABLED) 958 966 pci_disable_msi(adapter->pdev); 959 - ndev_err(netdev, 960 - "Unable to allocate interrupt Error: %d\n", err); 961 967 } 962 968 963 969 return err;
+3
drivers/net/ehea/ehea.h
··· 458 458 int ehea_sense_port_attr(struct ehea_port *port); 459 459 int ehea_set_portspeed(struct ehea_port *port, u32 port_speed); 460 460 461 + extern u64 ehea_driver_flags; 462 + extern struct work_struct ehea_rereg_mr_task; 463 + 461 464 #endif /* __EHEA_H__ */
+2 -2
drivers/net/ehea/ehea_ethtool.c
··· 40 40 return ret; 41 41 42 42 if (netif_carrier_ok(dev)) { 43 - switch(port->port_speed) { 43 + switch (port->port_speed) { 44 44 case EHEA_SPEED_10M: cmd->speed = SPEED_10; break; 45 45 case EHEA_SPEED_100M: cmd->speed = SPEED_100; break; 46 46 case EHEA_SPEED_1G: cmd->speed = SPEED_1000; break; ··· 78 78 goto doit; 79 79 } 80 80 81 - switch(cmd->speed) { 81 + switch (cmd->speed) { 82 82 case SPEED_10: 83 83 if (cmd->duplex == DUPLEX_FULL) 84 84 sp = H_SPEED_10M_F;
+4 -4
drivers/net/ehea/ehea_hw.h
··· 29 29 #ifndef __EHEA_HW_H__ 30 30 #define __EHEA_HW_H__ 31 31 32 - #define QPX_SQA_VALUE EHEA_BMASK_IBM(48,63) 33 - #define QPX_RQ1A_VALUE EHEA_BMASK_IBM(48,63) 34 - #define QPX_RQ2A_VALUE EHEA_BMASK_IBM(48,63) 35 - #define QPX_RQ3A_VALUE EHEA_BMASK_IBM(48,63) 32 + #define QPX_SQA_VALUE EHEA_BMASK_IBM(48, 63) 33 + #define QPX_RQ1A_VALUE EHEA_BMASK_IBM(48, 63) 34 + #define QPX_RQ2A_VALUE EHEA_BMASK_IBM(48, 63) 35 + #define QPX_RQ3A_VALUE EHEA_BMASK_IBM(48, 63) 36 36 37 37 #define QPTEMM_OFFSET(x) offsetof(struct ehea_qptemm, x) 38 38
+46 -78
drivers/net/ehea/ehea_main.c
··· 6 6 * (C) Copyright IBM Corp. 2006 7 7 * 8 8 * Authors: 9 - * Christoph Raisch <raisch@de.ibm.com> 10 - * Jan-Bernd Themann <themann@de.ibm.com> 11 - * Thomas Klein <tklein@de.ibm.com> 9 + * Christoph Raisch <raisch@de.ibm.com> 10 + * Jan-Bernd Themann <themann@de.ibm.com> 11 + * Thomas Klein <tklein@de.ibm.com> 12 12 * 13 13 * 14 14 * This program is free software; you can redistribute it and/or modify ··· 54 54 static int rq2_entries = EHEA_DEF_ENTRIES_RQ2; 55 55 static int rq3_entries = EHEA_DEF_ENTRIES_RQ3; 56 56 static int sq_entries = EHEA_DEF_ENTRIES_SQ; 57 - static int use_mcs = 0; 58 - static int use_lro = 0; 57 + static int use_mcs; 58 + static int use_lro; 59 59 static int lro_max_aggr = EHEA_LRO_MAX_AGGR; 60 60 static int num_tx_qps = EHEA_NUM_TX_QP; 61 - static int prop_carrier_state = 0; 61 + static int prop_carrier_state; 62 62 63 63 module_param(msg_level, int, 0); 64 64 module_param(rq1_entries, int, 0); ··· 94 94 MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, " 95 95 "Default = 0"); 96 96 97 - static int port_name_cnt = 0; 97 + static int port_name_cnt; 98 98 static LIST_HEAD(adapter_list); 99 - u64 ehea_driver_flags = 0; 99 + u64 ehea_driver_flags; 100 100 struct work_struct ehea_rereg_mr_task; 101 101 102 102 struct semaphore dlpar_mem_lock; ··· 121 121 .remove = ehea_remove, 122 122 }; 123 123 124 - void ehea_dump(void *adr, int len, char *msg) { 124 + void ehea_dump(void *adr, int len, char *msg) 125 + { 125 126 int x; 126 127 unsigned char *deb = adr; 127 128 for (x = 0; x < len; x += 16) { 128 129 printk(DRV_NAME " %s adr=%p ofs=%04x %016lx %016lx\n", msg, 129 - deb, x, *((u64*)&deb[0]), *((u64*)&deb[8])); 130 + deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8])); 130 131 deb += 16; 131 132 } 132 133 } ··· 519 518 last_wqe_index = wqe_index; 520 519 rmb(); 521 520 if (!ehea_check_cqe(cqe, &rq)) { 522 - if (rq == 1) { /* LL RQ1 */ 521 + if (rq == 1) { 522 + /* LL RQ1 */ 523 523 skb = get_skb_by_index_ll(skb_arr_rq1, 524 524 skb_arr_rq1_len, 525 525 wqe_index); ··· 533 531 if (!skb) 534 532 break; 535 533 } 536 - skb_copy_to_linear_data(skb, ((char*)cqe) + 64, 534 + skb_copy_to_linear_data(skb, ((char *)cqe) + 64, 537 535 cqe->num_bytes_transfered - 4); 538 536 ehea_fill_skb(dev, skb, cqe); 539 - } else if (rq == 2) { /* RQ2 */ 537 + } else if (rq == 2) { 538 + /* RQ2 */ 540 539 skb = get_skb_by_index(skb_arr_rq2, 541 540 skb_arr_rq2_len, cqe); 542 541 if (unlikely(!skb)) { ··· 547 544 } 548 545 ehea_fill_skb(dev, skb, cqe); 549 546 processed_rq2++; 550 - } else { /* RQ3 */ 547 + } else { 548 + /* RQ3 */ 551 549 skb = get_skb_by_index(skb_arr_rq3, 552 550 skb_arr_rq3_len, cqe); 553 551 if (unlikely(!skb)) { ··· 596 592 unsigned long flags; 597 593 598 594 cqe = ehea_poll_cq(send_cq); 599 - while(cqe && (quota > 0)) { 595 + while (cqe && (quota > 0)) { 600 596 ehea_inc_cq(send_cq); 601 597 602 598 cqe_counter++; ··· 647 643 648 644 static int ehea_poll(struct napi_struct *napi, int budget) 649 645 { 650 - struct ehea_port_res *pr = container_of(napi, struct ehea_port_res, napi); 646 + struct ehea_port_res *pr = container_of(napi, struct ehea_port_res, 647 + napi); 651 648 struct net_device *dev = pr->port->netdev; 652 649 struct ehea_cqe *cqe; 653 650 struct ehea_cqe *cqe_skb = NULL; ··· 748 743 u64 hret; 749 744 struct hcp_ehea_port_cb0 *cb0; 750 745 751 - cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC); /* May be called via */ 752 - if (!cb0) { /* ehea_neq_tasklet() */ 746 + /* may be called via ehea_neq_tasklet() */ 747 + cb0 = kzalloc(PAGE_SIZE, 
GFP_ATOMIC); 748 + if (!cb0) { 753 749 ehea_error("no mem for cb0"); 754 750 ret = -ENOMEM; 755 751 goto out; ··· 768 762 /* MAC address */ 769 763 port->mac_addr = cb0->port_mac_addr << 16; 770 764 771 - if (!is_valid_ether_addr((u8*)&port->mac_addr)) { 765 + if (!is_valid_ether_addr((u8 *)&port->mac_addr)) { 772 766 ret = -EADDRNOTAVAIL; 773 767 goto out_free; 774 768 } ··· 1000 994 1001 995 static void ehea_neq_tasklet(unsigned long data) 1002 996 { 1003 - struct ehea_adapter *adapter = (struct ehea_adapter*)data; 997 + struct ehea_adapter *adapter = (struct ehea_adapter *)data; 1004 998 struct ehea_eqe *eqe; 1005 999 u64 event_mask; 1006 1000 ··· 1210 1204 1211 1205 static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries) 1212 1206 { 1213 - int arr_size = sizeof(void*) * max_q_entries; 1207 + int arr_size = sizeof(void *) * max_q_entries; 1214 1208 1215 1209 q_skba->arr = vmalloc(arr_size); 1216 1210 if (!q_skba->arr) ··· 1495 1489 1496 1490 nfrags = skb_shinfo(skb)->nr_frags; 1497 1491 sg1entry = &swqe->u.immdata_desc.sg_entry; 1498 - sg_list = (struct ehea_vsgentry*)&swqe->u.immdata_desc.sg_list; 1492 + sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list; 1499 1493 swqe->descriptors = 0; 1500 1494 sg1entry_contains_frag_data = 0; 1501 1495 ··· 1548 1542 reg_type, port->mac_addr, 0, hcallid); 1549 1543 if (hret != H_SUCCESS) { 1550 1544 ehea_error("%sregistering bc address failed (tagged)", 1551 - hcallid == H_REG_BCMC ? "" : "de"); 1545 + hcallid == H_REG_BCMC ? "" : "de"); 1552 1546 ret = -EIO; 1553 1547 goto out_herr; 1554 1548 } ··· 1738 1732 } 1739 1733 } 1740 1734 1741 - static void ehea_add_multicast_entry(struct ehea_port* port, u8* mc_mac_addr) 1735 + static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr) 1742 1736 { 1743 1737 struct ehea_mc_list *ehea_mcl_entry; 1744 1738 u64 hret; ··· 1797 1791 goto out; 1798 1792 } 1799 1793 1800 - for (i = 0, k_mcl_entry = dev->mc_list; 1801 - i < dev->mc_count; 1802 - i++, k_mcl_entry = k_mcl_entry->next) { 1794 + for (i = 0, k_mcl_entry = dev->mc_list; i < dev->mc_count; i++, 1795 + k_mcl_entry = k_mcl_entry->next) 1803 1796 ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr); 1804 - } 1797 + 1805 1798 } 1806 1799 out: 1807 1800 return; ··· 1930 1925 1931 1926 if ((skb->protocol == htons(ETH_P_IP)) && 1932 1927 (ip_hdr(skb)->protocol == IPPROTO_TCP)) { 1933 - tcp = (struct tcphdr*)(skb_network_header(skb) + (ip_hdr(skb)->ihl * 4)); 1928 + tcp = (struct tcphdr *)(skb_network_header(skb) + 1929 + (ip_hdr(skb)->ihl * 4)); 1934 1930 tmp = (tcp->source + (tcp->dest << 16)) % 31; 1935 1931 tmp += ip_hdr(skb)->daddr % 31; 1936 1932 return tmp % num_qps; 1937 - } 1938 - else 1933 + } else 1939 1934 return 0; 1940 1935 } 1941 1936 ··· 2127 2122 u64 hret; 2128 2123 u16 dummy16 = 0; 2129 2124 u64 dummy64 = 0; 2130 - struct hcp_modify_qp_cb0* cb0; 2125 + struct hcp_modify_qp_cb0 *cb0; 2131 2126 2132 2127 cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL); 2133 2128 if (!cb0) { ··· 2253 2248 int ret = 0; 2254 2249 int i; 2255 2250 2256 - for(i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) 2251 + for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) 2257 2252 ret |= ehea_clean_portres(port, &port->port_res[i]); 2258 2253 2259 2254 ret |= ehea_destroy_eq(port->qp_eq); ··· 2305 2300 goto out_clean_pr; 2306 2301 } 2307 2302 2308 - for(i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { 2303 + for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { 2309 2304 ret = 
ehea_activate_qp(port->adapter, port->port_res[i].qp); 2310 2305 if (ret) { 2311 2306 ehea_error("activate_qp failed"); ··· 2313 2308 } 2314 2309 } 2315 2310 2316 - for(i = 0; i < port->num_def_qps; i++) { 2311 + for (i = 0; i < port->num_def_qps; i++) { 2317 2312 ret = ehea_fill_port_res(&port->port_res[i]); 2318 2313 if (ret) { 2319 2314 ehea_error("out_free_irqs"); ··· 2430 2425 { 2431 2426 struct ehea_port *port = netdev_priv(dev); 2432 2427 struct ehea_adapter *adapter = port->adapter; 2433 - struct hcp_modify_qp_cb0* cb0; 2428 + struct hcp_modify_qp_cb0 *cb0; 2434 2429 int ret = -EIO; 2435 2430 int dret; 2436 2431 int i; ··· 2495 2490 return ret; 2496 2491 } 2497 2492 2498 - void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res * pr) 2493 + void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr) 2499 2494 { 2500 2495 struct ehea_qp qp = *orig_qp; 2501 2496 struct ehea_qp_init_attr *init_attr = &qp.init_attr; ··· 2535 2530 int ret = 0; 2536 2531 int i; 2537 2532 2538 - struct hcp_modify_qp_cb0* cb0; 2533 + struct hcp_modify_qp_cb0 *cb0; 2539 2534 u64 hret; 2540 2535 u64 dummy64 = 0; 2541 2536 u16 dummy16 = 0; ··· 2809 2804 of_node_put(port->ofdev.node); 2810 2805 } 2811 2806 2812 - static int ehea_driver_sysfs_add(struct device *dev, 2813 - struct device_driver *driver) 2814 - { 2815 - int ret; 2816 - 2817 - ret = sysfs_create_link(&driver->kobj, &dev->kobj, 2818 - kobject_name(&dev->kobj)); 2819 - if (ret == 0) { 2820 - ret = sysfs_create_link(&dev->kobj, &driver->kobj, 2821 - "driver"); 2822 - if (ret) 2823 - sysfs_remove_link(&driver->kobj, 2824 - kobject_name(&dev->kobj)); 2825 - } 2826 - return ret; 2827 - } 2828 - 2829 - static void ehea_driver_sysfs_remove(struct device *dev, 2830 - struct device_driver *driver) 2831 - { 2832 - struct device_driver *drv = driver; 2833 - 2834 - if (drv) { 2835 - sysfs_remove_link(&drv->kobj, kobject_name(&dev->kobj)); 2836 - sysfs_remove_link(&dev->kobj, "driver"); 2837 - } 2838 - } 2839 - 2840 2807 static struct device *ehea_register_port(struct ehea_port *port, 2841 2808 struct device_node *dn) 2842 2809 { ··· 2833 2856 goto out_unreg_of_dev; 2834 2857 } 2835 2858 2836 - ret = ehea_driver_sysfs_add(&port->ofdev.dev, &ehea_driver.driver); 2837 - if (ret) { 2838 - ehea_error("failed to register sysfs driver link"); 2839 - goto out_rem_dev_file; 2840 - } 2841 - 2842 2859 return &port->ofdev.dev; 2843 2860 2844 - out_rem_dev_file: 2845 - device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id); 2846 2861 out_unreg_of_dev: 2847 2862 of_device_unregister(&port->ofdev); 2848 2863 out: ··· 2843 2874 2844 2875 static void ehea_unregister_port(struct ehea_port *port) 2845 2876 { 2846 - ehea_driver_sysfs_remove(&port->ofdev.dev, &ehea_driver.driver); 2847 2877 device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id); 2848 2878 of_device_unregister(&port->ofdev); 2849 2879 } ··· 3077 3109 of_node_put(eth_dn); 3078 3110 3079 3111 if (port) { 3080 - for (i=0; i < EHEA_MAX_PORTS; i++) 3112 + for (i = 0; i < EHEA_MAX_PORTS; i++) 3081 3113 if (!adapter->port[i]) { 3082 3114 adapter->port[i] = port; 3083 3115 break; ··· 3112 3144 3113 3145 ehea_shutdown_single_port(port); 3114 3146 3115 - for (i=0; i < EHEA_MAX_PORTS; i++) 3147 + for (i = 0; i < EHEA_MAX_PORTS; i++) 3116 3148 if (adapter->port[i] == port) { 3117 3149 adapter->port[i] = NULL; 3118 3150 break; ··· 3281 3313 } 3282 3314 3283 3315 static struct notifier_block ehea_reboot_nb = { 3284 - .notifier_call = ehea_reboot_notifier, 3316 + .notifier_call = 
ehea_reboot_notifier, 3285 3317 }; 3286 3318 3287 3319 static int check_module_parm(void)
+79 -79
drivers/net/ehea/ehea_phyp.c
··· 6 6 * (C) Copyright IBM Corp. 2006 7 7 * 8 8 * Authors: 9 - * Christoph Raisch <raisch@de.ibm.com> 10 - * Jan-Bernd Themann <themann@de.ibm.com> 11 - * Thomas Klein <tklein@de.ibm.com> 9 + * Christoph Raisch <raisch@de.ibm.com> 10 + * Jan-Bernd Themann <themann@de.ibm.com> 11 + * Thomas Klein <tklein@de.ibm.com> 12 12 * 13 13 * 14 14 * This program is free software; you can redistribute it and/or modify ··· 38 38 } 39 39 40 40 /* Defines for H_CALL H_ALLOC_RESOURCE */ 41 - #define H_ALL_RES_TYPE_QP 1 42 - #define H_ALL_RES_TYPE_CQ 2 43 - #define H_ALL_RES_TYPE_EQ 3 44 - #define H_ALL_RES_TYPE_MR 5 45 - #define H_ALL_RES_TYPE_MW 6 41 + #define H_ALL_RES_TYPE_QP 1 42 + #define H_ALL_RES_TYPE_CQ 2 43 + #define H_ALL_RES_TYPE_EQ 3 44 + #define H_ALL_RES_TYPE_MR 5 45 + #define H_ALL_RES_TYPE_MW 6 46 46 47 47 static long ehea_plpar_hcall_norets(unsigned long opcode, 48 48 unsigned long arg1, ··· 137 137 const u64 qp_handle, const u64 sel_mask, void *cb_addr) 138 138 { 139 139 return ehea_plpar_hcall_norets(H_QUERY_HEA_QP, 140 - adapter_handle, /* R4 */ 141 - qp_category, /* R5 */ 142 - qp_handle, /* R6 */ 143 - sel_mask, /* R7 */ 140 + adapter_handle, /* R4 */ 141 + qp_category, /* R5 */ 142 + qp_handle, /* R6 */ 143 + sel_mask, /* R7 */ 144 144 virt_to_abs(cb_addr), /* R8 */ 145 145 0, 0); 146 146 } 147 147 148 148 /* input param R5 */ 149 - #define H_ALL_RES_QP_EQPO EHEA_BMASK_IBM(9, 11) 150 - #define H_ALL_RES_QP_QPP EHEA_BMASK_IBM(12, 12) 151 - #define H_ALL_RES_QP_RQR EHEA_BMASK_IBM(13, 15) 152 - #define H_ALL_RES_QP_EQEG EHEA_BMASK_IBM(16, 16) 153 - #define H_ALL_RES_QP_LL_QP EHEA_BMASK_IBM(17, 17) 154 - #define H_ALL_RES_QP_DMA128 EHEA_BMASK_IBM(19, 19) 155 - #define H_ALL_RES_QP_HSM EHEA_BMASK_IBM(20, 21) 156 - #define H_ALL_RES_QP_SIGT EHEA_BMASK_IBM(22, 23) 157 - #define H_ALL_RES_QP_TENURE EHEA_BMASK_IBM(48, 55) 158 - #define H_ALL_RES_QP_RES_TYP EHEA_BMASK_IBM(56, 63) 149 + #define H_ALL_RES_QP_EQPO EHEA_BMASK_IBM(9, 11) 150 + #define H_ALL_RES_QP_QPP EHEA_BMASK_IBM(12, 12) 151 + #define H_ALL_RES_QP_RQR EHEA_BMASK_IBM(13, 15) 152 + #define H_ALL_RES_QP_EQEG EHEA_BMASK_IBM(16, 16) 153 + #define H_ALL_RES_QP_LL_QP EHEA_BMASK_IBM(17, 17) 154 + #define H_ALL_RES_QP_DMA128 EHEA_BMASK_IBM(19, 19) 155 + #define H_ALL_RES_QP_HSM EHEA_BMASK_IBM(20, 21) 156 + #define H_ALL_RES_QP_SIGT EHEA_BMASK_IBM(22, 23) 157 + #define H_ALL_RES_QP_TENURE EHEA_BMASK_IBM(48, 55) 158 + #define H_ALL_RES_QP_RES_TYP EHEA_BMASK_IBM(56, 63) 159 159 160 160 /* input param R9 */ 161 - #define H_ALL_RES_QP_TOKEN EHEA_BMASK_IBM(0, 31) 162 - #define H_ALL_RES_QP_PD EHEA_BMASK_IBM(32,63) 161 + #define H_ALL_RES_QP_TOKEN EHEA_BMASK_IBM(0, 31) 162 + #define H_ALL_RES_QP_PD EHEA_BMASK_IBM(32, 63) 163 163 164 164 /* input param R10 */ 165 - #define H_ALL_RES_QP_MAX_SWQE EHEA_BMASK_IBM(4, 7) 166 - #define H_ALL_RES_QP_MAX_R1WQE EHEA_BMASK_IBM(12, 15) 167 - #define H_ALL_RES_QP_MAX_R2WQE EHEA_BMASK_IBM(20, 23) 168 - #define H_ALL_RES_QP_MAX_R3WQE EHEA_BMASK_IBM(28, 31) 165 + #define H_ALL_RES_QP_MAX_SWQE EHEA_BMASK_IBM(4, 7) 166 + #define H_ALL_RES_QP_MAX_R1WQE EHEA_BMASK_IBM(12, 15) 167 + #define H_ALL_RES_QP_MAX_R2WQE EHEA_BMASK_IBM(20, 23) 168 + #define H_ALL_RES_QP_MAX_R3WQE EHEA_BMASK_IBM(28, 31) 169 169 /* Max Send Scatter Gather Elements */ 170 - #define H_ALL_RES_QP_MAX_SSGE EHEA_BMASK_IBM(37, 39) 171 - #define H_ALL_RES_QP_MAX_R1SGE EHEA_BMASK_IBM(45, 47) 170 + #define H_ALL_RES_QP_MAX_SSGE EHEA_BMASK_IBM(37, 39) 171 + #define H_ALL_RES_QP_MAX_R1SGE EHEA_BMASK_IBM(45, 47) 172 172 /* Max Receive SG Elements RQ1 
*/ 173 - #define H_ALL_RES_QP_MAX_R2SGE EHEA_BMASK_IBM(53, 55) 174 - #define H_ALL_RES_QP_MAX_R3SGE EHEA_BMASK_IBM(61, 63) 173 + #define H_ALL_RES_QP_MAX_R2SGE EHEA_BMASK_IBM(53, 55) 174 + #define H_ALL_RES_QP_MAX_R3SGE EHEA_BMASK_IBM(61, 63) 175 175 176 176 /* input param R11 */ 177 - #define H_ALL_RES_QP_SWQE_IDL EHEA_BMASK_IBM(0, 7) 177 + #define H_ALL_RES_QP_SWQE_IDL EHEA_BMASK_IBM(0, 7) 178 178 /* max swqe immediate data length */ 179 - #define H_ALL_RES_QP_PORT_NUM EHEA_BMASK_IBM(48, 63) 179 + #define H_ALL_RES_QP_PORT_NUM EHEA_BMASK_IBM(48, 63) 180 180 181 181 /* input param R12 */ 182 - #define H_ALL_RES_QP_TH_RQ2 EHEA_BMASK_IBM(0, 15) 182 + #define H_ALL_RES_QP_TH_RQ2 EHEA_BMASK_IBM(0, 15) 183 183 /* Threshold RQ2 */ 184 - #define H_ALL_RES_QP_TH_RQ3 EHEA_BMASK_IBM(16, 31) 184 + #define H_ALL_RES_QP_TH_RQ3 EHEA_BMASK_IBM(16, 31) 185 185 /* Threshold RQ3 */ 186 186 187 187 /* output param R6 */ 188 - #define H_ALL_RES_QP_ACT_SWQE EHEA_BMASK_IBM(0, 15) 189 - #define H_ALL_RES_QP_ACT_R1WQE EHEA_BMASK_IBM(16, 31) 190 - #define H_ALL_RES_QP_ACT_R2WQE EHEA_BMASK_IBM(32, 47) 191 - #define H_ALL_RES_QP_ACT_R3WQE EHEA_BMASK_IBM(48, 63) 188 + #define H_ALL_RES_QP_ACT_SWQE EHEA_BMASK_IBM(0, 15) 189 + #define H_ALL_RES_QP_ACT_R1WQE EHEA_BMASK_IBM(16, 31) 190 + #define H_ALL_RES_QP_ACT_R2WQE EHEA_BMASK_IBM(32, 47) 191 + #define H_ALL_RES_QP_ACT_R3WQE EHEA_BMASK_IBM(48, 63) 192 192 193 193 /* output param, R7 */ 194 - #define H_ALL_RES_QP_ACT_SSGE EHEA_BMASK_IBM(0, 7) 195 - #define H_ALL_RES_QP_ACT_R1SGE EHEA_BMASK_IBM(8, 15) 196 - #define H_ALL_RES_QP_ACT_R2SGE EHEA_BMASK_IBM(16, 23) 197 - #define H_ALL_RES_QP_ACT_R3SGE EHEA_BMASK_IBM(24, 31) 194 + #define H_ALL_RES_QP_ACT_SSGE EHEA_BMASK_IBM(0, 7) 195 + #define H_ALL_RES_QP_ACT_R1SGE EHEA_BMASK_IBM(8, 15) 196 + #define H_ALL_RES_QP_ACT_R2SGE EHEA_BMASK_IBM(16, 23) 197 + #define H_ALL_RES_QP_ACT_R3SGE EHEA_BMASK_IBM(24, 31) 198 198 #define H_ALL_RES_QP_ACT_SWQE_IDL EHEA_BMASK_IBM(32, 39) 199 199 200 200 /* output param R8,R9 */ 201 - #define H_ALL_RES_QP_SIZE_SQ EHEA_BMASK_IBM(0, 31) 202 - #define H_ALL_RES_QP_SIZE_RQ1 EHEA_BMASK_IBM(32, 63) 203 - #define H_ALL_RES_QP_SIZE_RQ2 EHEA_BMASK_IBM(0, 31) 204 - #define H_ALL_RES_QP_SIZE_RQ3 EHEA_BMASK_IBM(32, 63) 201 + #define H_ALL_RES_QP_SIZE_SQ EHEA_BMASK_IBM(0, 31) 202 + #define H_ALL_RES_QP_SIZE_RQ1 EHEA_BMASK_IBM(32, 63) 203 + #define H_ALL_RES_QP_SIZE_RQ2 EHEA_BMASK_IBM(0, 31) 204 + #define H_ALL_RES_QP_SIZE_RQ3 EHEA_BMASK_IBM(32, 63) 205 205 206 206 /* output param R11,R12 */ 207 - #define H_ALL_RES_QP_LIOBN_SQ EHEA_BMASK_IBM(0, 31) 208 - #define H_ALL_RES_QP_LIOBN_RQ1 EHEA_BMASK_IBM(32, 63) 209 - #define H_ALL_RES_QP_LIOBN_RQ2 EHEA_BMASK_IBM(0, 31) 210 - #define H_ALL_RES_QP_LIOBN_RQ3 EHEA_BMASK_IBM(32, 63) 207 + #define H_ALL_RES_QP_LIOBN_SQ EHEA_BMASK_IBM(0, 31) 208 + #define H_ALL_RES_QP_LIOBN_RQ1 EHEA_BMASK_IBM(32, 63) 209 + #define H_ALL_RES_QP_LIOBN_RQ2 EHEA_BMASK_IBM(0, 31) 210 + #define H_ALL_RES_QP_LIOBN_RQ3 EHEA_BMASK_IBM(32, 63) 211 211 212 212 u64 ehea_h_alloc_resource_qp(const u64 adapter_handle, 213 213 struct ehea_qp_init_attr *init_attr, const u32 pd, ··· 334 334 } 335 335 336 336 /* Defines for H_CALL H_ALLOC_RESOURCE */ 337 - #define H_ALL_RES_TYPE_QP 1 338 - #define H_ALL_RES_TYPE_CQ 2 339 - #define H_ALL_RES_TYPE_EQ 3 340 - #define H_ALL_RES_TYPE_MR 5 341 - #define H_ALL_RES_TYPE_MW 6 337 + #define H_ALL_RES_TYPE_QP 1 338 + #define H_ALL_RES_TYPE_CQ 2 339 + #define H_ALL_RES_TYPE_EQ 3 340 + #define H_ALL_RES_TYPE_MR 5 341 + #define H_ALL_RES_TYPE_MW 6 342 342 343 343 /* 
input param R5 */ 344 - #define H_ALL_RES_EQ_NEQ EHEA_BMASK_IBM(0, 0) 344 + #define H_ALL_RES_EQ_NEQ EHEA_BMASK_IBM(0, 0) 345 345 #define H_ALL_RES_EQ_NON_NEQ_ISN EHEA_BMASK_IBM(6, 7) 346 346 #define H_ALL_RES_EQ_INH_EQE_GEN EHEA_BMASK_IBM(16, 16) 347 - #define H_ALL_RES_EQ_RES_TYPE EHEA_BMASK_IBM(56, 63) 347 + #define H_ALL_RES_EQ_RES_TYPE EHEA_BMASK_IBM(56, 63) 348 348 /* input param R6 */ 349 - #define H_ALL_RES_EQ_MAX_EQE EHEA_BMASK_IBM(32, 63) 349 + #define H_ALL_RES_EQ_MAX_EQE EHEA_BMASK_IBM(32, 63) 350 350 351 351 /* output param R6 */ 352 - #define H_ALL_RES_EQ_LIOBN EHEA_BMASK_IBM(32, 63) 352 + #define H_ALL_RES_EQ_LIOBN EHEA_BMASK_IBM(32, 63) 353 353 354 354 /* output param R7 */ 355 - #define H_ALL_RES_EQ_ACT_EQE EHEA_BMASK_IBM(32, 63) 355 + #define H_ALL_RES_EQ_ACT_EQE EHEA_BMASK_IBM(32, 63) 356 356 357 357 /* output param R8 */ 358 - #define H_ALL_RES_EQ_ACT_PS EHEA_BMASK_IBM(32, 63) 358 + #define H_ALL_RES_EQ_ACT_PS EHEA_BMASK_IBM(32, 63) 359 359 360 360 /* output param R9 */ 361 361 #define H_ALL_RES_EQ_ACT_EQ_IST_C EHEA_BMASK_IBM(30, 31) ··· 453 453 454 454 hret = ehea_plpar_hcall9(H_REGISTER_SMR, 455 455 outs, 456 - adapter_handle , /* R4 */ 457 - orig_mr_handle, /* R5 */ 458 - vaddr_in, /* R6 */ 459 - (((u64)access_ctrl) << 32ULL), /* R7 */ 460 - pd, /* R8 */ 461 - 0, 0, 0, 0); /* R9-R12 */ 456 + adapter_handle , /* R4 */ 457 + orig_mr_handle, /* R5 */ 458 + vaddr_in, /* R6 */ 459 + (((u64)access_ctrl) << 32ULL), /* R7 */ 460 + pd, /* R8 */ 461 + 0, 0, 0, 0); /* R9-R12 */ 462 462 463 463 mr->handle = outs[0]; 464 464 mr->lkey = (u32)outs[2]; ··· 471 471 u64 outs[PLPAR_HCALL9_BUFSIZE]; 472 472 473 473 return ehea_plpar_hcall9(H_DISABLE_AND_GET_HEA, 474 - outs, 474 + outs, 475 475 adapter_handle, /* R4 */ 476 476 H_DISABLE_GET_EHEA_WQE_P, /* R5 */ 477 477 qp_handle, /* R6 */ 478 - 0, 0, 0, 0, 0, 0); /* R7-R12 */ 478 + 0, 0, 0, 0, 0, 0); /* R7-R12 */ 479 479 } 480 480 481 481 u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle, ··· 483 483 { 484 484 return ehea_plpar_hcall_norets(H_FREE_RESOURCE, 485 485 adapter_handle, /* R4 */ 486 - res_handle, /* R5 */ 486 + res_handle, /* R5 */ 487 487 force_bit, 488 - 0, 0, 0, 0); /* R7-R10 */ 488 + 0, 0, 0, 0); /* R7-R10 */ 489 489 } 490 490 491 491 u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr, ··· 493 493 const u32 pd, u64 *mr_handle, u32 *lkey) 494 494 { 495 495 u64 hret; 496 - u64 outs[PLPAR_HCALL9_BUFSIZE]; 496 + u64 outs[PLPAR_HCALL9_BUFSIZE]; 497 497 498 498 hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE, 499 499 outs, 500 500 adapter_handle, /* R4 */ 501 501 5, /* R5 */ 502 - vaddr, /* R6 */ 502 + vaddr, /* R6 */ 503 503 length, /* R7 */ 504 504 (((u64) access_ctrl) << 32ULL), /* R8 */ 505 505 pd, /* R9 */ ··· 619 619 void *rblock) 620 620 { 621 621 return ehea_plpar_hcall_norets(H_ERROR_DATA, 622 - adapter_handle, /* R4 */ 623 - ressource_handle, /* R5 */ 624 - virt_to_abs(rblock), /* R6 */ 625 - 0, 0, 0, 0); /* R7-R12 */ 622 + adapter_handle, /* R4 */ 623 + ressource_handle, /* R5 */ 624 + virt_to_abs(rblock), /* R6 */ 625 + 0, 0, 0, 0); /* R7-R12 */ 626 626 }
+11 -11
drivers/net/ehea/ehea_phyp.h
··· 93 93 static inline void hcp_epas_dtor(struct h_epas *epas) 94 94 { 95 95 if (epas->kernel.addr) 96 - iounmap((void __iomem*)((u64)epas->kernel.addr & PAGE_MASK)); 96 + iounmap((void __iomem *)((u64)epas->kernel.addr & PAGE_MASK)); 97 97 98 98 epas->user.addr = 0; 99 99 epas->kernel.addr = 0; ··· 388 388 const u64 qp_handle, 389 389 const u64 sel_mask, 390 390 void *cb_addr, 391 - u64 * inv_attr_id, 392 - u64 * proc_mask, u16 * out_swr, u16 * out_rwr); 391 + u64 *inv_attr_id, 392 + u64 *proc_mask, u16 *out_swr, u16 *out_rwr); 393 393 394 394 u64 ehea_h_alloc_resource_eq(const u64 adapter_handle, 395 - struct ehea_eq_attr *eq_attr, u64 * eq_handle); 395 + struct ehea_eq_attr *eq_attr, u64 *eq_handle); 396 396 397 397 u64 ehea_h_alloc_resource_cq(const u64 adapter_handle, 398 398 struct ehea_cq_attr *cq_attr, 399 - u64 * cq_handle, struct h_epas *epas); 399 + u64 *cq_handle, struct h_epas *epas); 400 400 401 401 u64 ehea_h_alloc_resource_qp(const u64 adapter_handle, 402 402 struct ehea_qp_init_attr *init_attr, 403 403 const u32 pd, 404 - u64 * qp_handle, struct h_epas *h_epas); 404 + u64 *qp_handle, struct h_epas *h_epas); 405 405 406 - #define H_REG_RPAGE_PAGE_SIZE EHEA_BMASK_IBM(48,55) 407 - #define H_REG_RPAGE_QT EHEA_BMASK_IBM(62,63) 406 + #define H_REG_RPAGE_PAGE_SIZE EHEA_BMASK_IBM(48, 55) 407 + #define H_REG_RPAGE_QT EHEA_BMASK_IBM(62, 63) 408 408 409 409 u64 ehea_h_register_rpage(const u64 adapter_handle, 410 410 const u8 pagesize, ··· 426 426 427 427 u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr, 428 428 const u64 length, const u32 access_ctrl, 429 - const u32 pd, u64 * mr_handle, u32 * lkey); 429 + const u32 pd, u64 *mr_handle, u32 *lkey); 430 430 431 431 u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle, 432 432 const u8 pagesize, const u8 queue_type, ··· 439 439 u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr); 440 440 441 441 /* output param R5 */ 442 - #define H_MEHEAPORT_CAT EHEA_BMASK_IBM(40,47) 443 - #define H_MEHEAPORT_PN EHEA_BMASK_IBM(48,63) 442 + #define H_MEHEAPORT_CAT EHEA_BMASK_IBM(40, 47) 443 + #define H_MEHEAPORT_PN EHEA_BMASK_IBM(48, 63) 444 444 445 445 u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num, 446 446 const u8 cb_cat, const u64 select_mask,
+16 -16
drivers/net/ehea/ehea_qmr.c
··· 33 33 34 34 35 35 struct ehea_busmap ehea_bmap = { 0, 0, NULL }; 36 - extern u64 ehea_driver_flags; 37 - extern struct work_struct ehea_rereg_mr_task; 38 36 39 37 40 38 static void *hw_qpageit_get_inc(struct hw_queue *queue) ··· 63 65 } 64 66 65 67 queue->queue_length = nr_of_pages * pagesize; 66 - queue->queue_pages = kmalloc(nr_of_pages * sizeof(void*), GFP_KERNEL); 68 + queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL); 67 69 if (!queue->queue_pages) { 68 70 ehea_error("no mem for queue_pages"); 69 71 return -ENOMEM; ··· 76 78 */ 77 79 i = 0; 78 80 while (i < nr_of_pages) { 79 - u8 *kpage = (u8*)get_zeroed_page(GFP_KERNEL); 81 + u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL); 80 82 if (!kpage) 81 83 goto out_nomem; 82 84 for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) { 83 - (queue->queue_pages)[i] = (struct ehea_page*)kpage; 85 + (queue->queue_pages)[i] = (struct ehea_page *)kpage; 84 86 kpage += pagesize; 85 87 i++; 86 88 } ··· 233 235 return 0; 234 236 235 237 hcp_epas_dtor(&cq->epas); 236 - 237 - if ((hret = ehea_destroy_cq_res(cq, NORMAL_FREE)) == H_R_STATE) { 238 + hret = ehea_destroy_cq_res(cq, NORMAL_FREE); 239 + if (hret == H_R_STATE) { 238 240 ehea_error_data(cq->adapter, cq->fw_handle); 239 241 hret = ehea_destroy_cq_res(cq, FORCE_FREE); 240 242 } ··· 299 301 if (i == (eq->attr.nr_pages - 1)) { 300 302 /* last page */ 301 303 vpage = hw_qpageit_get_inc(&eq->hw_queue); 302 - if ((hret != H_SUCCESS) || (vpage)) { 304 + if ((hret != H_SUCCESS) || (vpage)) 303 305 goto out_kill_hwq; 304 - } 306 + 305 307 } else { 306 - if ((hret != H_PAGE_REGISTERED) || (!vpage)) { 308 + if ((hret != H_PAGE_REGISTERED) || (!vpage)) 307 309 goto out_kill_hwq; 308 - } 310 + 309 311 } 310 312 } 311 313 ··· 329 331 unsigned long flags; 330 332 331 333 spin_lock_irqsave(&eq->spinlock, flags); 332 - eqe = (struct ehea_eqe*)hw_eqit_eq_get_inc_valid(&eq->hw_queue); 334 + eqe = (struct ehea_eqe *)hw_eqit_eq_get_inc_valid(&eq->hw_queue); 333 335 spin_unlock_irqrestore(&eq->spinlock, flags); 334 336 335 337 return eqe; ··· 362 364 363 365 hcp_epas_dtor(&eq->epas); 364 366 365 - if ((hret = ehea_destroy_eq_res(eq, NORMAL_FREE)) == H_R_STATE) { 367 + hret = ehea_destroy_eq_res(eq, NORMAL_FREE); 368 + if (hret == H_R_STATE) { 366 369 ehea_error_data(eq->adapter, eq->fw_handle); 367 370 hret = ehea_destroy_eq_res(eq, FORCE_FREE); 368 371 } ··· 545 546 546 547 hcp_epas_dtor(&qp->epas); 547 548 548 - if ((hret = ehea_destroy_qp_res(qp, NORMAL_FREE)) == H_R_STATE) { 549 + hret = ehea_destroy_qp_res(qp, NORMAL_FREE); 550 + if (hret == H_R_STATE) { 549 551 ehea_error_data(qp->adapter, qp->fw_handle); 550 552 hret = ehea_destroy_qp_res(qp, FORCE_FREE); 551 553 } ··· 559 559 return 0; 560 560 } 561 561 562 - int ehea_create_busmap( void ) 562 + int ehea_create_busmap(void) 563 563 { 564 564 u64 vaddr = EHEA_BUSMAP_START; 565 565 unsigned long high_section_index = 0; ··· 595 595 return 0; 596 596 } 597 597 598 - void ehea_destroy_busmap( void ) 598 + void ehea_destroy_busmap(void) 599 599 { 600 600 vfree(ehea_bmap.vaddr); 601 601 }
+8 -8
drivers/net/ehea/ehea_qmr.h
··· 41 41 #define EHEA_SECTSIZE (1UL << 24) 42 42 #define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> EHEA_PAGESHIFT) 43 43 44 - #if (1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE 45 - #error eHEA module can't work if kernel sectionsize < ehea sectionsize 44 + #if ((1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE) 45 + #error eHEA module cannot work if kernel sectionsize < ehea sectionsize 46 46 #endif 47 47 48 48 /* Some abbreviations used here: ··· 188 188 u64 entry; 189 189 }; 190 190 191 - #define ERROR_DATA_LENGTH EHEA_BMASK_IBM(52,63) 192 - #define ERROR_DATA_TYPE EHEA_BMASK_IBM(0,7) 191 + #define ERROR_DATA_LENGTH EHEA_BMASK_IBM(52, 63) 192 + #define ERROR_DATA_TYPE EHEA_BMASK_IBM(0, 7) 193 193 194 194 static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset) 195 195 { ··· 279 279 static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue) 280 280 { 281 281 void *retvalue = hw_qeit_get(queue); 282 - u32 qe = *(u8*)retvalue; 282 + u32 qe = *(u8 *)retvalue; 283 283 if ((qe >> 7) == (queue->toggle_state & 1)) 284 284 hw_qeit_eq_get_inc(queue); 285 285 else ··· 364 364 365 365 int ehea_destroy_cq(struct ehea_cq *cq); 366 366 367 - struct ehea_qp *ehea_create_qp(struct ehea_adapter * adapter, u32 pd, 367 + struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, u32 pd, 368 368 struct ehea_qp_init_attr *init_attr); 369 369 370 370 int ehea_destroy_qp(struct ehea_qp *qp); ··· 378 378 379 379 void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle); 380 380 381 - int ehea_create_busmap( void ); 382 - void ehea_destroy_busmap( void ); 381 + int ehea_create_busmap(void); 382 + void ehea_destroy_busmap(void); 383 383 u64 ehea_map_vaddr(void *caddr); 384 384 385 385 #endif /* __EHEA_QMR_H__ */
+28 -33
drivers/net/forcedeth.c
··· 13 13 * Copyright (C) 2004 Andrew de Quincey (wol support) 14 14 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane 15 15 * IRQ rate fixes, bigendian fixes, cleanups, verification) 16 - * Copyright (c) 2004,5,6 NVIDIA Corporation 16 + * Copyright (c) 2004,2005,2006,2007,2008 NVIDIA Corporation 17 17 * 18 18 * This program is free software; you can redistribute it and/or modify 19 19 * it under the terms of the GNU General Public License as published by ··· 226 226 #define NVREG_MISC1_HD 0x02 227 227 #define NVREG_MISC1_FORCE 0x3b0f3c 228 228 229 - NvRegMacReset = 0x3c, 229 + NvRegMacReset = 0x34, 230 230 #define NVREG_MAC_RESET_ASSERT 0x0F3 231 231 NvRegTransmitterControl = 0x084, 232 232 #define NVREG_XMITCTL_START 0x01 ··· 277 277 #define NVREG_MCASTADDRA_FORCE 0x01 278 278 NvRegMulticastAddrB = 0xB4, 279 279 NvRegMulticastMaskA = 0xB8, 280 + #define NVREG_MCASTMASKA_NONE 0xffffffff 280 281 NvRegMulticastMaskB = 0xBC, 282 + #define NVREG_MCASTMASKB_NONE 0xffff 281 283 282 284 NvRegPhyInterface = 0xC0, 283 285 #define PHY_RGMII 0x10000000 ··· 318 316 NvRegTxRingPhysAddrHigh = 0x148, 319 317 NvRegRxRingPhysAddrHigh = 0x14C, 320 318 NvRegTxPauseFrame = 0x170, 321 - #define NVREG_TX_PAUSEFRAME_DISABLE 0x1ff0080 322 - #define NVREG_TX_PAUSEFRAME_ENABLE 0x0c00030 319 + #define NVREG_TX_PAUSEFRAME_DISABLE 0x01ff0080 320 + #define NVREG_TX_PAUSEFRAME_ENABLE 0x01800010 323 321 NvRegMIIStatus = 0x180, 324 322 #define NVREG_MIISTAT_ERROR 0x0001 325 323 #define NVREG_MIISTAT_LINKCHANGE 0x0008 ··· 473 471 #define NV_RX_AVAIL (1<<31) 474 472 475 473 #define NV_RX2_CHECKSUMMASK (0x1C000000) 476 - #define NV_RX2_CHECKSUMOK1 (0x10000000) 477 - #define NV_RX2_CHECKSUMOK2 (0x14000000) 478 - #define NV_RX2_CHECKSUMOK3 (0x18000000) 474 + #define NV_RX2_CHECKSUM_IP (0x10000000) 475 + #define NV_RX2_CHECKSUM_IP_TCP (0x14000000) 476 + #define NV_RX2_CHECKSUM_IP_UDP (0x18000000) 479 477 #define NV_RX2_DESCRIPTORVALID (1<<29) 480 478 #define NV_RX2_SUBSTRACT1 (1<<25) 481 479 #define NV_RX2_ERROR1 (1<<18) ··· 2377 2375 goto next_pkt; 2378 2376 } 2379 2377 } 2380 - if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)/*ip and tcp */ { 2378 + if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */ 2379 + ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */ 2381 2380 skb->ip_summed = CHECKSUM_UNNECESSARY; 2382 - } else { 2383 - if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 || 2384 - (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) { 2385 - skb->ip_summed = CHECKSUM_UNNECESSARY; 2386 - } 2387 - } 2388 2381 } else { 2389 2382 dev_kfree_skb(skb); 2390 2383 goto next_pkt; ··· 2471 2474 } 2472 2475 } 2473 2476 2474 - if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)/*ip and tcp */ { 2477 + if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */ 2478 + ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */ 2475 2479 skb->ip_summed = CHECKSUM_UNNECESSARY; 2476 - } else { 2477 - if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 || 2478 - (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) { 2479 - skb->ip_summed = CHECKSUM_UNNECESSARY; 2480 - } 2481 - } 2482 2480 2483 2481 /* got a valid packet - forward it to the network core */ 2484 2482 skb_put(skb, len); ··· 2695 2703 addr[1] = alwaysOn[1]; 2696 2704 mask[0] = alwaysOn[0] | alwaysOff[0]; 2697 2705 mask[1] = alwaysOn[1] | alwaysOff[1]; 2706 + } else { 2707 + mask[0] = NVREG_MCASTMASKA_NONE; 2708 + mask[1] = 
NVREG_MCASTMASKB_NONE; 2698 2709 } 2699 2710 } 2700 2711 addr[0] |= NVREG_MCASTADDRA_FORCE; ··· 4808 4813 nv_mac_reset(dev); 4809 4814 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); 4810 4815 writel(0, base + NvRegMulticastAddrB); 4811 - writel(0, base + NvRegMulticastMaskA); 4812 - writel(0, base + NvRegMulticastMaskB); 4816 + writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA); 4817 + writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB); 4813 4818 writel(0, base + NvRegPacketFilterFlags); 4814 4819 4815 4820 writel(0, base + NvRegTransmitterControl); ··· 4903 4908 spin_lock_irq(&np->lock); 4904 4909 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); 4905 4910 writel(0, base + NvRegMulticastAddrB); 4906 - writel(0, base + NvRegMulticastMaskA); 4907 - writel(0, base + NvRegMulticastMaskB); 4911 + writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA); 4912 + writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB); 4908 4913 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); 4909 4914 /* One manual link speed update: Interrupts are enabled, future link 4910 4915 * speed changes cause interrupts and are handled by nv_link_irq(). ··· 5598 5603 }, 5599 5604 { /* MCP77 Ethernet Controller */ 5600 5605 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32), 5601 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5606 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5602 5607 }, 5603 5608 { /* MCP77 Ethernet Controller */ 5604 5609 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33), 5605 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5610 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5606 5611 }, 5607 5612 { /* MCP77 Ethernet Controller */ 5608 5613 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34), 5609 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5614 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5610 5615 }, 5611 5616 { /* MCP77 Ethernet Controller */ 5612 5617 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35), 5613 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5618 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5614 5619 }, 5615 5620 { /* MCP79 
Ethernet Controller */ 5616 5621 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36), 5617 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5622 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5618 5623 }, 5619 5624 { /* MCP79 Ethernet Controller */ 5620 5625 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37), 5621 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5626 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5622 5627 }, 5623 5628 { /* MCP79 Ethernet Controller */ 5624 5629 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38), 5625 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5630 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5626 5631 }, 5627 5632 { /* MCP79 Ethernet Controller */ 5628 5633 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39), 5629 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5634 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5630 5635 }, 5631 5636 {0,}, 5632 5637 };
+2 -2
drivers/net/ibmlana.c
··· 901 901 0x0000 902 902 }; 903 903 904 - static char *ibmlana_adapter_names[] __initdata = { 904 + static char *ibmlana_adapter_names[] __devinitdata = { 905 905 "IBM LAN Adapter/A", 906 906 NULL 907 907 }; 908 908 909 - static int ibmlana_init_one(struct device *kdev) 909 + static int __devinit ibmlana_init_one(struct device *kdev) 910 910 { 911 911 struct mca_device *mdev = to_mca_device(kdev); 912 912 struct net_device *dev;
-1
drivers/net/igb/igb_main.c
··· 438 438 if (adapter->msix_entries) { 439 439 err = igb_request_msix(adapter); 440 440 if (!err) { 441 - struct e1000_hw *hw = &adapter->hw; 442 441 /* enable IAM, auto-mask, 443 442 * DO NOT USE EIAME or IAME in legacy mode */ 444 443 wr32(E1000_IAM, IMS_ENABLE_MASK);
+4 -5
drivers/net/macb.c
··· 1084 1084 return phy_mii_ioctl(phydev, if_mii(rq), cmd); 1085 1085 } 1086 1086 1087 - static int __devinit macb_probe(struct platform_device *pdev) 1087 + static int __init macb_probe(struct platform_device *pdev) 1088 1088 { 1089 1089 struct eth_platform_data *pdata; 1090 1090 struct resource *regs; ··· 1248 1248 return err; 1249 1249 } 1250 1250 1251 - static int __devexit macb_remove(struct platform_device *pdev) 1251 + static int __exit macb_remove(struct platform_device *pdev) 1252 1252 { 1253 1253 struct net_device *dev; 1254 1254 struct macb *bp; ··· 1276 1276 } 1277 1277 1278 1278 static struct platform_driver macb_driver = { 1279 - .probe = macb_probe, 1280 - .remove = __devexit_p(macb_remove), 1279 + .remove = __exit_p(macb_remove), 1281 1280 .driver = { 1282 1281 .name = "macb", 1283 1282 }, ··· 1284 1285 1285 1286 static int __init macb_init(void) 1286 1287 { 1287 - return platform_driver_register(&macb_driver); 1288 + return platform_driver_probe(&macb_driver, macb_probe); 1288 1289 } 1289 1290 1290 1291 static void __exit macb_exit(void)
+133 -66
drivers/net/mipsnet.c
··· 4 4 * for more details. 5 5 */ 6 6 7 - #define DEBUG 8 - 9 7 #include <linux/init.h> 10 8 #include <linux/io.h> 11 9 #include <linux/kernel.h> ··· 13 15 #include <linux/platform_device.h> 14 16 #include <asm/mips-boards/simint.h> 15 17 16 - #include "mipsnet.h" /* actual device IO mapping */ 18 + #define MIPSNET_VERSION "2007-11-17" 17 19 18 - #define MIPSNET_VERSION "2005-06-20" 20 + /* 21 + * Net status/control block as seen by sw in the core. 22 + */ 23 + struct mipsnet_regs { 24 + /* 25 + * Device info for probing, reads as MIPSNET%d where %d is some 26 + * form of version. 27 + */ 28 + u64 devId; /*0x00 */ 19 29 20 - #define mipsnet_reg_address(dev, field) (dev->base_addr + field_offset(field)) 30 + /* 31 + * read only busy flag. 32 + * Set and cleared by the Net Device to indicate that an rx or a tx 33 + * is in progress. 34 + */ 35 + u32 busy; /*0x08 */ 36 + 37 + /* 38 + * Set by the Net Device. 39 + * The device will set it once data has been received. 40 + * The value is the number of bytes that should be read from 41 + * rxDataBuffer. The value will decrease till 0 until all the data 42 + * from rxDataBuffer has been read. 43 + */ 44 + u32 rxDataCount; /*0x0c */ 45 + #define MIPSNET_MAX_RXTX_DATACOUNT (1 << 16) 46 + 47 + /* 48 + * Settable from the MIPS core, cleared by the Net Device. 49 + * The core should set the number of bytes it wants to send, 50 + * then it should write those bytes of data to txDataBuffer. 51 + * The device will clear txDataCount has been processed (not 52 + * necessarily sent). 53 + */ 54 + u32 txDataCount; /*0x10 */ 55 + 56 + /* 57 + * Interrupt control 58 + * 59 + * Used to clear the interrupted generated by this dev. 60 + * Write a 1 to clear the interrupt. (except bit31). 61 + * 62 + * Bit0 is set if it was a tx-done interrupt. 63 + * Bit1 is set when new rx-data is available. 64 + * Until this bit is cleared there will be no other RXs. 65 + * 66 + * Bit31 is used for testing, it clears after a read. 67 + * Writing 1 to this bit will cause an interrupt to be generated. 68 + * To clear the test interrupt, write 0 to this register. 69 + */ 70 + u32 interruptControl; /*0x14 */ 71 + #define MIPSNET_INTCTL_TXDONE (1u << 0) 72 + #define MIPSNET_INTCTL_RXDONE (1u << 1) 73 + #define MIPSNET_INTCTL_TESTBIT (1u << 31) 74 + 75 + /* 76 + * Readonly core-specific interrupt info for the device to signal 77 + * the core. The meaning of the contents of this field might change. 78 + */ 79 + /* XXX: the whole memIntf interrupt scheme is messy: the device 80 + * should have no control what so ever of what VPE/register set is 81 + * being used. 82 + * The MemIntf should only expose interrupt lines, and something in 83 + * the config should be responsible for the line<->core/vpe bindings. 84 + */ 85 + u32 interruptInfo; /*0x18 */ 86 + 87 + /* 88 + * This is where the received data is read out. 89 + * There is more data to read until rxDataReady is 0. 90 + * Only 1 byte at this regs offset is used. 91 + */ 92 + u32 rxDataBuffer; /*0x1c */ 93 + 94 + /* 95 + * This is where the data to transmit is written. 96 + * Data should be written for the amount specified in the 97 + * txDataCount register. 98 + * Only 1 byte at this regs offset is used. 
99 + */ 100 + u32 txDataBuffer; /*0x20 */ 101 + }; 102 + 103 + #define regaddr(dev, field) \ 104 + (dev->base_addr + offsetof(struct mipsnet_regs, field)) 21 105 22 106 static char mipsnet_string[] = "mipsnet"; 23 107 ··· 109 29 static int ioiocpy_frommipsnet(struct net_device *dev, unsigned char *kdata, 110 30 int len) 111 31 { 112 - uint32_t available_len = inl(mipsnet_reg_address(dev, rxDataCount)); 113 - 114 - if (available_len < len) 115 - return -EFAULT; 116 - 117 32 for (; len > 0; len--, kdata++) 118 - *kdata = inb(mipsnet_reg_address(dev, rxDataBuffer)); 33 + *kdata = inb(regaddr(dev, rxDataBuffer)); 119 34 120 - return inl(mipsnet_reg_address(dev, rxDataCount)); 35 + return inl(regaddr(dev, rxDataCount)); 121 36 } 122 37 123 - static inline ssize_t mipsnet_put_todevice(struct net_device *dev, 38 + static inline void mipsnet_put_todevice(struct net_device *dev, 124 39 struct sk_buff *skb) 125 40 { 126 41 int count_to_go = skb->len; 127 42 char *buf_ptr = skb->data; 128 43 129 - outl(skb->len, mipsnet_reg_address(dev, txDataCount)); 44 + outl(skb->len, regaddr(dev, txDataCount)); 130 45 131 46 for (; count_to_go; buf_ptr++, count_to_go--) 132 - outb(*buf_ptr, mipsnet_reg_address(dev, txDataBuffer)); 47 + outb(*buf_ptr, regaddr(dev, txDataBuffer)); 133 48 134 49 dev->stats.tx_packets++; 135 50 dev->stats.tx_bytes += skb->len; 136 51 137 - return skb->len; 52 + dev_kfree_skb(skb); 138 53 } 139 54 140 55 static int mipsnet_xmit(struct sk_buff *skb, struct net_device *dev) ··· 144 69 return 0; 145 70 } 146 71 147 - static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t count) 72 + static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t len) 148 73 { 149 74 struct sk_buff *skb; 150 - size_t len = count; 151 75 152 - skb = alloc_skb(len + 2, GFP_KERNEL); 76 + if (!len) 77 + return len; 78 + 79 + skb = dev_alloc_skb(len + NET_IP_ALIGN); 153 80 if (!skb) { 154 81 dev->stats.rx_dropped++; 155 82 return -ENOMEM; 156 83 } 157 84 158 - skb_reserve(skb, 2); 85 + skb_reserve(skb, NET_IP_ALIGN); 159 86 if (ioiocpy_frommipsnet(dev, skb_put(skb, len), len)) 160 87 return -EFAULT; 161 88 ··· 169 92 dev->stats.rx_packets++; 170 93 dev->stats.rx_bytes += len; 171 94 172 - return count; 95 + return len; 173 96 } 174 97 175 98 static irqreturn_t mipsnet_interrupt(int irq, void *dev_id) 176 99 { 177 100 struct net_device *dev = dev_id; 101 + u32 int_flags; 102 + irqreturn_t ret = IRQ_NONE; 178 103 179 - irqreturn_t retval = IRQ_NONE; 180 - uint64_t interruptFlags; 104 + if (irq != dev->irq) 105 + goto out_badirq; 181 106 182 - if (irq == dev->irq) { 183 - retval = IRQ_HANDLED; 184 - 185 - interruptFlags = 186 - inl(mipsnet_reg_address(dev, interruptControl)); 187 - 188 - if (interruptFlags & MIPSNET_INTCTL_TXDONE) { 189 - outl(MIPSNET_INTCTL_TXDONE, 190 - mipsnet_reg_address(dev, interruptControl)); 191 - /* only one packet at a time, we are done. */ 192 - netif_wake_queue(dev); 193 - } else if (interruptFlags & MIPSNET_INTCTL_RXDONE) { 194 - mipsnet_get_fromdev(dev, 195 - inl(mipsnet_reg_address(dev, rxDataCount))); 196 - outl(MIPSNET_INTCTL_RXDONE, 197 - mipsnet_reg_address(dev, interruptControl)); 198 - 199 - } else if (interruptFlags & MIPSNET_INTCTL_TESTBIT) { 200 - /* 201 - * TESTBIT is cleared on read. 202 - * And takes effect after a write with 0 203 - */ 204 - outl(0, mipsnet_reg_address(dev, interruptControl)); 205 - } else { 206 - /* Maybe shared IRQ, just ignore, no clearing. 
*/ 207 - retval = IRQ_NONE; 208 - } 209 - 210 - } else { 211 - printk(KERN_INFO "%s: %s(): irq %d for unknown device\n", 212 - dev->name, __FUNCTION__, irq); 213 - retval = IRQ_NONE; 107 + /* TESTBIT is cleared on read. */ 108 + int_flags = inl(regaddr(dev, interruptControl)); 109 + if (int_flags & MIPSNET_INTCTL_TESTBIT) { 110 + /* TESTBIT takes effect after a write with 0. */ 111 + outl(0, regaddr(dev, interruptControl)); 112 + ret = IRQ_HANDLED; 113 + } else if (int_flags & MIPSNET_INTCTL_TXDONE) { 114 + /* Only one packet at a time, we are done. */ 115 + dev->stats.tx_packets++; 116 + netif_wake_queue(dev); 117 + outl(MIPSNET_INTCTL_TXDONE, 118 + regaddr(dev, interruptControl)); 119 + ret = IRQ_HANDLED; 120 + } else if (int_flags & MIPSNET_INTCTL_RXDONE) { 121 + mipsnet_get_fromdev(dev, inl(regaddr(dev, rxDataCount))); 122 + outl(MIPSNET_INTCTL_RXDONE, regaddr(dev, interruptControl)); 123 + ret = IRQ_HANDLED; 214 124 } 215 - return retval; 125 + return ret; 126 + 127 + out_badirq: 128 + printk(KERN_INFO "%s: %s(): irq %d for unknown device\n", 129 + dev->name, __FUNCTION__, irq); 130 + return ret; 216 131 } 217 132 218 133 static int mipsnet_open(struct net_device *dev) ··· 213 144 214 145 err = request_irq(dev->irq, &mipsnet_interrupt, 215 146 IRQF_SHARED, dev->name, (void *) dev); 216 - 217 147 if (err) { 218 - release_region(dev->base_addr, MIPSNET_IO_EXTENT); 148 + release_region(dev->base_addr, sizeof(struct mipsnet_regs)); 219 149 return err; 220 150 } 221 151 222 152 netif_start_queue(dev); 223 153 224 154 /* test interrupt handler */ 225 - outl(MIPSNET_INTCTL_TESTBIT, 226 - mipsnet_reg_address(dev, interruptControl)); 227 - 155 + outl(MIPSNET_INTCTL_TESTBIT, regaddr(dev, interruptControl)); 228 156 229 157 return 0; 230 158 } ··· 229 163 static int mipsnet_close(struct net_device *dev) 230 164 { 231 165 netif_stop_queue(dev); 232 - 166 + free_irq(dev->irq, dev); 233 167 return 0; 234 168 } 235 169 ··· 260 194 */ 261 195 netdev->base_addr = 0x4200; 262 196 netdev->irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB0 + 263 - inl(mipsnet_reg_address(netdev, interruptInfo)); 197 + inl(regaddr(netdev, interruptInfo)); 264 198 265 199 /* Get the io region now, get irq on open() */ 266 - if (!request_region(netdev->base_addr, MIPSNET_IO_EXTENT, "mipsnet")) { 200 + if (!request_region(netdev->base_addr, sizeof(struct mipsnet_regs), 201 + "mipsnet")) { 267 202 err = -EBUSY; 268 203 goto out_free_netdev; 269 204 } ··· 284 217 return 0; 285 218 286 219 out_free_region: 287 - release_region(netdev->base_addr, MIPSNET_IO_EXTENT); 220 + release_region(netdev->base_addr, sizeof(struct mipsnet_regs)); 288 221 289 222 out_free_netdev: 290 223 free_netdev(netdev); ··· 298 231 struct net_device *dev = dev_get_drvdata(device); 299 232 300 233 unregister_netdev(dev); 301 - release_region(dev->base_addr, MIPSNET_IO_EXTENT); 234 + release_region(dev->base_addr, sizeof(struct mipsnet_regs)); 302 235 free_netdev(dev); 303 236 dev_set_drvdata(device, NULL); 304 237
-112
drivers/net/mipsnet.h
··· 1 - /* 2 - * This file is subject to the terms and conditions of the GNU General Public 3 - * License. See the file "COPYING" in the main directory of this archive 4 - * for more details. 5 - */ 6 - #ifndef __MIPSNET_H 7 - #define __MIPSNET_H 8 - 9 - /* 10 - * Id of this Net device, as seen by the core. 11 - */ 12 - #define MIPS_NET_DEV_ID ((uint64_t) \ 13 - ((uint64_t) 'M' << 0)| \ 14 - ((uint64_t) 'I' << 8)| \ 15 - ((uint64_t) 'P' << 16)| \ 16 - ((uint64_t) 'S' << 24)| \ 17 - ((uint64_t) 'N' << 32)| \ 18 - ((uint64_t) 'E' << 40)| \ 19 - ((uint64_t) 'T' << 48)| \ 20 - ((uint64_t) '0' << 56)) 21 - 22 - /* 23 - * Net status/control block as seen by sw in the core. 24 - * (Why not use bit fields? can't be bothered with cross-platform struct 25 - * packing.) 26 - */ 27 - struct net_control_block { 28 - /* 29 - * dev info for probing 30 - * reads as MIPSNET%d where %d is some form of version 31 - */ 32 - uint64_t devId; /* 0x00 */ 33 - 34 - /* 35 - * read only busy flag. 36 - * Set and cleared by the Net Device to indicate that an rx or a tx 37 - * is in progress. 38 - */ 39 - uint32_t busy; /* 0x08 */ 40 - 41 - /* 42 - * Set by the Net Device. 43 - * The device will set it once data has been received. 44 - * The value is the number of bytes that should be read from 45 - * rxDataBuffer. The value will decrease till 0 until all the data 46 - * from rxDataBuffer has been read. 47 - */ 48 - uint32_t rxDataCount; /* 0x0c */ 49 - #define MIPSNET_MAX_RXTX_DATACOUNT (1<<16) 50 - 51 - /* 52 - * Settable from the MIPS core, cleared by the Net Device. The core 53 - * should set the number of bytes it wants to send, then it should 54 - * write those bytes of data to txDataBuffer. The device will clear 55 - * txDataCount has been processed (not necessarily sent). 56 - */ 57 - uint32_t txDataCount; /* 0x10 */ 58 - 59 - /* 60 - * Interrupt control 61 - * 62 - * Used to clear the interrupted generated by this dev. 63 - * Write a 1 to clear the interrupt. (except bit31). 64 - * 65 - * Bit0 is set if it was a tx-done interrupt. 66 - * Bit1 is set when new rx-data is available. 67 - * Until this bit is cleared there will be no other RXs. 68 - * 69 - * Bit31 is used for testing, it clears after a read. 70 - * Writing 1 to this bit will cause an interrupt to be generated. 71 - * To clear the test interrupt, write 0 to this register. 72 - */ 73 - uint32_t interruptControl; /*0x14 */ 74 - #define MIPSNET_INTCTL_TXDONE ((uint32_t)(1 << 0)) 75 - #define MIPSNET_INTCTL_RXDONE ((uint32_t)(1 << 1)) 76 - #define MIPSNET_INTCTL_TESTBIT ((uint32_t)(1 << 31)) 77 - #define MIPSNET_INTCTL_ALLSOURCES (MIPSNET_INTCTL_TXDONE | \ 78 - MIPSNET_INTCTL_RXDONE | \ 79 - MIPSNET_INTCTL_TESTBIT) 80 - 81 - /* 82 - * Readonly core-specific interrupt info for the device to signal the 83 - * core. The meaning of the contents of this field might change. 84 - * 85 - * TODO: the whole memIntf interrupt scheme is messy: the device should 86 - * have no control what so ever of what VPE/register set is being 87 - * used. The MemIntf should only expose interrupt lines, and 88 - * something in the config should be responsible for the 89 - * line<->core/vpe bindings. 90 - */ 91 - uint32_t interruptInfo; /* 0x18 */ 92 - 93 - /* 94 - * This is where the received data is read out. 95 - * There is more data to read until rxDataReady is 0. 96 - * Only 1 byte at this regs offset is used. 97 - */ 98 - uint32_t rxDataBuffer; /* 0x1c */ 99 - 100 - /* 101 - * This is where the data to transmit is written. 
Data should be 102 - * written for the amount specified in the txDataCount register. Only 103 - * 1 byte at this regs offset is used. 104 - */ 105 - uint32_t txDataBuffer; /* 0x20 */ 106 - }; 107 - 108 - #define MIPSNET_IO_EXTENT 0x40 /* being generous */ 109 - 110 - #define field_offset(field) (offsetof(struct net_control_block, field)) 111 - 112 - #endif /* __MIPSNET_H */
+2 -16
drivers/net/natsemi.c
··· 203 203 IIId. Synchronization 204 204 205 205 Most operations are synchronized on the np->lock irq spinlock, except the 206 - performance critical codepaths: 207 - 208 - The rx process only runs in the interrupt handler. Access from outside 209 - the interrupt handler is only permitted after disable_irq(). 210 - 211 - The rx process usually runs under the netif_tx_lock. If np->intr_tx_reap 212 - is set, then access is permitted under spin_lock_irq(&np->lock). 213 - 214 - Thus configuration functions that want to access everything must call 215 - disable_irq(dev->irq); 216 - netif_tx_lock_bh(dev); 217 - spin_lock_irq(&np->lock); 218 - 219 - IV. Notes 220 - 221 - NatSemi PCI network controllers are very uncommon. 206 + recieve and transmit paths which are synchronised using a combination of 207 + hardware descriptor ownership, disabling interrupts and NAPI poll scheduling. 222 208 223 209 IVb. References 224 210
+187 -72
drivers/net/pasemi_mac.c
··· 62 62 63 63 #define LRO_MAX_AGGR 64 64 64 65 + #define PE_MIN_MTU 64 66 + #define PE_MAX_MTU 1500 67 + #define PE_DEF_MTU ETH_DATA_LEN 68 + 65 69 #define DEFAULT_MSG_ENABLE \ 66 70 (NETIF_MSG_DRV | \ 67 71 NETIF_MSG_PROBE | \ ··· 85 81 #define RING_USED(ring) (((ring)->next_to_fill - (ring)->next_to_clean) \ 86 82 & ((ring)->size - 1)) 87 83 #define RING_AVAIL(ring) ((ring->size) - RING_USED(ring)) 88 - 89 - #define BUF_SIZE 1646 /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */ 90 84 91 85 MODULE_LICENSE("GPL"); 92 86 MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>"); ··· 177 175 return -1; 178 176 } 179 177 178 + static void pasemi_mac_intf_disable(struct pasemi_mac *mac) 179 + { 180 + unsigned int flags; 181 + 182 + flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); 183 + flags &= ~PAS_MAC_CFG_PCFG_PE; 184 + write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); 185 + } 186 + 187 + static void pasemi_mac_intf_enable(struct pasemi_mac *mac) 188 + { 189 + unsigned int flags; 190 + 191 + flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); 192 + flags |= PAS_MAC_CFG_PCFG_PE; 193 + write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); 194 + } 195 + 180 196 static int pasemi_get_mac_addr(struct pasemi_mac *mac) 181 197 { 182 198 struct pci_dev *pdev = mac->pdev; ··· 237 217 } 238 218 239 219 memcpy(mac->mac_addr, addr, 6); 220 + 221 + return 0; 222 + } 223 + 224 + static int pasemi_mac_set_mac_addr(struct net_device *dev, void *p) 225 + { 226 + struct pasemi_mac *mac = netdev_priv(dev); 227 + struct sockaddr *addr = p; 228 + unsigned int adr0, adr1; 229 + 230 + if (!is_valid_ether_addr(addr->sa_data)) 231 + return -EINVAL; 232 + 233 + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 234 + 235 + adr0 = dev->dev_addr[2] << 24 | 236 + dev->dev_addr[3] << 16 | 237 + dev->dev_addr[4] << 8 | 238 + dev->dev_addr[5]; 239 + adr1 = read_mac_reg(mac, PAS_MAC_CFG_ADR1); 240 + adr1 &= ~0xffff; 241 + adr1 |= dev->dev_addr[0] << 8 | dev->dev_addr[1]; 242 + 243 + pasemi_mac_intf_disable(mac); 244 + write_mac_reg(mac, PAS_MAC_CFG_ADR0, adr0); 245 + write_mac_reg(mac, PAS_MAC_CFG_ADR1, adr1); 246 + pasemi_mac_intf_enable(mac); 240 247 241 248 return 0; 242 249 } ··· 500 453 501 454 } 502 455 503 - static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac) 456 + static void pasemi_mac_free_rx_buffers(struct pasemi_mac *mac) 504 457 { 505 458 struct pasemi_mac_rxring *rx = rx_ring(mac); 506 459 unsigned int i; ··· 520 473 } 521 474 522 475 for (i = 0; i < RX_RING_SIZE; i++) 523 - RX_DESC(rx, i) = 0; 476 + RX_BUFF(rx, i) = 0; 477 + } 478 + 479 + static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac) 480 + { 481 + pasemi_mac_free_rx_buffers(mac); 524 482 525 483 dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64), 526 484 rx_ring(mac)->buffers, rx_ring(mac)->buf_dma); ··· 555 503 /* Entry in use? 
*/ 556 504 WARN_ON(*buff); 557 505 558 - skb = dev_alloc_skb(BUF_SIZE); 506 + skb = dev_alloc_skb(mac->bufsz); 559 507 skb_reserve(skb, LOCAL_SKB_ALIGN); 560 508 561 509 if (unlikely(!skb)) 562 510 break; 563 511 564 512 dma = pci_map_single(mac->dma_pdev, skb->data, 565 - BUF_SIZE - LOCAL_SKB_ALIGN, 513 + mac->bufsz - LOCAL_SKB_ALIGN, 566 514 PCI_DMA_FROMDEVICE); 567 515 568 516 if (unlikely(dma_mapping_error(dma))) { ··· 572 520 573 521 info->skb = skb; 574 522 info->dma = dma; 575 - *buff = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma); 523 + *buff = XCT_RXB_LEN(mac->bufsz) | XCT_RXB_ADDR(dma); 576 524 fill++; 577 525 } 578 526 ··· 702 650 703 651 len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S; 704 652 705 - pci_unmap_single(pdev, dma, BUF_SIZE-LOCAL_SKB_ALIGN, 653 + pci_unmap_single(pdev, dma, mac->bufsz - LOCAL_SKB_ALIGN, 706 654 PCI_DMA_FROMDEVICE); 707 655 708 656 if (macrx & XCT_MACRX_CRC) { ··· 924 872 write_iob_reg(PAS_IOB_DMA_TXCH_RESET(chan->chno), reg); 925 873 926 874 return IRQ_HANDLED; 927 - } 928 - 929 - static void pasemi_mac_intf_disable(struct pasemi_mac *mac) 930 - { 931 - unsigned int flags; 932 - 933 - flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); 934 - flags &= ~PAS_MAC_CFG_PCFG_PE; 935 - write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); 936 - } 937 - 938 - static void pasemi_mac_intf_enable(struct pasemi_mac *mac) 939 - { 940 - unsigned int flags; 941 - 942 - flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); 943 - flags |= PAS_MAC_CFG_PCFG_PE; 944 - write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); 945 875 } 946 876 947 877 static void pasemi_adjust_link(struct net_device *dev) ··· 1182 1148 1183 1149 #define MAX_RETRIES 5000 1184 1150 1151 + static void pasemi_mac_pause_txchan(struct pasemi_mac *mac) 1152 + { 1153 + unsigned int sta, retries; 1154 + int txch = tx_ring(mac)->chan.chno; 1155 + 1156 + write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), 1157 + PAS_DMA_TXCHAN_TCMDSTA_ST); 1158 + 1159 + for (retries = 0; retries < MAX_RETRIES; retries++) { 1160 + sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch)); 1161 + if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)) 1162 + break; 1163 + cond_resched(); 1164 + } 1165 + 1166 + if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT) 1167 + dev_err(&mac->dma_pdev->dev, 1168 + "Failed to stop tx channel, tcmdsta %08x\n", sta); 1169 + 1170 + write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), 0); 1171 + } 1172 + 1173 + static void pasemi_mac_pause_rxchan(struct pasemi_mac *mac) 1174 + { 1175 + unsigned int sta, retries; 1176 + int rxch = rx_ring(mac)->chan.chno; 1177 + 1178 + write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 1179 + PAS_DMA_RXCHAN_CCMDSTA_ST); 1180 + for (retries = 0; retries < MAX_RETRIES; retries++) { 1181 + sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch)); 1182 + if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)) 1183 + break; 1184 + cond_resched(); 1185 + } 1186 + 1187 + if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT) 1188 + dev_err(&mac->dma_pdev->dev, 1189 + "Failed to stop rx channel, ccmdsta 08%x\n", sta); 1190 + write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 0); 1191 + } 1192 + 1193 + static void pasemi_mac_pause_rxint(struct pasemi_mac *mac) 1194 + { 1195 + unsigned int sta, retries; 1196 + 1197 + write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 1198 + PAS_DMA_RXINT_RCMDSTA_ST); 1199 + for (retries = 0; retries < MAX_RETRIES; retries++) { 1200 + sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); 1201 + if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT)) 1202 + break; 1203 + cond_resched(); 1204 + } 1205 + 1206 + if (sta & PAS_DMA_RXINT_RCMDSTA_ACT) 1207 + dev_err(&mac->dma_pdev->dev, 1208 
+ "Failed to stop rx interface, rcmdsta %08x\n", sta); 1209 + write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0); 1210 + } 1211 + 1185 1212 static int pasemi_mac_close(struct net_device *dev) 1186 1213 { 1187 1214 struct pasemi_mac *mac = netdev_priv(dev); 1188 1215 unsigned int sta; 1189 - int retries; 1190 1216 int rxch, txch; 1191 1217 1192 1218 rxch = rx_ring(mac)->chan.chno; ··· 1284 1190 pasemi_mac_clean_tx(tx_ring(mac)); 1285 1191 pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE); 1286 1192 1287 - /* Disable interface */ 1288 - write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), 1289 - PAS_DMA_TXCHAN_TCMDSTA_ST); 1290 - write_dma_reg( PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 1291 - PAS_DMA_RXINT_RCMDSTA_ST); 1292 - write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 1293 - PAS_DMA_RXCHAN_CCMDSTA_ST); 1294 - 1295 - for (retries = 0; retries < MAX_RETRIES; retries++) { 1296 - sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(rxch)); 1297 - if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)) 1298 - break; 1299 - cond_resched(); 1300 - } 1301 - 1302 - if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT) 1303 - dev_err(&mac->dma_pdev->dev, "Failed to stop tx channel\n"); 1304 - 1305 - for (retries = 0; retries < MAX_RETRIES; retries++) { 1306 - sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch)); 1307 - if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)) 1308 - break; 1309 - cond_resched(); 1310 - } 1311 - 1312 - if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT) 1313 - dev_err(&mac->dma_pdev->dev, "Failed to stop rx channel\n"); 1314 - 1315 - for (retries = 0; retries < MAX_RETRIES; retries++) { 1316 - sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); 1317 - if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT)) 1318 - break; 1319 - cond_resched(); 1320 - } 1321 - 1322 - if (sta & PAS_DMA_RXINT_RCMDSTA_ACT) 1323 - dev_err(&mac->dma_pdev->dev, "Failed to stop rx interface\n"); 1324 - 1325 - /* Then, disable the channel. This must be done separately from 1326 - * stopping, since you can't disable when active. 1327 - */ 1328 - 1329 - write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), 0); 1330 - write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 0); 1331 - write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0); 1193 + pasemi_mac_pause_txchan(mac); 1194 + pasemi_mac_pause_rxint(mac); 1195 + pasemi_mac_pause_rxchan(mac); 1196 + pasemi_mac_intf_disable(mac); 1332 1197 1333 1198 free_irq(mac->tx->chan.irq, mac->tx); 1334 1199 free_irq(mac->rx->chan.irq, mac->rx); ··· 1441 1388 return pkts; 1442 1389 } 1443 1390 1391 + static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu) 1392 + { 1393 + struct pasemi_mac *mac = netdev_priv(dev); 1394 + unsigned int reg; 1395 + unsigned int rcmdsta; 1396 + int running; 1397 + 1398 + if (new_mtu < PE_MIN_MTU || new_mtu > PE_MAX_MTU) 1399 + return -EINVAL; 1400 + 1401 + running = netif_running(dev); 1402 + 1403 + if (running) { 1404 + /* Need to stop the interface, clean out all already 1405 + * received buffers, free all unused buffers on the RX 1406 + * interface ring, then finally re-fill the rx ring with 1407 + * the new-size buffers and restart. 1408 + */ 1409 + 1410 + napi_disable(&mac->napi); 1411 + netif_tx_disable(dev); 1412 + pasemi_mac_intf_disable(mac); 1413 + 1414 + rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); 1415 + pasemi_mac_pause_rxint(mac); 1416 + pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE); 1417 + pasemi_mac_free_rx_buffers(mac); 1418 + } 1419 + 1420 + /* Change maxf, i.e. what size frames are accepted. 
1421 + * Need room for ethernet header and CRC word 1422 + */ 1423 + reg = read_mac_reg(mac, PAS_MAC_CFG_MACCFG); 1424 + reg &= ~PAS_MAC_CFG_MACCFG_MAXF_M; 1425 + reg |= PAS_MAC_CFG_MACCFG_MAXF(new_mtu + ETH_HLEN + 4); 1426 + write_mac_reg(mac, PAS_MAC_CFG_MACCFG, reg); 1427 + 1428 + dev->mtu = new_mtu; 1429 + /* MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */ 1430 + mac->bufsz = new_mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128; 1431 + 1432 + if (running) { 1433 + write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 1434 + rcmdsta | PAS_DMA_RXINT_RCMDSTA_EN); 1435 + 1436 + rx_ring(mac)->next_to_fill = 0; 1437 + pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE-1); 1438 + 1439 + napi_enable(&mac->napi); 1440 + netif_start_queue(dev); 1441 + pasemi_mac_intf_enable(mac); 1442 + } 1443 + 1444 + return 0; 1445 + } 1446 + 1444 1447 static int __devinit 1445 1448 pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1446 1449 { ··· 1584 1475 dev->stop = pasemi_mac_close; 1585 1476 dev->hard_start_xmit = pasemi_mac_start_tx; 1586 1477 dev->set_multicast_list = pasemi_mac_set_rx_mode; 1478 + dev->set_mac_address = pasemi_mac_set_mac_addr; 1479 + dev->mtu = PE_DEF_MTU; 1480 + /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */ 1481 + mac->bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128; 1482 + 1483 + dev->change_mtu = pasemi_mac_change_mtu; 1587 1484 1588 1485 if (err) 1589 1486 goto out;
+16
drivers/net/pasemi_mac.h
··· 59 59 struct phy_device *phydev; 60 60 struct napi_struct napi; 61 61 62 + int bufsz; /* RX ring buffer size */ 62 63 u8 type; 63 64 #define MAC_TYPE_GMAC 1 64 65 #define MAC_TYPE_XAUI 2 ··· 97 96 /* MAC CFG register offsets */ 98 97 enum { 99 98 PAS_MAC_CFG_PCFG = 0x80, 99 + PAS_MAC_CFG_MACCFG = 0x84, 100 + PAS_MAC_CFG_ADR0 = 0x8c, 101 + PAS_MAC_CFG_ADR1 = 0x90, 100 102 PAS_MAC_CFG_TXP = 0x98, 101 103 PAS_MAC_IPC_CHNL = 0x208, 102 104 }; ··· 134 130 #define PAS_MAC_CFG_PCFG_SPD_100M 0x00000001 135 131 #define PAS_MAC_CFG_PCFG_SPD_1G 0x00000002 136 132 #define PAS_MAC_CFG_PCFG_SPD_10G 0x00000003 133 + 134 + #define PAS_MAC_CFG_MACCFG_TXT_M 0x70000000 135 + #define PAS_MAC_CFG_MACCFG_TXT_S 28 136 + #define PAS_MAC_CFG_MACCFG_PRES_M 0x0f000000 137 + #define PAS_MAC_CFG_MACCFG_PRES_S 24 138 + #define PAS_MAC_CFG_MACCFG_MAXF_M 0x00ffff00 139 + #define PAS_MAC_CFG_MACCFG_MAXF_S 8 140 + #define PAS_MAC_CFG_MACCFG_MAXF(x) (((x) << PAS_MAC_CFG_MACCFG_MAXF_S) & \ 141 + PAS_MAC_CFG_MACCFG_MAXF_M) 142 + #define PAS_MAC_CFG_MACCFG_MINF_M 0x000000ff 143 + #define PAS_MAC_CFG_MACCFG_MINF_S 0 144 + 137 145 #define PAS_MAC_CFG_TXP_FCF 0x01000000 138 146 #define PAS_MAC_CFG_TXP_FCE 0x00800000 139 147 #define PAS_MAC_CFG_TXP_FC 0x00400000
+25 -24
drivers/net/pci-skeleton.c
··· 541 541 #define NETDRV_W32_F(reg, val32) do { writel ((val32), ioaddr + (reg)); readl (ioaddr + (reg)); } while (0) 542 542 543 543 544 - #if MMIO_FLUSH_AUDIT_COMPLETE 544 + #ifdef MMIO_FLUSH_AUDIT_COMPLETE 545 545 546 546 /* write MMIO register */ 547 547 #define NETDRV_W8(reg, val8) writeb ((val8), ioaddr + (reg)) ··· 603 603 return -ENOMEM; 604 604 } 605 605 SET_NETDEV_DEV(dev, &pdev->dev); 606 - tp = dev->priv; 606 + tp = netdev_priv(dev); 607 607 608 608 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 609 609 rc = pci_enable_device (pdev); ··· 759 759 return i; 760 760 } 761 761 762 - tp = dev->priv; 762 + tp = netdev_priv(dev); 763 763 764 764 assert (ioaddr != NULL); 765 765 assert (dev != NULL); ··· 783 783 dev->base_addr = (unsigned long) ioaddr; 784 784 785 785 /* dev->priv/tp zeroed and aligned in alloc_etherdev */ 786 - tp = dev->priv; 786 + tp = netdev_priv(dev); 787 787 788 788 /* note: tp->chipset set in netdrv_init_board */ 789 789 tp->drv_flags = PCI_COMMAND_IO | PCI_COMMAND_MEMORY | ··· 841 841 842 842 assert (dev != NULL); 843 843 844 - np = dev->priv; 844 + np = netdev_priv(dev); 845 845 assert (np != NULL); 846 846 847 847 unregister_netdev (dev); ··· 974 974 975 975 static int mdio_read (struct net_device *dev, int phy_id, int location) 976 976 { 977 - struct netdrv_private *tp = dev->priv; 977 + struct netdrv_private *tp = netdev_priv(dev); 978 978 void *mdio_addr = tp->mmio_addr + Config4; 979 979 int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location; 980 980 int retval = 0; ··· 1017 1017 static void mdio_write (struct net_device *dev, int phy_id, int location, 1018 1018 int value) 1019 1019 { 1020 - struct netdrv_private *tp = dev->priv; 1020 + struct netdrv_private *tp = netdev_priv(dev); 1021 1021 void *mdio_addr = tp->mmio_addr + Config4; 1022 1022 int mii_cmd = 1023 1023 (0x5002 << 16) | (phy_id << 23) | (location << 18) | value; ··· 1060 1060 1061 1061 static int netdrv_open (struct net_device *dev) 1062 1062 { 1063 - struct netdrv_private *tp = dev->priv; 1063 + struct netdrv_private *tp = netdev_priv(dev); 1064 1064 int retval; 1065 1065 #ifdef NETDRV_DEBUG 1066 1066 void *ioaddr = tp->mmio_addr; ··· 1121 1121 /* Start the hardware at open or resume. */ 1122 1122 static void netdrv_hw_start (struct net_device *dev) 1123 1123 { 1124 - struct netdrv_private *tp = dev->priv; 1124 + struct netdrv_private *tp = netdev_priv(dev); 1125 1125 void *ioaddr = tp->mmio_addr; 1126 1126 u32 i; 1127 1127 ··· 1191 1191 /* Initialize the Rx and Tx rings, along with various 'dev' bits. 
*/ 1192 1192 static void netdrv_init_ring (struct net_device *dev) 1193 1193 { 1194 - struct netdrv_private *tp = dev->priv; 1194 + struct netdrv_private *tp = netdev_priv(dev); 1195 1195 int i; 1196 1196 1197 1197 DPRINTK ("ENTER\n"); ··· 1213 1213 static void netdrv_timer (unsigned long data) 1214 1214 { 1215 1215 struct net_device *dev = (struct net_device *) data; 1216 - struct netdrv_private *tp = dev->priv; 1216 + struct netdrv_private *tp = netdev_priv(dev); 1217 1217 void *ioaddr = tp->mmio_addr; 1218 1218 int next_tick = 60 * HZ; 1219 1219 int mii_lpa; ··· 1252 1252 } 1253 1253 1254 1254 1255 - static void netdrv_tx_clear (struct netdrv_private *tp) 1255 + static void netdrv_tx_clear (struct net_device *dev) 1256 1256 { 1257 1257 int i; 1258 + struct netdrv_private *tp = netdev_priv(dev); 1258 1259 1259 1260 atomic_set (&tp->cur_tx, 0); 1260 1261 atomic_set (&tp->dirty_tx, 0); ··· 1279 1278 1280 1279 static void netdrv_tx_timeout (struct net_device *dev) 1281 1280 { 1282 - struct netdrv_private *tp = dev->priv; 1281 + struct netdrv_private *tp = netdev_priv(dev); 1283 1282 void *ioaddr = tp->mmio_addr; 1284 1283 int i; 1285 1284 u8 tmp8; ··· 1312 1311 /* Stop a shared interrupt from scavenging while we are. */ 1313 1312 spin_lock_irqsave (&tp->lock, flags); 1314 1313 1315 - netdrv_tx_clear (tp); 1314 + netdrv_tx_clear (dev); 1316 1315 1317 1316 spin_unlock_irqrestore (&tp->lock, flags); 1318 1317 ··· 1326 1325 1327 1326 static int netdrv_start_xmit (struct sk_buff *skb, struct net_device *dev) 1328 1327 { 1329 - struct netdrv_private *tp = dev->priv; 1328 + struct netdrv_private *tp = netdev_priv(dev); 1330 1329 void *ioaddr = tp->mmio_addr; 1331 1330 int entry; 1332 1331 ··· 1526 1525 DPRINTK ("%s: netdrv_rx() status %4.4x, size %4.4x," 1527 1526 " cur %4.4x.\n", dev->name, rx_status, 1528 1527 rx_size, cur_rx); 1529 - #if NETDRV_DEBUG > 2 1528 + #if defined(NETDRV_DEBUG) && (NETDRV_DEBUG > 2) 1530 1529 { 1531 1530 int i; 1532 1531 DPRINTK ("%s: Frame contents ", dev->name); ··· 1649 1648 static irqreturn_t netdrv_interrupt (int irq, void *dev_instance) 1650 1649 { 1651 1650 struct net_device *dev = (struct net_device *) dev_instance; 1652 - struct netdrv_private *tp = dev->priv; 1651 + struct netdrv_private *tp = netdev_priv(dev); 1653 1652 int boguscnt = max_interrupt_work; 1654 1653 void *ioaddr = tp->mmio_addr; 1655 1654 int status = 0, link_changed = 0; /* avoid bogus "uninit" warning */ ··· 1712 1711 1713 1712 static int netdrv_close (struct net_device *dev) 1714 1713 { 1715 - struct netdrv_private *tp = dev->priv; 1714 + struct netdrv_private *tp = netdev_priv(dev); 1716 1715 void *ioaddr = tp->mmio_addr; 1717 1716 unsigned long flags; 1718 1717 ··· 1739 1738 1740 1739 spin_unlock_irqrestore (&tp->lock, flags); 1741 1740 1742 - synchronize_irq (); 1741 + synchronize_irq (dev->irq); 1743 1742 free_irq (dev->irq, dev); 1744 1743 1745 - netdrv_tx_clear (tp); 1744 + netdrv_tx_clear (dev); 1746 1745 1747 1746 pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN, 1748 1747 tp->rx_ring, tp->rx_ring_dma); ··· 1763 1762 1764 1763 static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) 1765 1764 { 1766 - struct netdrv_private *tp = dev->priv; 1765 + struct netdrv_private *tp = netdev_priv(dev); 1767 1766 struct mii_ioctl_data *data = if_mii(rq); 1768 1767 unsigned long flags; 1769 1768 int rc = 0; ··· 1806 1805 1807 1806 static void netdrv_set_rx_mode (struct net_device *dev) 1808 1807 { 1809 - struct netdrv_private *tp = dev->priv; 1808 + struct netdrv_private *tp = 
netdev_priv(dev); 1810 1809 void *ioaddr = tp->mmio_addr; 1811 1810 u32 mc_filter[2]; /* Multicast hash filter */ 1812 1811 int i, rx_mode; ··· 1863 1862 static int netdrv_suspend (struct pci_dev *pdev, pm_message_t state) 1864 1863 { 1865 1864 struct net_device *dev = pci_get_drvdata (pdev); 1866 - struct netdrv_private *tp = dev->priv; 1865 + struct netdrv_private *tp = netdev_priv(dev); 1867 1866 void *ioaddr = tp->mmio_addr; 1868 1867 unsigned long flags; 1869 1868 ··· 1893 1892 static int netdrv_resume (struct pci_dev *pdev) 1894 1893 { 1895 1894 struct net_device *dev = pci_get_drvdata (pdev); 1896 - struct netdrv_private *tp = dev->priv; 1895 + /*struct netdrv_private *tp = netdev_priv(dev);*/ 1897 1896 1898 1897 if (!netif_running(dev)) 1899 1898 return 0;
+5
drivers/net/phy/Kconfig
··· 60 60 ---help--- 61 61 Currently supports the IP175C PHY. 62 62 63 + config REALTEK_PHY 64 + tristate "Drivers for Realtek PHYs" 65 + ---help--- 66 + Supports the Realtek 821x PHY. 67 + 63 68 config FIXED_PHY 64 69 bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" 65 70 ---help---
+1
drivers/net/phy/Makefile
··· 12 12 obj-$(CONFIG_VITESSE_PHY) += vitesse.o 13 13 obj-$(CONFIG_BROADCOM_PHY) += broadcom.o 14 14 obj-$(CONFIG_ICPLUS_PHY) += icplus.o 15 + obj-$(CONFIG_REALTEK_PHY) += realtek.o 15 16 obj-$(CONFIG_FIXED_PHY) += fixed.o 16 17 obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
+20
drivers/net/phy/broadcom.c
··· 141 141 .driver = { .owner = THIS_MODULE }, 142 142 }; 143 143 144 + static struct phy_driver bcm5482_driver = { 145 + .phy_id = 0x0143bcb0, 146 + .phy_id_mask = 0xfffffff0, 147 + .name = "Broadcom BCM5482", 148 + .features = PHY_GBIT_FEATURES, 149 + .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, 150 + .config_init = bcm54xx_config_init, 151 + .config_aneg = genphy_config_aneg, 152 + .read_status = genphy_read_status, 153 + .ack_interrupt = bcm54xx_ack_interrupt, 154 + .config_intr = bcm54xx_config_intr, 155 + .driver = { .owner = THIS_MODULE }, 156 + }; 157 + 144 158 static int __init broadcom_init(void) 145 159 { 146 160 int ret; ··· 168 154 ret = phy_driver_register(&bcm5461_driver); 169 155 if (ret) 170 156 goto out_5461; 157 + ret = phy_driver_register(&bcm5482_driver); 158 + if (ret) 159 + goto out_5482; 171 160 return ret; 172 161 162 + out_5482: 163 + phy_driver_unregister(&bcm5461_driver); 173 164 out_5461: 174 165 phy_driver_unregister(&bcm5421_driver); 175 166 out_5421: ··· 185 166 186 167 static void __exit broadcom_exit(void) 187 168 { 169 + phy_driver_unregister(&bcm5482_driver); 188 170 phy_driver_unregister(&bcm5461_driver); 189 171 phy_driver_unregister(&bcm5421_driver); 190 172 phy_driver_unregister(&bcm5411_driver);
+1 -1
drivers/net/phy/mdio_bus.c
··· 49 49 int i; 50 50 int err = 0; 51 51 52 - spin_lock_init(&bus->mdio_lock); 52 + mutex_init(&bus->mdio_lock); 53 53 54 54 if (NULL == bus || NULL == bus->name || 55 55 NULL == bus->read ||
+46 -22
drivers/net/phy/phy.c
··· 26 26 #include <linux/netdevice.h> 27 27 #include <linux/etherdevice.h> 28 28 #include <linux/skbuff.h> 29 - #include <linux/spinlock.h> 30 29 #include <linux/mm.h> 31 30 #include <linux/module.h> 32 31 #include <linux/mii.h> ··· 71 72 int retval; 72 73 struct mii_bus *bus = phydev->bus; 73 74 74 - spin_lock_bh(&bus->mdio_lock); 75 + BUG_ON(in_interrupt()); 76 + 77 + mutex_lock(&bus->mdio_lock); 75 78 retval = bus->read(bus, phydev->addr, regnum); 76 - spin_unlock_bh(&bus->mdio_lock); 79 + mutex_unlock(&bus->mdio_lock); 77 80 78 81 return retval; 79 82 } ··· 96 95 int err; 97 96 struct mii_bus *bus = phydev->bus; 98 97 99 - spin_lock_bh(&bus->mdio_lock); 98 + BUG_ON(in_interrupt()); 99 + 100 + mutex_lock(&bus->mdio_lock); 100 101 err = bus->write(bus, phydev->addr, regnum, val); 101 - spin_unlock_bh(&bus->mdio_lock); 102 + mutex_unlock(&bus->mdio_lock); 102 103 103 104 return err; 104 105 } ··· 431 428 { 432 429 int err; 433 430 434 - spin_lock_bh(&phydev->lock); 431 + mutex_lock(&phydev->lock); 435 432 436 433 if (AUTONEG_DISABLE == phydev->autoneg) 437 434 phy_sanitize_settings(phydev); ··· 452 449 } 453 450 454 451 out_unlock: 455 - spin_unlock_bh(&phydev->lock); 452 + mutex_unlock(&phydev->lock); 456 453 return err; 457 454 } 458 455 EXPORT_SYMBOL(phy_start_aneg); 459 456 460 457 461 458 static void phy_change(struct work_struct *work); 459 + static void phy_state_machine(struct work_struct *work); 462 460 static void phy_timer(unsigned long data); 463 461 464 462 /** ··· 480 476 { 481 477 phydev->adjust_state = handler; 482 478 479 + INIT_WORK(&phydev->state_queue, phy_state_machine); 483 480 init_timer(&phydev->phy_timer); 484 481 phydev->phy_timer.function = &phy_timer; 485 482 phydev->phy_timer.data = (unsigned long) phydev; ··· 498 493 void phy_stop_machine(struct phy_device *phydev) 499 494 { 500 495 del_timer_sync(&phydev->phy_timer); 496 + cancel_work_sync(&phydev->state_queue); 501 497 502 - spin_lock_bh(&phydev->lock); 498 + mutex_lock(&phydev->lock); 503 499 if (phydev->state > PHY_UP) 504 500 phydev->state = PHY_UP; 505 - spin_unlock_bh(&phydev->lock); 501 + mutex_unlock(&phydev->lock); 506 502 507 503 phydev->adjust_state = NULL; 508 504 } ··· 547 541 */ 548 542 void phy_error(struct phy_device *phydev) 549 543 { 550 - spin_lock_bh(&phydev->lock); 544 + mutex_lock(&phydev->lock); 551 545 phydev->state = PHY_HALTED; 552 - spin_unlock_bh(&phydev->lock); 546 + mutex_unlock(&phydev->lock); 553 547 } 554 548 555 549 /** ··· 711 705 if (err) 712 706 goto phy_err; 713 707 714 - spin_lock_bh(&phydev->lock); 708 + mutex_lock(&phydev->lock); 715 709 if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state)) 716 710 phydev->state = PHY_CHANGELINK; 717 - spin_unlock_bh(&phydev->lock); 711 + mutex_unlock(&phydev->lock); 718 712 719 713 atomic_dec(&phydev->irq_disable); 720 714 enable_irq(phydev->irq); ··· 741 735 */ 742 736 void phy_stop(struct phy_device *phydev) 743 737 { 744 - spin_lock_bh(&phydev->lock); 738 + mutex_lock(&phydev->lock); 745 739 746 740 if (PHY_HALTED == phydev->state) 747 741 goto out_unlock; ··· 757 751 phydev->state = PHY_HALTED; 758 752 759 753 out_unlock: 760 - spin_unlock_bh(&phydev->lock); 754 + mutex_unlock(&phydev->lock); 761 755 762 756 /* 763 757 * Cannot call flush_scheduled_work() here as desired because ··· 779 773 */ 780 774 void phy_start(struct phy_device *phydev) 781 775 { 782 - spin_lock_bh(&phydev->lock); 776 + mutex_lock(&phydev->lock); 783 777 784 778 switch (phydev->state) { 785 779 case PHY_STARTING: ··· 793 787 default: 794 788 
break; 795 789 } 796 - spin_unlock_bh(&phydev->lock); 790 + mutex_unlock(&phydev->lock); 797 791 } 798 792 EXPORT_SYMBOL(phy_stop); 799 793 EXPORT_SYMBOL(phy_start); 800 794 801 - /* PHY timer which handles the state machine */ 802 - static void phy_timer(unsigned long data) 795 + /** 796 + * phy_state_machine - Handle the state machine 797 + * @work: work_struct that describes the work to be done 798 + * 799 + * Description: Scheduled by the state_queue workqueue each time 800 + * phy_timer is triggered. 801 + */ 802 + static void phy_state_machine(struct work_struct *work) 803 803 { 804 - struct phy_device *phydev = (struct phy_device *)data; 804 + struct phy_device *phydev = 805 + container_of(work, struct phy_device, state_queue); 805 806 int needs_aneg = 0; 806 807 int err = 0; 807 808 808 - spin_lock_bh(&phydev->lock); 809 + mutex_lock(&phydev->lock); 809 810 810 811 if (phydev->adjust_state) 811 812 phydev->adjust_state(phydev->attached_dev); ··· 978 965 break; 979 966 } 980 967 981 - spin_unlock_bh(&phydev->lock); 968 + mutex_unlock(&phydev->lock); 982 969 983 970 if (needs_aneg) 984 971 err = phy_start_aneg(phydev); ··· 989 976 mod_timer(&phydev->phy_timer, jiffies + PHY_STATE_TIME * HZ); 990 977 } 991 978 979 + /* PHY timer which schedules the state machine work */ 980 + static void phy_timer(unsigned long data) 981 + { 982 + struct phy_device *phydev = (struct phy_device *)data; 983 + 984 + /* 985 + * PHY I/O operations can potentially sleep so we ensure that 986 + * it's done from a process context 987 + */ 988 + schedule_work(&phydev->state_queue); 989 + }
+5 -6
drivers/net/phy/phy_device.c
··· 25 25 #include <linux/netdevice.h> 26 26 #include <linux/etherdevice.h> 27 27 #include <linux/skbuff.h> 28 - #include <linux/spinlock.h> 29 28 #include <linux/mm.h> 30 29 #include <linux/module.h> 31 30 #include <linux/mii.h> ··· 79 80 80 81 dev->state = PHY_DOWN; 81 82 82 - spin_lock_init(&dev->lock); 83 + mutex_init(&dev->lock); 83 84 84 85 return dev; 85 86 } ··· 655 656 if (!(phydrv->flags & PHY_HAS_INTERRUPT)) 656 657 phydev->irq = PHY_POLL; 657 658 658 - spin_lock_bh(&phydev->lock); 659 + mutex_lock(&phydev->lock); 659 660 660 661 /* Start out supporting everything. Eventually, 661 662 * a controller will attach, and may modify one ··· 669 670 if (phydev->drv->probe) 670 671 err = phydev->drv->probe(phydev); 671 672 672 - spin_unlock_bh(&phydev->lock); 673 + mutex_unlock(&phydev->lock); 673 674 674 675 return err; 675 676 ··· 681 682 682 683 phydev = to_phy_device(dev); 683 684 684 - spin_lock_bh(&phydev->lock); 685 + mutex_lock(&phydev->lock); 685 686 phydev->state = PHY_DOWN; 686 - spin_unlock_bh(&phydev->lock); 687 + mutex_unlock(&phydev->lock); 687 688 688 689 if (phydev->drv->remove) 689 690 phydev->drv->remove(phydev);
+80
drivers/net/phy/realtek.c
··· 1 + /* 2 + * drivers/net/phy/realtek.c 3 + * 4 + * Driver for Realtek PHYs 5 + * 6 + * Author: Johnson Leung <r58129@freescale.com> 7 + * 8 + * Copyright (c) 2004 Freescale Semiconductor, Inc. 9 + * 10 + * This program is free software; you can redistribute it and/or modify it 11 + * under the terms of the GNU General Public License as published by the 12 + * Free Software Foundation; either version 2 of the License, or (at your 13 + * option) any later version. 14 + * 15 + */ 16 + #include <linux/phy.h> 17 + 18 + #define RTL821x_PHYSR 0x11 19 + #define RTL821x_PHYSR_DUPLEX 0x2000 20 + #define RTL821x_PHYSR_SPEED 0xc000 21 + #define RTL821x_INER 0x12 22 + #define RTL821x_INER_INIT 0x6400 23 + #define RTL821x_INSR 0x13 24 + 25 + MODULE_DESCRIPTION("Realtek PHY driver"); 26 + MODULE_AUTHOR("Johnson Leung"); 27 + MODULE_LICENSE("GPL"); 28 + 29 + static int rtl821x_ack_interrupt(struct phy_device *phydev) 30 + { 31 + int err; 32 + 33 + err = phy_read(phydev, RTL821x_INSR); 34 + 35 + return (err < 0) ? err : 0; 36 + } 37 + 38 + static int rtl821x_config_intr(struct phy_device *phydev) 39 + { 40 + int err; 41 + 42 + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) 43 + err = phy_write(phydev, RTL821x_INER, 44 + RTL821x_INER_INIT); 45 + else 46 + err = phy_write(phydev, RTL821x_INER, 0); 47 + 48 + return err; 49 + } 50 + 51 + /* RTL8211B */ 52 + static struct phy_driver rtl821x_driver = { 53 + .phy_id = 0x001cc912, 54 + .name = "RTL821x Gigabit Ethernet", 55 + .phy_id_mask = 0x001fffff, 56 + .features = PHY_GBIT_FEATURES, 57 + .flags = PHY_HAS_INTERRUPT, 58 + .config_aneg = &genphy_config_aneg, 59 + .read_status = &genphy_read_status, 60 + .ack_interrupt = &rtl821x_ack_interrupt, 61 + .config_intr = &rtl821x_config_intr, 62 + .driver = { .owner = THIS_MODULE,}, 63 + }; 64 + 65 + static int __init realtek_init(void) 66 + { 67 + int ret; 68 + 69 + ret = phy_driver_register(&rtl821x_driver); 70 + 71 + return ret; 72 + } 73 + 74 + static void __exit realtek_exit(void) 75 + { 76 + phy_driver_unregister(&rtl821x_driver); 77 + } 78 + 79 + module_init(realtek_init); 80 + module_exit(realtek_exit);
+10 -10
drivers/net/s2io.c
··· 8118 8118 lro->iph = ip; 8119 8119 lro->tcph = tcp; 8120 8120 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq); 8121 - lro->tcp_ack = ntohl(tcp->ack_seq); 8121 + lro->tcp_ack = tcp->ack_seq; 8122 8122 lro->sg_num = 1; 8123 8123 lro->total_len = ntohs(ip->tot_len); 8124 8124 lro->frags_len = 0; ··· 8127 8127 * already been done. 8128 8128 */ 8129 8129 if (tcp->doff == 8) { 8130 - u32 *ptr; 8131 - ptr = (u32 *)(tcp+1); 8130 + __be32 *ptr; 8131 + ptr = (__be32 *)(tcp+1); 8132 8132 lro->saw_ts = 1; 8133 - lro->cur_tsval = *(ptr+1); 8133 + lro->cur_tsval = ntohl(*(ptr+1)); 8134 8134 lro->cur_tsecr = *(ptr+2); 8135 8135 } 8136 8136 lro->in_use = 1; ··· 8156 8156 8157 8157 /* Update tsecr field if this session has timestamps enabled */ 8158 8158 if (lro->saw_ts) { 8159 - u32 *ptr = (u32 *)(tcp + 1); 8159 + __be32 *ptr = (__be32 *)(tcp + 1); 8160 8160 *(ptr+2) = lro->cur_tsecr; 8161 8161 } 8162 8162 ··· 8181 8181 lro->window = tcp->window; 8182 8182 8183 8183 if (lro->saw_ts) { 8184 - u32 *ptr; 8184 + __be32 *ptr; 8185 8185 /* Update tsecr and tsval from this packet */ 8186 - ptr = (u32 *) (tcp + 1); 8187 - lro->cur_tsval = *(ptr + 1); 8186 + ptr = (__be32 *)(tcp+1); 8187 + lro->cur_tsval = ntohl(*(ptr+1)); 8188 8188 lro->cur_tsecr = *(ptr + 2); 8189 8189 } 8190 8190 } ··· 8235 8235 8236 8236 /* Ensure timestamp value increases monotonically */ 8237 8237 if (l_lro) 8238 - if (l_lro->cur_tsval > *((u32 *)(ptr+2))) 8238 + if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2)))) 8239 8239 return -1; 8240 8240 8241 8241 /* timestamp echo reply should be non-zero */ 8242 - if (*((u32 *)(ptr+6)) == 0) 8242 + if (*((__be32 *)(ptr+6)) == 0) 8243 8243 return -1; 8244 8244 } 8245 8245
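The s2io.c hunks above change the LRO timestamp handling to use __be32 pointers and to convert TSval with ntohl() before comparisons, since TCP option words arrive in network byte order, while TSecr is kept raw because it is only echoed back. A rough sketch of reading the timestamp option under the same doff == 8 assumption the driver makes (NOP, NOP, TSopt); the function and variable names here are illustrative, not from s2io:

#include <linux/types.h>
#include <linux/tcp.h>

/* Assumes the option area is exactly NOP, NOP, TSopt (10 bytes),
 * i.e. tcp->doff == 8, as the LRO code above checks. */
static int lro_get_timestamps(const struct tcphdr *tcp,
			      u32 *tsval, __be32 *tsecr_raw)
{
	const __be32 *ptr;

	if (tcp->doff != 8)		/* unexpected option layout */
		return -1;

	ptr = (const __be32 *)(tcp + 1);
	/* ptr[0] = NOP,NOP,kind,len; ptr[1] = TSval; ptr[2] = TSecr */
	*tsval = ntohl(ptr[1]);		/* host order, used for comparisons */
	*tsecr_raw = ptr[2];		/* left big-endian, echoed back as-is */
	return 0;
}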
+1 -1
drivers/net/s2io.h
··· 809 809 int in_use; 810 810 __be16 window; 811 811 u32 cur_tsval; 812 - u32 cur_tsecr; 812 + __be32 cur_tsecr; 813 813 u8 saw_ts; 814 814 }; 815 815
+1 -1
drivers/net/sis190.c
··· 326 326 { "SiS 191 PCI Gigabit Ethernet adapter" }, 327 327 }; 328 328 329 - static struct pci_device_id sis190_pci_tbl[] __devinitdata = { 329 + static struct pci_device_id sis190_pci_tbl[] = { 330 330 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 }, 331 331 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 }, 332 332 { 0, },
+9 -5
drivers/net/sky2.c
··· 623 623 static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD }; 624 624 static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA }; 625 625 626 + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 626 627 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); 627 628 /* Turn on/off phy power saving */ 628 629 if (onoff) ··· 635 634 reg1 |= coma_mode[port]; 636 635 637 636 sky2_pci_write32(hw, PCI_DEV_REG1, reg1); 638 - reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); 637 + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 638 + sky2_pci_read32(hw, PCI_DEV_REG1); 639 639 640 640 udelay(100); 641 641 } ··· 1424 1422 imask |= portirq_msk[port]; 1425 1423 sky2_write32(hw, B0_IMSK, imask); 1426 1424 1425 + sky2_set_multicast(dev); 1427 1426 return 0; 1428 1427 1429 1428 err_out: ··· 2439 2436 if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) { 2440 2437 u16 pci_err; 2441 2438 2439 + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 2442 2440 pci_err = sky2_pci_read16(hw, PCI_STATUS); 2443 2441 if (net_ratelimit()) 2444 2442 dev_err(&pdev->dev, "PCI hardware error (0x%x)\n", ··· 2447 2443 2448 2444 sky2_pci_write16(hw, PCI_STATUS, 2449 2445 pci_err | PCI_STATUS_ERROR_BITS); 2446 + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 2450 2447 } 2451 2448 2452 2449 if (status & Y2_IS_PCI_EXP) { 2453 2450 /* PCI-Express uncorrectable Error occurred */ 2454 2451 u32 err; 2455 2452 2453 + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 2456 2454 err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); 2457 2455 sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS, 2458 2456 0xfffffffful); ··· 2462 2456 dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err); 2463 2457 2464 2458 sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS); 2459 + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 2465 2460 } 2466 2461 2467 2462 if (status & Y2_HWE_L1_MASK) ··· 2838 2831 } 2839 2832 2840 2833 sky2_power_on(hw); 2834 + sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 2841 2835 2842 2836 for (i = 0; i < hw->ports; i++) { 2843 2837 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); ··· 3562 3554 err = sky2_up(dev); 3563 3555 if (err) 3564 3556 dev_close(dev); 3565 - else 3566 - sky2_set_multicast(dev); 3567 3557 } 3568 3558 3569 3559 return err; ··· 4395 4389 dev_close(dev); 4396 4390 goto out; 4397 4391 } 4398 - 4399 - sky2_set_multicast(dev); 4400 4392 } 4401 4393 } 4402 4394
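The sky2.c hunks above bracket every PCI config-space access with TST_CFG_WRITE_ON/OFF writes to B2_TST_CTRL1, which appears to gate writes to the chip's PCI config window. A small pair of hypothetical wrappers (not part of the driver) capturing that pattern, assuming the driver's existing struct sky2_hw and sky2_write8/sky2_pci_* helpers:

/* Hypothetical convenience wrappers: writes through the PCI config
 * window must be enabled via B2_TST_CTRL1 and disabled afterwards. */
static inline void sky2_cfg_write_begin(struct sky2_hw *hw)
{
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
}

static inline void sky2_cfg_write_end(struct sky2_hw *hw)
{
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}

/* Example: clear latched PCI error status, as the IRQ path above does. */
static void sky2_clear_pci_status(struct sky2_hw *hw)
{
	u16 pci_err;

	sky2_cfg_write_begin(hw);
	pci_err = sky2_pci_read16(hw, PCI_STATUS);
	sky2_pci_write16(hw, PCI_STATUS, pci_err | PCI_STATUS_ERROR_BITS);
	sky2_cfg_write_end(hw);
}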
+1 -1
drivers/net/sunbmac.c
··· 1075 1075 .get_link = bigmac_get_link, 1076 1076 }; 1077 1077 1078 - static int __init bigmac_ether_init(struct sbus_dev *qec_sdev) 1078 + static int __devinit bigmac_ether_init(struct sbus_dev *qec_sdev) 1079 1079 { 1080 1080 struct net_device *dev; 1081 1081 static int version_printed;
+3 -3
drivers/net/sunqe.c
··· 747 747 qecp->gregs + GLOB_RSIZE); 748 748 } 749 749 750 - static u8 __init qec_get_burst(struct device_node *dp) 750 + static u8 __devinit qec_get_burst(struct device_node *dp) 751 751 { 752 752 u8 bsizes, bsizes_more; 753 753 ··· 767 767 return bsizes; 768 768 } 769 769 770 - static struct sunqec * __init get_qec(struct sbus_dev *child_sdev) 770 + static struct sunqec * __devinit get_qec(struct sbus_dev *child_sdev) 771 771 { 772 772 struct sbus_dev *qec_sdev = child_sdev->parent; 773 773 struct sunqec *qecp; ··· 823 823 return NULL; 824 824 } 825 825 826 - static int __init qec_ether_init(struct sbus_dev *sdev) 826 + static int __devinit qec_ether_init(struct sbus_dev *sdev) 827 827 { 828 828 static unsigned version_printed; 829 829 struct net_device *dev;
+1 -1
drivers/net/sunvnet.c
··· 1130 1130 .handshake_complete = vnet_handshake_complete, 1131 1131 }; 1132 1132 1133 - static void print_version(void) 1133 + static void __devinit print_version(void) 1134 1134 { 1135 1135 static int version_printed; 1136 1136
+1 -1
drivers/net/tokenring/olympic.c
··· 434 434 435 435 } 436 436 437 - static int olympic_open(struct net_device *dev) 437 + static int __devinit olympic_open(struct net_device *dev) 438 438 { 439 439 struct olympic_private *olympic_priv=netdev_priv(dev); 440 440 u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*init_srb;
+30 -7
drivers/net/ucc_geth.c
··· 2084 2084 if (!ugeth) 2085 2085 return; 2086 2086 2087 - if (ugeth->uccf) 2087 + if (ugeth->uccf) { 2088 2088 ucc_fast_free(ugeth->uccf); 2089 + ugeth->uccf = NULL; 2090 + } 2089 2091 2090 2092 if (ugeth->p_thread_data_tx) { 2091 2093 qe_muram_free(ugeth->thread_dat_tx_offset); ··· 2306 2304 2307 2305 ug_info = ugeth->ug_info; 2308 2306 uf_info = &ug_info->uf_info; 2309 - 2310 - /* Create CQs for hash tables */ 2311 - INIT_LIST_HEAD(&ugeth->group_hash_q); 2312 - INIT_LIST_HEAD(&ugeth->ind_hash_q); 2313 2307 2314 2308 if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) || 2315 2309 (uf_info->bd_mem_part == MEM_PART_MURAM))) { ··· 3666 3668 return IRQ_HANDLED; 3667 3669 } 3668 3670 3671 + #ifdef CONFIG_NET_POLL_CONTROLLER 3672 + /* 3673 + * Polling 'interrupt' - used by things like netconsole to send skbs 3674 + * without having to re-enable interrupts. It's not called while 3675 + * the interrupt routine is executing. 3676 + */ 3677 + static void ucc_netpoll(struct net_device *dev) 3678 + { 3679 + struct ucc_geth_private *ugeth = netdev_priv(dev); 3680 + int irq = ugeth->ug_info->uf_info.irq; 3681 + 3682 + disable_irq(irq); 3683 + ucc_geth_irq_handler(irq, dev); 3684 + enable_irq(irq); 3685 + } 3686 + #endif /* CONFIG_NET_POLL_CONTROLLER */ 3687 + 3669 3688 /* Called when something needs to use the ethernet device */ 3670 3689 /* Returns 0 for success. */ 3671 3690 static int ucc_geth_open(struct net_device *dev) ··· 4005 3990 ugeth = netdev_priv(dev); 4006 3991 spin_lock_init(&ugeth->lock); 4007 3992 3993 + /* Create CQs for hash tables */ 3994 + INIT_LIST_HEAD(&ugeth->group_hash_q); 3995 + INIT_LIST_HEAD(&ugeth->ind_hash_q); 3996 + 4008 3997 dev_set_drvdata(device, dev); 4009 3998 4010 3999 /* Set the dev->base_addr to the gfar reg region */ ··· 4025 4006 #ifdef CONFIG_UGETH_NAPI 4026 4007 netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, UCC_GETH_DEV_WEIGHT); 4027 4008 #endif /* CONFIG_UGETH_NAPI */ 4009 + #ifdef CONFIG_NET_POLL_CONTROLLER 4010 + dev->poll_controller = ucc_netpoll; 4011 + #endif 4028 4012 dev->stop = ucc_geth_close; 4029 4013 // dev->change_mtu = ucc_geth_change_mtu; 4030 4014 dev->mtu = 1500; ··· 4062 4040 struct net_device *dev = dev_get_drvdata(device); 4063 4041 struct ucc_geth_private *ugeth = netdev_priv(dev); 4064 4042 4065 - dev_set_drvdata(device, NULL); 4066 - ucc_geth_memclean(ugeth); 4043 + unregister_netdev(dev); 4067 4044 free_netdev(dev); 4045 + ucc_geth_memclean(ugeth); 4046 + dev_set_drvdata(device, NULL); 4068 4047 4069 4048 return 0; 4070 4049 }
-1
drivers/net/usb/rtl8150.c
··· 926 926 netdev->set_multicast_list = rtl8150_set_multicast; 927 927 netdev->set_mac_address = rtl8150_set_mac_address; 928 928 netdev->get_stats = rtl8150_netdev_stats; 929 - netdev->mtu = RTL8150_MTU; 930 929 SET_ETHTOOL_OPS(netdev, &ops); 931 930 dev->intr_interval = 100; /* 100ms */ 932 931
+1 -1
drivers/net/via-rhine.c
··· 606 606 } 607 607 #endif 608 608 609 - static void rhine_hw_init(struct net_device *dev, long pioaddr) 609 + static void __devinit rhine_hw_init(struct net_device *dev, long pioaddr) 610 610 { 611 611 struct rhine_private *rp = netdev_priv(dev); 612 612
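The sunbmac, sunqe, sunvnet and via-rhine hunks above all retag probe-path helpers from __init to __devinit: __init text is discarded once boot finishes, so anything reachable from a hot-plug probe must not live in that section, while __devinit text is kept whenever CONFIG_HOTPLUG is enabled. A toy sketch of the convention, with made-up function names:

#include <linux/init.h>

/* Runs once at boot and is then freed with the .init.text section. */
static int __init mydrv_boot_setup(void)
{
	return 0;
}

/* Reachable from ->probe(), which may run long after boot on hotplug,
 * so it must be __devinit rather than __init. */
static int __devinit mydrv_probe_one(void)
{
	return 0;
}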
+32 -38
drivers/net/via-velocity.c
··· 8 8 * for 64bit hardware platforms. 9 9 * 10 10 * TODO 11 - * Big-endian support 12 11 * rx_copybreak/alignment 13 12 * Scatter gather 14 13 * More testing ··· 680 681 * Init state, all RD entries belong to the NIC 681 682 */ 682 683 for (i = 0; i < vptr->options.numrx; ++i) 683 - vptr->rd_ring[i].rdesc0.owner = OWNED_BY_NIC; 684 + vptr->rd_ring[i].rdesc0.len |= OWNED_BY_NIC; 684 685 685 686 writew(vptr->options.numrx, &regs->RBRDU); 686 687 writel(vptr->rd_pool_dma, &regs->RDBaseLo); ··· 776 777 777 778 vptr->int_mask = INT_MASK_DEF; 778 779 779 - writel(cpu_to_le32(vptr->rd_pool_dma), &regs->RDBaseLo); 780 + writel(vptr->rd_pool_dma, &regs->RDBaseLo); 780 781 writew(vptr->options.numrx - 1, &regs->RDCSize); 781 782 mac_rx_queue_run(regs); 782 783 mac_rx_queue_wake(regs); ··· 784 785 writew(vptr->options.numtx - 1, &regs->TDCSize); 785 786 786 787 for (i = 0; i < vptr->num_txq; i++) { 787 - writel(cpu_to_le32(vptr->td_pool_dma[i]), &(regs->TDBaseLo[i])); 788 + writel(vptr->td_pool_dma[i], &regs->TDBaseLo[i]); 788 789 mac_tx_queue_run(regs, i); 789 790 } 790 791 ··· 1194 1195 dirty = vptr->rd_dirty - unusable; 1195 1196 for (avail = vptr->rd_filled & 0xfffc; avail; avail--) { 1196 1197 dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1; 1197 - vptr->rd_ring[dirty].rdesc0.owner = OWNED_BY_NIC; 1198 + vptr->rd_ring[dirty].rdesc0.len |= OWNED_BY_NIC; 1198 1199 } 1199 1200 1200 1201 writew(vptr->rd_filled & 0xfffc, &regs->RBRDU); ··· 1209 1210 struct rx_desc *rd = vptr->rd_ring + dirty; 1210 1211 1211 1212 /* Fine for an all zero Rx desc at init time as well */ 1212 - if (rd->rdesc0.owner == OWNED_BY_NIC) 1213 + if (rd->rdesc0.len & OWNED_BY_NIC) 1213 1214 break; 1214 1215 1215 1216 if (!vptr->rd_info[dirty].skb) { ··· 1412 1413 if (!vptr->rd_info[rd_curr].skb) 1413 1414 break; 1414 1415 1415 - if (rd->rdesc0.owner == OWNED_BY_NIC) 1416 + if (rd->rdesc0.len & OWNED_BY_NIC) 1416 1417 break; 1417 1418 1418 1419 rmb(); ··· 1420 1421 /* 1421 1422 * Don't drop CE or RL error frame although RXOK is off 1422 1423 */ 1423 - if ((rd->rdesc0.RSR & RSR_RXOK) || (!(rd->rdesc0.RSR & RSR_RXOK) && (rd->rdesc0.RSR & (RSR_CE | RSR_RL)))) { 1424 + if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) { 1424 1425 if (velocity_receive_frame(vptr, rd_curr) < 0) 1425 1426 stats->rx_dropped++; 1426 1427 } else { ··· 1432 1433 stats->rx_dropped++; 1433 1434 } 1434 1435 1435 - rd->inten = 1; 1436 + rd->size |= RX_INTEN; 1436 1437 1437 1438 vptr->dev->last_rx = jiffies; 1438 1439 ··· 1553 1554 struct net_device_stats *stats = &vptr->stats; 1554 1555 struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]); 1555 1556 struct rx_desc *rd = &(vptr->rd_ring[idx]); 1556 - int pkt_len = rd->rdesc0.len; 1557 + int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff; 1557 1558 struct sk_buff *skb; 1558 1559 1559 1560 if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) { ··· 1636 1637 */ 1637 1638 1638 1639 *((u32 *) & (rd->rdesc0)) = 0; 1639 - rd->len = cpu_to_le32(vptr->rx_buf_sz); 1640 - rd->inten = 1; 1640 + rd->size = cpu_to_le16(vptr->rx_buf_sz) | RX_INTEN; 1641 1641 rd->pa_low = cpu_to_le32(rd_info->skb_dma); 1642 1642 rd->pa_high = 0; 1643 1643 return 0; ··· 1672 1674 td = &(vptr->td_rings[qnum][idx]); 1673 1675 tdinfo = &(vptr->td_infos[qnum][idx]); 1674 1676 1675 - if (td->tdesc0.owner == OWNED_BY_NIC) 1677 + if (td->tdesc0.len & OWNED_BY_NIC) 1676 1678 break; 1677 1679 1678 1680 if ((works++ > 15)) ··· 1872 1874 1873 1875 for (i = 0; i < tdinfo->nskb_dma; i++) { 1874 1876 #ifdef VELOCITY_ZERO_COPY_SUPPORT 1875 - 
pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], td->tdesc1.len, PCI_DMA_TODEVICE); 1877 + pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], le16_to_cpu(td->tdesc1.len), PCI_DMA_TODEVICE); 1876 1878 #else 1877 1879 pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], skb->len, PCI_DMA_TODEVICE); 1878 1880 #endif ··· 2065 2067 struct velocity_td_info *tdinfo; 2066 2068 unsigned long flags; 2067 2069 int index; 2068 - 2069 2070 int pktlen = skb->len; 2071 + __le16 len = cpu_to_le16(pktlen); 2070 2072 2071 2073 #ifdef VELOCITY_ZERO_COPY_SUPPORT 2072 2074 if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) { ··· 2081 2083 td_ptr = &(vptr->td_rings[qnum][index]); 2082 2084 tdinfo = &(vptr->td_infos[qnum][index]); 2083 2085 2084 - td_ptr->tdesc1.TCPLS = TCPLS_NORMAL; 2085 2086 td_ptr->tdesc1.TCR = TCR0_TIC; 2086 - td_ptr->td_buf[0].queue = 0; 2087 + td_ptr->td_buf[0].size &= ~TD_QUEUE; 2087 2088 2088 2089 /* 2089 2090 * Pad short frames. ··· 2090 2093 if (pktlen < ETH_ZLEN) { 2091 2094 /* Cannot occur until ZC support */ 2092 2095 pktlen = ETH_ZLEN; 2096 + len = cpu_to_le16(ETH_ZLEN); 2093 2097 skb_copy_from_linear_data(skb, tdinfo->buf, skb->len); 2094 2098 memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len); 2095 2099 tdinfo->skb = skb; 2096 2100 tdinfo->skb_dma[0] = tdinfo->buf_dma; 2097 - td_ptr->tdesc0.pktsize = pktlen; 2101 + td_ptr->tdesc0.len = len; 2098 2102 td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); 2099 2103 td_ptr->td_buf[0].pa_high = 0; 2100 - td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize; 2104 + td_ptr->td_buf[0].size = len; /* queue is 0 anyway */ 2101 2105 tdinfo->nskb_dma = 1; 2102 - td_ptr->tdesc1.CMDZ = 2; 2103 2106 } else 2104 2107 #ifdef VELOCITY_ZERO_COPY_SUPPORT 2105 2108 if (skb_shinfo(skb)->nr_frags > 0) { ··· 2108 2111 if (nfrags > 6) { 2109 2112 skb_copy_from_linear_data(skb, tdinfo->buf, skb->len); 2110 2113 tdinfo->skb_dma[0] = tdinfo->buf_dma; 2111 - td_ptr->tdesc0.pktsize = 2114 + td_ptr->tdesc0.len = len; 2112 2115 td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); 2113 2116 td_ptr->td_buf[0].pa_high = 0; 2114 - td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize; 2117 + td_ptr->td_buf[0].size = len; /* queue is 0 anyway */ 2115 2118 tdinfo->nskb_dma = 1; 2116 - td_ptr->tdesc1.CMDZ = 2; 2117 2119 } else { 2118 2120 int i = 0; 2119 2121 tdinfo->nskb_dma = 0; 2120 - tdinfo->skb_dma[i] = pci_map_single(vptr->pdev, skb->data, skb->len - skb->data_len, PCI_DMA_TODEVICE); 2122 + tdinfo->skb_dma[i] = pci_map_single(vptr->pdev, skb->data, 2123 + skb_headlen(skb), PCI_DMA_TODEVICE); 2121 2124 2122 - td_ptr->tdesc0.pktsize = pktlen; 2125 + td_ptr->tdesc0.len = len; 2123 2126 2124 2127 /* FIXME: support 48bit DMA later */ 2125 2128 td_ptr->td_buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma); 2126 2129 td_ptr->td_buf[i].pa_high = 0; 2127 - td_ptr->td_buf[i].bufsize = skb->len->skb->data_len; 2130 + td_ptr->td_buf[i].size = cpu_to_le16(skb_headlen(skb)); 2128 2131 2129 2132 for (i = 0; i < nfrags; i++) { 2130 2133 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2131 - void *addr = ((void *) page_address(frag->page + frag->page_offset)); 2134 + void *addr = (void *)page_address(frag->page) + frag->page_offset; 2132 2135 2133 2136 tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE); 2134 2137 2135 2138 td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]); 2136 2139 td_ptr->td_buf[i + 1].pa_high = 0; 2137 - td_ptr->td_buf[i + 1].bufsize = frag->size; 2140 + td_ptr->td_buf[i + 1].size = 
cpu_to_le16(frag->size); 2138 2141 } 2139 2142 tdinfo->nskb_dma = i - 1; 2140 - td_ptr->tdesc1.CMDZ = i; 2141 2143 } 2142 2144 2143 2145 } else ··· 2148 2152 */ 2149 2153 tdinfo->skb = skb; 2150 2154 tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE); 2151 - td_ptr->tdesc0.pktsize = pktlen; 2155 + td_ptr->tdesc0.len = len; 2152 2156 td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); 2153 2157 td_ptr->td_buf[0].pa_high = 0; 2154 - td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize; 2158 + td_ptr->td_buf[0].size = len; 2155 2159 tdinfo->nskb_dma = 1; 2156 - td_ptr->tdesc1.CMDZ = 2; 2157 2160 } 2161 + td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16; 2158 2162 2159 2163 if (vptr->vlgrp && vlan_tx_tag_present(skb)) { 2160 - td_ptr->tdesc1.pqinf.VID = vlan_tx_tag_get(skb); 2161 - td_ptr->tdesc1.pqinf.priority = 0; 2162 - td_ptr->tdesc1.pqinf.CFI = 0; 2164 + td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb)); 2163 2165 td_ptr->tdesc1.TCR |= TCR0_VETAG; 2164 2166 } 2165 2167 ··· 2179 2185 2180 2186 if (prev < 0) 2181 2187 prev = vptr->options.numtx - 1; 2182 - td_ptr->tdesc0.owner = OWNED_BY_NIC; 2188 + td_ptr->tdesc0.len |= OWNED_BY_NIC; 2183 2189 vptr->td_used[qnum]++; 2184 2190 vptr->td_curr[qnum] = (index + 1) % vptr->options.numtx; 2185 2191 ··· 2187 2193 netif_stop_queue(dev); 2188 2194 2189 2195 td_ptr = &(vptr->td_rings[qnum][prev]); 2190 - td_ptr->td_buf[0].queue = 1; 2196 + td_ptr->td_buf[0].size |= TD_QUEUE; 2191 2197 mac_tx_queue_wake(vptr->mac_regs, qnum); 2192 2198 } 2193 2199 dev->trans_start = jiffies; ··· 3404 3410 velocity_save_context(vptr, &vptr->context); 3405 3411 velocity_shutdown(vptr); 3406 3412 velocity_set_wol(vptr); 3407 - pci_enable_wake(pdev, 3, 1); 3413 + pci_enable_wake(pdev, PCI_D3hot, 1); 3408 3414 pci_set_power_state(pdev, PCI_D3hot); 3409 3415 } else { 3410 3416 velocity_save_context(vptr, &vptr->context);
+97 -129
drivers/net/via-velocity.h
··· 70 70 * Bits in the RSR0 register 71 71 */ 72 72 73 - #define RSR_DETAG 0x0080 74 - #define RSR_SNTAG 0x0040 75 - #define RSR_RXER 0x0020 76 - #define RSR_RL 0x0010 77 - #define RSR_CE 0x0008 78 - #define RSR_FAE 0x0004 79 - #define RSR_CRC 0x0002 80 - #define RSR_VIDM 0x0001 73 + #define RSR_DETAG cpu_to_le16(0x0080) 74 + #define RSR_SNTAG cpu_to_le16(0x0040) 75 + #define RSR_RXER cpu_to_le16(0x0020) 76 + #define RSR_RL cpu_to_le16(0x0010) 77 + #define RSR_CE cpu_to_le16(0x0008) 78 + #define RSR_FAE cpu_to_le16(0x0004) 79 + #define RSR_CRC cpu_to_le16(0x0002) 80 + #define RSR_VIDM cpu_to_le16(0x0001) 81 81 82 82 /* 83 83 * Bits in the RSR1 register 84 84 */ 85 85 86 - #define RSR_RXOK 0x8000 // rx OK 87 - #define RSR_PFT 0x4000 // Perfect filtering address match 88 - #define RSR_MAR 0x2000 // MAC accept multicast address packet 89 - #define RSR_BAR 0x1000 // MAC accept broadcast address packet 90 - #define RSR_PHY 0x0800 // MAC accept physical address packet 91 - #define RSR_VTAG 0x0400 // 802.1p/1q tagging packet indicator 92 - #define RSR_STP 0x0200 // start of packet 93 - #define RSR_EDP 0x0100 // end of packet 94 - 95 - /* 96 - * Bits in the RSR1 register 97 - */ 98 - 99 - #define RSR1_RXOK 0x80 // rx OK 100 - #define RSR1_PFT 0x40 // Perfect filtering address match 101 - #define RSR1_MAR 0x20 // MAC accept multicast address packet 102 - #define RSR1_BAR 0x10 // MAC accept broadcast address packet 103 - #define RSR1_PHY 0x08 // MAC accept physical address packet 104 - #define RSR1_VTAG 0x04 // 802.1p/1q tagging packet indicator 105 - #define RSR1_STP 0x02 // start of packet 106 - #define RSR1_EDP 0x01 // end of packet 86 + #define RSR_RXOK cpu_to_le16(0x8000) // rx OK 87 + #define RSR_PFT cpu_to_le16(0x4000) // Perfect filtering address match 88 + #define RSR_MAR cpu_to_le16(0x2000) // MAC accept multicast address packet 89 + #define RSR_BAR cpu_to_le16(0x1000) // MAC accept broadcast address packet 90 + #define RSR_PHY cpu_to_le16(0x0800) // MAC accept physical address packet 91 + #define RSR_VTAG cpu_to_le16(0x0400) // 802.1p/1q tagging packet indicator 92 + #define RSR_STP cpu_to_le16(0x0200) // start of packet 93 + #define RSR_EDP cpu_to_le16(0x0100) // end of packet 107 94 108 95 /* 109 96 * Bits in the CSM register ··· 107 120 * Bits in the TSR0 register 108 121 */ 109 122 110 - #define TSR0_ABT 0x0080 // Tx abort because of excessive collision 111 - #define TSR0_OWT 0x0040 // Jumbo frame Tx abort 112 - #define TSR0_OWC 0x0020 // Out of window collision 113 - #define TSR0_COLS 0x0010 // experience collision in this transmit event 114 - #define TSR0_NCR3 0x0008 // collision retry counter[3] 115 - #define TSR0_NCR2 0x0004 // collision retry counter[2] 116 - #define TSR0_NCR1 0x0002 // collision retry counter[1] 117 - #define TSR0_NCR0 0x0001 // collision retry counter[0] 118 - #define TSR0_TERR 0x8000 // 119 - #define TSR0_FDX 0x4000 // current transaction is serviced by full duplex mode 120 - #define TSR0_GMII 0x2000 // current transaction is serviced by GMII mode 121 - #define TSR0_LNKFL 0x1000 // packet serviced during link down 122 - #define TSR0_SHDN 0x0400 // shutdown case 123 - #define TSR0_CRS 0x0200 // carrier sense lost 124 - #define TSR0_CDH 0x0100 // AQE test fail (CD heartbeat) 125 - 126 - /* 127 - * Bits in the TSR1 register 128 - */ 129 - 130 - #define TSR1_TERR 0x80 // 131 - #define TSR1_FDX 0x40 // current transaction is serviced by full duplex mode 132 - #define TSR1_GMII 0x20 // current transaction is serviced by GMII mode 133 - #define TSR1_LNKFL 0x10 // 
packet serviced during link down 134 - #define TSR1_SHDN 0x04 // shutdown case 135 - #define TSR1_CRS 0x02 // carrier sense lost 136 - #define TSR1_CDH 0x01 // AQE test fail (CD heartbeat) 123 + #define TSR0_ABT cpu_to_le16(0x0080) // Tx abort because of excessive collision 124 + #define TSR0_OWT cpu_to_le16(0x0040) // Jumbo frame Tx abort 125 + #define TSR0_OWC cpu_to_le16(0x0020) // Out of window collision 126 + #define TSR0_COLS cpu_to_le16(0x0010) // experience collision in this transmit event 127 + #define TSR0_NCR3 cpu_to_le16(0x0008) // collision retry counter[3] 128 + #define TSR0_NCR2 cpu_to_le16(0x0004) // collision retry counter[2] 129 + #define TSR0_NCR1 cpu_to_le16(0x0002) // collision retry counter[1] 130 + #define TSR0_NCR0 cpu_to_le16(0x0001) // collision retry counter[0] 131 + #define TSR0_TERR cpu_to_le16(0x8000) // 132 + #define TSR0_FDX cpu_to_le16(0x4000) // current transaction is serviced by full duplex mode 133 + #define TSR0_GMII cpu_to_le16(0x2000) // current transaction is serviced by GMII mode 134 + #define TSR0_LNKFL cpu_to_le16(0x1000) // packet serviced during link down 135 + #define TSR0_SHDN cpu_to_le16(0x0400) // shutdown case 136 + #define TSR0_CRS cpu_to_le16(0x0200) // carrier sense lost 137 + #define TSR0_CDH cpu_to_le16(0x0100) // AQE test fail (CD heartbeat) 137 138 138 139 // 139 140 // Bits in the TCR0 register ··· 172 197 */ 173 198 174 199 struct rdesc0 { 175 - u16 RSR; /* Receive status */ 176 - u16 len:14; /* Received packet length */ 177 - u16 reserved:1; 178 - u16 owner:1; /* Who owns this buffer ? */ 200 + __le16 RSR; /* Receive status */ 201 + __le16 len; /* bits 0--13; bit 15 - owner */ 179 202 }; 180 203 181 204 struct rdesc1 { 182 - u16 PQTAG; 205 + __le16 PQTAG; 183 206 u8 CSM; 184 207 u8 IPKT; 208 + }; 209 + 210 + enum { 211 + RX_INTEN = __constant_cpu_to_le16(0x8000) 185 212 }; 186 213 187 214 struct rx_desc { 188 215 struct rdesc0 rdesc0; 189 216 struct rdesc1 rdesc1; 190 - u32 pa_low; /* Low 32 bit PCI address */ 191 - u16 pa_high; /* Next 16 bit PCI address (48 total) */ 192 - u16 len:15; /* Frame size */ 193 - u16 inten:1; /* Enable interrupt */ 217 + __le32 pa_low; /* Low 32 bit PCI address */ 218 + __le16 pa_high; /* Next 16 bit PCI address (48 total) */ 219 + __le16 size; /* bits 0--14 - frame size, bit 15 - enable int. 
*/ 194 220 } __attribute__ ((__packed__)); 195 221 196 222 /* ··· 199 223 */ 200 224 201 225 struct tdesc0 { 202 - u16 TSR; /* Transmit status register */ 203 - u16 pktsize:14; /* Size of frame */ 204 - u16 reserved:1; 205 - u16 owner:1; /* Who owns the buffer */ 226 + __le16 TSR; /* Transmit status register */ 227 + __le16 len; /* bits 0--13 - size of frame, bit 15 - owner */ 206 228 }; 207 229 208 - struct pqinf { /* Priority queue info */ 209 - u16 VID:12; 210 - u16 CFI:1; 211 - u16 priority:3; 230 + struct tdesc1 { 231 + __le16 vlan; 232 + u8 TCR; 233 + u8 cmd; /* bits 0--1 - TCPLS, bits 4--7 - CMDZ */ 212 234 } __attribute__ ((__packed__)); 213 235 214 - struct tdesc1 { 215 - struct pqinf pqinf; 216 - u8 TCR; 217 - u8 TCPLS:2; 218 - u8 reserved:2; 219 - u8 CMDZ:4; 220 - } __attribute__ ((__packed__)); 236 + enum { 237 + TD_QUEUE = __constant_cpu_to_le16(0x8000) 238 + }; 221 239 222 240 struct td_buf { 223 - u32 pa_low; 224 - u16 pa_high; 225 - u16 bufsize:14; 226 - u16 reserved:1; 227 - u16 queue:1; 241 + __le32 pa_low; 242 + __le16 pa_high; 243 + __le16 size; /* bits 0--13 - size, bit 15 - queue */ 228 244 } __attribute__ ((__packed__)); 229 245 230 246 struct tx_desc { ··· 244 276 245 277 enum velocity_owner { 246 278 OWNED_BY_HOST = 0, 247 - OWNED_BY_NIC = 1 279 + OWNED_BY_NIC = __constant_cpu_to_le16(0x8000) 248 280 }; 249 281 250 282 ··· 980 1012 volatile u8 RCR; 981 1013 volatile u8 TCR; 982 1014 983 - volatile u32 CR0Set; /* 0x08 */ 984 - volatile u32 CR0Clr; /* 0x0C */ 1015 + volatile __le32 CR0Set; /* 0x08 */ 1016 + volatile __le32 CR0Clr; /* 0x0C */ 985 1017 986 1018 volatile u8 MARCAM[8]; /* 0x10 */ 987 1019 988 - volatile u32 DecBaseHi; /* 0x18 */ 989 - volatile u16 DbfBaseHi; /* 0x1C */ 990 - volatile u16 reserved_1E; 1020 + volatile __le32 DecBaseHi; /* 0x18 */ 1021 + volatile __le16 DbfBaseHi; /* 0x1C */ 1022 + volatile __le16 reserved_1E; 991 1023 992 - volatile u16 ISRCTL; /* 0x20 */ 1024 + volatile __le16 ISRCTL; /* 0x20 */ 993 1025 volatile u8 TXESR; 994 1026 volatile u8 RXESR; 995 1027 996 - volatile u32 ISR; /* 0x24 */ 997 - volatile u32 IMR; 1028 + volatile __le32 ISR; /* 0x24 */ 1029 + volatile __le32 IMR; 998 1030 999 - volatile u32 TDStatusPort; /* 0x2C */ 1031 + volatile __le32 TDStatusPort; /* 0x2C */ 1000 1032 1001 - volatile u16 TDCSRSet; /* 0x30 */ 1033 + volatile __le16 TDCSRSet; /* 0x30 */ 1002 1034 volatile u8 RDCSRSet; 1003 1035 volatile u8 reserved_33; 1004 - volatile u16 TDCSRClr; 1036 + volatile __le16 TDCSRClr; 1005 1037 volatile u8 RDCSRClr; 1006 1038 volatile u8 reserved_37; 1007 1039 1008 - volatile u32 RDBaseLo; /* 0x38 */ 1009 - volatile u16 RDIdx; /* 0x3C */ 1010 - volatile u16 reserved_3E; 1040 + volatile __le32 RDBaseLo; /* 0x38 */ 1041 + volatile __le16 RDIdx; /* 0x3C */ 1042 + volatile __le16 reserved_3E; 1011 1043 1012 - volatile u32 TDBaseLo[4]; /* 0x40 */ 1044 + volatile __le32 TDBaseLo[4]; /* 0x40 */ 1013 1045 1014 - volatile u16 RDCSize; /* 0x50 */ 1015 - volatile u16 TDCSize; /* 0x52 */ 1016 - volatile u16 TDIdx[4]; /* 0x54 */ 1017 - volatile u16 tx_pause_timer; /* 0x5C */ 1018 - volatile u16 RBRDU; /* 0x5E */ 1046 + volatile __le16 RDCSize; /* 0x50 */ 1047 + volatile __le16 TDCSize; /* 0x52 */ 1048 + volatile __le16 TDIdx[4]; /* 0x54 */ 1049 + volatile __le16 tx_pause_timer; /* 0x5C */ 1050 + volatile __le16 RBRDU; /* 0x5E */ 1019 1051 1020 - volatile u32 FIFOTest0; /* 0x60 */ 1021 - volatile u32 FIFOTest1; /* 0x64 */ 1052 + volatile __le32 FIFOTest0; /* 0x60 */ 1053 + volatile __le32 FIFOTest1; /* 0x64 */ 1022 1054 1023 1055 
volatile u8 CAMADDR; /* 0x68 */ 1024 1056 volatile u8 CAMCR; /* 0x69 */ ··· 1031 1063 volatile u8 PHYSR1; 1032 1064 volatile u8 MIICR; 1033 1065 volatile u8 MIIADR; 1034 - volatile u16 MIIDATA; 1066 + volatile __le16 MIIDATA; 1035 1067 1036 - volatile u16 SoftTimer0; /* 0x74 */ 1037 - volatile u16 SoftTimer1; 1068 + volatile __le16 SoftTimer0; /* 0x74 */ 1069 + volatile __le16 SoftTimer1; 1038 1070 1039 1071 volatile u8 CFGA; /* 0x78 */ 1040 1072 volatile u8 CFGB; 1041 1073 volatile u8 CFGC; 1042 1074 volatile u8 CFGD; 1043 1075 1044 - volatile u16 DCFG; /* 0x7C */ 1045 - volatile u16 MCFG; 1076 + volatile __le16 DCFG; /* 0x7C */ 1077 + volatile __le16 MCFG; 1046 1078 1047 1079 volatile u8 TBIST; /* 0x80 */ 1048 1080 volatile u8 RBIST; ··· 1054 1086 volatile u8 rev_id; 1055 1087 volatile u8 PORSTS; 1056 1088 1057 - volatile u32 MIBData; /* 0x88 */ 1089 + volatile __le32 MIBData; /* 0x88 */ 1058 1090 1059 - volatile u16 EEWrData; 1091 + volatile __le16 EEWrData; 1060 1092 1061 1093 volatile u8 reserved_8E; 1062 1094 volatile u8 BPMDWr; ··· 1066 1098 volatile u8 EECHKSUM; /* 0x92 */ 1067 1099 volatile u8 EECSR; 1068 1100 1069 - volatile u16 EERdData; /* 0x94 */ 1101 + volatile __le16 EERdData; /* 0x94 */ 1070 1102 volatile u8 EADDR; 1071 1103 volatile u8 EMBCMD; 1072 1104 ··· 1080 1112 volatile u8 DEBUG; 1081 1113 volatile u8 CHIPGCR; 1082 1114 1083 - volatile u16 WOLCRSet; /* 0xA0 */ 1115 + volatile __le16 WOLCRSet; /* 0xA0 */ 1084 1116 volatile u8 PWCFGSet; 1085 1117 volatile u8 WOLCFGSet; 1086 1118 1087 - volatile u16 WOLCRClr; /* 0xA4 */ 1119 + volatile __le16 WOLCRClr; /* 0xA4 */ 1088 1120 volatile u8 PWCFGCLR; 1089 1121 volatile u8 WOLCFGClr; 1090 1122 1091 - volatile u16 WOLSRSet; /* 0xA8 */ 1092 - volatile u16 reserved_AA; 1123 + volatile __le16 WOLSRSet; /* 0xA8 */ 1124 + volatile __le16 reserved_AA; 1093 1125 1094 - volatile u16 WOLSRClr; /* 0xAC */ 1095 - volatile u16 reserved_AE; 1126 + volatile __le16 WOLSRClr; /* 0xAC */ 1127 + volatile __le16 reserved_AE; 1096 1128 1097 - volatile u16 PatternCRC[8]; /* 0xB0 */ 1098 - volatile u32 ByteMask[4][4]; /* 0xC0 */ 1129 + volatile __le16 PatternCRC[8]; /* 0xB0 */ 1130 + volatile __le32 ByteMask[4][4]; /* 0xC0 */ 1099 1131 } __attribute__ ((__packed__)); 1100 1132 1101 1133 ··· 1206 1238 struct arp_packet { 1207 1239 u8 dest_mac[ETH_ALEN]; 1208 1240 u8 src_mac[ETH_ALEN]; 1209 - u16 type; 1210 - u16 ar_hrd; 1211 - u16 ar_pro; 1241 + __be16 type; 1242 + __be16 ar_hrd; 1243 + __be16 ar_pro; 1212 1244 u8 ar_hln; 1213 1245 u8 ar_pln; 1214 - u16 ar_op; 1246 + __be16 ar_op; 1215 1247 u8 ar_sha[ETH_ALEN]; 1216 1248 u8 ar_sip[4]; 1217 1249 u8 ar_tha[ETH_ALEN]; ··· 1221 1253 struct _magic_packet { 1222 1254 u8 dest_mac[6]; 1223 1255 u8 src_mac[6]; 1224 - u16 type; 1256 + __be16 type; 1225 1257 u8 MAC[16][6]; 1226 1258 u8 password[6]; 1227 1259 } __attribute__ ((__packed__));
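The via-velocity.c/.h hunks above drop the C bitfields in the DMA descriptors in favour of explicit __le16/__le32 fields plus flag masks (OWNED_BY_NIC, RX_INTEN, TD_QUEUE), so the device-visible layout no longer depends on host byte order or bit-field ordering, and the "Big-endian support" TODO can go. A reduced sketch of the idea, using simplified descriptor and field names rather than the driver's:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Device-visible descriptor: every multi-byte field is explicitly
 * little-endian, and flag bits live in the high bits of the field. */
struct demo_rx_desc {
	__le16 status;
	__le16 len;		/* bits 0-13: frame length, bit 15: NIC owns it */
	__le32 buf_lo;
	__le16 buf_hi;
	__le16 size;		/* bits 0-14: buffer size, bit 15: int enable */
} __attribute__ ((__packed__));

#define DEMO_OWNED_BY_NIC	cpu_to_le16(0x8000)
#define DEMO_RX_INTEN		cpu_to_le16(0x8000)

static inline void demo_give_to_nic(struct demo_rx_desc *rd, u16 bufsz)
{
	rd->size = cpu_to_le16(bufsz) | DEMO_RX_INTEN;
	rd->len |= DEMO_OWNED_BY_NIC;	/* constant is already le, no swap */
}

static inline int demo_frame_len(const struct demo_rx_desc *rd)
{
	return le16_to_cpu(rd->len) & 0x3fff;	/* strip the owner bit */
}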
+3 -3
drivers/net/wireless/ath5k/base.c
··· 153 153 #define ath5k_pci_resume NULL 154 154 #endif /* CONFIG_PM */ 155 155 156 - static struct pci_driver ath5k_pci_drv_id = { 156 + static struct pci_driver ath5k_pci_driver = { 157 157 .name = "ath5k_pci", 158 158 .id_table = ath5k_pci_id_table, 159 159 .probe = ath5k_pci_probe, ··· 329 329 330 330 ath5k_debug_init(); 331 331 332 - ret = pci_register_driver(&ath5k_pci_drv_id); 332 + ret = pci_register_driver(&ath5k_pci_driver); 333 333 if (ret) { 334 334 printk(KERN_ERR "ath5k_pci: can't register pci driver\n"); 335 335 return ret; ··· 341 341 static void __exit 342 342 exit_ath5k_pci(void) 343 343 { 344 - pci_unregister_driver(&ath5k_pci_drv_id); 344 + pci_unregister_driver(&ath5k_pci_driver); 345 345 346 346 ath5k_debug_finish(); 347 347 }
+4 -3
drivers/net/wireless/iwlwifi/iwl-3945.c
··· 238 238 priv->last_statistics_time = jiffies; 239 239 } 240 240 241 - void iwl3945_add_radiotap(struct iwl3945_priv *priv, struct sk_buff *skb, 242 - struct iwl3945_rx_frame_hdr *rx_hdr, 243 - struct ieee80211_rx_status *stats) 241 + static void iwl3945_add_radiotap(struct iwl3945_priv *priv, 242 + struct sk_buff *skb, 243 + struct iwl3945_rx_frame_hdr *rx_hdr, 244 + struct ieee80211_rx_status *stats) 244 245 { 245 246 /* First cache any information we need before we overwrite 246 247 * the information provided in the skb from the hardware */
+18 -5
drivers/net/wireless/iwlwifi/iwl-4965.c
··· 4658 4658 struct ieee80211_ht_info *sta_ht_inf) 4659 4659 { 4660 4660 __le32 sta_flags; 4661 + u8 mimo_ps_mode; 4661 4662 4662 4663 if (!sta_ht_inf || !sta_ht_inf->ht_supported) 4663 4664 goto done; 4664 4665 4666 + mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2; 4667 + 4665 4668 sta_flags = priv->stations[index].sta.station_flags; 4666 4669 4667 - if (((sta_ht_inf->cap & IEEE80211_HT_CAP_MIMO_PS >> 2)) 4668 - == IWL_MIMO_PS_DYNAMIC) 4670 + sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK); 4671 + 4672 + switch (mimo_ps_mode) { 4673 + case WLAN_HT_CAP_MIMO_PS_STATIC: 4674 + sta_flags |= STA_FLG_MIMO_DIS_MSK; 4675 + break; 4676 + case WLAN_HT_CAP_MIMO_PS_DYNAMIC: 4669 4677 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK; 4670 - else 4671 - sta_flags &= ~STA_FLG_RTS_MIMO_PROT_MSK; 4678 + break; 4679 + case WLAN_HT_CAP_MIMO_PS_DISABLED: 4680 + break; 4681 + default: 4682 + IWL_WARNING("Invalid MIMO PS mode %d", mimo_ps_mode); 4683 + break; 4684 + } 4672 4685 4673 4686 sta_flags |= cpu_to_le32( 4674 4687 (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS); ··· 4692 4679 if (iwl4965_is_fat_tx_allowed(priv, sta_ht_inf)) 4693 4680 sta_flags |= STA_FLG_FAT_EN_MSK; 4694 4681 else 4695 - sta_flags &= (~STA_FLG_FAT_EN_MSK); 4682 + sta_flags &= ~STA_FLG_FAT_EN_MSK; 4696 4683 4697 4684 priv->stations[index].sta.station_flags = sta_flags; 4698 4685 done:
-3
drivers/net/wireless/iwlwifi/iwl-helpers.h
··· 147 147 148 148 #define QOS_CONTROL_LEN 2 149 149 150 - #define IEEE80211_STYPE_BACK_REQ 0x0080 151 - #define IEEE80211_STYPE_BACK 0x0090 152 - 153 150 154 151 static inline int ieee80211_is_management(u16 fc) 155 152 {
+5 -5
drivers/net/wireless/iwlwifi/iwl3945-base.c
··· 6330 6330 return -ENODEV; 6331 6331 } 6332 6332 6333 + if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) { 6334 + IWL_ERROR("ucode not available for device bringup\n"); 6335 + return -EIO; 6336 + } 6337 + 6333 6338 /* If platform's RF_KILL switch is NOT set to KILL */ 6334 6339 if (iwl3945_read32(priv, CSR_GP_CNTRL) & 6335 6340 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) ··· 6345 6340 IWL_WARNING("Radio disabled by HW RF Kill switch\n"); 6346 6341 return -ENODEV; 6347 6342 } 6348 - } 6349 - 6350 - if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) { 6351 - IWL_ERROR("ucode not available for device bringup\n"); 6352 - return -EIO; 6353 6343 } 6354 6344 6355 6345 iwl3945_write32(priv, CSR_INT, 0xFFFFFFFF);
+5 -5
drivers/net/wireless/iwlwifi/iwl4965-base.c
··· 6755 6755 return -ENODEV; 6756 6756 } 6757 6757 6758 + if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) { 6759 + IWL_ERROR("ucode not available for device bringup\n"); 6760 + return -EIO; 6761 + } 6762 + 6758 6763 /* If platform's RF_KILL switch is NOT set to KILL */ 6759 6764 if (iwl4965_read32(priv, CSR_GP_CNTRL) & 6760 6765 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) ··· 6770 6765 IWL_WARNING("Radio disabled by HW RF Kill switch\n"); 6771 6766 return -ENODEV; 6772 6767 } 6773 - } 6774 - 6775 - if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) { 6776 - IWL_ERROR("ucode not available for device bringup\n"); 6777 - return -EIO; 6778 6768 } 6779 6769 6780 6770 iwl4965_write32(priv, CSR_INT, 0xFFFFFFFF);
+1 -1
include/linux/dccp.h
··· 525 525 __u64 dccps_gsr; 526 526 __u64 dccps_gar; 527 527 __be32 dccps_service; 528 + __u32 dccps_mss_cache; 528 529 struct dccp_service_list *dccps_service_list; 529 530 __u32 dccps_timestamp_echo; 530 531 __u32 dccps_timestamp_time; ··· 534 533 __u16 dccps_pcslen; 535 534 __u16 dccps_pcrlen; 536 535 unsigned long dccps_ndp_count; 537 - __u32 dccps_mss_cache; 538 536 unsigned long dccps_rate_last; 539 537 struct dccp_minisock dccps_minisock; 540 538 struct dccp_ackvec *dccps_hc_rx_ackvec;
+6
include/linux/ieee80211.h
··· 287 287 #define IEEE80211_HT_IE_NON_GF_STA_PRSNT 0x0004 288 288 #define IEEE80211_HT_IE_NON_HT_STA_PRSNT 0x0010 289 289 290 + /* MIMO Power Save Modes */ 291 + #define WLAN_HT_CAP_MIMO_PS_STATIC 0 292 + #define WLAN_HT_CAP_MIMO_PS_DYNAMIC 1 293 + #define WLAN_HT_CAP_MIMO_PS_INVALID 2 294 + #define WLAN_HT_CAP_MIMO_PS_DISABLED 3 295 + 290 296 /* Authentication algorithms */ 291 297 #define WLAN_AUTH_OPEN 0 292 298 #define WLAN_AUTH_SHARED_KEY 1
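The new WLAN_HT_CAP_MIMO_PS_* values above pair with the iwl-4965.c change earlier in this listing: the two MIMO power-save bits are pulled out of the HT capability field and switched on explicitly instead of a single dynamic/not-dynamic test. A hedged sketch of that decode, assuming IEEE80211_HT_CAP_MIMO_PS is the two-bit mask at bits 2-3 as the >> 2 shift above implies; the function name is illustrative:

#include <linux/types.h>
#include <linux/ieee80211.h>

/* Returns nonzero if RTS protection is needed before MIMO transmissions
 * to this peer, mirroring the iwl4965 switch above. */
static int ht_cap_needs_rts_mimo_prot(u16 ht_cap)
{
	u8 mimo_ps = (ht_cap & IEEE80211_HT_CAP_MIMO_PS) >> 2;

	switch (mimo_ps) {
	case WLAN_HT_CAP_MIMO_PS_DYNAMIC:
		return 1;			/* peer wants RTS first */
	case WLAN_HT_CAP_MIMO_PS_STATIC:	/* single stream only */
	case WLAN_HT_CAP_MIMO_PS_DISABLED:	/* no restriction */
	default:
		return 0;
	}
}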
+3 -2
include/linux/phy.h
··· 88 88 89 89 /* A lock to ensure that only one thing can read/write 90 90 * the MDIO bus at a time */ 91 - spinlock_t mdio_lock; 91 + struct mutex mdio_lock; 92 92 93 93 struct device *dev; 94 94 ··· 284 284 285 285 /* Interrupt and Polling infrastructure */ 286 286 struct work_struct phy_queue; 287 + struct work_struct state_queue; 287 288 struct timer_list phy_timer; 288 289 atomic_t irq_disable; 289 290 290 - spinlock_t lock; 291 + struct mutex lock; 291 292 292 293 struct net_device *attached_dev; 293 294
+3 -3
include/net/if_inet6.h
··· 112 112 struct ip6_sf_list *mca_sources; 113 113 struct ip6_sf_list *mca_tomb; 114 114 unsigned int mca_sfmode; 115 + unsigned char mca_crcount; 115 116 unsigned long mca_sfcount[2]; 116 117 struct timer_list mca_timer; 117 118 unsigned mca_flags; 118 119 int mca_users; 119 120 atomic_t mca_refcnt; 120 121 spinlock_t mca_lock; 121 - unsigned char mca_crcount; 122 122 unsigned long mca_cstamp; 123 123 unsigned long mca_tstamp; 124 124 }; ··· 166 166 struct ifmcaddr6 *mc_list; 167 167 struct ifmcaddr6 *mc_tomb; 168 168 rwlock_t mc_lock; 169 - unsigned long mc_v1_seen; 170 - unsigned long mc_maxdelay; 171 169 unsigned char mc_qrv; 172 170 unsigned char mc_gq_running; 173 171 unsigned char mc_ifc_count; 172 + unsigned long mc_v1_seen; 173 + unsigned long mc_maxdelay; 174 174 struct timer_list mc_gq_timer; /* general query timer */ 175 175 struct timer_list mc_ifc_timer; /* interface change timer */ 176 176
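Several header hunks in this merge (ifmcaddr6 above, plus dccp_sock, inet6_dev and inet_timewait_sock) only move small members next to existing alignment holes so the structures shrink. A minimal standalone illustration of the effect on a 64-bit target; the sizes are typical LP64 values, not taken from the kernel structs:

#include <stdio.h>

/* 'flag' after the pointer leaves a 7-byte hole before 'count', and
 * 'state' at the end adds 7 bytes of tail padding: typically 32 bytes. */
struct padded {
	void *ptr;
	unsigned char flag;
	unsigned long count;
	unsigned char state;
};

/* Packing both chars into the same hole: typically 24 bytes. */
struct repacked {
	void *ptr;
	unsigned char flag;
	unsigned char state;
	unsigned long count;
};

int main(void)
{
	printf("padded=%zu repacked=%zu\n",
	       sizeof(struct padded), sizeof(struct repacked));
	return 0;
}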
+1 -1
include/net/inet6_hashtables.h
··· 49 49 return inet6_ehashfn(laddr, lport, faddr, fport); 50 50 } 51 51 52 - extern void __inet6_hash(struct inet_hashinfo *hashinfo, struct sock *sk); 52 + extern void __inet6_hash(struct sock *sk); 53 53 54 54 /* 55 55 * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
+3 -5
include/net/inet_connection_sock.h
··· 29 29 #undef INET_CSK_CLEAR_TIMERS 30 30 31 31 struct inet_bind_bucket; 32 - struct inet_hashinfo; 33 32 struct tcp_congestion_ops; 34 33 35 34 /* ··· 58 59 int level, int optname, 59 60 char __user *optval, int __user *optlen); 60 61 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *); 62 + int (*bind_conflict)(const struct sock *sk, 63 + const struct inet_bind_bucket *tb); 61 64 }; 62 65 63 66 /** inet_connection_sock - INET connection oriented sock ··· 245 244 const __be32 laddr); 246 245 extern int inet_csk_bind_conflict(const struct sock *sk, 247 246 const struct inet_bind_bucket *tb); 248 - extern int inet_csk_get_port(struct inet_hashinfo *hashinfo, 249 - struct sock *sk, unsigned short snum, 250 - int (*bind_conflict)(const struct sock *sk, 251 - const struct inet_bind_bucket *tb)); 247 + extern int inet_csk_get_port(struct sock *sk, unsigned short snum); 252 248 253 249 extern struct dst_entry* inet_csk_route_req(struct sock *sk, 254 250 const struct request_sock *req);
+9 -42
include/net/inet_hashtables.h
··· 221 221 } 222 222 223 223 /* Caller must disable local BH processing. */ 224 - static inline void __inet_inherit_port(struct inet_hashinfo *table, 225 - struct sock *sk, struct sock *child) 224 + static inline void __inet_inherit_port(struct sock *sk, struct sock *child) 226 225 { 226 + struct inet_hashinfo *table = sk->sk_prot->hashinfo; 227 227 const int bhash = inet_bhashfn(inet_sk(child)->num, table->bhash_size); 228 228 struct inet_bind_hashbucket *head = &table->bhash[bhash]; 229 229 struct inet_bind_bucket *tb; ··· 235 235 spin_unlock(&head->lock); 236 236 } 237 237 238 - static inline void inet_inherit_port(struct inet_hashinfo *table, 239 - struct sock *sk, struct sock *child) 238 + static inline void inet_inherit_port(struct sock *sk, struct sock *child) 240 239 { 241 240 local_bh_disable(); 242 - __inet_inherit_port(table, sk, child); 241 + __inet_inherit_port(sk, child); 243 242 local_bh_enable(); 244 243 } 245 244 246 - extern void inet_put_port(struct inet_hashinfo *table, struct sock *sk); 245 + extern void inet_put_port(struct sock *sk); 247 246 248 247 extern void inet_listen_wlock(struct inet_hashinfo *hashinfo); 249 248 ··· 265 266 wake_up(&hashinfo->lhash_wait); 266 267 } 267 268 268 - extern void __inet_hash(struct inet_hashinfo *hashinfo, struct sock *sk); 269 - extern void __inet_hash_nolisten(struct inet_hashinfo *hinfo, struct sock *sk); 270 - 271 - static inline void inet_hash(struct inet_hashinfo *hashinfo, struct sock *sk) 272 - { 273 - if (sk->sk_state != TCP_CLOSE) { 274 - local_bh_disable(); 275 - __inet_hash(hashinfo, sk); 276 - local_bh_enable(); 277 - } 278 - } 279 - 280 - static inline void inet_unhash(struct inet_hashinfo *hashinfo, struct sock *sk) 281 - { 282 - rwlock_t *lock; 283 - 284 - if (sk_unhashed(sk)) 285 - goto out; 286 - 287 - if (sk->sk_state == TCP_LISTEN) { 288 - local_bh_disable(); 289 - inet_listen_wlock(hashinfo); 290 - lock = &hashinfo->lhash_lock; 291 - } else { 292 - lock = inet_ehash_lockp(hashinfo, sk->sk_hash); 293 - write_lock_bh(lock); 294 - } 295 - 296 - if (__sk_del_node_init(sk)) 297 - sock_prot_inuse_add(sk->sk_prot, -1); 298 - write_unlock_bh(lock); 299 - out: 300 - if (sk->sk_state == TCP_LISTEN) 301 - wake_up(&hashinfo->lhash_wait); 302 - } 269 + extern void __inet_hash_nolisten(struct sock *sk); 270 + extern void inet_hash(struct sock *sk); 271 + extern void inet_unhash(struct sock *sk); 303 272 304 273 extern struct sock *__inet_lookup_listener(struct net *net, 305 274 struct inet_hashinfo *hashinfo, ··· 392 425 struct sock *sk, 393 426 int (*check_established)(struct inet_timewait_death_row *, 394 427 struct sock *, __u16, struct inet_timewait_sock **), 395 - void (*hash)(struct inet_hashinfo *, struct sock *)); 428 + void (*hash)(struct sock *sk)); 396 429 extern int inet_hash_connect(struct inet_timewait_death_row *death_row, 397 430 struct sock *sk); 398 431 #endif /* _INET_HASHTABLES_H */
+1 -1
include/net/inet_timewait_sock.h
··· 116 116 #define tw_hash __tw_common.skc_hash 117 117 #define tw_prot __tw_common.skc_prot 118 118 #define tw_net __tw_common.skc_net 119 + int tw_timeout; 119 120 volatile unsigned char tw_substate; 120 121 /* 3 bits hole, try to pack */ 121 122 unsigned char tw_rcv_wscale; ··· 131 130 __u8 tw_ipv6only:1; 132 131 /* 15 bits hole, try to pack */ 133 132 __u16 tw_ipv6_offset; 134 - int tw_timeout; 135 133 unsigned long tw_ttd; 136 134 struct inet_bind_bucket *tw_tb; 137 135 struct hlist_node tw_death_node;
+3
include/net/sock.h
··· 496 496 497 497 struct request_sock_ops; 498 498 struct timewait_sock_ops; 499 + struct inet_hashinfo; 499 500 500 501 /* Networking protocol blocks we attach to sockets. 501 502 * socket layer -> transport layer interface ··· 578 577 579 578 struct request_sock_ops *rsk_prot; 580 579 struct timewait_sock_ops *twsk_prot; 580 + 581 + struct inet_hashinfo *hashinfo; 581 582 582 583 struct module *owner; 583 584
-2
net/dccp/dccp.h
··· 271 271 272 272 extern int dccp_connect(struct sock *sk); 273 273 extern int dccp_disconnect(struct sock *sk, int flags); 274 - extern void dccp_hash(struct sock *sk); 275 - extern void dccp_unhash(struct sock *sk); 276 274 extern int dccp_getsockopt(struct sock *sk, int level, int optname, 277 275 char __user *optval, int __user *optlen); 278 276 extern int dccp_setsockopt(struct sock *sk, int level, int optname,
+7 -11
net/dccp/ipv4.c
··· 38 38 */ 39 39 static struct socket *dccp_v4_ctl_socket; 40 40 41 - static int dccp_v4_get_port(struct sock *sk, const unsigned short snum) 42 - { 43 - return inet_csk_get_port(&dccp_hashinfo, sk, snum, 44 - inet_csk_bind_conflict); 45 - } 46 - 47 41 int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 48 42 { 49 43 struct inet_sock *inet = inet_sk(sk); ··· 402 408 403 409 dccp_sync_mss(newsk, dst_mtu(dst)); 404 410 405 - __inet_hash_nolisten(&dccp_hashinfo, newsk); 406 - __inet_inherit_port(&dccp_hashinfo, sk, newsk); 411 + __inet_hash_nolisten(newsk); 412 + __inet_inherit_port(sk, newsk); 407 413 408 414 return newsk; 409 415 ··· 892 898 .getsockopt = ip_getsockopt, 893 899 .addr2sockaddr = inet_csk_addr2sockaddr, 894 900 .sockaddr_len = sizeof(struct sockaddr_in), 901 + .bind_conflict = inet_csk_bind_conflict, 895 902 #ifdef CONFIG_COMPAT 896 903 .compat_setsockopt = compat_ip_setsockopt, 897 904 .compat_getsockopt = compat_ip_getsockopt, ··· 932 937 .sendmsg = dccp_sendmsg, 933 938 .recvmsg = dccp_recvmsg, 934 939 .backlog_rcv = dccp_v4_do_rcv, 935 - .hash = dccp_hash, 936 - .unhash = dccp_unhash, 940 + .hash = inet_hash, 941 + .unhash = inet_unhash, 937 942 .accept = inet_csk_accept, 938 - .get_port = dccp_v4_get_port, 943 + .get_port = inet_csk_get_port, 939 944 .shutdown = dccp_shutdown, 940 945 .destroy = dccp_destroy_sock, 941 946 .orphan_count = &dccp_orphan_count, ··· 943 948 .obj_size = sizeof(struct dccp_sock), 944 949 .rsk_prot = &dccp_request_sock_ops, 945 950 .twsk_prot = &dccp_timewait_sock_ops, 951 + .hashinfo = &dccp_hashinfo, 946 952 #ifdef CONFIG_COMPAT 947 953 .compat_setsockopt = compat_dccp_setsockopt, 948 954 .compat_getsockopt = compat_dccp_getsockopt,
+8 -12
net/dccp/ipv6.c
··· 39 39 static struct inet_connection_sock_af_ops dccp_ipv6_mapped; 40 40 static struct inet_connection_sock_af_ops dccp_ipv6_af_ops; 41 41 42 - static int dccp_v6_get_port(struct sock *sk, unsigned short snum) 43 - { 44 - return inet_csk_get_port(&dccp_hashinfo, sk, snum, 45 - inet6_csk_bind_conflict); 46 - } 47 - 48 42 static void dccp_v6_hash(struct sock *sk) 49 43 { 50 44 if (sk->sk_state != DCCP_CLOSED) { 51 45 if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) { 52 - dccp_hash(sk); 46 + inet_hash(sk); 53 47 return; 54 48 } 55 49 local_bh_disable(); 56 - __inet6_hash(&dccp_hashinfo, sk); 50 + __inet6_hash(sk); 57 51 local_bh_enable(); 58 52 } 59 53 } ··· 624 630 625 631 newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6; 626 632 627 - __inet6_hash(&dccp_hashinfo, newsk); 628 - inet_inherit_port(&dccp_hashinfo, sk, newsk); 633 + __inet6_hash(newsk); 634 + inet_inherit_port(sk, newsk); 629 635 630 636 return newsk; 631 637 ··· 1048 1054 .getsockopt = ipv6_getsockopt, 1049 1055 .addr2sockaddr = inet6_csk_addr2sockaddr, 1050 1056 .sockaddr_len = sizeof(struct sockaddr_in6), 1057 + .bind_conflict = inet6_csk_bind_conflict, 1051 1058 #ifdef CONFIG_COMPAT 1052 1059 .compat_setsockopt = compat_ipv6_setsockopt, 1053 1060 .compat_getsockopt = compat_ipv6_getsockopt, ··· 1118 1123 .recvmsg = dccp_recvmsg, 1119 1124 .backlog_rcv = dccp_v6_do_rcv, 1120 1125 .hash = dccp_v6_hash, 1121 - .unhash = dccp_unhash, 1126 + .unhash = inet_unhash, 1122 1127 .accept = inet_csk_accept, 1123 - .get_port = dccp_v6_get_port, 1128 + .get_port = inet_csk_get_port, 1124 1129 .shutdown = dccp_shutdown, 1125 1130 .destroy = dccp_v6_destroy_sock, 1126 1131 .orphan_count = &dccp_orphan_count, ··· 1128 1133 .obj_size = sizeof(struct dccp6_sock), 1129 1134 .rsk_prot = &dccp6_request_sock_ops, 1130 1135 .twsk_prot = &dccp6_timewait_sock_ops, 1136 + .hashinfo = &dccp_hashinfo, 1131 1137 #ifdef CONFIG_COMPAT 1132 1138 .compat_setsockopt = compat_dccp_setsockopt, 1133 1139 .compat_getsockopt = compat_dccp_getsockopt,
+2 -16
net/dccp/proto.c
··· 78 78 sk->sk_prot->unhash(sk); 79 79 if (inet_csk(sk)->icsk_bind_hash != NULL && 80 80 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) 81 - inet_put_port(&dccp_hashinfo, sk); 81 + inet_put_port(sk); 82 82 /* fall through */ 83 83 default: 84 84 if (oldstate == DCCP_OPEN) ··· 173 173 174 174 EXPORT_SYMBOL_GPL(dccp_state_name); 175 175 176 - void dccp_hash(struct sock *sk) 177 - { 178 - inet_hash(&dccp_hashinfo, sk); 179 - } 180 - 181 - EXPORT_SYMBOL_GPL(dccp_hash); 182 - 183 - void dccp_unhash(struct sock *sk) 184 - { 185 - inet_unhash(&dccp_hashinfo, sk); 186 - } 187 - 188 - EXPORT_SYMBOL_GPL(dccp_unhash); 189 - 190 176 int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized) 191 177 { 192 178 struct dccp_sock *dp = dccp_sk(sk); ··· 254 268 255 269 /* Clean up a referenced DCCP bind bucket. */ 256 270 if (inet_csk(sk)->icsk_bind_hash != NULL) 257 - inet_put_port(&dccp_hashinfo, sk); 271 + inet_put_port(sk); 258 272 259 273 kfree(dp->dccps_service_list); 260 274 dp->dccps_service_list = NULL;
+3 -5
net/ipv4/inet_connection_sock.c
··· 78 78 /* Obtain a reference to a local port for the given sock, 79 79 * if snum is zero it means select any available local port. 80 80 */ 81 - int inet_csk_get_port(struct inet_hashinfo *hashinfo, 82 - struct sock *sk, unsigned short snum, 83 - int (*bind_conflict)(const struct sock *sk, 84 - const struct inet_bind_bucket *tb)) 81 + int inet_csk_get_port(struct sock *sk, unsigned short snum) 85 82 { 83 + struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo; 86 84 struct inet_bind_hashbucket *head; 87 85 struct hlist_node *node; 88 86 struct inet_bind_bucket *tb; ··· 140 142 goto success; 141 143 } else { 142 144 ret = 1; 143 - if (bind_conflict(sk, tb)) 145 + if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) 144 146 goto fail_unlock; 145 147 } 146 148 }
+48 -10
net/ipv4/inet_hashtables.c
··· 66 66 /* 67 67 * Get rid of any references to a local port held by the given sock. 68 68 */ 69 - static void __inet_put_port(struct inet_hashinfo *hashinfo, struct sock *sk) 69 + static void __inet_put_port(struct sock *sk) 70 70 { 71 + struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo; 71 72 const int bhash = inet_bhashfn(inet_sk(sk)->num, hashinfo->bhash_size); 72 73 struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash]; 73 74 struct inet_bind_bucket *tb; ··· 82 81 spin_unlock(&head->lock); 83 82 } 84 83 85 - void inet_put_port(struct inet_hashinfo *hashinfo, struct sock *sk) 84 + void inet_put_port(struct sock *sk) 86 85 { 87 86 local_bh_disable(); 88 - __inet_put_port(hashinfo, sk); 87 + __inet_put_port(sk); 89 88 local_bh_enable(); 90 89 } 91 90 ··· 318 317 inet->dport); 319 318 } 320 319 321 - void __inet_hash_nolisten(struct inet_hashinfo *hashinfo, struct sock *sk) 320 + void __inet_hash_nolisten(struct sock *sk) 322 321 { 322 + struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo; 323 323 struct hlist_head *list; 324 324 rwlock_t *lock; 325 325 struct inet_ehash_bucket *head; ··· 339 337 } 340 338 EXPORT_SYMBOL_GPL(__inet_hash_nolisten); 341 339 342 - void __inet_hash(struct inet_hashinfo *hashinfo, struct sock *sk) 340 + static void __inet_hash(struct sock *sk) 343 341 { 342 + struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo; 344 343 struct hlist_head *list; 345 344 rwlock_t *lock; 346 345 347 346 if (sk->sk_state != TCP_LISTEN) { 348 - __inet_hash_nolisten(hashinfo, sk); 347 + __inet_hash_nolisten(sk); 349 348 return; 350 349 } 351 350 ··· 360 357 write_unlock(lock); 361 358 wake_up(&hashinfo->lhash_wait); 362 359 } 363 - EXPORT_SYMBOL_GPL(__inet_hash); 360 + 361 + void inet_hash(struct sock *sk) 362 + { 363 + if (sk->sk_state != TCP_CLOSE) { 364 + local_bh_disable(); 365 + __inet_hash(sk); 366 + local_bh_enable(); 367 + } 368 + } 369 + EXPORT_SYMBOL_GPL(inet_hash); 370 + 371 + void inet_unhash(struct sock *sk) 372 + { 373 + rwlock_t *lock; 374 + struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo; 375 + 376 + if (sk_unhashed(sk)) 377 + goto out; 378 + 379 + if (sk->sk_state == TCP_LISTEN) { 380 + local_bh_disable(); 381 + inet_listen_wlock(hashinfo); 382 + lock = &hashinfo->lhash_lock; 383 + } else { 384 + lock = inet_ehash_lockp(hashinfo, sk->sk_hash); 385 + write_lock_bh(lock); 386 + } 387 + 388 + if (__sk_del_node_init(sk)) 389 + sock_prot_inuse_add(sk->sk_prot, -1); 390 + write_unlock_bh(lock); 391 + out: 392 + if (sk->sk_state == TCP_LISTEN) 393 + wake_up(&hashinfo->lhash_wait); 394 + } 395 + EXPORT_SYMBOL_GPL(inet_unhash); 364 396 365 397 int __inet_hash_connect(struct inet_timewait_death_row *death_row, 366 398 struct sock *sk, 367 399 int (*check_established)(struct inet_timewait_death_row *, 368 400 struct sock *, __u16, struct inet_timewait_sock **), 369 - void (*hash)(struct inet_hashinfo *, struct sock *)) 401 + void (*hash)(struct sock *sk)) 370 402 { 371 403 struct inet_hashinfo *hinfo = death_row->hashinfo; 372 404 const unsigned short snum = inet_sk(sk)->num; ··· 465 427 inet_bind_hash(sk, tb, port); 466 428 if (sk_unhashed(sk)) { 467 429 inet_sk(sk)->sport = htons(port); 468 - hash(hinfo, sk); 430 + hash(sk); 469 431 } 470 432 spin_unlock(&head->lock); 471 433 ··· 482 444 tb = inet_csk(sk)->icsk_bind_hash; 483 445 spin_lock_bh(&head->lock); 484 446 if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { 485 - hash(hinfo, sk); 447 + hash(sk); 486 448 spin_unlock_bh(&head->lock); 487 449 return 0; 488 450 } else {
+1 -1
net/ipv4/tcp.c
··· 1669 1669 sk->sk_prot->unhash(sk); 1670 1670 if (inet_csk(sk)->icsk_bind_hash && 1671 1671 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) 1672 - inet_put_port(&tcp_hashinfo, sk); 1672 + inet_put_port(sk); 1673 1673 /* fall through */ 1674 1674 default: 1675 1675 if (oldstate==TCP_ESTABLISHED)
+8 -23
net/ipv4/tcp_ipv4.c
··· 108 108 .lhash_wait = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait), 109 109 }; 110 110 111 - static int tcp_v4_get_port(struct sock *sk, unsigned short snum) 112 - { 113 - return inet_csk_get_port(&tcp_hashinfo, sk, snum, 114 - inet_csk_bind_conflict); 115 - } 116 - 117 - static void tcp_v4_hash(struct sock *sk) 118 - { 119 - inet_hash(&tcp_hashinfo, sk); 120 - } 121 - 122 - void tcp_unhash(struct sock *sk) 123 - { 124 - inet_unhash(&tcp_hashinfo, sk); 125 - } 126 - 127 111 static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb) 128 112 { 129 113 return secure_tcp_sequence_number(ip_hdr(skb)->daddr, ··· 1462 1478 } 1463 1479 #endif 1464 1480 1465 - __inet_hash_nolisten(&tcp_hashinfo, newsk); 1466 - __inet_inherit_port(&tcp_hashinfo, sk, newsk); 1481 + __inet_hash_nolisten(newsk); 1482 + __inet_inherit_port(sk, newsk); 1467 1483 1468 1484 return newsk; 1469 1485 ··· 1811 1827 .getsockopt = ip_getsockopt, 1812 1828 .addr2sockaddr = inet_csk_addr2sockaddr, 1813 1829 .sockaddr_len = sizeof(struct sockaddr_in), 1830 + .bind_conflict = inet_csk_bind_conflict, 1814 1831 #ifdef CONFIG_COMPAT 1815 1832 .compat_setsockopt = compat_ip_setsockopt, 1816 1833 .compat_getsockopt = compat_ip_getsockopt, ··· 1911 1926 1912 1927 /* Clean up a referenced TCP bind bucket. */ 1913 1928 if (inet_csk(sk)->icsk_bind_hash) 1914 - inet_put_port(&tcp_hashinfo, sk); 1929 + inet_put_port(sk); 1915 1930 1916 1931 /* 1917 1932 * If sendmsg cached page exists, toss it. ··· 2420 2435 .getsockopt = tcp_getsockopt, 2421 2436 .recvmsg = tcp_recvmsg, 2422 2437 .backlog_rcv = tcp_v4_do_rcv, 2423 - .hash = tcp_v4_hash, 2424 - .unhash = tcp_unhash, 2425 - .get_port = tcp_v4_get_port, 2438 + .hash = inet_hash, 2439 + .unhash = inet_unhash, 2440 + .get_port = inet_csk_get_port, 2426 2441 .enter_memory_pressure = tcp_enter_memory_pressure, 2427 2442 .sockets_allocated = &tcp_sockets_allocated, 2428 2443 .orphan_count = &tcp_orphan_count, ··· 2435 2450 .obj_size = sizeof(struct tcp_sock), 2436 2451 .twsk_prot = &tcp_timewait_sock_ops, 2437 2452 .rsk_prot = &tcp_request_sock_ops, 2453 + .hashinfo = &tcp_hashinfo, 2438 2454 #ifdef CONFIG_COMPAT 2439 2455 .compat_setsockopt = compat_tcp_setsockopt, 2440 2456 .compat_getsockopt = compat_tcp_getsockopt, ··· 2453 2467 EXPORT_SYMBOL(ipv4_specific); 2454 2468 EXPORT_SYMBOL(tcp_hashinfo); 2455 2469 EXPORT_SYMBOL(tcp_prot); 2456 - EXPORT_SYMBOL(tcp_unhash); 2457 2470 EXPORT_SYMBOL(tcp_v4_conn_request); 2458 2471 EXPORT_SYMBOL(tcp_v4_connect); 2459 2472 EXPORT_SYMBOL(tcp_v4_do_rcv);
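Besides pointing tcp_prot at the generic inet_hash/inet_unhash/inet_csk_get_port and publishing .hashinfo = &tcp_hashinfo, this hunk adds .bind_conflict = inet_csk_bind_conflict to the IPv4 af_ops; the tcp_ipv6.c hunk below does the same with inet6_csk_bind_conflict, which is what allows a single inet_csk_get_port() to serve both families instead of per-family get_port wrappers. A self-contained sketch of that dispatch, with hypothetical names:

	#include <stdbool.h>
	#include <stdio.h>

	struct sock;

	struct af_ops {
		const char *family;
		bool (*bind_conflict)(const struct sock *sk, int port);
	};

	struct sock {
		const struct af_ops *af_ops;	/* analogous to icsk->icsk_af_ops */
	};

	static bool v4_conflict(const struct sock *sk, int port)
	{
		(void)sk;
		return port == 80;	/* pretend port 80 is already bound */
	}

	static bool v6_conflict(const struct sock *sk, int port)
	{
		(void)sk; (void)port;
		return false;		/* no conflicts in this toy table */
	}

	/* One shared get_port-style helper: the family decides what "conflict" means. */
	static int generic_get_port(struct sock *sk, int port)
	{
		if (sk->af_ops->bind_conflict(sk, port)) {
			printf("%s: port %d in use\n", sk->af_ops->family, port);
			return -1;
		}
		printf("%s: bound port %d\n", sk->af_ops->family, port);
		return 0;
	}

	int main(void)
	{
		static const struct af_ops v4 = { "ipv4", v4_conflict };
		static const struct af_ops v6 = { "ipv6", v6_conflict };
		struct sock s4 = { &v4 }, s6 = { &v6 };

		generic_get_port(&s4, 80);	/* refused by the IPv4 policy */
		generic_get_port(&s6, 80);	/* accepted */
		return 0;
	}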
+2 -2
net/ipv6/inet6_hashtables.c
··· 22 22 #include <net/inet6_hashtables.h> 23 23 #include <net/ip.h> 24 24 25 - void __inet6_hash(struct inet_hashinfo *hashinfo, 26 - struct sock *sk) 25 + void __inet6_hash(struct sock *sk) 27 26 { 27 + struct inet_hashinfo *hashinfo = sk->sk_prot->hashinfo; 28 28 struct hlist_head *list; 29 29 rwlock_t *lock; 30 30
+8 -11
net/ipv6/tcp_ipv6.c
··· 86 86 static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific; 87 87 #endif 88 88 89 - static int tcp_v6_get_port(struct sock *sk, unsigned short snum) 90 - { 91 - return inet_csk_get_port(&tcp_hashinfo, sk, snum, 92 - inet6_csk_bind_conflict); 93 - } 94 - 95 89 static void tcp_v6_hash(struct sock *sk) 96 90 { 97 91 if (sk->sk_state != TCP_CLOSE) { ··· 94 100 return; 95 101 } 96 102 local_bh_disable(); 97 - __inet6_hash(&tcp_hashinfo, sk); 103 + __inet6_hash(sk); 98 104 local_bh_enable(); 99 105 } 100 106 } ··· 1498 1504 } 1499 1505 #endif 1500 1506 1501 - __inet6_hash(&tcp_hashinfo, newsk); 1502 - inet_inherit_port(&tcp_hashinfo, sk, newsk); 1507 + __inet6_hash(newsk); 1508 + inet_inherit_port(sk, newsk); 1503 1509 1504 1510 return newsk; 1505 1511 ··· 1827 1833 .getsockopt = ipv6_getsockopt, 1828 1834 .addr2sockaddr = inet6_csk_addr2sockaddr, 1829 1835 .sockaddr_len = sizeof(struct sockaddr_in6), 1836 + .bind_conflict = inet6_csk_bind_conflict, 1830 1837 #ifdef CONFIG_COMPAT 1831 1838 .compat_setsockopt = compat_ipv6_setsockopt, 1832 1839 .compat_getsockopt = compat_ipv6_getsockopt, ··· 1859 1864 .getsockopt = ipv6_getsockopt, 1860 1865 .addr2sockaddr = inet6_csk_addr2sockaddr, 1861 1866 .sockaddr_len = sizeof(struct sockaddr_in6), 1867 + .bind_conflict = inet6_csk_bind_conflict, 1862 1868 #ifdef CONFIG_COMPAT 1863 1869 .compat_setsockopt = compat_ipv6_setsockopt, 1864 1870 .compat_getsockopt = compat_ipv6_getsockopt, ··· 2123 2127 .recvmsg = tcp_recvmsg, 2124 2128 .backlog_rcv = tcp_v6_do_rcv, 2125 2129 .hash = tcp_v6_hash, 2126 - .unhash = tcp_unhash, 2127 - .get_port = tcp_v6_get_port, 2130 + .unhash = inet_unhash, 2131 + .get_port = inet_csk_get_port, 2128 2132 .enter_memory_pressure = tcp_enter_memory_pressure, 2129 2133 .sockets_allocated = &tcp_sockets_allocated, 2130 2134 .memory_allocated = &tcp_memory_allocated, ··· 2137 2141 .obj_size = sizeof(struct tcp6_sock), 2138 2142 .twsk_prot = &tcp6_timewait_sock_ops, 2139 2143 .rsk_prot = &tcp6_request_sock_ops, 2144 + .hashinfo = &tcp_hashinfo, 2140 2145 #ifdef CONFIG_COMPAT 2141 2146 .compat_setsockopt = compat_tcp_setsockopt, 2142 2147 .compat_getsockopt = compat_tcp_getsockopt,
+12
net/mac80211/Kconfig
··· 98 98 99 99 Say N unless you know you need this. 100 100 101 + config MAC80211_DEBUG_PACKET_ALIGNMENT 102 + bool "Enable packet alignment debugging" 103 + depends on MAC80211 104 + help 105 + This option is recommended for driver authors and strongly 106 + discouraged for everybody else, it will trigger a warning 107 + when a driver hands mac80211 a buffer that is aligned in 108 + a way that will cause problems with the IP stack on some 109 + architectures. 110 + 111 + Say N unless you're writing a mac80211 based driver. 112 + 101 113 config MAC80211_DEBUG 102 114 bool "Enable debugging output" 103 115 depends on MAC80211
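The new MAC80211_DEBUG_PACKET_ALIGNMENT symbol only gates the existing RX alignment check, which the net/mac80211/rx.c hunk further below wraps in #ifdef. As background (not part of this commit): an enabled bool Kconfig option becomes #define CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT 1 in the generated autoconf.h and is simply absent otherwise, so the check compiles away entirely for builds that answer N. A standalone sketch, compile with -DCONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT to enable the branch:

	#include <stdint.h>
	#include <stdio.h>

	static void rx_one_frame(const unsigned char *payload)
	{
	#ifdef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT
		/* stand-in for ieee80211_rx_h_verify_ip_alignment(); the real
		 * handler checks the payload offset the IP stack expects, the
		 * 4-byte test here is only for illustration */
		if ((uintptr_t)payload & 3)
			fprintf(stderr, "warning: payload %p is misaligned\n",
				(const void *)payload);
	#endif
		(void)payload;	/* normal RX processing would continue here */
	}

	int main(void)
	{
		unsigned char buf[8];

		rx_one_frame(buf + 1);	/* deliberately misaligned */
		return 0;
	}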
+7 -7
net/mac80211/ieee80211.c
··· 1344 1344 1345 1345 ret = rc80211_simple_init(); 1346 1346 if (ret) 1347 - goto fail; 1347 + goto out; 1348 1348 1349 1349 ret = rc80211_pid_init(); 1350 1350 if (ret) 1351 - goto fail_simple; 1351 + goto out_cleanup_simple; 1352 1352 1353 1353 ret = ieee80211_wme_register(); 1354 1354 if (ret) { 1355 1355 printk(KERN_DEBUG "ieee80211_init: failed to " 1356 1356 "initialize WME (err=%d)\n", ret); 1357 - goto fail_pid; 1357 + goto out_cleanup_pid; 1358 1358 } 1359 1359 1360 1360 ieee80211_debugfs_netdev_init(); ··· 1362 1362 1363 1363 return 0; 1364 1364 1365 - fail_pid: 1366 - rc80211_simple_exit(); 1367 - fail_simple: 1365 + out_cleanup_pid: 1368 1366 rc80211_pid_exit(); 1369 - fail: 1367 + out_cleanup_simple: 1368 + rc80211_simple_exit(); 1369 + out: 1370 1370 return ret; 1371 1371 } 1372 1372
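The relabelling in ieee80211_init() is more than cosmetic: in the old error path a failing rc80211_pid_init() jumped to fail_simple, which ran rc80211_pid_exit() for an initializer that had never succeeded while leaving rc80211_simple_init() undone, and a failing ieee80211_wme_register() unwound in init order rather than reverse order. The out_cleanup_* labels now tear things down strictly in reverse order of initialization. A self-contained sketch of the idiom, with illustrative step names:

	#include <stdio.h>

	static int step_a_init(void) { puts("A init"); return 0; }
	static void step_a_exit(void) { puts("A exit"); }
	static int step_b_init(void) { puts("B init"); return 0; }
	static void step_b_exit(void) { puts("B exit"); }
	static int step_c_init(void) { puts("C init"); return -1; }	/* simulate failure */

	static int subsystem_init(void)
	{
		int ret;

		ret = step_a_init();
		if (ret)
			goto out;		/* nothing to undo yet */

		ret = step_b_init();
		if (ret)
			goto out_cleanup_a;	/* only A completed */

		ret = step_c_init();
		if (ret)
			goto out_cleanup_b;	/* A and B completed */

		return 0;

	out_cleanup_b:
		step_b_exit();
	out_cleanup_a:
		step_a_exit();
	out:
		return ret;
	}

	int main(void)
	{
		printf("init returned %d\n", subsystem_init());
		return 0;
	}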
+1 -1
net/mac80211/rc80211_pid_algo.c
··· 538 538 return ieee80211_rate_control_register(&mac80211_rcpid); 539 539 } 540 540 541 - void __exit rc80211_pid_exit(void) 541 + void rc80211_pid_exit(void) 542 542 { 543 543 ieee80211_rate_control_unregister(&mac80211_rcpid); 544 544 }
+1 -1
net/mac80211/rc80211_simple.c
··· 389 389 return ieee80211_rate_control_register(&mac80211_rcsimple); 390 390 } 391 391 392 - void __exit rc80211_simple_exit(void) 392 + void rc80211_simple_exit(void) 393 393 { 394 394 ieee80211_rate_control_unregister(&mac80211_rcsimple); 395 395 }
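Dropping __exit from rc80211_pid_exit() and rc80211_simple_exit() is what makes them safe to call from the reworked error path in ieee80211_init() above: __exit places a function in .exit.text, a section that may be discarded when mac80211 is built into the kernel, so a reference from __init code would be a section mismatch. A sketch of the constraint, with the kernel annotations stubbed out so it compiles on its own:

	#include <stdio.h>

	#define __init		/* kernel: placed in .init.text */
	#define __exit		/* kernel: placed in .exit.text, may be discarded */

	/* If this kept __exit, a built-in object could discard its section even
	 * though the init error path below still calls it; hence the annotation
	 * is removed for functions that double as init-failure cleanup. */
	void example_exit(void)
	{
		puts("cleanup");
	}

	int __init example_init(void)
	{
		int failed = 1;		/* pretend a later step failed */

		if (failed) {
			example_exit();	/* only legal because example_exit() is not __exit */
			return -1;
		}
		return 0;
	}

	int main(void)
	{
		return example_init() ? 1 : 0;
	}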
+7
net/mac80211/rx.c
··· 340 340 return load; 341 341 } 342 342 343 + #ifdef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT 343 344 static ieee80211_txrx_result 344 345 ieee80211_rx_h_verify_ip_alignment(struct ieee80211_txrx_data *rx) 345 346 { 346 347 int hdrlen; 348 + 349 + if (!WLAN_FC_DATA_PRESENT(rx->fc)) 350 + return TXRX_CONTINUE; 347 351 348 352 /* 349 353 * Drivers are required to align the payload data in a way that ··· 375 371 376 372 return TXRX_CONTINUE; 377 373 } 374 + #endif 378 375 379 376 ieee80211_rx_handler ieee80211_rx_pre_handlers[] = 380 377 { 381 378 ieee80211_rx_h_parse_qos, 379 + #ifdef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT 382 380 ieee80211_rx_h_verify_ip_alignment, 381 + #endif 383 382 NULL 384 383 }; 385 384
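Two independent changes in the rx.c hunk: the alignment handler now returns early for frames that carry no data payload (the WLAN_FC_DATA_PRESENT check), and both the handler and its slot in the NULL-terminated ieee80211_rx_pre_handlers[] array are compiled only when the new Kconfig option is set. A standalone sketch of that conditionally populated handler array, with illustrative names:

	#include <stdio.h>

	typedef int (*rx_handler)(int frame);

	static int h_parse(int frame)
	{
		printf("parse frame %d\n", frame);
		return 0;
	}

	#ifdef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT
	static int h_verify_alignment(int frame)
	{
		printf("verify alignment of frame %d\n", frame);
		return 0;
	}
	#endif

	/* NULL-terminated, like ieee80211_rx_pre_handlers[]: the debug entry
	 * simply disappears from the table when the option is off. */
	static rx_handler pre_handlers[] = {
		h_parse,
	#ifdef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT
		h_verify_alignment,
	#endif
		NULL,
	};

	int main(void)
	{
		for (rx_handler *h = pre_handlers; *h; h++)
			(*h)(42);
		return 0;
	}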