Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (73 commits)
net: Fix typo in net/core/sock.c.
ppp: Do not free not yet unregistered net device.
netfilter: xt_iprange: module aliases for xt_iprange
netfilter: ctnetlink: dump conntrack ID in event messages
irda: Fix a misalign access issue. (v2)
sctp: Fix use of uninitialized pointer
cipso: Relax too much careful cipso hash function.
tcp FRTO: work-around inorder receivers
tcp FRTO: Fix fallback to conventional recovery
New maintainer for Intel ethernet adapters
DM9000: Use delayed work to update MII PHY state
DM9000: Update and fix driver debugging messages
DM9000: Add __devinit and __devexit attributes to probe and remove
sky2: fix simple define thinko
[netdrvr] sfc: Add self-test support
[netdrvr] sfc: Increment rx_reset when reported as driver event
[netdrvr] sfc: Remove unused macro EFX_XAUI_RETRAIN_MAX
[netdrvr] sfc: Fix code formatting
[netdrvr] sfc: Remove kernel-doc comments for removed members of struct efx_nic
[netdrvr] sfc: Remove garbage from comment
...

+3378 -734
+2
drivers/base/memory.c
··· 53 { 54 return blocking_notifier_chain_register(&memory_chain, nb); 55 } 56 57 void unregister_memory_notifier(struct notifier_block *nb) 58 { 59 blocking_notifier_chain_unregister(&memory_chain, nb); 60 } 61 62 /* 63 * register_memory - Setup a sysfs device for a memory block
··· 53 { 54 return blocking_notifier_chain_register(&memory_chain, nb); 55 } 56 + EXPORT_SYMBOL(register_memory_notifier); 57 58 void unregister_memory_notifier(struct notifier_block *nb) 59 { 60 blocking_notifier_chain_unregister(&memory_chain, nb); 61 } 62 + EXPORT_SYMBOL(unregister_memory_notifier); 63 64 /* 65 * register_memory - Setup a sysfs device for a memory block
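The two EXPORT_SYMBOL lines above are what allow a module to hook into memory hotplug events. A minimal sketch of a consumer, assuming a driver-module context (the example_* names are illustrative, not from the patch); it mirrors the ehea_mem_notifier hunk added to drivers/net/ehea/ehea_main.c later in this merge:

#include <linux/kernel.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/notifier.h>

static int example_mem_notifier(struct notifier_block *nb,
				unsigned long action, void *data)
{
	/* react to sections going offline, e.g. re-register DMA mappings */
	if (action == MEM_OFFLINE)
		printk(KERN_INFO "memory section went offline\n");
	return NOTIFY_OK;
}

static struct notifier_block example_mem_nb = {
	.notifier_call = example_mem_notifier,
};

static int __init example_init(void)
{
	/* possible from module code thanks to the exports in this hunk */
	return register_memory_notifier(&example_mem_nb);
}

static void __exit example_exit(void)
{
	unregister_memory_notifier(&example_mem_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");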
+1 -1
drivers/net/Kconfig
··· 2426 2427 config EHEA 2428 tristate "eHEA Ethernet support" 2429 - depends on IBMEBUS && INET && SPARSEMEM 2430 select INET_LRO 2431 ---help--- 2432 This driver supports the IBM pSeries eHEA ethernet adapter.
··· 2426 2427 config EHEA 2428 tristate "eHEA Ethernet support" 2429 + depends on IBMEBUS && INET && SPARSEMEM && MEMORY_HOTPLUG 2430 select INET_LRO 2431 ---help--- 2432 This driver supports the IBM pSeries eHEA ethernet adapter.
+97 -58
drivers/net/atlx/atl1.c
··· 1 /* 2 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. 3 * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com> 4 - * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com> 5 * 6 * Derived from Intel e1000 driver 7 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. ··· 36 * A very incomplete list of things that need to be dealt with: 37 * 38 * TODO: 39 - * Wake on LAN. 40 * Add more ethtool functions. 41 * Fix abstruse irq enable/disable condition described here: 42 * http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2 ··· 637 } 638 639 /* 640 - *TODO: do something or get rid of this 641 */ 642 #ifdef CONFIG_PM 643 - static s32 atl1_phy_enter_power_saving(struct atl1_hw *hw) 644 { 645 - /* s32 ret_val; 646 - * u16 phy_data; 647 - */ 648 649 - /* 650 - ret_val = atl1_write_phy_reg(hw, ...); 651 - ret_val = atl1_write_phy_reg(hw, ...); 652 - .... 653 - */ 654 - return 0; 655 } 656 #endif 657 ··· 2780 struct atl1_hw *hw = &adapter->hw; 2781 u32 ctrl = 0; 2782 u32 wufc = adapter->wol; 2783 2784 netif_device_detach(netdev); 2785 if (netif_running(netdev)) 2786 atl1_down(adapter); 2787 2788 atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); 2789 atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); 2790 - if (ctrl & BMSR_LSTATUS) 2791 wufc &= ~ATLX_WUFC_LNKC; 2792 2793 - /* reduce speed to 10/100M */ 2794 - if (wufc) { 2795 - atl1_phy_enter_power_saving(hw); 2796 - /* if resume, let driver to re- setup link */ 2797 - hw->phy_configured = false; 2798 - atl1_set_mac_addr(hw); 2799 - atlx_set_multi(netdev); 2800 2801 ctrl = 0; 2802 - /* turn on magic packet wol */ 2803 if (wufc & ATLX_WUFC_MAG) 2804 - ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN; 2805 - 2806 - /* turn on Link change WOL */ 2807 - if (wufc & ATLX_WUFC_LNKC) 2808 - ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN); 2809 iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); 2810 2811 - /* turn on all-multi mode if wake on multicast is enabled */ 2812 - ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL); 2813 - ctrl &= ~MAC_CTRL_DBG; 2814 - ctrl &= ~MAC_CTRL_PROMIS_EN; 2815 - if (wufc & ATLX_WUFC_MC) 2816 - ctrl |= MAC_CTRL_MC_ALL_EN; 2817 - else 2818 - ctrl &= ~MAC_CTRL_MC_ALL_EN; 2819 - 2820 - /* turn on broadcast mode if wake on-BC is enabled */ 2821 - if (wufc & ATLX_WUFC_BC) 2822 ctrl |= MAC_CTRL_BC_EN; 2823 - else 2824 - ctrl &= ~MAC_CTRL_BC_EN; 2825 - 2826 - /* enable RX */ 2827 - ctrl |= MAC_CTRL_RX_EN; 2828 iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL); 2829 - pci_enable_wake(pdev, PCI_D3hot, 1); 2830 - pci_enable_wake(pdev, PCI_D3cold, 1); 2831 - } else { 2832 - iowrite32(0, hw->hw_addr + REG_WOL_CTRL); 2833 - pci_enable_wake(pdev, PCI_D3hot, 0); 2834 - pci_enable_wake(pdev, PCI_D3cold, 0); 2835 } 2836 2837 - pci_save_state(pdev); 2838 - pci_disable_device(pdev); 2839 2840 - pci_set_power_state(pdev, PCI_D3hot); 2841 2842 return 0; 2843 } ··· 2880 pci_set_power_state(pdev, PCI_D0); 2881 pci_restore_state(pdev); 2882 2883 - /* FIXME: check and handle */ 2884 err = pci_enable_device(pdev); 2885 pci_enable_wake(pdev, PCI_D3hot, 0); 2886 pci_enable_wake(pdev, PCI_D3cold, 0); 2887 2888 - iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL); 2889 - atl1_reset(adapter); 2890 2891 if (netif_running(netdev)) 2892 atl1_up(adapter); 2893 netif_device_attach(netdev); 2894 - 2895 - atl1_via_workaround(adapter); 2896 2897 return 0; 2898 } ··· 2906 #define atl1_suspend NULL 2907 #define atl1_resume NULL 2908 #endif 2909 2910 #ifdef CONFIG_NET_POLL_CONTROLLER 2911 static void atl1_poll_controller(struct net_device 
*netdev) ··· 3160 .probe = atl1_probe, 3161 .remove = __devexit_p(atl1_remove), 3162 .suspend = atl1_suspend, 3163 - .resume = atl1_resume 3164 }; 3165 3166 /*
··· 1 /* 2 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. 3 * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com> 4 + * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com> 5 * 6 * Derived from Intel e1000 driver 7 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. ··· 36 * A very incomplete list of things that need to be dealt with: 37 * 38 * TODO: 39 * Add more ethtool functions. 40 * Fix abstruse irq enable/disable condition described here: 41 * http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2 ··· 638 } 639 640 /* 641 + * Force the PHY into power saving mode using vendor magic. 642 */ 643 #ifdef CONFIG_PM 644 + static void atl1_phy_enter_power_saving(struct atl1_hw *hw) 645 { 646 + atl1_write_phy_reg(hw, MII_DBG_ADDR, 0); 647 + atl1_write_phy_reg(hw, MII_DBG_DATA, 0x124E); 648 + atl1_write_phy_reg(hw, MII_DBG_ADDR, 2); 649 + atl1_write_phy_reg(hw, MII_DBG_DATA, 0x3000); 650 + atl1_write_phy_reg(hw, MII_DBG_ADDR, 3); 651 + atl1_write_phy_reg(hw, MII_DBG_DATA, 0); 652 653 } 654 #endif 655 ··· 2784 struct atl1_hw *hw = &adapter->hw; 2785 u32 ctrl = 0; 2786 u32 wufc = adapter->wol; 2787 + u32 val; 2788 + int retval; 2789 + u16 speed; 2790 + u16 duplex; 2791 2792 netif_device_detach(netdev); 2793 if (netif_running(netdev)) 2794 atl1_down(adapter); 2795 2796 + retval = pci_save_state(pdev); 2797 + if (retval) 2798 + return retval; 2799 + 2800 atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); 2801 atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); 2802 + val = ctrl & BMSR_LSTATUS; 2803 + if (val) 2804 wufc &= ~ATLX_WUFC_LNKC; 2805 2806 + if (val && wufc) { 2807 + val = atl1_get_speed_and_duplex(hw, &speed, &duplex); 2808 + if (val) { 2809 + if (netif_msg_ifdown(adapter)) 2810 + dev_printk(KERN_DEBUG, &pdev->dev, 2811 + "error getting speed/duplex\n"); 2812 + goto disable_wol; 2813 + } 2814 2815 ctrl = 0; 2816 + 2817 + /* enable magic packet WOL */ 2818 if (wufc & ATLX_WUFC_MAG) 2819 + ctrl |= (WOL_MAGIC_EN | WOL_MAGIC_PME_EN); 2820 iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); 2821 + ioread32(hw->hw_addr + REG_WOL_CTRL); 2822 2823 + /* configure the mac */ 2824 + ctrl = MAC_CTRL_RX_EN; 2825 + ctrl |= ((u32)((speed == SPEED_1000) ? 
MAC_CTRL_SPEED_1000 : 2826 + MAC_CTRL_SPEED_10_100) << MAC_CTRL_SPEED_SHIFT); 2827 + if (duplex == FULL_DUPLEX) 2828 + ctrl |= MAC_CTRL_DUPLX; 2829 + ctrl |= (((u32)adapter->hw.preamble_len & 2830 + MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT); 2831 + if (adapter->vlgrp) 2832 + ctrl |= MAC_CTRL_RMV_VLAN; 2833 + if (wufc & ATLX_WUFC_MAG) 2834 ctrl |= MAC_CTRL_BC_EN; 2835 iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL); 2836 + ioread32(hw->hw_addr + REG_MAC_CTRL); 2837 + 2838 + /* poke the PHY */ 2839 + ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC); 2840 + ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; 2841 + iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC); 2842 + ioread32(hw->hw_addr + REG_PCIE_PHYMISC); 2843 + 2844 + pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); 2845 + goto exit; 2846 } 2847 2848 + if (!val && wufc) { 2849 + ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN); 2850 + iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); 2851 + ioread32(hw->hw_addr + REG_WOL_CTRL); 2852 + iowrite32(0, hw->hw_addr + REG_MAC_CTRL); 2853 + ioread32(hw->hw_addr + REG_MAC_CTRL); 2854 + hw->phy_configured = false; 2855 + pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); 2856 + goto exit; 2857 + } 2858 2859 + disable_wol: 2860 + iowrite32(0, hw->hw_addr + REG_WOL_CTRL); 2861 + ioread32(hw->hw_addr + REG_WOL_CTRL); 2862 + ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC); 2863 + ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; 2864 + iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC); 2865 + ioread32(hw->hw_addr + REG_PCIE_PHYMISC); 2866 + atl1_phy_enter_power_saving(hw); 2867 + hw->phy_configured = false; 2868 + pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); 2869 + exit: 2870 + if (netif_running(netdev)) 2871 + pci_disable_msi(adapter->pdev); 2872 + pci_disable_device(pdev); 2873 + pci_set_power_state(pdev, pci_choose_state(pdev, state)); 2874 2875 return 0; 2876 } ··· 2855 pci_set_power_state(pdev, PCI_D0); 2856 pci_restore_state(pdev); 2857 2858 err = pci_enable_device(pdev); 2859 + if (err) { 2860 + if (netif_msg_ifup(adapter)) 2861 + dev_printk(KERN_DEBUG, &pdev->dev, 2862 + "error enabling pci device\n"); 2863 + return err; 2864 + } 2865 + 2866 + pci_set_master(pdev); 2867 + iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL); 2868 pci_enable_wake(pdev, PCI_D3hot, 0); 2869 pci_enable_wake(pdev, PCI_D3cold, 0); 2870 2871 + atl1_reset_hw(&adapter->hw); 2872 + adapter->cmb.cmb->int_stats = 0; 2873 2874 if (netif_running(netdev)) 2875 atl1_up(adapter); 2876 netif_device_attach(netdev); 2877 2878 return 0; 2879 } ··· 2875 #define atl1_suspend NULL 2876 #define atl1_resume NULL 2877 #endif 2878 + 2879 + static void atl1_shutdown(struct pci_dev *pdev) 2880 + { 2881 + #ifdef CONFIG_PM 2882 + atl1_suspend(pdev, PMSG_SUSPEND); 2883 + #endif 2884 + } 2885 2886 #ifdef CONFIG_NET_POLL_CONTROLLER 2887 static void atl1_poll_controller(struct net_device *netdev) ··· 3122 .probe = atl1_probe, 3123 .remove = __devexit_p(atl1_remove), 3124 .suspend = atl1_suspend, 3125 + .resume = atl1_resume, 3126 + .shutdown = atl1_shutdown 3127 }; 3128 3129 /*
+1 -1
drivers/net/atlx/atl1.h
··· 1 /* 2 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. 3 * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com> 4 - * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com> 5 * 6 * Derived from Intel e1000 driver 7 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
··· 1 /* 2 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. 3 * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com> 4 + * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com> 5 * 6 * Derived from Intel e1000 driver 7 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+1 -1
drivers/net/atlx/atlx.c
··· 2 * 3 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. 4 * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com> 5 - * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com> 6 * Copyright(c) 2007 Atheros Corporation. All rights reserved. 7 * 8 * Derived from Intel e1000 driver
··· 2 * 3 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. 4 * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com> 5 + * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com> 6 * Copyright(c) 2007 Atheros Corporation. All rights reserved. 7 * 8 * Derived from Intel e1000 driver
+5 -2
drivers/net/atlx/atlx.h
··· 2 * 3 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. 4 * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com> 5 - * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com> 6 * Copyright(c) 2007 Atheros Corporation. All rights reserved. 7 * 8 * Derived from Intel e1000 driver ··· 29 #include <linux/module.h> 30 #include <linux/types.h> 31 32 - #define ATLX_DRIVER_VERSION "2.1.1" 33 MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \ 34 Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>"); 35 MODULE_LICENSE("GPL"); ··· 459 #define MII_ATLX_PSSR_10MBS 0x0000 /* 00=10Mbs */ 460 #define MII_ATLX_PSSR_100MBS 0x4000 /* 01=100Mbs */ 461 #define MII_ATLX_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ 462 463 /* PCI Command Register Bit Definitions */ 464 #define PCI_REG_COMMAND 0x04 /* PCI Command Register */
··· 2 * 3 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. 4 * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com> 5 + * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com> 6 * Copyright(c) 2007 Atheros Corporation. All rights reserved. 7 * 8 * Derived from Intel e1000 driver ··· 29 #include <linux/module.h> 30 #include <linux/types.h> 31 32 + #define ATLX_DRIVER_VERSION "2.1.3" 33 MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \ 34 Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>"); 35 MODULE_LICENSE("GPL"); ··· 459 #define MII_ATLX_PSSR_10MBS 0x0000 /* 00=10Mbs */ 460 #define MII_ATLX_PSSR_100MBS 0x4000 /* 01=100Mbs */ 461 #define MII_ATLX_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ 462 + 463 + #define MII_DBG_ADDR 0x1D 464 + #define MII_DBG_DATA 0x1E 465 466 /* PCI Command Register Bit Definitions */ 467 #define PCI_REG_COMMAND 0x04 /* PCI Command Register */
+1
drivers/net/cxgb3/adapter.h
··· 71 USING_MSIX = (1 << 2), 72 QUEUES_BOUND = (1 << 3), 73 TP_PARITY_INIT = (1 << 4), 74 }; 75 76 struct fl_pg_chunk {
··· 71 USING_MSIX = (1 << 2), 72 QUEUES_BOUND = (1 << 3), 73 TP_PARITY_INIT = (1 << 4), 74 + NAPI_INIT = (1 << 5), 75 }; 76 77 struct fl_pg_chunk {
+1
drivers/net/cxgb3/common.h
··· 698 void early_hw_init(struct adapter *adapter, const struct adapter_info *ai); 699 int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai, 700 int reset); 701 void t3_led_ready(struct adapter *adapter); 702 void t3_fatal_err(struct adapter *adapter); 703 void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
··· 698 void early_hw_init(struct adapter *adapter, const struct adapter_info *ai); 699 int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai, 700 int reset); 701 + int t3_replay_prep_adapter(struct adapter *adapter); 702 void t3_led_ready(struct adapter *adapter); 703 void t3_fatal_err(struct adapter *adapter); 704 void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
+22 -18
drivers/net/cxgb3/cxgb3_main.c
··· 421 netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll, 422 64); 423 } 424 } 425 426 /* ··· 903 goto out; 904 905 setup_rss(adap); 906 - init_napi(adap); 907 adap->flags |= FULL_INIT_DONE; 908 } 909 ··· 1007 return 0; 1008 1009 if (!adap_up && (err = cxgb_up(adapter)) < 0) 1010 - return err; 1011 1012 t3_tp_set_offload_mode(adapter, 1); 1013 tdev->lldev = adapter->port[0]; ··· 1069 int other_ports = adapter->open_device_map & PORT_MASK; 1070 int err; 1071 1072 - if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) { 1073 - quiesce_rx(adapter); 1074 return err; 1075 - } 1076 1077 set_bit(pi->port_id, &adapter->open_device_map); 1078 if (is_offload(adapter) && !ofld_disable) { ··· 2430 test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) 2431 offload_close(&adapter->tdev); 2432 2433 - /* Free sge resources */ 2434 - t3_free_sge_resources(adapter); 2435 - 2436 adapter->flags &= ~FULL_INIT_DONE; 2437 2438 pci_disable_device(pdev); 2439 2440 - /* Request a slot slot reset. */ 2441 return PCI_ERS_RESULT_NEED_RESET; 2442 } 2443 ··· 2451 if (pci_enable_device(pdev)) { 2452 dev_err(&pdev->dev, 2453 "Cannot re-enable PCI device after reset.\n"); 2454 - return PCI_ERS_RESULT_DISCONNECT; 2455 } 2456 pci_set_master(pdev); 2457 2458 - t3_prep_adapter(adapter, adapter->params.info, 1); 2459 2460 return PCI_ERS_RESULT_RECOVERED; 2461 } 2462 2463 /** ··· 2492 } 2493 netif_device_attach(netdev); 2494 } 2495 - } 2496 - 2497 - if (is_offload(adapter)) { 2498 - __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map); 2499 - if (offload_open(adapter->port[0])) 2500 - printk(KERN_WARNING 2501 - "Could not bring back offload capabilities\n"); 2502 } 2503 } 2504 ··· 2611 } 2612 2613 pci_set_master(pdev); 2614 2615 mmio_start = pci_resource_start(pdev, 0); 2616 mmio_len = pci_resource_len(pdev, 0);
··· 421 netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll, 422 64); 423 } 424 + 425 + /* 426 + * netif_napi_add() can be called only once per napi_struct because it 427 + * adds each new napi_struct to a list. Be careful not to call it a 428 + * second time, e.g., during EEH recovery, by making a note of it. 429 + */ 430 + adap->flags |= NAPI_INIT; 431 } 432 433 /* ··· 896 goto out; 897 898 setup_rss(adap); 899 + if (!(adap->flags & NAPI_INIT)) 900 + init_napi(adap); 901 adap->flags |= FULL_INIT_DONE; 902 } 903 ··· 999 return 0; 1000 1001 if (!adap_up && (err = cxgb_up(adapter)) < 0) 1002 + goto out; 1003 1004 t3_tp_set_offload_mode(adapter, 1); 1005 tdev->lldev = adapter->port[0]; ··· 1061 int other_ports = adapter->open_device_map & PORT_MASK; 1062 int err; 1063 1064 + if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) 1065 return err; 1066 1067 set_bit(pi->port_id, &adapter->open_device_map); 1068 if (is_offload(adapter) && !ofld_disable) { ··· 2424 test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) 2425 offload_close(&adapter->tdev); 2426 2427 adapter->flags &= ~FULL_INIT_DONE; 2428 2429 pci_disable_device(pdev); 2430 2431 + /* Request a slot reset. */ 2432 return PCI_ERS_RESULT_NEED_RESET; 2433 } 2434 ··· 2448 if (pci_enable_device(pdev)) { 2449 dev_err(&pdev->dev, 2450 "Cannot re-enable PCI device after reset.\n"); 2451 + goto err; 2452 } 2453 pci_set_master(pdev); 2454 + pci_restore_state(pdev); 2455 2456 + /* Free sge resources */ 2457 + t3_free_sge_resources(adapter); 2458 + 2459 + if (t3_replay_prep_adapter(adapter)) 2460 + goto err; 2461 2462 return PCI_ERS_RESULT_RECOVERED; 2463 + err: 2464 + return PCI_ERS_RESULT_DISCONNECT; 2465 } 2466 2467 /** ··· 2482 } 2483 netif_device_attach(netdev); 2484 } 2485 } 2486 } 2487 ··· 2608 } 2609 2610 pci_set_master(pdev); 2611 + pci_save_state(pdev); 2612 2613 mmio_start = pci_resource_start(pdev, 0); 2614 mmio_len = pci_resource_len(pdev, 0);
+8
drivers/net/cxgb3/regs.h
··· 444 445 #define A_PCIE_CFG 0x88 446 447 #define S_PCIE_CLIDECEN 16 448 #define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN) 449 #define F_PCIE_CLIDECEN V_PCIE_CLIDECEN(1U)
··· 444 445 #define A_PCIE_CFG 0x88 446 447 + #define S_ENABLELINKDWNDRST 21 448 + #define V_ENABLELINKDWNDRST(x) ((x) << S_ENABLELINKDWNDRST) 449 + #define F_ENABLELINKDWNDRST V_ENABLELINKDWNDRST(1U) 450 + 451 + #define S_ENABLELINKDOWNRST 20 452 + #define V_ENABLELINKDOWNRST(x) ((x) << S_ENABLELINKDOWNRST) 453 + #define F_ENABLELINKDOWNRST V_ENABLELINKDOWNRST(1U) 454 + 455 #define S_PCIE_CLIDECEN 16 456 #define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN) 457 #define F_PCIE_CLIDECEN V_PCIE_CLIDECEN(1U)
+27 -2
drivers/net/cxgb3/sge.c
··· 539 } 540 541 /** 542 * free_qset - free the resources of an SGE queue set 543 * @adapter: the adapter owning the queue set 544 * @q: the queue set ··· 619 q->rspq.desc, q->rspq.phys_addr); 620 } 621 622 - memset(q, 0, sizeof(*q)); 623 } 624 625 /** ··· 1390 */ 1391 int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb) 1392 { 1393 - int ret; 1394 local_bh_disable(); 1395 ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb); 1396 local_bh_enable();
··· 539 } 540 541 /** 542 + * t3_reset_qset - reset a sge qset 543 + * @q: the queue set 544 + * 545 + * Reset the qset structure. 546 + * the NAPI structure is preserved in the event of 547 + * the qset's reincarnation, for example during EEH recovery. 548 + */ 549 + static void t3_reset_qset(struct sge_qset *q) 550 + { 551 + if (q->adap && 552 + !(q->adap->flags & NAPI_INIT)) { 553 + memset(q, 0, sizeof(*q)); 554 + return; 555 + } 556 + 557 + q->adap = NULL; 558 + memset(&q->rspq, 0, sizeof(q->rspq)); 559 + memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET); 560 + memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET); 561 + q->txq_stopped = 0; 562 + memset(&q->tx_reclaim_timer, 0, sizeof(q->tx_reclaim_timer)); 563 + } 564 + 565 + 566 + /** 567 * free_qset - free the resources of an SGE queue set 568 * @adapter: the adapter owning the queue set 569 * @q: the queue set ··· 594 q->rspq.desc, q->rspq.phys_addr); 595 } 596 597 + t3_reset_qset(q); 598 } 599 600 /** ··· 1365 */ 1366 int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb) 1367 { 1368 + int ret; 1369 local_bh_disable(); 1370 ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb); 1371 local_bh_enable();
+28
drivers/net/cxgb3/t3_hw.c
··· 3264 3265 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff); 3266 t3_set_reg_field(adap, A_PCIE_CFG, 0, 3267 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN); 3268 } 3269 ··· 3656 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 3657 F_GPIO0_OUT_VAL); 3658 }
··· 3264 3265 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff); 3266 t3_set_reg_field(adap, A_PCIE_CFG, 0, 3267 + F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST | 3268 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN); 3269 } 3270 ··· 3655 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 3656 F_GPIO0_OUT_VAL); 3657 } 3658 + 3659 + int t3_replay_prep_adapter(struct adapter *adapter) 3660 + { 3661 + const struct adapter_info *ai = adapter->params.info; 3662 + unsigned int i, j = 0; 3663 + int ret; 3664 + 3665 + early_hw_init(adapter, ai); 3666 + ret = init_parity(adapter); 3667 + if (ret) 3668 + return ret; 3669 + 3670 + for_each_port(adapter, i) { 3671 + struct port_info *p = adap2pinfo(adapter, i); 3672 + while (!adapter->params.vpd.port_type[j]) 3673 + ++j; 3674 + 3675 + p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j, 3676 + ai->mdio_ops); 3677 + 3678 + p->phy.ops->power_down(&p->phy, 1); 3679 + ++j; 3680 + } 3681 + 3682 + return 0; 3683 + } 3684 +
+33 -4
drivers/net/dm9000.c
··· 117 118 struct mutex addr_lock; /* phy and eeprom access lock */ 119 120 spinlock_t lock; 121 122 struct mii_if_info mii; ··· 300 } 301 } 302 303 304 /* Our watchdog timed out. Called by the networking layer */ 305 static void dm9000_timeout(struct net_device *dev) ··· 472 .set_eeprom = dm9000_set_eeprom, 473 }; 474 475 476 /* dm9000_release_board 477 * ··· 521 /* 522 * Search DM9000 board, allocate space and register it 523 */ 524 - static int 525 dm9000_probe(struct platform_device *pdev) 526 { 527 struct dm9000_plat_data *pdata = pdev->dev.platform_data; ··· 543 544 SET_NETDEV_DEV(ndev, &pdev->dev); 545 546 - dev_dbg(&pdev->dev, "dm9000_probe()"); 547 548 /* setup board info structure */ 549 db = (struct board_info *) ndev->priv; 550 memset(db, 0, sizeof (*db)); 551 552 db->dev = &pdev->dev; 553 554 spin_lock_init(&db->lock); 555 mutex_init(&db->addr_lock); 556 557 if (pdev->num_resources < 2) { 558 ret = -ENODEV; ··· 783 784 mii_check_media(&db->mii, netif_msg_link(db), 1); 785 netif_start_queue(dev); 786 787 return 0; 788 } ··· 902 903 if (netif_msg_ifdown(db)) 904 dev_dbg(db->dev, "shutting down %s\n", ndev->name); 905 906 netif_stop_queue(ndev); 907 netif_carrier_off(ndev); ··· 1314 spin_unlock_irqrestore(&db->lock,flags); 1315 1316 mutex_unlock(&db->addr_lock); 1317 return ret; 1318 } 1319 ··· 1329 unsigned long flags; 1330 unsigned long reg_save; 1331 1332 mutex_lock(&db->addr_lock); 1333 1334 spin_lock_irqsave(&db->lock,flags); ··· 1401 return 0; 1402 } 1403 1404 - static int 1405 dm9000_drv_remove(struct platform_device *pdev) 1406 { 1407 struct net_device *ndev = platform_get_drvdata(pdev); ··· 1422 .owner = THIS_MODULE, 1423 }, 1424 .probe = dm9000_probe, 1425 - .remove = dm9000_drv_remove, 1426 .suspend = dm9000_drv_suspend, 1427 .resume = dm9000_drv_resume, 1428 };
··· 117 118 struct mutex addr_lock; /* phy and eeprom access lock */ 119 120 + struct delayed_work phy_poll; 121 + struct net_device *ndev; 122 + 123 spinlock_t lock; 124 125 struct mii_if_info mii; ··· 297 } 298 } 299 300 + static void dm9000_schedule_poll(board_info_t *db) 301 + { 302 + schedule_delayed_work(&db->phy_poll, HZ * 2); 303 + } 304 305 /* Our watchdog timed out. Called by the networking layer */ 306 static void dm9000_timeout(struct net_device *dev) ··· 465 .set_eeprom = dm9000_set_eeprom, 466 }; 467 468 + static void 469 + dm9000_poll_work(struct work_struct *w) 470 + { 471 + struct delayed_work *dw = container_of(w, struct delayed_work, work); 472 + board_info_t *db = container_of(dw, board_info_t, phy_poll); 473 + 474 + mii_check_media(&db->mii, netif_msg_link(db), 0); 475 + 476 + if (netif_running(db->ndev)) 477 + dm9000_schedule_poll(db); 478 + } 479 480 /* dm9000_release_board 481 * ··· 503 /* 504 * Search DM9000 board, allocate space and register it 505 */ 506 + static int __devinit 507 dm9000_probe(struct platform_device *pdev) 508 { 509 struct dm9000_plat_data *pdata = pdev->dev.platform_data; ··· 525 526 SET_NETDEV_DEV(ndev, &pdev->dev); 527 528 + dev_dbg(&pdev->dev, "dm9000_probe()\n"); 529 530 /* setup board info structure */ 531 db = (struct board_info *) ndev->priv; 532 memset(db, 0, sizeof (*db)); 533 534 db->dev = &pdev->dev; 535 + db->ndev = ndev; 536 537 spin_lock_init(&db->lock); 538 mutex_init(&db->addr_lock); 539 + 540 + INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work); 541 + 542 543 if (pdev->num_resources < 2) { 544 ret = -ENODEV; ··· 761 762 mii_check_media(&db->mii, netif_msg_link(db), 1); 763 netif_start_queue(dev); 764 + 765 + dm9000_schedule_poll(db); 766 767 return 0; 768 } ··· 878 879 if (netif_msg_ifdown(db)) 880 dev_dbg(db->dev, "shutting down %s\n", ndev->name); 881 + 882 + cancel_delayed_work(&db->phy_poll); 883 884 netif_stop_queue(ndev); 885 netif_carrier_off(ndev); ··· 1288 spin_unlock_irqrestore(&db->lock,flags); 1289 1290 mutex_unlock(&db->addr_lock); 1291 + 1292 + dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret); 1293 return ret; 1294 } 1295 ··· 1301 unsigned long flags; 1302 unsigned long reg_save; 1303 1304 + dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value); 1305 mutex_lock(&db->addr_lock); 1306 1307 spin_lock_irqsave(&db->lock,flags); ··· 1372 return 0; 1373 } 1374 1375 + static int __devexit 1376 dm9000_drv_remove(struct platform_device *pdev) 1377 { 1378 struct net_device *ndev = platform_get_drvdata(pdev); ··· 1393 .owner = THIS_MODULE, 1394 }, 1395 .probe = dm9000_probe, 1396 + .remove = __devexit_p(dm9000_drv_remove), 1397 .suspend = dm9000_drv_suspend, 1398 .resume = dm9000_drv_resume, 1399 };
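Distilled from the dm9000 hunks above, the delayed-work lifecycle is: initialize the work in probe, kick it in open, have the work function reschedule itself while the interface is running, and cancel it in stop. A hedged, driver-agnostic sketch (names here are illustrative, not from the patch):

struct example_priv {
	struct delayed_work phy_poll;
	struct net_device *ndev;
};

static void example_poll_work(struct work_struct *w)
{
	struct delayed_work *dw = container_of(w, struct delayed_work, work);
	struct example_priv *priv =
		container_of(dw, struct example_priv, phy_poll);

	/* check MII link state here, then re-arm while the device is up */
	if (netif_running(priv->ndev))
		schedule_delayed_work(&priv->phy_poll, HZ * 2);
}

/* probe: INIT_DELAYED_WORK(&priv->phy_poll, example_poll_work);   */
/* open:  schedule_delayed_work(&priv->phy_poll, HZ * 2);          */
/* stop:  cancel_delayed_work(&priv->phy_poll);                    */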
+22 -5
drivers/net/ehea/ehea.h
··· 40 #include <asm/io.h> 41 42 #define DRV_NAME "ehea" 43 - #define DRV_VERSION "EHEA_0090" 44 45 /* eHEA capability flags */ 46 #define DLPAR_PORT_ADD_REM 1 ··· 118 #define EHEA_MR_ACC_CTRL 0x00800000 119 120 #define EHEA_BUSMAP_START 0x8000000000000000ULL 121 122 #define EHEA_WATCH_DOG_TIMEOUT 10*HZ 123 ··· 199 set to 0 if unused */ 200 }; 201 202 - struct ehea_busmap { 203 - unsigned int entries; /* total number of entries */ 204 - unsigned int valid_sections; /* number of valid sections */ 205 - u64 *vaddr; 206 }; 207 208 struct ehea_qp;
··· 40 #include <asm/io.h> 41 42 #define DRV_NAME "ehea" 43 + #define DRV_VERSION "EHEA_0091" 44 45 /* eHEA capability flags */ 46 #define DLPAR_PORT_ADD_REM 1 ··· 118 #define EHEA_MR_ACC_CTRL 0x00800000 119 120 #define EHEA_BUSMAP_START 0x8000000000000000ULL 121 + #define EHEA_INVAL_ADDR 0xFFFFFFFFFFFFFFFFULL 122 + #define EHEA_DIR_INDEX_SHIFT 13 /* 8k Entries in 64k block */ 123 + #define EHEA_TOP_INDEX_SHIFT (EHEA_DIR_INDEX_SHIFT * 2) 124 + #define EHEA_MAP_ENTRIES (1 << EHEA_DIR_INDEX_SHIFT) 125 + #define EHEA_MAP_SIZE (0x10000) /* currently fixed map size */ 126 + #define EHEA_INDEX_MASK (EHEA_MAP_ENTRIES - 1) 127 + 128 129 #define EHEA_WATCH_DOG_TIMEOUT 10*HZ 130 ··· 192 set to 0 if unused */ 193 }; 194 195 + /* 196 + * Memory map data structures 197 + */ 198 + struct ehea_dir_bmap 199 + { 200 + u64 ent[EHEA_MAP_ENTRIES]; 201 + }; 202 + struct ehea_top_bmap 203 + { 204 + struct ehea_dir_bmap *dir[EHEA_MAP_ENTRIES]; 205 + }; 206 + struct ehea_bmap 207 + { 208 + struct ehea_top_bmap *top[EHEA_MAP_ENTRIES]; 209 }; 210 211 struct ehea_qp;
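To make the new three-level busmap concrete: with EHEA_DIR_INDEX_SHIFT = 13 and EHEA_TOP_INDEX_SHIFT = 26, a memory-section index is cut into three 13-bit fields (8192 entries per level). A minimal sketch of the split, restating what ehea_calc_index() and ehea_map_vaddr() do in the ehea_qmr.c hunk below (the helper name is illustrative):

static inline void example_split_section_index(unsigned long i,
					       int *top, int *dir, int *idx)
{
	*top = (i >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;	/* bits 26..38 */
	*dir = (i >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;	/* bits 13..25 */
	*idx = i & EHEA_INDEX_MASK;				/* bits  0..12 */
}

/* e.g. section index 0x2003 yields top 0, dir 1, idx 3, i.e. the entry
 * ehea_bmap->top[0]->dir[1]->ent[3]. */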
+25
drivers/net/ehea/ehea_main.c
··· 35 #include <linux/if_ether.h> 36 #include <linux/notifier.h> 37 #include <linux/reboot.h> 38 #include <asm/kexec.h> 39 #include <linux/mutex.h> 40 ··· 3504 0, H_DEREG_BCMC); 3505 } 3506 3507 static int ehea_reboot_notifier(struct notifier_block *nb, 3508 unsigned long action, void *unused) 3509 { ··· 3600 if (ret) 3601 ehea_info("failed registering reboot notifier"); 3602 3603 ret = crash_shutdown_register(&ehea_crash_handler); 3604 if (ret) 3605 ehea_info("failed registering crash handler"); ··· 3627 out3: 3628 ibmebus_unregister_driver(&ehea_driver); 3629 out2: 3630 unregister_reboot_notifier(&ehea_reboot_nb); 3631 crash_shutdown_unregister(&ehea_crash_handler); 3632 out: ··· 3645 ret = crash_shutdown_unregister(&ehea_crash_handler); 3646 if (ret) 3647 ehea_info("failed unregistering crash handler"); 3648 kfree(ehea_fw_handles.arr); 3649 kfree(ehea_bcmc_regs.arr); 3650 ehea_destroy_busmap();
··· 35 #include <linux/if_ether.h> 36 #include <linux/notifier.h> 37 #include <linux/reboot.h> 38 + #include <linux/memory.h> 39 #include <asm/kexec.h> 40 #include <linux/mutex.h> 41 ··· 3503 0, H_DEREG_BCMC); 3504 } 3505 3506 + static int ehea_mem_notifier(struct notifier_block *nb, 3507 + unsigned long action, void *data) 3508 + { 3509 + switch (action) { 3510 + case MEM_OFFLINE: 3511 + ehea_info("memory has been removed"); 3512 + ehea_rereg_mrs(NULL); 3513 + break; 3514 + default: 3515 + break; 3516 + } 3517 + return NOTIFY_OK; 3518 + } 3519 + 3520 + static struct notifier_block ehea_mem_nb = { 3521 + .notifier_call = ehea_mem_notifier, 3522 + }; 3523 + 3524 static int ehea_reboot_notifier(struct notifier_block *nb, 3525 unsigned long action, void *unused) 3526 { ··· 3581 if (ret) 3582 ehea_info("failed registering reboot notifier"); 3583 3584 + ret = register_memory_notifier(&ehea_mem_nb); 3585 + if (ret) 3586 + ehea_info("failed registering memory remove notifier"); 3587 + 3588 ret = crash_shutdown_register(&ehea_crash_handler); 3589 if (ret) 3590 ehea_info("failed registering crash handler"); ··· 3604 out3: 3605 ibmebus_unregister_driver(&ehea_driver); 3606 out2: 3607 + unregister_memory_notifier(&ehea_mem_nb); 3608 unregister_reboot_notifier(&ehea_reboot_nb); 3609 crash_shutdown_unregister(&ehea_crash_handler); 3610 out: ··· 3621 ret = crash_shutdown_unregister(&ehea_crash_handler); 3622 if (ret) 3623 ehea_info("failed unregistering crash handler"); 3624 + unregister_memory_notifier(&ehea_mem_nb); 3625 kfree(ehea_fw_handles.arr); 3626 kfree(ehea_bcmc_regs.arr); 3627 ehea_destroy_busmap();
+202 -74
drivers/net/ehea/ehea_qmr.c
··· 31 #include "ehea_phyp.h" 32 #include "ehea_qmr.h" 33 34 35 - struct ehea_busmap ehea_bmap = { 0, 0, NULL }; 36 37 38 static void *hw_qpageit_get_inc(struct hw_queue *queue) ··· 559 return 0; 560 } 561 562 - int ehea_create_busmap(void) 563 { 564 - u64 vaddr = EHEA_BUSMAP_START; 565 - unsigned long high_section_index = 0; 566 - int i; 567 568 - /* 569 - * Sections are not in ascending order -> Loop over all sections and 570 - * find the highest PFN to compute the required map size. 571 - */ 572 - ehea_bmap.valid_sections = 0; 573 574 - for (i = 0; i < NR_MEM_SECTIONS; i++) 575 - if (valid_section_nr(i)) 576 - high_section_index = i; 577 578 - ehea_bmap.entries = high_section_index + 1; 579 - ehea_bmap.vaddr = vmalloc(ehea_bmap.entries * sizeof(*ehea_bmap.vaddr)); 580 581 - if (!ehea_bmap.vaddr) 582 return -ENOMEM; 583 584 - for (i = 0 ; i < ehea_bmap.entries; i++) { 585 - unsigned long pfn = section_nr_to_pfn(i); 586 587 - if (pfn_valid(pfn)) { 588 - ehea_bmap.vaddr[i] = vaddr; 589 - vaddr += EHEA_SECTSIZE; 590 - ehea_bmap.valid_sections++; 591 - } else 592 - ehea_bmap.vaddr[i] = 0; 593 } 594 595 return 0; 596 } 597 598 void ehea_destroy_busmap(void) 599 { 600 - vfree(ehea_bmap.vaddr); 601 } 602 603 u64 ehea_map_vaddr(void *caddr) 604 { 605 - u64 mapped_addr; 606 - unsigned long index = __pa(caddr) >> SECTION_SIZE_BITS; 607 608 - if (likely(index < ehea_bmap.entries)) { 609 - mapped_addr = ehea_bmap.vaddr[index]; 610 - if (likely(mapped_addr)) 611 - mapped_addr |= (((unsigned long)caddr) 612 - & (EHEA_SECTSIZE - 1)); 613 - else 614 - mapped_addr = -1; 615 - } else 616 - mapped_addr = -1; 617 618 - if (unlikely(mapped_addr == -1)) 619 - if (!test_and_set_bit(__EHEA_STOP_XFER, &ehea_driver_flags)) 620 - schedule_work(&ehea_rereg_mr_task); 621 622 - return mapped_addr; 623 } 624 625 int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr) 626 { 627 int ret; 628 u64 *pt; 629 - void *pg; 630 - u64 hret, pt_abs, i, j, m, mr_len; 631 u32 acc_ctrl = EHEA_MR_ACC_CTRL; 632 633 - mr_len = ehea_bmap.valid_sections * EHEA_SECTSIZE; 634 635 - pt = kzalloc(PAGE_SIZE, GFP_KERNEL); 636 if (!pt) { 637 ehea_error("no mem"); 638 ret = -ENOMEM; 639 goto out; 640 } 641 - pt_abs = virt_to_abs(pt); 642 643 - hret = ehea_h_alloc_resource_mr(adapter->handle, 644 - EHEA_BUSMAP_START, mr_len, 645 - acc_ctrl, adapter->pd, 646 &mr->handle, &mr->lkey); 647 if (hret != H_SUCCESS) { 648 ehea_error("alloc_resource_mr failed"); 649 ret = -EIO; 650 goto out; 651 } 652 653 - for (i = 0 ; i < ehea_bmap.entries; i++) 654 - if (ehea_bmap.vaddr[i]) { 655 - void *sectbase = __va(i << SECTION_SIZE_BITS); 656 - unsigned long k = 0; 657 658 - for (j = 0; j < (EHEA_PAGES_PER_SECTION / 659 - EHEA_MAX_RPAGE); j++) { 660 661 - for (m = 0; m < EHEA_MAX_RPAGE; m++) { 662 - pg = sectbase + ((k++) * EHEA_PAGESIZE); 663 - pt[m] = virt_to_abs(pg); 664 - } 665 - 666 - hret = ehea_h_register_rpage_mr(adapter->handle, 667 - mr->handle, 668 - 0, 0, pt_abs, 669 - EHEA_MAX_RPAGE); 670 - if ((hret != H_SUCCESS) 671 - && (hret != H_PAGE_REGISTERED)) { 672 - ehea_h_free_resource(adapter->handle, 673 - mr->handle, 674 - FORCE_FREE); 675 - ehea_error("register_rpage_mr failed"); 676 - ret = -EIO; 677 - goto out; 678 - } 679 - } 680 - } 681 682 if (hret != H_SUCCESS) { 683 ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
··· 31 #include "ehea_phyp.h" 32 #include "ehea_qmr.h" 33 34 + struct ehea_bmap *ehea_bmap = NULL; 35 36 37 38 static void *hw_qpageit_get_inc(struct hw_queue *queue) ··· 559 return 0; 560 } 561 562 + static inline int ehea_calc_index(unsigned long i, unsigned long s) 563 { 564 + return (i >> s) & EHEA_INDEX_MASK; 565 + } 566 567 + static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap, 568 + int dir) 569 + { 570 + if(!ehea_top_bmap->dir[dir]) { 571 + ehea_top_bmap->dir[dir] = 572 + kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL); 573 + if (!ehea_top_bmap->dir[dir]) 574 + return -ENOMEM; 575 + } 576 + return 0; 577 + } 578 579 + static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir) 580 + { 581 + if(!ehea_bmap->top[top]) { 582 + ehea_bmap->top[top] = 583 + kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL); 584 + if (!ehea_bmap->top[top]) 585 + return -ENOMEM; 586 + } 587 + return ehea_init_top_bmap(ehea_bmap->top[top], dir); 588 + } 589 590 + static int ehea_create_busmap_callback(unsigned long pfn, 591 + unsigned long nr_pages, void *arg) 592 + { 593 + unsigned long i, mr_len, start_section, end_section; 594 + start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE; 595 + end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE); 596 + mr_len = *(unsigned long *)arg; 597 598 + ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL); 599 + if (!ehea_bmap) 600 return -ENOMEM; 601 602 + for (i = start_section; i < end_section; i++) { 603 + int ret; 604 + int top, dir, idx; 605 + u64 vaddr; 606 607 + top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT); 608 + dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT); 609 + 610 + ret = ehea_init_bmap(ehea_bmap, top, dir); 611 + if(ret) 612 + return ret; 613 + 614 + idx = i & EHEA_INDEX_MASK; 615 + vaddr = EHEA_BUSMAP_START + mr_len + i * EHEA_SECTSIZE; 616 + 617 + ehea_bmap->top[top]->dir[dir]->ent[idx] = vaddr; 618 } 619 + 620 + mr_len += nr_pages * PAGE_SIZE; 621 + *(unsigned long *)arg = mr_len; 622 623 return 0; 624 } 625 626 + static unsigned long ehea_mr_len; 627 + 628 + static DEFINE_MUTEX(ehea_busmap_mutex); 629 + 630 + int ehea_create_busmap(void) 631 + { 632 + int ret; 633 + mutex_lock(&ehea_busmap_mutex); 634 + ehea_mr_len = 0; 635 + ret = walk_memory_resource(0, 1ULL << MAX_PHYSMEM_BITS, &ehea_mr_len, 636 + ehea_create_busmap_callback); 637 + mutex_unlock(&ehea_busmap_mutex); 638 + return ret; 639 + } 640 + 641 void ehea_destroy_busmap(void) 642 { 643 + int top, dir; 644 + mutex_lock(&ehea_busmap_mutex); 645 + if (!ehea_bmap) 646 + goto out_destroy; 647 + 648 + for (top = 0; top < EHEA_MAP_ENTRIES; top++) { 649 + if (!ehea_bmap->top[top]) 650 + continue; 651 + 652 + for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) { 653 + if (!ehea_bmap->top[top]->dir[dir]) 654 + continue; 655 + 656 + kfree(ehea_bmap->top[top]->dir[dir]); 657 + } 658 + 659 + kfree(ehea_bmap->top[top]); 660 + } 661 + 662 + kfree(ehea_bmap); 663 + ehea_bmap = NULL; 664 + out_destroy: 665 + mutex_unlock(&ehea_busmap_mutex); 666 } 667 668 u64 ehea_map_vaddr(void *caddr) 669 { 670 + int top, dir, idx; 671 + unsigned long index, offset; 672 673 + if (!ehea_bmap) 674 + return EHEA_INVAL_ADDR; 675 676 + index = virt_to_abs(caddr) >> SECTION_SIZE_BITS; 677 + top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK; 678 + if (!ehea_bmap->top[top]) 679 + return EHEA_INVAL_ADDR; 680 681 + dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK; 682 + if (!ehea_bmap->top[top]->dir[dir]) 683 + return EHEA_INVAL_ADDR; 684 + 685 + idx = index & 
EHEA_INDEX_MASK; 686 + if (!ehea_bmap->top[top]->dir[dir]->ent[idx]) 687 + return EHEA_INVAL_ADDR; 688 + 689 + offset = (unsigned long)caddr & (EHEA_SECTSIZE - 1); 690 + return ehea_bmap->top[top]->dir[dir]->ent[idx] | offset; 691 + } 692 + 693 + static inline void *ehea_calc_sectbase(int top, int dir, int idx) 694 + { 695 + unsigned long ret = idx; 696 + ret |= dir << EHEA_DIR_INDEX_SHIFT; 697 + ret |= top << EHEA_TOP_INDEX_SHIFT; 698 + return abs_to_virt(ret << SECTION_SIZE_BITS); 699 + } 700 + 701 + static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt, 702 + struct ehea_adapter *adapter, 703 + struct ehea_mr *mr) 704 + { 705 + void *pg; 706 + u64 j, m, hret; 707 + unsigned long k = 0; 708 + u64 pt_abs = virt_to_abs(pt); 709 + 710 + void *sectbase = ehea_calc_sectbase(top, dir, idx); 711 + 712 + for (j = 0; j < (EHEA_PAGES_PER_SECTION / EHEA_MAX_RPAGE); j++) { 713 + 714 + for (m = 0; m < EHEA_MAX_RPAGE; m++) { 715 + pg = sectbase + ((k++) * EHEA_PAGESIZE); 716 + pt[m] = virt_to_abs(pg); 717 + } 718 + hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0, 719 + 0, pt_abs, EHEA_MAX_RPAGE); 720 + 721 + if ((hret != H_SUCCESS) 722 + && (hret != H_PAGE_REGISTERED)) { 723 + ehea_h_free_resource(adapter->handle, mr->handle, 724 + FORCE_FREE); 725 + ehea_error("register_rpage_mr failed"); 726 + return hret; 727 + } 728 + } 729 + return hret; 730 + } 731 + 732 + static u64 ehea_reg_mr_sections(int top, int dir, u64 *pt, 733 + struct ehea_adapter *adapter, 734 + struct ehea_mr *mr) 735 + { 736 + u64 hret = H_SUCCESS; 737 + int idx; 738 + 739 + for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) { 740 + if (!ehea_bmap->top[top]->dir[dir]->ent[idx]) 741 + continue; 742 + 743 + hret = ehea_reg_mr_section(top, dir, idx, pt, adapter, mr); 744 + if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) 745 + return hret; 746 + } 747 + return hret; 748 + } 749 + 750 + static u64 ehea_reg_mr_dir_sections(int top, u64 *pt, 751 + struct ehea_adapter *adapter, 752 + struct ehea_mr *mr) 753 + { 754 + u64 hret = H_SUCCESS; 755 + int dir; 756 + 757 + for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) { 758 + if (!ehea_bmap->top[top]->dir[dir]) 759 + continue; 760 + 761 + hret = ehea_reg_mr_sections(top, dir, pt, adapter, mr); 762 + if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) 763 + return hret; 764 + } 765 + return hret; 766 } 767 768 int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr) 769 { 770 int ret; 771 u64 *pt; 772 + u64 hret; 773 u32 acc_ctrl = EHEA_MR_ACC_CTRL; 774 775 + unsigned long top; 776 777 + pt = kzalloc(PAGE_SIZE, GFP_KERNEL); 778 if (!pt) { 779 ehea_error("no mem"); 780 ret = -ENOMEM; 781 goto out; 782 } 783 784 + hret = ehea_h_alloc_resource_mr(adapter->handle, EHEA_BUSMAP_START, 785 + ehea_mr_len, acc_ctrl, adapter->pd, 786 &mr->handle, &mr->lkey); 787 + 788 if (hret != H_SUCCESS) { 789 ehea_error("alloc_resource_mr failed"); 790 ret = -EIO; 791 goto out; 792 } 793 794 + if (!ehea_bmap) { 795 + ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE); 796 + ehea_error("no busmap available"); 797 + ret = -EIO; 798 + goto out; 799 + } 800 801 + for (top = 0; top < EHEA_MAP_ENTRIES; top++) { 802 + if (!ehea_bmap->top[top]) 803 + continue; 804 805 + hret = ehea_reg_mr_dir_sections(top, pt, adapter, mr); 806 + if((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS)) 807 + break; 808 + } 809 810 if (hret != H_SUCCESS) { 811 ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
+2
drivers/net/gianfar.c
··· 635 dev_kfree_skb_any(priv->tx_skbuff[i]); 636 priv->tx_skbuff[i] = NULL; 637 } 638 } 639 640 kfree(priv->tx_skbuff);
··· 635 dev_kfree_skb_any(priv->tx_skbuff[i]); 636 priv->tx_skbuff[i] = NULL; 637 } 638 + 639 + txbdp++; 640 } 641 642 kfree(priv->tx_skbuff);
+419 -323
drivers/net/myri10ge/myri10ge.c
··· 144 char *req_bytes; 145 struct myri10ge_tx_buffer_state *info; 146 int mask; /* number of transmit slots -1 */ 147 - int boundary; /* boundary transmits cannot cross */ 148 int req ____cacheline_aligned; /* transmit slots submitted */ 149 int pkt_start; /* packets started */ 150 int done ____cacheline_aligned; /* transmit slots completed */ 151 int pkt_done; /* packets completed */ 152 }; 153 154 struct myri10ge_rx_done { ··· 162 struct net_lro_desc lro_desc[MYRI10GE_MAX_LRO_DESCRIPTORS]; 163 }; 164 165 - struct myri10ge_priv { 166 - int running; /* running? */ 167 - int csum_flag; /* rx_csums? */ 168 struct myri10ge_tx_buf tx; /* transmit ring */ 169 struct myri10ge_rx_buf rx_small; 170 struct myri10ge_rx_buf rx_big; 171 struct myri10ge_rx_done rx_done; 172 - int small_bytes; 173 - int big_bytes; 174 struct net_device *dev; 175 struct napi_struct napi; 176 struct net_device_stats stats; 177 u8 __iomem *sram; 178 int sram_size; 179 unsigned long board_span; 180 unsigned long iomem_base; 181 - __be32 __iomem *irq_claim; 182 __be32 __iomem *irq_deassert; 183 char *mac_addr_string; 184 struct mcp_cmd_response *cmd; 185 dma_addr_t cmd_bus; 186 - struct mcp_irq_data *fw_stats; 187 - dma_addr_t fw_stats_bus; 188 struct pci_dev *pdev; 189 int msi_enabled; 190 u32 link_state; ··· 214 __be32 __iomem *intr_coal_delay_ptr; 215 int mtrr; 216 int wc_enabled; 217 - int wake_queue; 218 - int stop_queue; 219 int down_cnt; 220 wait_queue_head_t down_wq; 221 struct work_struct watchdog_work; 222 struct timer_list watchdog_timer; 223 - int watchdog_tx_done; 224 - int watchdog_tx_req; 225 - int watchdog_pause; 226 int watchdog_resets; 227 - int tx_linearized; 228 int pause; 229 char *fw_name; 230 char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE]; 231 char fw_version[128]; 232 int fw_ver_major; 233 int fw_ver_minor; ··· 247 248 static char *myri10ge_fw_name = NULL; 249 module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR); 250 - MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name\n"); 251 252 static int myri10ge_ecrc_enable = 1; 253 module_param(myri10ge_ecrc_enable, int, S_IRUGO); 254 - MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E\n"); 255 - 256 - static int myri10ge_max_intr_slots = 1024; 257 - module_param(myri10ge_max_intr_slots, int, S_IRUGO); 258 - MODULE_PARM_DESC(myri10ge_max_intr_slots, "Interrupt queue slots\n"); 259 260 static int myri10ge_small_bytes = -1; /* -1 == auto */ 261 module_param(myri10ge_small_bytes, int, S_IRUGO | S_IWUSR); 262 - MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets\n"); 263 264 static int myri10ge_msi = 1; /* enable msi by default */ 265 module_param(myri10ge_msi, int, S_IRUGO | S_IWUSR); 266 - MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts\n"); 267 268 static int myri10ge_intr_coal_delay = 75; 269 module_param(myri10ge_intr_coal_delay, int, S_IRUGO); 270 - MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay\n"); 271 272 static int myri10ge_flow_control = 1; 273 module_param(myri10ge_flow_control, int, S_IRUGO); 274 - MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter\n"); 275 276 static int myri10ge_deassert_wait = 1; 277 module_param(myri10ge_deassert_wait, int, S_IRUGO | S_IWUSR); 278 MODULE_PARM_DESC(myri10ge_deassert_wait, 279 - "Wait when deasserting legacy interrupts\n"); 280 281 static int myri10ge_force_firmware = 0; 282 module_param(myri10ge_force_firmware, int, S_IRUGO); 283 MODULE_PARM_DESC(myri10ge_force_firmware, 284 - "Force firmware to assume aligned 
completions\n"); 285 286 static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN; 287 module_param(myri10ge_initial_mtu, int, S_IRUGO); 288 - MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU\n"); 289 290 static int myri10ge_napi_weight = 64; 291 module_param(myri10ge_napi_weight, int, S_IRUGO); 292 - MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight\n"); 293 294 static int myri10ge_watchdog_timeout = 1; 295 module_param(myri10ge_watchdog_timeout, int, S_IRUGO); 296 - MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout\n"); 297 298 static int myri10ge_max_irq_loops = 1048576; 299 module_param(myri10ge_max_irq_loops, int, S_IRUGO); 300 MODULE_PARM_DESC(myri10ge_max_irq_loops, 301 - "Set stuck legacy IRQ detection threshold\n"); 302 303 #define MYRI10GE_MSG_DEFAULT NETIF_MSG_LINK 304 ··· 304 305 static int myri10ge_lro = 1; 306 module_param(myri10ge_lro, int, S_IRUGO); 307 - MODULE_PARM_DESC(myri10ge_lro, "Enable large receive offload\n"); 308 309 static int myri10ge_lro_max_pkts = MYRI10GE_LRO_MAX_PKTS; 310 module_param(myri10ge_lro_max_pkts, int, S_IRUGO); 311 - MODULE_PARM_DESC(myri10ge_lro, "Number of LRO packets to be aggregated\n"); 312 313 static int myri10ge_fill_thresh = 256; 314 module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR); 315 - MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed\n"); 316 317 static int myri10ge_reset_recover = 1; 318 319 static int myri10ge_wcfifo = 0; 320 module_param(myri10ge_wcfifo, int, S_IRUGO); 321 - MODULE_PARM_DESC(myri10ge_wcfifo, "Enable WC Fifo when WC is enabled\n"); 322 323 #define MYRI10GE_FW_OFFSET 1024*1024 324 #define MYRI10GE_HIGHPART_TO_U32(X) \ ··· 375 for (sleep_total = 0; 376 sleep_total < 1000 377 && response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT); 378 - sleep_total += 10) 379 udelay(10); 380 } else { 381 /* use msleep for most command */ 382 for (sleep_total = 0; ··· 438 ptr += 1; 439 } 440 } 441 if (memcmp((const void *)ptr, "SN=", 3) == 0) { 442 ptr += 3; 443 mgp->serial_number = simple_strtoul(ptr, &ptr, 10); ··· 464 static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable) 465 { 466 char __iomem *submit; 467 - __be32 buf[16]; 468 u32 dma_low, dma_high; 469 int i; 470 ··· 631 return status; 632 } 633 634 static int myri10ge_load_firmware(struct myri10ge_priv *mgp) 635 { 636 char __iomem *submit; 637 - __be32 buf[16]; 638 u32 dma_low, dma_high, size; 639 int status, i; 640 - struct myri10ge_cmd cmd; 641 642 size = 0; 643 status = myri10ge_load_hotplug_firmware(mgp, &size); ··· 682 } 683 dev_info(&mgp->pdev->dev, 684 "Successfully adopted running firmware\n"); 685 - if (mgp->tx.boundary == 4096) { 686 dev_warn(&mgp->pdev->dev, 687 "Using firmware currently running on NIC" 688 ". 
For optimal\n"); ··· 693 } 694 695 mgp->fw_name = "adopted"; 696 - mgp->tx.boundary = 2048; 697 return status; 698 } 699 ··· 730 msleep(1); 731 mb(); 732 i = 0; 733 - while (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20) { 734 - msleep(1); 735 i++; 736 } 737 if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA) { 738 dev_err(&mgp->pdev->dev, "handoff failed\n"); 739 return -ENXIO; 740 } 741 - dev_info(&mgp->pdev->dev, "handoff confirmed\n"); 742 myri10ge_dummy_rdma(mgp, 1); 743 744 - /* probe for IPv6 TSO support */ 745 - mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO; 746 - status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE, 747 - &cmd, 0); 748 - if (status == 0) { 749 - mgp->max_tso6 = cmd.data0; 750 - mgp->features |= NETIF_F_TSO6; 751 - } 752 - return 0; 753 } 754 755 static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * addr) ··· 813 * transfers took to complete. 814 */ 815 816 - len = mgp->tx.boundary; 817 818 cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus); 819 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus); ··· 875 876 /* Now exchange information about interrupts */ 877 878 - bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry); 879 - memset(mgp->rx_done.entry, 0, bytes); 880 cmd.data0 = (u32) bytes; 881 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0); 882 - cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->rx_done.bus); 883 - cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->rx_done.bus); 884 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA, &cmd, 0); 885 886 status |= 887 myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0); 888 - mgp->irq_claim = (__iomem __be32 *) (mgp->sram + cmd.data0); 889 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, 890 &cmd, 0); 891 mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0); ··· 899 } 900 put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr); 901 902 - memset(mgp->rx_done.entry, 0, bytes); 903 904 /* reset mcp/driver shared state back to 0 */ 905 - mgp->tx.req = 0; 906 - mgp->tx.done = 0; 907 - mgp->tx.pkt_start = 0; 908 - mgp->tx.pkt_done = 0; 909 - mgp->rx_big.cnt = 0; 910 - mgp->rx_small.cnt = 0; 911 - mgp->rx_done.idx = 0; 912 - mgp->rx_done.cnt = 0; 913 mgp->link_changes = 0; 914 status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr); 915 myri10ge_change_pause(mgp, mgp->pause); ··· 1061 * page into an skb */ 1062 1063 static inline int 1064 - myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, 1065 int bytes, int len, __wsum csum) 1066 { 1067 struct sk_buff *skb; 1068 struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME]; 1069 int i, idx, hlen, remainder; ··· 1094 rx_frags[0].page_offset += MXGEFW_PAD; 1095 rx_frags[0].size -= MXGEFW_PAD; 1096 len -= MXGEFW_PAD; 1097 - lro_receive_frags(&mgp->rx_done.lro_mgr, rx_frags, 1098 len, len, 1099 - /* opaque, will come back in get_frag_header */ 1100 - (void *)(__force unsigned long)csum, 1101 - csum); 1102 return 1; 1103 } 1104 ··· 1137 return 1; 1138 } 1139 1140 - static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index) 1141 { 1142 - struct pci_dev *pdev = mgp->pdev; 1143 - struct myri10ge_tx_buf *tx = &mgp->tx; 1144 struct sk_buff *skb; 1145 int idx, len; 1146 ··· 1159 len = pci_unmap_len(&tx->info[idx], len); 1160 pci_unmap_len_set(&tx->info[idx], len, 0); 1161 if (skb) { 1162 - mgp->stats.tx_bytes += skb->len; 1163 - mgp->stats.tx_packets++; 1164 dev_kfree_skb_irq(skb); 1165 if (len) 1166 pci_unmap_single(pdev, ··· 
1176 } 1177 } 1178 /* start the queue if we've stopped it */ 1179 - if (netif_queue_stopped(mgp->dev) 1180 && tx->req - tx->done < (tx->mask >> 1)) { 1181 - mgp->wake_queue++; 1182 - netif_wake_queue(mgp->dev); 1183 } 1184 } 1185 1186 - static inline int myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int budget) 1187 { 1188 - struct myri10ge_rx_done *rx_done = &mgp->rx_done; 1189 unsigned long rx_bytes = 0; 1190 unsigned long rx_packets = 0; 1191 unsigned long rx_ok; ··· 1203 rx_done->entry[idx].length = 0; 1204 checksum = csum_unfold(rx_done->entry[idx].checksum); 1205 if (length <= mgp->small_bytes) 1206 - rx_ok = myri10ge_rx_done(mgp, &mgp->rx_small, 1207 mgp->small_bytes, 1208 length, checksum); 1209 else 1210 - rx_ok = myri10ge_rx_done(mgp, &mgp->rx_big, 1211 mgp->big_bytes, 1212 length, checksum); 1213 rx_packets += rx_ok; 1214 rx_bytes += rx_ok * (unsigned long)length; 1215 cnt++; 1216 - idx = cnt & (myri10ge_max_intr_slots - 1); 1217 work_done++; 1218 } 1219 rx_done->idx = idx; 1220 rx_done->cnt = cnt; 1221 - mgp->stats.rx_packets += rx_packets; 1222 - mgp->stats.rx_bytes += rx_bytes; 1223 1224 if (myri10ge_lro) 1225 lro_flush_all(&rx_done->lro_mgr); 1226 1227 /* restock receive rings if needed */ 1228 - if (mgp->rx_small.fill_cnt - mgp->rx_small.cnt < myri10ge_fill_thresh) 1229 - myri10ge_alloc_rx_pages(mgp, &mgp->rx_small, 1230 mgp->small_bytes + MXGEFW_PAD, 0); 1231 - if (mgp->rx_big.fill_cnt - mgp->rx_big.cnt < myri10ge_fill_thresh) 1232 - myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 0); 1233 1234 return work_done; 1235 } 1236 1237 static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp) 1238 { 1239 - struct mcp_irq_data *stats = mgp->fw_stats; 1240 1241 if (unlikely(stats->stats_updated)) { 1242 unsigned link_up = ntohl(stats->link_up); ··· 1263 } 1264 } 1265 if (mgp->rdma_tags_available != 1266 - ntohl(mgp->fw_stats->rdma_tags_available)) { 1267 mgp->rdma_tags_available = 1268 - ntohl(mgp->fw_stats->rdma_tags_available); 1269 printk(KERN_WARNING "myri10ge: %s: RDMA timed out! 
" 1270 "%d tags left\n", mgp->dev->name, 1271 mgp->rdma_tags_available); ··· 1278 1279 static int myri10ge_poll(struct napi_struct *napi, int budget) 1280 { 1281 - struct myri10ge_priv *mgp = 1282 - container_of(napi, struct myri10ge_priv, napi); 1283 - struct net_device *netdev = mgp->dev; 1284 int work_done; 1285 1286 /* process as many rx events as NAPI will allow */ 1287 - work_done = myri10ge_clean_rx_done(mgp, budget); 1288 1289 if (work_done < budget) { 1290 netif_rx_complete(netdev, napi); 1291 - put_be32(htonl(3), mgp->irq_claim); 1292 } 1293 return work_done; 1294 } 1295 1296 static irqreturn_t myri10ge_intr(int irq, void *arg) 1297 { 1298 - struct myri10ge_priv *mgp = arg; 1299 - struct mcp_irq_data *stats = mgp->fw_stats; 1300 - struct myri10ge_tx_buf *tx = &mgp->tx; 1301 u32 send_done_count; 1302 int i; 1303 ··· 1309 /* low bit indicates receives are present, so schedule 1310 * napi poll handler */ 1311 if (stats->valid & 1) 1312 - netif_rx_schedule(mgp->dev, &mgp->napi); 1313 1314 if (!mgp->msi_enabled) { 1315 put_be32(0, mgp->irq_deassert); ··· 1326 /* check for transmit completes and receives */ 1327 send_done_count = ntohl(stats->send_done_count); 1328 if (send_done_count != tx->pkt_done) 1329 - myri10ge_tx_done(mgp, (int)send_done_count); 1330 if (unlikely(i > myri10ge_max_irq_loops)) { 1331 printk(KERN_WARNING "myri10ge: %s: irq stuck?\n", 1332 mgp->dev->name); ··· 1341 1342 myri10ge_check_statblock(mgp); 1343 1344 - put_be32(htonl(3), mgp->irq_claim + 1); 1345 return (IRQ_HANDLED); 1346 } 1347 1348 static int 1349 myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) 1350 { 1351 cmd->autoneg = AUTONEG_DISABLE; 1352 cmd->speed = SPEED_10000; 1353 cmd->duplex = DUPLEX_FULL; 1354 return 0; 1355 } 1356 ··· 1399 myri10ge_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal) 1400 { 1401 struct myri10ge_priv *mgp = netdev_priv(netdev); 1402 coal->rx_coalesce_usecs = mgp->intr_coal_delay; 1403 return 0; 1404 } ··· 1446 { 1447 struct myri10ge_priv *mgp = netdev_priv(netdev); 1448 1449 - ring->rx_mini_max_pending = mgp->rx_small.mask + 1; 1450 - ring->rx_max_pending = mgp->rx_big.mask + 1; 1451 ring->rx_jumbo_max_pending = 0; 1452 - ring->tx_max_pending = mgp->rx_small.mask + 1; 1453 ring->rx_mini_pending = ring->rx_mini_max_pending; 1454 ring->rx_pending = ring->rx_max_pending; 1455 ring->rx_jumbo_pending = ring->rx_jumbo_max_pending; ··· 1459 static u32 myri10ge_get_rx_csum(struct net_device *netdev) 1460 { 1461 struct myri10ge_priv *mgp = netdev_priv(netdev); 1462 if (mgp->csum_flag) 1463 return 1; 1464 else ··· 1469 static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled) 1470 { 1471 struct myri10ge_priv *mgp = netdev_priv(netdev); 1472 if (csum_enabled) 1473 mgp->csum_flag = MXGEFW_FLAGS_CKSUM; 1474 else ··· 1489 return 0; 1490 } 1491 1492 - static const char myri10ge_gstrings_stats[][ETH_GSTRING_LEN] = { 1493 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", 1494 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", 1495 "rx_length_errors", "rx_over_errors", "rx_crc_errors", ··· 1499 /* device-specific stats */ 1500 "tx_boundary", "WC", "irq", "MSI", 1501 "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs", 1502 - "serial_number", "tx_pkt_start", "tx_pkt_done", 1503 - "tx_req", "tx_done", "rx_small_cnt", "rx_big_cnt", 1504 - "wake_queue", "stop_queue", "watchdog_resets", "tx_linearized", 1505 "link_changes", "link_up", "dropped_link_overflow", 1506 
"dropped_link_error_or_filtered", 1507 "dropped_pause", "dropped_bad_phy", "dropped_bad_crc32", 1508 "dropped_unicast_filtered", "dropped_multicast_filtered", 1509 "dropped_runt", "dropped_overrun", "dropped_no_small_buffer", 1510 - "dropped_no_big_buffer", "LRO aggregated", "LRO flushed", 1511 "LRO avg aggr", "LRO no_desc" 1512 }; 1513 1514 #define MYRI10GE_NET_STATS_LEN 21 1515 - #define MYRI10GE_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_stats) 1516 1517 static void 1518 myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data) 1519 { 1520 switch (stringset) { 1521 case ETH_SS_STATS: 1522 - memcpy(data, *myri10ge_gstrings_stats, 1523 - sizeof(myri10ge_gstrings_stats)); 1524 break; 1525 } 1526 } ··· 1540 { 1541 switch (sset) { 1542 case ETH_SS_STATS: 1543 - return MYRI10GE_STATS_LEN; 1544 default: 1545 return -EOPNOTSUPP; 1546 } ··· 1551 struct ethtool_stats *stats, u64 * data) 1552 { 1553 struct myri10ge_priv *mgp = netdev_priv(netdev); 1554 int i; 1555 1556 for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++) 1557 data[i] = ((unsigned long *)&mgp->stats)[i]; 1558 1559 - data[i++] = (unsigned int)mgp->tx.boundary; 1560 data[i++] = (unsigned int)mgp->wc_enabled; 1561 data[i++] = (unsigned int)mgp->pdev->irq; 1562 data[i++] = (unsigned int)mgp->msi_enabled; ··· 1565 data[i++] = (unsigned int)mgp->write_dma; 1566 data[i++] = (unsigned int)mgp->read_write_dma; 1567 data[i++] = (unsigned int)mgp->serial_number; 1568 - data[i++] = (unsigned int)mgp->tx.pkt_start; 1569 - data[i++] = (unsigned int)mgp->tx.pkt_done; 1570 - data[i++] = (unsigned int)mgp->tx.req; 1571 - data[i++] = (unsigned int)mgp->tx.done; 1572 - data[i++] = (unsigned int)mgp->rx_small.cnt; 1573 - data[i++] = (unsigned int)mgp->rx_big.cnt; 1574 - data[i++] = (unsigned int)mgp->wake_queue; 1575 - data[i++] = (unsigned int)mgp->stop_queue; 1576 data[i++] = (unsigned int)mgp->watchdog_resets; 1577 - data[i++] = (unsigned int)mgp->tx_linearized; 1578 data[i++] = (unsigned int)mgp->link_changes; 1579 - data[i++] = (unsigned int)ntohl(mgp->fw_stats->link_up); 1580 - data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_link_overflow); 1581 data[i++] = 1582 - (unsigned int)ntohl(mgp->fw_stats->dropped_link_error_or_filtered); 1583 - data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_pause); 1584 - data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_bad_phy); 1585 - data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_bad_crc32); 1586 data[i++] = 1587 - (unsigned int)ntohl(mgp->fw_stats->dropped_unicast_filtered); 1588 - data[i++] = 1589 - (unsigned int)ntohl(mgp->fw_stats->dropped_multicast_filtered); 1590 - data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_runt); 1591 - data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_overrun); 1592 - data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_small_buffer); 1593 - data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_big_buffer); 1594 - data[i++] = mgp->rx_done.lro_mgr.stats.aggregated; 1595 - data[i++] = mgp->rx_done.lro_mgr.stats.flushed; 1596 - if (mgp->rx_done.lro_mgr.stats.flushed) 1597 - data[i++] = mgp->rx_done.lro_mgr.stats.aggregated / 1598 - mgp->rx_done.lro_mgr.stats.flushed; 1599 else 1600 data[i++] = 0; 1601 - data[i++] = mgp->rx_done.lro_mgr.stats.no_desc; 1602 } 1603 1604 static void myri10ge_set_msglevel(struct net_device *netdev, u32 value) ··· 1638 .get_msglevel = myri10ge_get_msglevel 1639 }; 1640 1641 - static int myri10ge_allocate_rings(struct net_device *dev) 1642 { 1643 - struct myri10ge_priv *mgp; 1644 struct myri10ge_cmd cmd; 1645 
int tx_ring_size, rx_ring_size; 1646 int tx_ring_entries, rx_ring_entries; 1647 int i, status; 1648 size_t bytes; 1649 1650 - mgp = netdev_priv(dev); 1651 - 1652 /* get ring sizes */ 1653 - 1654 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0); 1655 tx_ring_size = cmd.data0; 1656 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0); ··· 1658 1659 tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send); 1660 rx_ring_entries = rx_ring_size / sizeof(struct mcp_dma_addr); 1661 - mgp->tx.mask = tx_ring_entries - 1; 1662 - mgp->rx_small.mask = mgp->rx_big.mask = rx_ring_entries - 1; 1663 1664 status = -ENOMEM; 1665 1666 /* allocate the host shadow rings */ 1667 1668 bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4) 1669 - * sizeof(*mgp->tx.req_list); 1670 - mgp->tx.req_bytes = kzalloc(bytes, GFP_KERNEL); 1671 - if (mgp->tx.req_bytes == NULL) 1672 goto abort_with_nothing; 1673 1674 /* ensure req_list entries are aligned to 8 bytes */ 1675 - mgp->tx.req_list = (struct mcp_kreq_ether_send *) 1676 - ALIGN((unsigned long)mgp->tx.req_bytes, 8); 1677 1678 - bytes = rx_ring_entries * sizeof(*mgp->rx_small.shadow); 1679 - mgp->rx_small.shadow = kzalloc(bytes, GFP_KERNEL); 1680 - if (mgp->rx_small.shadow == NULL) 1681 goto abort_with_tx_req_bytes; 1682 1683 - bytes = rx_ring_entries * sizeof(*mgp->rx_big.shadow); 1684 - mgp->rx_big.shadow = kzalloc(bytes, GFP_KERNEL); 1685 - if (mgp->rx_big.shadow == NULL) 1686 goto abort_with_rx_small_shadow; 1687 1688 /* allocate the host info rings */ 1689 1690 - bytes = tx_ring_entries * sizeof(*mgp->tx.info); 1691 - mgp->tx.info = kzalloc(bytes, GFP_KERNEL); 1692 - if (mgp->tx.info == NULL) 1693 goto abort_with_rx_big_shadow; 1694 1695 - bytes = rx_ring_entries * sizeof(*mgp->rx_small.info); 1696 - mgp->rx_small.info = kzalloc(bytes, GFP_KERNEL); 1697 - if (mgp->rx_small.info == NULL) 1698 goto abort_with_tx_info; 1699 1700 - bytes = rx_ring_entries * sizeof(*mgp->rx_big.info); 1701 - mgp->rx_big.info = kzalloc(bytes, GFP_KERNEL); 1702 - if (mgp->rx_big.info == NULL) 1703 goto abort_with_rx_small_info; 1704 1705 /* Fill the receive rings */ 1706 - mgp->rx_big.cnt = 0; 1707 - mgp->rx_small.cnt = 0; 1708 - mgp->rx_big.fill_cnt = 0; 1709 - mgp->rx_small.fill_cnt = 0; 1710 - mgp->rx_small.page_offset = MYRI10GE_ALLOC_SIZE; 1711 - mgp->rx_big.page_offset = MYRI10GE_ALLOC_SIZE; 1712 - mgp->rx_small.watchdog_needed = 0; 1713 - mgp->rx_big.watchdog_needed = 0; 1714 - myri10ge_alloc_rx_pages(mgp, &mgp->rx_small, 1715 mgp->small_bytes + MXGEFW_PAD, 0); 1716 1717 - if (mgp->rx_small.fill_cnt < mgp->rx_small.mask + 1) { 1718 printk(KERN_ERR "myri10ge: %s: alloced only %d small bufs\n", 1719 - dev->name, mgp->rx_small.fill_cnt); 1720 goto abort_with_rx_small_ring; 1721 } 1722 1723 - myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 0); 1724 - if (mgp->rx_big.fill_cnt < mgp->rx_big.mask + 1) { 1725 printk(KERN_ERR "myri10ge: %s: alloced only %d big bufs\n", 1726 - dev->name, mgp->rx_big.fill_cnt); 1727 goto abort_with_rx_big_ring; 1728 } 1729 1730 return 0; 1731 1732 abort_with_rx_big_ring: 1733 - for (i = mgp->rx_big.cnt; i < mgp->rx_big.fill_cnt; i++) { 1734 - int idx = i & mgp->rx_big.mask; 1735 - myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_big.info[idx], 1736 mgp->big_bytes); 1737 - put_page(mgp->rx_big.info[idx].page); 1738 } 1739 1740 abort_with_rx_small_ring: 1741 - for (i = mgp->rx_small.cnt; i < mgp->rx_small.fill_cnt; i++) { 1742 - int idx = i & mgp->rx_small.mask; 1743 - myri10ge_unmap_rx_page(mgp->pdev, 
&mgp->rx_small.info[idx], 1744 mgp->small_bytes + MXGEFW_PAD); 1745 - put_page(mgp->rx_small.info[idx].page); 1746 } 1747 1748 - kfree(mgp->rx_big.info); 1749 1750 abort_with_rx_small_info: 1751 - kfree(mgp->rx_small.info); 1752 1753 abort_with_tx_info: 1754 - kfree(mgp->tx.info); 1755 1756 abort_with_rx_big_shadow: 1757 - kfree(mgp->rx_big.shadow); 1758 1759 abort_with_rx_small_shadow: 1760 - kfree(mgp->rx_small.shadow); 1761 1762 abort_with_tx_req_bytes: 1763 - kfree(mgp->tx.req_bytes); 1764 - mgp->tx.req_bytes = NULL; 1765 - mgp->tx.req_list = NULL; 1766 1767 abort_with_nothing: 1768 return status; 1769 } 1770 1771 - static void myri10ge_free_rings(struct net_device *dev) 1772 { 1773 - struct myri10ge_priv *mgp; 1774 struct sk_buff *skb; 1775 struct myri10ge_tx_buf *tx; 1776 int i, len, idx; 1777 1778 - mgp = netdev_priv(dev); 1779 - 1780 - for (i = mgp->rx_big.cnt; i < mgp->rx_big.fill_cnt; i++) { 1781 - idx = i & mgp->rx_big.mask; 1782 - if (i == mgp->rx_big.fill_cnt - 1) 1783 - mgp->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE; 1784 - myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_big.info[idx], 1785 mgp->big_bytes); 1786 - put_page(mgp->rx_big.info[idx].page); 1787 } 1788 1789 - for (i = mgp->rx_small.cnt; i < mgp->rx_small.fill_cnt; i++) { 1790 - idx = i & mgp->rx_small.mask; 1791 - if (i == mgp->rx_small.fill_cnt - 1) 1792 - mgp->rx_small.info[idx].page_offset = 1793 MYRI10GE_ALLOC_SIZE; 1794 - myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_small.info[idx], 1795 mgp->small_bytes + MXGEFW_PAD); 1796 - put_page(mgp->rx_small.info[idx].page); 1797 } 1798 - tx = &mgp->tx; 1799 while (tx->done != tx->req) { 1800 idx = tx->done & tx->mask; 1801 skb = tx->info[idx].skb; ··· 1804 len = pci_unmap_len(&tx->info[idx], len); 1805 pci_unmap_len_set(&tx->info[idx], len, 0); 1806 if (skb) { 1807 - mgp->stats.tx_dropped++; 1808 dev_kfree_skb_any(skb); 1809 if (len) 1810 pci_unmap_single(mgp->pdev, ··· 1819 PCI_DMA_TODEVICE); 1820 } 1821 } 1822 - kfree(mgp->rx_big.info); 1823 1824 - kfree(mgp->rx_small.info); 1825 1826 - kfree(mgp->tx.info); 1827 1828 - kfree(mgp->rx_big.shadow); 1829 1830 - kfree(mgp->rx_small.shadow); 1831 1832 - kfree(mgp->tx.req_bytes); 1833 - mgp->tx.req_bytes = NULL; 1834 - mgp->tx.req_list = NULL; 1835 } 1836 1837 static int myri10ge_request_irq(struct myri10ge_priv *mgp) ··· 1930 1931 static int myri10ge_open(struct net_device *dev) 1932 { 1933 - struct myri10ge_priv *mgp; 1934 struct myri10ge_cmd cmd; 1935 struct net_lro_mgr *lro_mgr; 1936 int status, big_pow2; 1937 - 1938 - mgp = netdev_priv(dev); 1939 1940 if (mgp->running != MYRI10GE_ETH_STOPPED) 1941 return -EBUSY; ··· 1971 /* get the lanai pointers to the send and receive rings */ 1972 1973 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET, &cmd, 0); 1974 - mgp->tx.lanai = 1975 (struct mcp_kreq_ether_send __iomem *)(mgp->sram + cmd.data0); 1976 1977 status |= 1978 myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET, &cmd, 0); 1979 - mgp->rx_small.lanai = 1980 (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0); 1981 1982 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0); 1983 - mgp->rx_big.lanai = 1984 (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0); 1985 1986 if (status != 0) { ··· 1992 } 1993 1994 if (myri10ge_wcfifo && mgp->wc_enabled) { 1995 - mgp->tx.wc_fifo = (u8 __iomem *) mgp->sram + MXGEFW_ETH_SEND_4; 1996 - mgp->rx_small.wc_fifo = 1997 (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_SMALL; 1998 - mgp->rx_big.wc_fifo = 1999 (u8 __iomem *) mgp->sram + 
MXGEFW_ETH_RECV_BIG; 2000 } else { 2001 - mgp->tx.wc_fifo = NULL; 2002 - mgp->rx_small.wc_fifo = NULL; 2003 - mgp->rx_big.wc_fifo = NULL; 2004 } 2005 2006 /* Firmware needs the big buff size as a power of 2. Lie and ··· 2017 mgp->big_bytes = big_pow2; 2018 } 2019 2020 - status = myri10ge_allocate_rings(dev); 2021 if (status != 0) 2022 goto abort_with_irq; 2023 ··· 2036 goto abort_with_rings; 2037 } 2038 2039 - cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->fw_stats_bus); 2040 - cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->fw_stats_bus); 2041 cmd.data2 = sizeof(struct mcp_irq_data); 2042 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0); 2043 if (status == -ENOSYS) { 2044 - dma_addr_t bus = mgp->fw_stats_bus; 2045 bus += offsetof(struct mcp_irq_data, send_done_count); 2046 cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus); 2047 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus); ··· 2062 mgp->link_state = ~0U; 2063 mgp->rdma_tags_available = 15; 2064 2065 - lro_mgr = &mgp->rx_done.lro_mgr; 2066 lro_mgr->dev = dev; 2067 lro_mgr->features = LRO_F_NAPI; 2068 lro_mgr->ip_summed = CHECKSUM_COMPLETE; 2069 lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY; 2070 lro_mgr->max_desc = MYRI10GE_MAX_LRO_DESCRIPTORS; 2071 - lro_mgr->lro_arr = mgp->rx_done.lro_desc; 2072 lro_mgr->get_frag_header = myri10ge_get_frag_header; 2073 lro_mgr->max_aggr = myri10ge_lro_max_pkts; 2074 lro_mgr->frag_align_pad = 2; 2075 if (lro_mgr->max_aggr > MAX_SKB_FRAGS) 2076 lro_mgr->max_aggr = MAX_SKB_FRAGS; 2077 2078 - napi_enable(&mgp->napi); /* must happen prior to any irq */ 2079 2080 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0); 2081 if (status) { ··· 2084 goto abort_with_rings; 2085 } 2086 2087 - mgp->wake_queue = 0; 2088 - mgp->stop_queue = 0; 2089 mgp->running = MYRI10GE_ETH_RUNNING; 2090 mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ; 2091 add_timer(&mgp->watchdog_timer); ··· 2093 return 0; 2094 2095 abort_with_rings: 2096 - myri10ge_free_rings(dev); 2097 2098 abort_with_irq: 2099 myri10ge_free_irq(mgp); ··· 2105 2106 static int myri10ge_close(struct net_device *dev) 2107 { 2108 - struct myri10ge_priv *mgp; 2109 struct myri10ge_cmd cmd; 2110 int status, old_down_cnt; 2111 - 2112 - mgp = netdev_priv(dev); 2113 2114 if (mgp->running != MYRI10GE_ETH_RUNNING) 2115 return 0; 2116 2117 - if (mgp->tx.req_bytes == NULL) 2118 return 0; 2119 2120 del_timer_sync(&mgp->watchdog_timer); 2121 mgp->running = MYRI10GE_ETH_STOPPING; 2122 - napi_disable(&mgp->napi); 2123 netif_carrier_off(dev); 2124 netif_stop_queue(dev); 2125 old_down_cnt = mgp->down_cnt; ··· 2133 2134 netif_tx_disable(dev); 2135 myri10ge_free_irq(mgp); 2136 - myri10ge_free_rings(dev); 2137 2138 mgp->running = MYRI10GE_ETH_STOPPED; 2139 return 0; ··· 2229 2230 /* 2231 * Transmit a packet. We need to split the packet so that a single 2232 - * segment does not cross myri10ge->tx.boundary, so this makes segment 2233 * counting tricky. So rather than try to count segments up front, we 2234 * just give up if there are too few segments to hold a reasonably 2235 * fragmented packet currently available. 
If we run ··· 2240 static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev) 2241 { 2242 struct myri10ge_priv *mgp = netdev_priv(dev); 2243 struct mcp_kreq_ether_send *req; 2244 - struct myri10ge_tx_buf *tx = &mgp->tx; 2245 struct skb_frag_struct *frag; 2246 dma_addr_t bus; 2247 u32 low; ··· 2253 int cum_len, seglen, boundary, rdma_count; 2254 u8 flags, odd_flag; 2255 2256 again: 2257 req = tx->req_list; 2258 avail = tx->mask - 1 - (tx->req - tx->done); ··· 2270 2271 if ((unlikely(avail < max_segments))) { 2272 /* we are out of transmit resources */ 2273 - mgp->stop_queue++; 2274 netif_stop_queue(dev); 2275 return 1; 2276 } ··· 2332 if (skb_padto(skb, ETH_ZLEN)) { 2333 /* The packet is gone, so we must 2334 * return 0 */ 2335 - mgp->stats.tx_dropped += 1; 2336 return 0; 2337 } 2338 /* adjust the len to account for the zero pad ··· 2374 2375 while (1) { 2376 /* Break the SKB or Fragment up into pieces which 2377 - * do not cross mgp->tx.boundary */ 2378 low = MYRI10GE_LOWPART_TO_U32(bus); 2379 high_swapped = htonl(MYRI10GE_HIGHPART_TO_U32(bus)); 2380 while (len) { ··· 2384 if (unlikely(count == max_segments)) 2385 goto abort_linearize; 2386 2387 - boundary = (low + tx->boundary) & ~(tx->boundary - 1); 2388 seglen = boundary - low; 2389 if (seglen > len) 2390 seglen = len; ··· 2469 myri10ge_submit_req_wc(tx, tx->req_list, count); 2470 tx->pkt_start++; 2471 if ((avail - count) < MXGEFW_MAX_SEND_DESC) { 2472 - mgp->stop_queue++; 2473 netif_stop_queue(dev); 2474 } 2475 dev->trans_start = jiffies; ··· 2511 if (skb_linearize(skb)) 2512 goto drop; 2513 2514 - mgp->tx_linearized++; 2515 goto again; 2516 2517 drop: 2518 dev_kfree_skb_any(skb); 2519 - mgp->stats.tx_dropped += 1; 2520 return 0; 2521 2522 } ··· 2524 static int myri10ge_sw_tso(struct sk_buff *skb, struct net_device *dev) 2525 { 2526 struct sk_buff *segs, *curr; 2527 - struct myri10ge_priv *mgp = dev->priv; 2528 int status; 2529 2530 segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6); ··· 2564 2565 static void myri10ge_set_multicast_list(struct net_device *dev) 2566 { 2567 struct myri10ge_cmd cmd; 2568 - struct myri10ge_priv *mgp; 2569 struct dev_mc_list *mc_list; 2570 __be32 data[2] = { 0, 0 }; 2571 int err; 2572 DECLARE_MAC_BUF(mac); 2573 2574 - mgp = netdev_priv(dev); 2575 /* can be called from atomic contexts, 2576 * pass 1 to force atomicity in myri10ge_send_cmd() */ 2577 myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1); ··· 2706 ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4; 2707 if (ext_type != PCI_EXP_TYPE_ROOT_PORT) { 2708 if (myri10ge_ecrc_enable > 1) { 2709 - struct pci_dev *old_bridge = bridge; 2710 2711 /* Walk the hierarchy up to the root port 2712 * where ECRC has to be enabled */ 2713 do { 2714 bridge = bridge->bus->self; 2715 - if (!bridge) { 2716 dev_err(dev, 2717 "Failed to find root port" 2718 " to force ECRC\n"); ··· 2772 * already been enabled, then it must use a firmware image which works 2773 * around unaligned completion packets (myri10ge_ethp_z8e.dat), and it 2774 * should also ensure that it never gives the device a Read-DMA which is 2775 - * larger than 2KB by setting the tx.boundary to 2KB. If ECRC is 2776 * enabled, then the driver should use the aligned (myri10ge_eth_z8e.dat) 2777 - * firmware image, and set tx.boundary to 4KB. 
2778 */ 2779 2780 static void myri10ge_firmware_probe(struct myri10ge_priv *mgp) ··· 2783 struct device *dev = &pdev->dev; 2784 int status; 2785 2786 - mgp->tx.boundary = 4096; 2787 /* 2788 * Verify the max read request size was set to 4KB 2789 * before trying the test with 4KB. ··· 2795 } 2796 if (status != 4096) { 2797 dev_warn(dev, "Max Read Request size != 4096 (%d)\n", status); 2798 - mgp->tx.boundary = 2048; 2799 } 2800 /* 2801 * load the optimized firmware (which assumes aligned PCIe ··· 2828 "Please install up to date fw\n"); 2829 abort: 2830 /* fall back to using the unaligned firmware */ 2831 - mgp->tx.boundary = 2048; 2832 mgp->fw_name = myri10ge_fw_unaligned; 2833 2834 } ··· 2849 if (link_width < 8) { 2850 dev_info(&mgp->pdev->dev, "PCIE x%d Link\n", 2851 link_width); 2852 - mgp->tx.boundary = 4096; 2853 mgp->fw_name = myri10ge_fw_aligned; 2854 } else { 2855 myri10ge_firmware_probe(mgp); ··· 2858 if (myri10ge_force_firmware == 1) { 2859 dev_info(&mgp->pdev->dev, 2860 "Assuming aligned completions (forced)\n"); 2861 - mgp->tx.boundary = 4096; 2862 mgp->fw_name = myri10ge_fw_aligned; 2863 } else { 2864 dev_info(&mgp->pdev->dev, 2865 "Assuming unaligned completions (forced)\n"); 2866 - mgp->tx.boundary = 2048; 2867 mgp->fw_name = myri10ge_fw_unaligned; 2868 } 2869 } ··· 2980 { 2981 struct myri10ge_priv *mgp = 2982 container_of(work, struct myri10ge_priv, watchdog_work); 2983 u32 reboot; 2984 int status; 2985 u16 cmd, vendor; ··· 3030 3031 printk(KERN_ERR "myri10ge: %s: device timeout, resetting\n", 3032 mgp->dev->name); 3033 printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n", 3034 - mgp->dev->name, mgp->tx.req, mgp->tx.done, 3035 - mgp->tx.pkt_start, mgp->tx.pkt_done, 3036 - (int)ntohl(mgp->fw_stats->send_done_count)); 3037 msleep(2000); 3038 printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n", 3039 - mgp->dev->name, mgp->tx.req, mgp->tx.done, 3040 - mgp->tx.pkt_start, mgp->tx.pkt_done, 3041 - (int)ntohl(mgp->fw_stats->send_done_count)); 3042 } 3043 rtnl_lock(); 3044 myri10ge_close(mgp->dev); ··· 3062 static void myri10ge_watchdog_timer(unsigned long arg) 3063 { 3064 struct myri10ge_priv *mgp; 3065 u32 rx_pause_cnt; 3066 3067 mgp = (struct myri10ge_priv *)arg; 3068 3069 - if (mgp->rx_small.watchdog_needed) { 3070 - myri10ge_alloc_rx_pages(mgp, &mgp->rx_small, 3071 - mgp->small_bytes + MXGEFW_PAD, 1); 3072 - if (mgp->rx_small.fill_cnt - mgp->rx_small.cnt >= 3073 - myri10ge_fill_thresh) 3074 - mgp->rx_small.watchdog_needed = 0; 3075 - } 3076 - if (mgp->rx_big.watchdog_needed) { 3077 - myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 1); 3078 - if (mgp->rx_big.fill_cnt - mgp->rx_big.cnt >= 3079 - myri10ge_fill_thresh) 3080 - mgp->rx_big.watchdog_needed = 0; 3081 - } 3082 - rx_pause_cnt = ntohl(mgp->fw_stats->dropped_pause); 3083 3084 - if (mgp->tx.req != mgp->tx.done && 3085 - mgp->tx.done == mgp->watchdog_tx_done && 3086 - mgp->watchdog_tx_req != mgp->watchdog_tx_done) { 3087 /* nic seems like it might be stuck.. 
*/ 3088 if (rx_pause_cnt != mgp->watchdog_pause) { 3089 if (net_ratelimit()) ··· 3101 /* rearm timer */ 3102 mod_timer(&mgp->watchdog_timer, 3103 jiffies + myri10ge_watchdog_timeout * HZ); 3104 - mgp->watchdog_tx_done = mgp->tx.done; 3105 - mgp->watchdog_tx_req = mgp->tx.req; 3106 mgp->watchdog_pause = rx_pause_cnt; 3107 } 3108 ··· 3126 3127 mgp = netdev_priv(netdev); 3128 mgp->dev = netdev; 3129 - netif_napi_add(netdev, &mgp->napi, myri10ge_poll, myri10ge_napi_weight); 3130 mgp->pdev = pdev; 3131 mgp->csum_flag = MXGEFW_FLAGS_CKSUM; 3132 mgp->pause = myri10ge_flow_control; ··· 3172 if (mgp->cmd == NULL) 3173 goto abort_with_netdev; 3174 3175 - mgp->fw_stats = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->fw_stats), 3176 - &mgp->fw_stats_bus, GFP_KERNEL); 3177 - if (mgp->fw_stats == NULL) 3178 goto abort_with_cmd; 3179 3180 mgp->board_span = pci_resource_len(pdev, 0); ··· 3214 netdev->dev_addr[i] = mgp->mac_addr[i]; 3215 3216 /* allocate rx done ring */ 3217 - bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry); 3218 - mgp->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes, 3219 - &mgp->rx_done.bus, GFP_KERNEL); 3220 - if (mgp->rx_done.entry == NULL) 3221 goto abort_with_ioremap; 3222 - memset(mgp->rx_done.entry, 0, bytes); 3223 3224 myri10ge_select_firmware(mgp); 3225 ··· 3279 } 3280 dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n", 3281 (mgp->msi_enabled ? "MSI" : "xPIC"), 3282 - netdev->irq, mgp->tx.boundary, mgp->fw_name, 3283 (mgp->wc_enabled ? "Enabled" : "Disabled")); 3284 3285 return 0; ··· 3291 myri10ge_dummy_rdma(mgp, 0); 3292 3293 abort_with_rx_done: 3294 - bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry); 3295 dma_free_coherent(&pdev->dev, bytes, 3296 - mgp->rx_done.entry, mgp->rx_done.bus); 3297 3298 abort_with_ioremap: 3299 iounmap(mgp->sram); ··· 3303 if (mgp->mtrr >= 0) 3304 mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); 3305 #endif 3306 - dma_free_coherent(&pdev->dev, sizeof(*mgp->fw_stats), 3307 - mgp->fw_stats, mgp->fw_stats_bus); 3308 3309 abort_with_cmd: 3310 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), ··· 3342 /* avoid a memory leak */ 3343 pci_restore_state(pdev); 3344 3345 - bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry); 3346 dma_free_coherent(&pdev->dev, bytes, 3347 - mgp->rx_done.entry, mgp->rx_done.bus); 3348 3349 iounmap(mgp->sram); 3350 ··· 3352 if (mgp->mtrr >= 0) 3353 mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); 3354 #endif 3355 - dma_free_coherent(&pdev->dev, sizeof(*mgp->fw_stats), 3356 - mgp->fw_stats, mgp->fw_stats_bus); 3357 3358 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), 3359 mgp->cmd, mgp->cmd_bus);
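A standalone sketch, not taken from the kernel tree, of the splitting rule that the myri10ge_xmit() comment above describes: no DMA segment may cross the tx boundary (2KB or 4KB depending on which firmware image is in use), so each fragment is cut at the next boundary above its current bus address. The addresses and sizes below are made up for illustration.

	#include <stdio.h>

	int main(void)
	{
		unsigned int boundary_size = 2048;	/* tx boundary, always a power of two */
		unsigned int low = 0x12345F00;		/* low 32 bits of the fragment's bus address */
		unsigned int len = 1000;		/* bytes left in this fragment */

		while (len) {
			/* next power-of-two boundary strictly above "low" */
			unsigned int boundary = (low + boundary_size) & ~(boundary_size - 1);
			unsigned int seglen = boundary - low;

			if (seglen > len)
				seglen = len;
			printf("segment at 0x%08x, %u bytes\n", low, seglen);
			low += seglen;
			len -= seglen;
		}
		return 0;
	}

With the sample values this emits a 256-byte segment up to the 2KB boundary and then a 744-byte segment, which is the same cut the driver's boundary/seglen computation makes.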
··· 144 char *req_bytes; 145 struct myri10ge_tx_buffer_state *info; 146 int mask; /* number of transmit slots -1 */ 147 int req ____cacheline_aligned; /* transmit slots submitted */ 148 int pkt_start; /* packets started */ 149 + int stop_queue; 150 + int linearized; 151 int done ____cacheline_aligned; /* transmit slots completed */ 152 int pkt_done; /* packets completed */ 153 + int wake_queue; 154 }; 155 156 struct myri10ge_rx_done { ··· 160 struct net_lro_desc lro_desc[MYRI10GE_MAX_LRO_DESCRIPTORS]; 161 }; 162 163 + struct myri10ge_slice_netstats { 164 + unsigned long rx_packets; 165 + unsigned long tx_packets; 166 + unsigned long rx_bytes; 167 + unsigned long tx_bytes; 168 + unsigned long rx_dropped; 169 + unsigned long tx_dropped; 170 + }; 171 + 172 + struct myri10ge_slice_state { 173 struct myri10ge_tx_buf tx; /* transmit ring */ 174 struct myri10ge_rx_buf rx_small; 175 struct myri10ge_rx_buf rx_big; 176 struct myri10ge_rx_done rx_done; 177 struct net_device *dev; 178 struct napi_struct napi; 179 + struct myri10ge_priv *mgp; 180 + struct myri10ge_slice_netstats stats; 181 + __be32 __iomem *irq_claim; 182 + struct mcp_irq_data *fw_stats; 183 + dma_addr_t fw_stats_bus; 184 + int watchdog_tx_done; 185 + int watchdog_tx_req; 186 + }; 187 + 188 + struct myri10ge_priv { 189 + struct myri10ge_slice_state ss; 190 + int tx_boundary; /* boundary transmits cannot cross */ 191 + int running; /* running? */ 192 + int csum_flag; /* rx_csums? */ 193 + int small_bytes; 194 + int big_bytes; 195 + int max_intr_slots; 196 + struct net_device *dev; 197 struct net_device_stats stats; 198 + spinlock_t stats_lock; 199 u8 __iomem *sram; 200 int sram_size; 201 unsigned long board_span; 202 unsigned long iomem_base; 203 __be32 __iomem *irq_deassert; 204 char *mac_addr_string; 205 struct mcp_cmd_response *cmd; 206 dma_addr_t cmd_bus; 207 struct pci_dev *pdev; 208 int msi_enabled; 209 u32 link_state; ··· 191 __be32 __iomem *intr_coal_delay_ptr; 192 int mtrr; 193 int wc_enabled; 194 int down_cnt; 195 wait_queue_head_t down_wq; 196 struct work_struct watchdog_work; 197 struct timer_list watchdog_timer; 198 int watchdog_resets; 199 + int watchdog_pause; 200 int pause; 201 char *fw_name; 202 char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE]; 203 + char *product_code_string; 204 char fw_version[128]; 205 int fw_ver_major; 206 int fw_ver_minor; ··· 228 229 static char *myri10ge_fw_name = NULL; 230 module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR); 231 + MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name"); 232 233 static int myri10ge_ecrc_enable = 1; 234 module_param(myri10ge_ecrc_enable, int, S_IRUGO); 235 + MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E"); 236 237 static int myri10ge_small_bytes = -1; /* -1 == auto */ 238 module_param(myri10ge_small_bytes, int, S_IRUGO | S_IWUSR); 239 + MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets"); 240 241 static int myri10ge_msi = 1; /* enable msi by default */ 242 module_param(myri10ge_msi, int, S_IRUGO | S_IWUSR); 243 + MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts"); 244 245 static int myri10ge_intr_coal_delay = 75; 246 module_param(myri10ge_intr_coal_delay, int, S_IRUGO); 247 + MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay"); 248 249 static int myri10ge_flow_control = 1; 250 module_param(myri10ge_flow_control, int, S_IRUGO); 251 + MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter"); 252 253 static int myri10ge_deassert_wait = 1; 254 
module_param(myri10ge_deassert_wait, int, S_IRUGO | S_IWUSR); 255 MODULE_PARM_DESC(myri10ge_deassert_wait, 256 + "Wait when deasserting legacy interrupts"); 257 258 static int myri10ge_force_firmware = 0; 259 module_param(myri10ge_force_firmware, int, S_IRUGO); 260 MODULE_PARM_DESC(myri10ge_force_firmware, 261 + "Force firmware to assume aligned completions"); 262 263 static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN; 264 module_param(myri10ge_initial_mtu, int, S_IRUGO); 265 + MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU"); 266 267 static int myri10ge_napi_weight = 64; 268 module_param(myri10ge_napi_weight, int, S_IRUGO); 269 + MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight"); 270 271 static int myri10ge_watchdog_timeout = 1; 272 module_param(myri10ge_watchdog_timeout, int, S_IRUGO); 273 + MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout"); 274 275 static int myri10ge_max_irq_loops = 1048576; 276 module_param(myri10ge_max_irq_loops, int, S_IRUGO); 277 MODULE_PARM_DESC(myri10ge_max_irq_loops, 278 + "Set stuck legacy IRQ detection threshold"); 279 280 #define MYRI10GE_MSG_DEFAULT NETIF_MSG_LINK 281 ··· 289 290 static int myri10ge_lro = 1; 291 module_param(myri10ge_lro, int, S_IRUGO); 292 + MODULE_PARM_DESC(myri10ge_lro, "Enable large receive offload"); 293 294 static int myri10ge_lro_max_pkts = MYRI10GE_LRO_MAX_PKTS; 295 module_param(myri10ge_lro_max_pkts, int, S_IRUGO); 296 + MODULE_PARM_DESC(myri10ge_lro_max_pkts, 297 + "Number of LRO packets to be aggregated"); 298 299 static int myri10ge_fill_thresh = 256; 300 module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR); 301 + MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed"); 302 303 static int myri10ge_reset_recover = 1; 304 305 static int myri10ge_wcfifo = 0; 306 module_param(myri10ge_wcfifo, int, S_IRUGO); 307 + MODULE_PARM_DESC(myri10ge_wcfifo, "Enable WC Fifo when WC is enabled"); 308 309 #define MYRI10GE_FW_OFFSET 1024*1024 310 #define MYRI10GE_HIGHPART_TO_U32(X) \ ··· 359 for (sleep_total = 0; 360 sleep_total < 1000 361 && response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT); 362 + sleep_total += 10) { 363 udelay(10); 364 + mb(); 365 + } 366 } else { 367 /* use msleep for most command */ 368 for (sleep_total = 0; ··· 420 ptr += 1; 421 } 422 } 423 + if (memcmp(ptr, "PC=", 3) == 0) { 424 + ptr += 3; 425 + mgp->product_code_string = ptr; 426 + } 427 if (memcmp((const void *)ptr, "SN=", 3) == 0) { 428 ptr += 3; 429 mgp->serial_number = simple_strtoul(ptr, &ptr, 10); ··· 442 static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable) 443 { 444 char __iomem *submit; 445 + __be32 buf[16] __attribute__ ((__aligned__(8))); 446 u32 dma_low, dma_high; 447 int i; 448 ··· 609 return status; 610 } 611 612 + int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp) 613 + { 614 + struct myri10ge_cmd cmd; 615 + int status; 616 + 617 + /* probe for IPv6 TSO support */ 618 + mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO; 619 + status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE, 620 + &cmd, 0); 621 + if (status == 0) { 622 + mgp->max_tso6 = cmd.data0; 623 + mgp->features |= NETIF_F_TSO6; 624 + } 625 + 626 + status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0); 627 + if (status != 0) { 628 + dev_err(&mgp->pdev->dev, 629 + "failed MXGEFW_CMD_GET_RX_RING_SIZE\n"); 630 + return -ENXIO; 631 + } 632 + 633 + mgp->max_intr_slots = 2 * (cmd.data0 / sizeof(struct mcp_dma_addr)); 634 + 635 + return 0; 636 + } 637 + 638 
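A minimal sketch, not kernel code, of how the product code captured via "PC=" above is interpreted by the ethtool get_settings change later in this diff: the character after the third '-' in the EEPROM product code selects the media type, and 'R' or 'Q' (quad ribbon fiber or XFP) is reported as PORT_FIBRE. The sample code string is hypothetical.

	#include <stdio.h>
	#include <string.h>

	static char classify_port(const char *product_code)
	{
		const char *ptr = product_code;
		int i;

		/* skip past the first three dashes, as the driver does */
		for (i = 0; i < 3; i++, ptr++) {
			ptr = strchr(ptr, '-');
			if (ptr == NULL)
				return '?';	/* malformed product code */
		}
		return *ptr;	/* e.g. 'R' or 'Q' means a fibre media type */
	}

	int main(void)
	{
		printf("%c\n", classify_port("10G-PCIE-8A-R"));	/* prints R */
		return 0;
	}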
static int myri10ge_load_firmware(struct myri10ge_priv *mgp) 639 { 640 char __iomem *submit; 641 + __be32 buf[16] __attribute__ ((__aligned__(8))); 642 u32 dma_low, dma_high, size; 643 int status, i; 644 645 size = 0; 646 status = myri10ge_load_hotplug_firmware(mgp, &size); ··· 635 } 636 dev_info(&mgp->pdev->dev, 637 "Successfully adopted running firmware\n"); 638 + if (mgp->tx_boundary == 4096) { 639 dev_warn(&mgp->pdev->dev, 640 "Using firmware currently running on NIC" 641 ". For optimal\n"); ··· 646 } 647 648 mgp->fw_name = "adopted"; 649 + mgp->tx_boundary = 2048; 650 + myri10ge_dummy_rdma(mgp, 1); 651 + status = myri10ge_get_firmware_capabilities(mgp); 652 return status; 653 } 654 ··· 681 msleep(1); 682 mb(); 683 i = 0; 684 + while (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 9) { 685 + msleep(1 << i); 686 i++; 687 } 688 if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA) { 689 dev_err(&mgp->pdev->dev, "handoff failed\n"); 690 return -ENXIO; 691 } 692 myri10ge_dummy_rdma(mgp, 1); 693 + status = myri10ge_get_firmware_capabilities(mgp); 694 695 + return status; 696 } 697 698 static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * addr) ··· 772 * transfers took to complete. 773 */ 774 775 + len = mgp->tx_boundary; 776 777 cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus); 778 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus); ··· 834 835 /* Now exchange information about interrupts */ 836 837 + bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry); 838 + memset(mgp->ss.rx_done.entry, 0, bytes); 839 cmd.data0 = (u32) bytes; 840 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0); 841 + cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->ss.rx_done.bus); 842 + cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->ss.rx_done.bus); 843 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA, &cmd, 0); 844 845 status |= 846 myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0); 847 + mgp->ss.irq_claim = (__iomem __be32 *) (mgp->sram + cmd.data0); 848 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, 849 &cmd, 0); 850 mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0); ··· 858 } 859 put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr); 860 861 + memset(mgp->ss.rx_done.entry, 0, bytes); 862 863 /* reset mcp/driver shared state back to 0 */ 864 + mgp->ss.tx.req = 0; 865 + mgp->ss.tx.done = 0; 866 + mgp->ss.tx.pkt_start = 0; 867 + mgp->ss.tx.pkt_done = 0; 868 + mgp->ss.rx_big.cnt = 0; 869 + mgp->ss.rx_small.cnt = 0; 870 + mgp->ss.rx_done.idx = 0; 871 + mgp->ss.rx_done.cnt = 0; 872 mgp->link_changes = 0; 873 status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr); 874 myri10ge_change_pause(mgp, mgp->pause); ··· 1020 * page into an skb */ 1021 1022 static inline int 1023 + myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx, 1024 int bytes, int len, __wsum csum) 1025 { 1026 + struct myri10ge_priv *mgp = ss->mgp; 1027 struct sk_buff *skb; 1028 struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME]; 1029 int i, idx, hlen, remainder; ··· 1052 rx_frags[0].page_offset += MXGEFW_PAD; 1053 rx_frags[0].size -= MXGEFW_PAD; 1054 len -= MXGEFW_PAD; 1055 + lro_receive_frags(&ss->rx_done.lro_mgr, rx_frags, 1056 len, len, 1057 + /* opaque, will come back in get_frag_header */ 1058 + (void *)(__force unsigned long)csum, csum); 1059 return 1; 1060 } 1061 ··· 1096 return 1; 1097 } 1098 1099 + static inline void 1100 + myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index) 1101 { 1102 + struct 
pci_dev *pdev = ss->mgp->pdev; 1103 + struct myri10ge_tx_buf *tx = &ss->tx; 1104 struct sk_buff *skb; 1105 int idx, len; 1106 ··· 1117 len = pci_unmap_len(&tx->info[idx], len); 1118 pci_unmap_len_set(&tx->info[idx], len, 0); 1119 if (skb) { 1120 + ss->stats.tx_bytes += skb->len; 1121 + ss->stats.tx_packets++; 1122 dev_kfree_skb_irq(skb); 1123 if (len) 1124 pci_unmap_single(pdev, ··· 1134 } 1135 } 1136 /* start the queue if we've stopped it */ 1137 + if (netif_queue_stopped(ss->dev) 1138 && tx->req - tx->done < (tx->mask >> 1)) { 1139 + tx->wake_queue++; 1140 + netif_wake_queue(ss->dev); 1141 } 1142 } 1143 1144 + static inline int 1145 + myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget) 1146 { 1147 + struct myri10ge_rx_done *rx_done = &ss->rx_done; 1148 + struct myri10ge_priv *mgp = ss->mgp; 1149 unsigned long rx_bytes = 0; 1150 unsigned long rx_packets = 0; 1151 unsigned long rx_ok; ··· 1159 rx_done->entry[idx].length = 0; 1160 checksum = csum_unfold(rx_done->entry[idx].checksum); 1161 if (length <= mgp->small_bytes) 1162 + rx_ok = myri10ge_rx_done(ss, &ss->rx_small, 1163 mgp->small_bytes, 1164 length, checksum); 1165 else 1166 + rx_ok = myri10ge_rx_done(ss, &ss->rx_big, 1167 mgp->big_bytes, 1168 length, checksum); 1169 rx_packets += rx_ok; 1170 rx_bytes += rx_ok * (unsigned long)length; 1171 cnt++; 1172 + idx = cnt & (mgp->max_intr_slots - 1); 1173 work_done++; 1174 } 1175 rx_done->idx = idx; 1176 rx_done->cnt = cnt; 1177 + ss->stats.rx_packets += rx_packets; 1178 + ss->stats.rx_bytes += rx_bytes; 1179 1180 if (myri10ge_lro) 1181 lro_flush_all(&rx_done->lro_mgr); 1182 1183 /* restock receive rings if needed */ 1184 + if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh) 1185 + myri10ge_alloc_rx_pages(mgp, &ss->rx_small, 1186 mgp->small_bytes + MXGEFW_PAD, 0); 1187 + if (ss->rx_big.fill_cnt - ss->rx_big.cnt < myri10ge_fill_thresh) 1188 + myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0); 1189 1190 return work_done; 1191 } 1192 1193 static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp) 1194 { 1195 + struct mcp_irq_data *stats = mgp->ss.fw_stats; 1196 1197 if (unlikely(stats->stats_updated)) { 1198 unsigned link_up = ntohl(stats->link_up); ··· 1219 } 1220 } 1221 if (mgp->rdma_tags_available != 1222 + ntohl(stats->rdma_tags_available)) { 1223 mgp->rdma_tags_available = 1224 + ntohl(stats->rdma_tags_available); 1225 printk(KERN_WARNING "myri10ge: %s: RDMA timed out! 
" 1226 "%d tags left\n", mgp->dev->name, 1227 mgp->rdma_tags_available); ··· 1234 1235 static int myri10ge_poll(struct napi_struct *napi, int budget) 1236 { 1237 + struct myri10ge_slice_state *ss = 1238 + container_of(napi, struct myri10ge_slice_state, napi); 1239 + struct net_device *netdev = ss->mgp->dev; 1240 int work_done; 1241 1242 /* process as many rx events as NAPI will allow */ 1243 + work_done = myri10ge_clean_rx_done(ss, budget); 1244 1245 if (work_done < budget) { 1246 netif_rx_complete(netdev, napi); 1247 + put_be32(htonl(3), ss->irq_claim); 1248 } 1249 return work_done; 1250 } 1251 1252 static irqreturn_t myri10ge_intr(int irq, void *arg) 1253 { 1254 + struct myri10ge_slice_state *ss = arg; 1255 + struct myri10ge_priv *mgp = ss->mgp; 1256 + struct mcp_irq_data *stats = ss->fw_stats; 1257 + struct myri10ge_tx_buf *tx = &ss->tx; 1258 u32 send_done_count; 1259 int i; 1260 ··· 1264 /* low bit indicates receives are present, so schedule 1265 * napi poll handler */ 1266 if (stats->valid & 1) 1267 + netif_rx_schedule(ss->dev, &ss->napi); 1268 1269 if (!mgp->msi_enabled) { 1270 put_be32(0, mgp->irq_deassert); ··· 1281 /* check for transmit completes and receives */ 1282 send_done_count = ntohl(stats->send_done_count); 1283 if (send_done_count != tx->pkt_done) 1284 + myri10ge_tx_done(ss, (int)send_done_count); 1285 if (unlikely(i > myri10ge_max_irq_loops)) { 1286 printk(KERN_WARNING "myri10ge: %s: irq stuck?\n", 1287 mgp->dev->name); ··· 1296 1297 myri10ge_check_statblock(mgp); 1298 1299 + put_be32(htonl(3), ss->irq_claim + 1); 1300 return (IRQ_HANDLED); 1301 } 1302 1303 static int 1304 myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) 1305 { 1306 + struct myri10ge_priv *mgp = netdev_priv(netdev); 1307 + char *ptr; 1308 + int i; 1309 + 1310 cmd->autoneg = AUTONEG_DISABLE; 1311 cmd->speed = SPEED_10000; 1312 cmd->duplex = DUPLEX_FULL; 1313 + 1314 + /* 1315 + * parse the product code to deterimine the interface type 1316 + * (CX4, XFP, Quad Ribbon Fiber) by looking at the character 1317 + * after the 3rd dash in the driver's cached copy of the 1318 + * EEPROM's product code string. 
1319 + */ 1320 + ptr = mgp->product_code_string; 1321 + if (ptr == NULL) { 1322 + printk(KERN_ERR "myri10ge: %s: Missing product code\n", 1323 + netdev->name); 1324 + return 0; 1325 + } 1326 + for (i = 0; i < 3; i++, ptr++) { 1327 + ptr = strchr(ptr, '-'); 1328 + if (ptr == NULL) { 1329 + printk(KERN_ERR "myri10ge: %s: Invalid product " 1330 + "code %s\n", netdev->name, 1331 + mgp->product_code_string); 1332 + return 0; 1333 + } 1334 + } 1335 + if (*ptr == 'R' || *ptr == 'Q') { 1336 + /* We've found either an XFP or quad ribbon fiber */ 1337 + cmd->port = PORT_FIBRE; 1338 + } 1339 return 0; 1340 } 1341 ··· 1324 myri10ge_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal) 1325 { 1326 struct myri10ge_priv *mgp = netdev_priv(netdev); 1327 + 1328 coal->rx_coalesce_usecs = mgp->intr_coal_delay; 1329 return 0; 1330 } ··· 1370 { 1371 struct myri10ge_priv *mgp = netdev_priv(netdev); 1372 1373 + ring->rx_mini_max_pending = mgp->ss.rx_small.mask + 1; 1374 + ring->rx_max_pending = mgp->ss.rx_big.mask + 1; 1375 ring->rx_jumbo_max_pending = 0; 1376 + ring->tx_max_pending = mgp->ss.rx_small.mask + 1; 1377 ring->rx_mini_pending = ring->rx_mini_max_pending; 1378 ring->rx_pending = ring->rx_max_pending; 1379 ring->rx_jumbo_pending = ring->rx_jumbo_max_pending; ··· 1383 static u32 myri10ge_get_rx_csum(struct net_device *netdev) 1384 { 1385 struct myri10ge_priv *mgp = netdev_priv(netdev); 1386 + 1387 if (mgp->csum_flag) 1388 return 1; 1389 else ··· 1392 static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled) 1393 { 1394 struct myri10ge_priv *mgp = netdev_priv(netdev); 1395 + 1396 if (csum_enabled) 1397 mgp->csum_flag = MXGEFW_FLAGS_CKSUM; 1398 else ··· 1411 return 0; 1412 } 1413 1414 + static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = { 1415 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", 1416 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", 1417 "rx_length_errors", "rx_over_errors", "rx_crc_errors", ··· 1421 /* device-specific stats */ 1422 "tx_boundary", "WC", "irq", "MSI", 1423 "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs", 1424 + "serial_number", "watchdog_resets", 1425 "link_changes", "link_up", "dropped_link_overflow", 1426 "dropped_link_error_or_filtered", 1427 "dropped_pause", "dropped_bad_phy", "dropped_bad_crc32", 1428 "dropped_unicast_filtered", "dropped_multicast_filtered", 1429 "dropped_runt", "dropped_overrun", "dropped_no_small_buffer", 1430 + "dropped_no_big_buffer" 1431 + }; 1432 + 1433 + static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = { 1434 + "----------- slice ---------", 1435 + "tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done", 1436 + "rx_small_cnt", "rx_big_cnt", 1437 + "wake_queue", "stop_queue", "tx_linearized", "LRO aggregated", 1438 + "LRO flushed", 1439 "LRO avg aggr", "LRO no_desc" 1440 }; 1441 1442 #define MYRI10GE_NET_STATS_LEN 21 1443 + #define MYRI10GE_MAIN_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_main_stats) 1444 + #define MYRI10GE_SLICE_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_slice_stats) 1445 1446 static void 1447 myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data) 1448 { 1449 switch (stringset) { 1450 case ETH_SS_STATS: 1451 + memcpy(data, *myri10ge_gstrings_main_stats, 1452 + sizeof(myri10ge_gstrings_main_stats)); 1453 + data += sizeof(myri10ge_gstrings_main_stats); 1454 + memcpy(data, *myri10ge_gstrings_slice_stats, 1455 + sizeof(myri10ge_gstrings_slice_stats)); 1456 + data += sizeof(myri10ge_gstrings_slice_stats); 1457 
break; 1458 } 1459 } ··· 1451 { 1452 switch (sset) { 1453 case ETH_SS_STATS: 1454 + return MYRI10GE_MAIN_STATS_LEN + MYRI10GE_SLICE_STATS_LEN; 1455 default: 1456 return -EOPNOTSUPP; 1457 } ··· 1462 struct ethtool_stats *stats, u64 * data) 1463 { 1464 struct myri10ge_priv *mgp = netdev_priv(netdev); 1465 + struct myri10ge_slice_state *ss; 1466 int i; 1467 1468 for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++) 1469 data[i] = ((unsigned long *)&mgp->stats)[i]; 1470 1471 + data[i++] = (unsigned int)mgp->tx_boundary; 1472 data[i++] = (unsigned int)mgp->wc_enabled; 1473 data[i++] = (unsigned int)mgp->pdev->irq; 1474 data[i++] = (unsigned int)mgp->msi_enabled; ··· 1475 data[i++] = (unsigned int)mgp->write_dma; 1476 data[i++] = (unsigned int)mgp->read_write_dma; 1477 data[i++] = (unsigned int)mgp->serial_number; 1478 data[i++] = (unsigned int)mgp->watchdog_resets; 1479 data[i++] = (unsigned int)mgp->link_changes; 1480 + 1481 + /* firmware stats are useful only in the first slice */ 1482 + ss = &mgp->ss; 1483 + data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up); 1484 + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_link_overflow); 1485 data[i++] = 1486 + (unsigned int)ntohl(ss->fw_stats->dropped_link_error_or_filtered); 1487 + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_pause); 1488 + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_phy); 1489 + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_crc32); 1490 + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_unicast_filtered); 1491 data[i++] = 1492 + (unsigned int)ntohl(ss->fw_stats->dropped_multicast_filtered); 1493 + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_runt); 1494 + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_overrun); 1495 + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer); 1496 + data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer); 1497 + 1498 + data[i++] = 0; 1499 + data[i++] = (unsigned int)ss->tx.pkt_start; 1500 + data[i++] = (unsigned int)ss->tx.pkt_done; 1501 + data[i++] = (unsigned int)ss->tx.req; 1502 + data[i++] = (unsigned int)ss->tx.done; 1503 + data[i++] = (unsigned int)ss->rx_small.cnt; 1504 + data[i++] = (unsigned int)ss->rx_big.cnt; 1505 + data[i++] = (unsigned int)ss->tx.wake_queue; 1506 + data[i++] = (unsigned int)ss->tx.stop_queue; 1507 + data[i++] = (unsigned int)ss->tx.linearized; 1508 + data[i++] = ss->rx_done.lro_mgr.stats.aggregated; 1509 + data[i++] = ss->rx_done.lro_mgr.stats.flushed; 1510 + if (ss->rx_done.lro_mgr.stats.flushed) 1511 + data[i++] = ss->rx_done.lro_mgr.stats.aggregated / 1512 + ss->rx_done.lro_mgr.stats.flushed; 1513 else 1514 data[i++] = 0; 1515 + data[i++] = ss->rx_done.lro_mgr.stats.no_desc; 1516 } 1517 1518 static void myri10ge_set_msglevel(struct net_device *netdev, u32 value) ··· 1544 .get_msglevel = myri10ge_get_msglevel 1545 }; 1546 1547 + static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss) 1548 { 1549 + struct myri10ge_priv *mgp = ss->mgp; 1550 struct myri10ge_cmd cmd; 1551 + struct net_device *dev = mgp->dev; 1552 int tx_ring_size, rx_ring_size; 1553 int tx_ring_entries, rx_ring_entries; 1554 int i, status; 1555 size_t bytes; 1556 1557 /* get ring sizes */ 1558 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0); 1559 tx_ring_size = cmd.data0; 1560 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0); ··· 1566 1567 tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send); 1568 rx_ring_entries = rx_ring_size / sizeof(struct 
mcp_dma_addr); 1569 + ss->tx.mask = tx_ring_entries - 1; 1570 + ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1; 1571 1572 status = -ENOMEM; 1573 1574 /* allocate the host shadow rings */ 1575 1576 bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4) 1577 + * sizeof(*ss->tx.req_list); 1578 + ss->tx.req_bytes = kzalloc(bytes, GFP_KERNEL); 1579 + if (ss->tx.req_bytes == NULL) 1580 goto abort_with_nothing; 1581 1582 /* ensure req_list entries are aligned to 8 bytes */ 1583 + ss->tx.req_list = (struct mcp_kreq_ether_send *) 1584 + ALIGN((unsigned long)ss->tx.req_bytes, 8); 1585 1586 + bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow); 1587 + ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL); 1588 + if (ss->rx_small.shadow == NULL) 1589 goto abort_with_tx_req_bytes; 1590 1591 + bytes = rx_ring_entries * sizeof(*ss->rx_big.shadow); 1592 + ss->rx_big.shadow = kzalloc(bytes, GFP_KERNEL); 1593 + if (ss->rx_big.shadow == NULL) 1594 goto abort_with_rx_small_shadow; 1595 1596 /* allocate the host info rings */ 1597 1598 + bytes = tx_ring_entries * sizeof(*ss->tx.info); 1599 + ss->tx.info = kzalloc(bytes, GFP_KERNEL); 1600 + if (ss->tx.info == NULL) 1601 goto abort_with_rx_big_shadow; 1602 1603 + bytes = rx_ring_entries * sizeof(*ss->rx_small.info); 1604 + ss->rx_small.info = kzalloc(bytes, GFP_KERNEL); 1605 + if (ss->rx_small.info == NULL) 1606 goto abort_with_tx_info; 1607 1608 + bytes = rx_ring_entries * sizeof(*ss->rx_big.info); 1609 + ss->rx_big.info = kzalloc(bytes, GFP_KERNEL); 1610 + if (ss->rx_big.info == NULL) 1611 goto abort_with_rx_small_info; 1612 1613 /* Fill the receive rings */ 1614 + ss->rx_big.cnt = 0; 1615 + ss->rx_small.cnt = 0; 1616 + ss->rx_big.fill_cnt = 0; 1617 + ss->rx_small.fill_cnt = 0; 1618 + ss->rx_small.page_offset = MYRI10GE_ALLOC_SIZE; 1619 + ss->rx_big.page_offset = MYRI10GE_ALLOC_SIZE; 1620 + ss->rx_small.watchdog_needed = 0; 1621 + ss->rx_big.watchdog_needed = 0; 1622 + myri10ge_alloc_rx_pages(mgp, &ss->rx_small, 1623 mgp->small_bytes + MXGEFW_PAD, 0); 1624 1625 + if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) { 1626 printk(KERN_ERR "myri10ge: %s: alloced only %d small bufs\n", 1627 + dev->name, ss->rx_small.fill_cnt); 1628 goto abort_with_rx_small_ring; 1629 } 1630 1631 + myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0); 1632 + if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) { 1633 printk(KERN_ERR "myri10ge: %s: alloced only %d big bufs\n", 1634 + dev->name, ss->rx_big.fill_cnt); 1635 goto abort_with_rx_big_ring; 1636 } 1637 1638 return 0; 1639 1640 abort_with_rx_big_ring: 1641 + for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) { 1642 + int idx = i & ss->rx_big.mask; 1643 + myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx], 1644 mgp->big_bytes); 1645 + put_page(ss->rx_big.info[idx].page); 1646 } 1647 1648 abort_with_rx_small_ring: 1649 + for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) { 1650 + int idx = i & ss->rx_small.mask; 1651 + myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx], 1652 mgp->small_bytes + MXGEFW_PAD); 1653 + put_page(ss->rx_small.info[idx].page); 1654 } 1655 1656 + kfree(ss->rx_big.info); 1657 1658 abort_with_rx_small_info: 1659 + kfree(ss->rx_small.info); 1660 1661 abort_with_tx_info: 1662 + kfree(ss->tx.info); 1663 1664 abort_with_rx_big_shadow: 1665 + kfree(ss->rx_big.shadow); 1666 1667 abort_with_rx_small_shadow: 1668 + kfree(ss->rx_small.shadow); 1669 1670 abort_with_tx_req_bytes: 1671 + kfree(ss->tx.req_bytes); 1672 + ss->tx.req_bytes = NULL; 1673 + ss->tx.req_list = NULL; 1674 1675 
abort_with_nothing: 1676 return status; 1677 } 1678 1679 + static void myri10ge_free_rings(struct myri10ge_slice_state *ss) 1680 { 1681 + struct myri10ge_priv *mgp = ss->mgp; 1682 struct sk_buff *skb; 1683 struct myri10ge_tx_buf *tx; 1684 int i, len, idx; 1685 1686 + for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) { 1687 + idx = i & ss->rx_big.mask; 1688 + if (i == ss->rx_big.fill_cnt - 1) 1689 + ss->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE; 1690 + myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx], 1691 mgp->big_bytes); 1692 + put_page(ss->rx_big.info[idx].page); 1693 } 1694 1695 + for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) { 1696 + idx = i & ss->rx_small.mask; 1697 + if (i == ss->rx_small.fill_cnt - 1) 1698 + ss->rx_small.info[idx].page_offset = 1699 MYRI10GE_ALLOC_SIZE; 1700 + myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx], 1701 mgp->small_bytes + MXGEFW_PAD); 1702 + put_page(ss->rx_small.info[idx].page); 1703 } 1704 + tx = &ss->tx; 1705 while (tx->done != tx->req) { 1706 idx = tx->done & tx->mask; 1707 skb = tx->info[idx].skb; ··· 1714 len = pci_unmap_len(&tx->info[idx], len); 1715 pci_unmap_len_set(&tx->info[idx], len, 0); 1716 if (skb) { 1717 + ss->stats.tx_dropped++; 1718 dev_kfree_skb_any(skb); 1719 if (len) 1720 pci_unmap_single(mgp->pdev, ··· 1729 PCI_DMA_TODEVICE); 1730 } 1731 } 1732 + kfree(ss->rx_big.info); 1733 1734 + kfree(ss->rx_small.info); 1735 1736 + kfree(ss->tx.info); 1737 1738 + kfree(ss->rx_big.shadow); 1739 1740 + kfree(ss->rx_small.shadow); 1741 1742 + kfree(ss->tx.req_bytes); 1743 + ss->tx.req_bytes = NULL; 1744 + ss->tx.req_list = NULL; 1745 } 1746 1747 static int myri10ge_request_irq(struct myri10ge_priv *mgp) ··· 1840 1841 static int myri10ge_open(struct net_device *dev) 1842 { 1843 + struct myri10ge_priv *mgp = netdev_priv(dev); 1844 struct myri10ge_cmd cmd; 1845 struct net_lro_mgr *lro_mgr; 1846 int status, big_pow2; 1847 1848 if (mgp->running != MYRI10GE_ETH_STOPPED) 1849 return -EBUSY; ··· 1883 /* get the lanai pointers to the send and receive rings */ 1884 1885 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET, &cmd, 0); 1886 + mgp->ss.tx.lanai = 1887 (struct mcp_kreq_ether_send __iomem *)(mgp->sram + cmd.data0); 1888 1889 status |= 1890 myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET, &cmd, 0); 1891 + mgp->ss.rx_small.lanai = 1892 (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0); 1893 1894 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0); 1895 + mgp->ss.rx_big.lanai = 1896 (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0); 1897 1898 if (status != 0) { ··· 1904 } 1905 1906 if (myri10ge_wcfifo && mgp->wc_enabled) { 1907 + mgp->ss.tx.wc_fifo = (u8 __iomem *) mgp->sram + MXGEFW_ETH_SEND_4; 1908 + mgp->ss.rx_small.wc_fifo = 1909 (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_SMALL; 1910 + mgp->ss.rx_big.wc_fifo = 1911 (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_BIG; 1912 } else { 1913 + mgp->ss.tx.wc_fifo = NULL; 1914 + mgp->ss.rx_small.wc_fifo = NULL; 1915 + mgp->ss.rx_big.wc_fifo = NULL; 1916 } 1917 1918 /* Firmware needs the big buff size as a power of 2. 
Lie and ··· 1929 mgp->big_bytes = big_pow2; 1930 } 1931 1932 + status = myri10ge_allocate_rings(&mgp->ss); 1933 if (status != 0) 1934 goto abort_with_irq; 1935 ··· 1948 goto abort_with_rings; 1949 } 1950 1951 + cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->ss.fw_stats_bus); 1952 + cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->ss.fw_stats_bus); 1953 cmd.data2 = sizeof(struct mcp_irq_data); 1954 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0); 1955 if (status == -ENOSYS) { 1956 + dma_addr_t bus = mgp->ss.fw_stats_bus; 1957 bus += offsetof(struct mcp_irq_data, send_done_count); 1958 cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus); 1959 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus); ··· 1974 mgp->link_state = ~0U; 1975 mgp->rdma_tags_available = 15; 1976 1977 + lro_mgr = &mgp->ss.rx_done.lro_mgr; 1978 lro_mgr->dev = dev; 1979 lro_mgr->features = LRO_F_NAPI; 1980 lro_mgr->ip_summed = CHECKSUM_COMPLETE; 1981 lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY; 1982 lro_mgr->max_desc = MYRI10GE_MAX_LRO_DESCRIPTORS; 1983 + lro_mgr->lro_arr = mgp->ss.rx_done.lro_desc; 1984 lro_mgr->get_frag_header = myri10ge_get_frag_header; 1985 lro_mgr->max_aggr = myri10ge_lro_max_pkts; 1986 lro_mgr->frag_align_pad = 2; 1987 if (lro_mgr->max_aggr > MAX_SKB_FRAGS) 1988 lro_mgr->max_aggr = MAX_SKB_FRAGS; 1989 1990 + napi_enable(&mgp->ss.napi); /* must happen prior to any irq */ 1991 1992 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0); 1993 if (status) { ··· 1996 goto abort_with_rings; 1997 } 1998 1999 + mgp->ss.tx.wake_queue = 0; 2000 + mgp->ss.tx.stop_queue = 0; 2001 mgp->running = MYRI10GE_ETH_RUNNING; 2002 mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ; 2003 add_timer(&mgp->watchdog_timer); ··· 2005 return 0; 2006 2007 abort_with_rings: 2008 + myri10ge_free_rings(&mgp->ss); 2009 2010 abort_with_irq: 2011 myri10ge_free_irq(mgp); ··· 2017 2018 static int myri10ge_close(struct net_device *dev) 2019 { 2020 + struct myri10ge_priv *mgp = netdev_priv(dev); 2021 struct myri10ge_cmd cmd; 2022 int status, old_down_cnt; 2023 2024 if (mgp->running != MYRI10GE_ETH_RUNNING) 2025 return 0; 2026 2027 + if (mgp->ss.tx.req_bytes == NULL) 2028 return 0; 2029 2030 del_timer_sync(&mgp->watchdog_timer); 2031 mgp->running = MYRI10GE_ETH_STOPPING; 2032 + napi_disable(&mgp->ss.napi); 2033 netif_carrier_off(dev); 2034 netif_stop_queue(dev); 2035 old_down_cnt = mgp->down_cnt; ··· 2047 2048 netif_tx_disable(dev); 2049 myri10ge_free_irq(mgp); 2050 + myri10ge_free_rings(&mgp->ss); 2051 2052 mgp->running = MYRI10GE_ETH_STOPPED; 2053 return 0; ··· 2143 2144 /* 2145 * Transmit a packet. We need to split the packet so that a single 2146 + * segment does not cross myri10ge->tx_boundary, so this makes segment 2147 * counting tricky. So rather than try to count segments up front, we 2148 * just give up if there are too few segments to hold a reasonably 2149 * fragmented packet currently available. 
If we run ··· 2154 static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev) 2155 { 2156 struct myri10ge_priv *mgp = netdev_priv(dev); 2157 + struct myri10ge_slice_state *ss; 2158 struct mcp_kreq_ether_send *req; 2159 + struct myri10ge_tx_buf *tx; 2160 struct skb_frag_struct *frag; 2161 dma_addr_t bus; 2162 u32 low; ··· 2166 int cum_len, seglen, boundary, rdma_count; 2167 u8 flags, odd_flag; 2168 2169 + /* always transmit through slot 0 */ 2170 + ss = &mgp->ss; 2171 + tx = &ss->tx; 2172 again: 2173 req = tx->req_list; 2174 avail = tx->mask - 1 - (tx->req - tx->done); ··· 2180 2181 if ((unlikely(avail < max_segments))) { 2182 /* we are out of transmit resources */ 2183 + tx->stop_queue++; 2184 netif_stop_queue(dev); 2185 return 1; 2186 } ··· 2242 if (skb_padto(skb, ETH_ZLEN)) { 2243 /* The packet is gone, so we must 2244 * return 0 */ 2245 + ss->stats.tx_dropped += 1; 2246 return 0; 2247 } 2248 /* adjust the len to account for the zero pad ··· 2284 2285 while (1) { 2286 /* Break the SKB or Fragment up into pieces which 2287 + * do not cross mgp->tx_boundary */ 2288 low = MYRI10GE_LOWPART_TO_U32(bus); 2289 high_swapped = htonl(MYRI10GE_HIGHPART_TO_U32(bus)); 2290 while (len) { ··· 2294 if (unlikely(count == max_segments)) 2295 goto abort_linearize; 2296 2297 + boundary = 2298 + (low + mgp->tx_boundary) & ~(mgp->tx_boundary - 1); 2299 seglen = boundary - low; 2300 if (seglen > len) 2301 seglen = len; ··· 2378 myri10ge_submit_req_wc(tx, tx->req_list, count); 2379 tx->pkt_start++; 2380 if ((avail - count) < MXGEFW_MAX_SEND_DESC) { 2381 + tx->stop_queue++; 2382 netif_stop_queue(dev); 2383 } 2384 dev->trans_start = jiffies; ··· 2420 if (skb_linearize(skb)) 2421 goto drop; 2422 2423 + tx->linearized++; 2424 goto again; 2425 2426 drop: 2427 dev_kfree_skb_any(skb); 2428 + ss->stats.tx_dropped += 1; 2429 return 0; 2430 2431 } ··· 2433 static int myri10ge_sw_tso(struct sk_buff *skb, struct net_device *dev) 2434 { 2435 struct sk_buff *segs, *curr; 2436 + struct myri10ge_priv *mgp = netdev_priv(dev); 2437 int status; 2438 2439 segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6); ··· 2473 2474 static void myri10ge_set_multicast_list(struct net_device *dev) 2475 { 2476 + struct myri10ge_priv *mgp = netdev_priv(dev); 2477 struct myri10ge_cmd cmd; 2478 struct dev_mc_list *mc_list; 2479 __be32 data[2] = { 0, 0 }; 2480 int err; 2481 DECLARE_MAC_BUF(mac); 2482 2483 /* can be called from atomic contexts, 2484 * pass 1 to force atomicity in myri10ge_send_cmd() */ 2485 myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1); ··· 2616 ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4; 2617 if (ext_type != PCI_EXP_TYPE_ROOT_PORT) { 2618 if (myri10ge_ecrc_enable > 1) { 2619 + struct pci_dev *prev_bridge, *old_bridge = bridge; 2620 2621 /* Walk the hierarchy up to the root port 2622 * where ECRC has to be enabled */ 2623 do { 2624 + prev_bridge = bridge; 2625 bridge = bridge->bus->self; 2626 + if (!bridge || prev_bridge == bridge) { 2627 dev_err(dev, 2628 "Failed to find root port" 2629 " to force ECRC\n"); ··· 2681 * already been enabled, then it must use a firmware image which works 2682 * around unaligned completion packets (myri10ge_ethp_z8e.dat), and it 2683 * should also ensure that it never gives the device a Read-DMA which is 2684 + * larger than 2KB by setting the tx_boundary to 2KB. If ECRC is 2685 * enabled, then the driver should use the aligned (myri10ge_eth_z8e.dat) 2686 + * firmware image, and set tx_boundary to 4KB. 
2687 */ 2688 2689 static void myri10ge_firmware_probe(struct myri10ge_priv *mgp) ··· 2692 struct device *dev = &pdev->dev; 2693 int status; 2694 2695 + mgp->tx_boundary = 4096; 2696 /* 2697 * Verify the max read request size was set to 4KB 2698 * before trying the test with 4KB. ··· 2704 } 2705 if (status != 4096) { 2706 dev_warn(dev, "Max Read Request size != 4096 (%d)\n", status); 2707 + mgp->tx_boundary = 2048; 2708 } 2709 /* 2710 * load the optimized firmware (which assumes aligned PCIe ··· 2737 "Please install up to date fw\n"); 2738 abort: 2739 /* fall back to using the unaligned firmware */ 2740 + mgp->tx_boundary = 2048; 2741 mgp->fw_name = myri10ge_fw_unaligned; 2742 2743 } ··· 2758 if (link_width < 8) { 2759 dev_info(&mgp->pdev->dev, "PCIE x%d Link\n", 2760 link_width); 2761 + mgp->tx_boundary = 4096; 2762 mgp->fw_name = myri10ge_fw_aligned; 2763 } else { 2764 myri10ge_firmware_probe(mgp); ··· 2767 if (myri10ge_force_firmware == 1) { 2768 dev_info(&mgp->pdev->dev, 2769 "Assuming aligned completions (forced)\n"); 2770 + mgp->tx_boundary = 4096; 2771 mgp->fw_name = myri10ge_fw_aligned; 2772 } else { 2773 dev_info(&mgp->pdev->dev, 2774 "Assuming unaligned completions (forced)\n"); 2775 + mgp->tx_boundary = 2048; 2776 mgp->fw_name = myri10ge_fw_unaligned; 2777 } 2778 } ··· 2889 { 2890 struct myri10ge_priv *mgp = 2891 container_of(work, struct myri10ge_priv, watchdog_work); 2892 + struct myri10ge_tx_buf *tx; 2893 u32 reboot; 2894 int status; 2895 u16 cmd, vendor; ··· 2938 2939 printk(KERN_ERR "myri10ge: %s: device timeout, resetting\n", 2940 mgp->dev->name); 2941 + tx = &mgp->ss.tx; 2942 printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n", 2943 + mgp->dev->name, tx->req, tx->done, 2944 + tx->pkt_start, tx->pkt_done, 2945 + (int)ntohl(mgp->ss.fw_stats->send_done_count)); 2946 msleep(2000); 2947 printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n", 2948 + mgp->dev->name, tx->req, tx->done, 2949 + tx->pkt_start, tx->pkt_done, 2950 + (int)ntohl(mgp->ss.fw_stats->send_done_count)); 2951 } 2952 rtnl_lock(); 2953 myri10ge_close(mgp->dev); ··· 2969 static void myri10ge_watchdog_timer(unsigned long arg) 2970 { 2971 struct myri10ge_priv *mgp; 2972 + struct myri10ge_slice_state *ss; 2973 u32 rx_pause_cnt; 2974 2975 mgp = (struct myri10ge_priv *)arg; 2976 2977 + rx_pause_cnt = ntohl(mgp->ss.fw_stats->dropped_pause); 2978 2979 + ss = &mgp->ss; 2980 + if (ss->rx_small.watchdog_needed) { 2981 + myri10ge_alloc_rx_pages(mgp, &ss->rx_small, 2982 + mgp->small_bytes + MXGEFW_PAD, 1); 2983 + if (ss->rx_small.fill_cnt - ss->rx_small.cnt >= 2984 + myri10ge_fill_thresh) 2985 + ss->rx_small.watchdog_needed = 0; 2986 + } 2987 + if (ss->rx_big.watchdog_needed) { 2988 + myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 1); 2989 + if (ss->rx_big.fill_cnt - ss->rx_big.cnt >= 2990 + myri10ge_fill_thresh) 2991 + ss->rx_big.watchdog_needed = 0; 2992 + } 2993 + 2994 + if (ss->tx.req != ss->tx.done && 2995 + ss->tx.done == ss->watchdog_tx_done && 2996 + ss->watchdog_tx_req != ss->watchdog_tx_done) { 2997 /* nic seems like it might be stuck.. 
*/ 2998 if (rx_pause_cnt != mgp->watchdog_pause) { 2999 if (net_ratelimit()) ··· 3005 /* rearm timer */ 3006 mod_timer(&mgp->watchdog_timer, 3007 jiffies + myri10ge_watchdog_timeout * HZ); 3008 + ss->watchdog_tx_done = ss->tx.done; 3009 + ss->watchdog_tx_req = ss->tx.req; 3010 mgp->watchdog_pause = rx_pause_cnt; 3011 } 3012 ··· 3030 3031 mgp = netdev_priv(netdev); 3032 mgp->dev = netdev; 3033 + netif_napi_add(netdev, &mgp->ss.napi, myri10ge_poll, myri10ge_napi_weight); 3034 mgp->pdev = pdev; 3035 mgp->csum_flag = MXGEFW_FLAGS_CKSUM; 3036 mgp->pause = myri10ge_flow_control; ··· 3076 if (mgp->cmd == NULL) 3077 goto abort_with_netdev; 3078 3079 + mgp->ss.fw_stats = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats), 3080 + &mgp->ss.fw_stats_bus, GFP_KERNEL); 3081 + if (mgp->ss.fw_stats == NULL) 3082 goto abort_with_cmd; 3083 3084 mgp->board_span = pci_resource_len(pdev, 0); ··· 3118 netdev->dev_addr[i] = mgp->mac_addr[i]; 3119 3120 /* allocate rx done ring */ 3121 + bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry); 3122 + mgp->ss.rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes, 3123 + &mgp->ss.rx_done.bus, GFP_KERNEL); 3124 + if (mgp->ss.rx_done.entry == NULL) 3125 goto abort_with_ioremap; 3126 + memset(mgp->ss.rx_done.entry, 0, bytes); 3127 3128 myri10ge_select_firmware(mgp); 3129 ··· 3183 } 3184 dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n", 3185 (mgp->msi_enabled ? "MSI" : "xPIC"), 3186 + netdev->irq, mgp->tx_boundary, mgp->fw_name, 3187 (mgp->wc_enabled ? "Enabled" : "Disabled")); 3188 3189 return 0; ··· 3195 myri10ge_dummy_rdma(mgp, 0); 3196 3197 abort_with_rx_done: 3198 + bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry); 3199 dma_free_coherent(&pdev->dev, bytes, 3200 + mgp->ss.rx_done.entry, mgp->ss.rx_done.bus); 3201 3202 abort_with_ioremap: 3203 iounmap(mgp->sram); ··· 3207 if (mgp->mtrr >= 0) 3208 mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); 3209 #endif 3210 + dma_free_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats), 3211 + mgp->ss.fw_stats, mgp->ss.fw_stats_bus); 3212 3213 abort_with_cmd: 3214 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), ··· 3246 /* avoid a memory leak */ 3247 pci_restore_state(pdev); 3248 3249 + bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry); 3250 dma_free_coherent(&pdev->dev, bytes, 3251 + mgp->ss.rx_done.entry, mgp->ss.rx_done.bus); 3252 3253 iounmap(mgp->sram); 3254 ··· 3256 if (mgp->mtrr >= 0) 3257 mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); 3258 #endif 3259 + dma_free_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats), 3260 + mgp->ss.fw_stats, mgp->ss.fw_stats_bus); 3261 3262 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), 3263 mgp->cmd, mgp->cmd_bus);
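The split loop in myri10ge_xmit above enforces one rule: no DMA segment may cross a tx_boundary-aligned address. The helper below is a standalone sketch of that arithmetic, not driver code; count_boundary_segments and its parameters are illustrative names, kernel types from <linux/types.h> are assumed, and tx_boundary is assumed to be a power of two (4096 with the aligned firmware, 2048 otherwise).

static int count_boundary_segments(u32 low, int len, u32 tx_boundary)
{
        int segments = 0;

        while (len) {
                /* next tx_boundary-aligned address strictly above 'low' */
                u32 boundary = (low + tx_boundary) & ~(tx_boundary - 1);
                int seglen = boundary - low;

                if (seglen > len)
                        seglen = len;
                low += seglen;
                len -= seglen;
                segments++;
        }
        return segments;
}

For example, a 9000-byte fragment starting 2048 bytes into a 4096-byte-aligned region is split into 2048 + 4096 + 2856 bytes, i.e. three descriptors.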
+50 -6
drivers/net/myri10ge/myri10ge_mcp.h
··· 10 __be32 low; 11 }; 12 13 - /* 4 Bytes. 8 Bytes for NDIS drivers. */ 14 struct mcp_slot { 15 __sum16 checksum; 16 __be16 length; ··· 144 * a power of 2 number of entries. */ 145 146 MXGEFW_CMD_SET_INTRQ_SIZE, /* in bytes */ 147 148 /* command to bring ethernet interface up. Above parameters 149 * (plus mtu & mac address) must have been exchanged prior ··· 222 MXGEFW_CMD_GET_MAX_RSS_QUEUES, 223 MXGEFW_CMD_ENABLE_RSS_QUEUES, 224 /* data0 = number of slices n (0, 1, ..., n-1) to enable 225 - * data1 = interrupt mode. 0=share one INTx/MSI, 1=use one MSI-X per queue. 226 * If all queues share one interrupt, the driver must have set 227 * RSS_SHARED_INTERRUPT_DMA before enabling queues. 228 */ 229 MXGEFW_CMD_GET_RSS_SHARED_INTERRUPT_MASK_OFFSET, 230 MXGEFW_CMD_SET_RSS_SHARED_INTERRUPT_DMA, 231 /* data0, data1 = bus address lsw, msw */ ··· 246 * 0: disable rss. nic does not distribute receive packets. 247 * 1: enable rss. nic distributes receive packets among queues. 248 * data1 = hash type 249 - * 1: IPV4 250 - * 2: TCP_IPV4 251 - * 3: IPV4 | TCP_IPV4 252 */ 253 254 MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE, 255 /* Return data = the max. size of the entire headers of a IPv6 TSO packet. ··· 269 * 0: Linux/FreeBSD style (NIC default) 270 * 1: NDIS/NetBSD style 271 */ 272 273 MXGEFW_CMD_MDIO_READ, 274 /* data0 = dev_addr (PMA/PMD or PCS ...), data1 = register/addr */ ··· 297 /* Return data = NIC memory offset of mcp_vpump_public_global */ 298 MXGEFW_CMD_RESET_VPUMP, 299 /* Resets the VPUMP state */ 300 }; 301 302 enum myri10ge_mcp_cmd_status { ··· 345 MXGEFW_CMD_ERROR_UNALIGNED, 346 MXGEFW_CMD_ERROR_NO_MDIO, 347 MXGEFW_CMD_ERROR_XFP_FAILURE, 348 - MXGEFW_CMD_ERROR_XFP_ABSENT 349 }; 350 351 #define MXGEFW_OLD_IRQ_DATA_LEN 40
··· 10 __be32 low; 11 }; 12 13 + /* 4 Bytes */ 14 struct mcp_slot { 15 __sum16 checksum; 16 __be16 length; ··· 144 * a power of 2 number of entries. */ 145 146 MXGEFW_CMD_SET_INTRQ_SIZE, /* in bytes */ 147 + #define MXGEFW_CMD_SET_INTRQ_SIZE_FLAG_NO_STRICT_SIZE_CHECK (1 << 31) 148 149 /* command to bring ethernet interface up. Above parameters 150 * (plus mtu & mac address) must have been exchanged prior ··· 221 MXGEFW_CMD_GET_MAX_RSS_QUEUES, 222 MXGEFW_CMD_ENABLE_RSS_QUEUES, 223 /* data0 = number of slices n (0, 1, ..., n-1) to enable 224 + * data1 = interrupt mode. 225 + * 0=share one INTx/MSI, 1=use one MSI-X per queue. 226 * If all queues share one interrupt, the driver must have set 227 * RSS_SHARED_INTERRUPT_DMA before enabling queues. 228 */ 229 + #define MXGEFW_SLICE_INTR_MODE_SHARED 0 230 + #define MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE 1 231 + 232 MXGEFW_CMD_GET_RSS_SHARED_INTERRUPT_MASK_OFFSET, 233 MXGEFW_CMD_SET_RSS_SHARED_INTERRUPT_DMA, 234 /* data0, data1 = bus address lsw, msw */ ··· 241 * 0: disable rss. nic does not distribute receive packets. 242 * 1: enable rss. nic distributes receive packets among queues. 243 * data1 = hash type 244 + * 1: IPV4 (required by RSS) 245 + * 2: TCP_IPV4 (required by RSS) 246 + * 3: IPV4 | TCP_IPV4 (required by RSS) 247 + * 4: source port 248 */ 249 + #define MXGEFW_RSS_HASH_TYPE_IPV4 0x1 250 + #define MXGEFW_RSS_HASH_TYPE_TCP_IPV4 0x2 251 + #define MXGEFW_RSS_HASH_TYPE_SRC_PORT 0x4 252 253 MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE, 254 /* Return data = the max. size of the entire headers of a IPv6 TSO packet. ··· 260 * 0: Linux/FreeBSD style (NIC default) 261 * 1: NDIS/NetBSD style 262 */ 263 + #define MXGEFW_TSO_MODE_LINUX 0 264 + #define MXGEFW_TSO_MODE_NDIS 1 265 266 MXGEFW_CMD_MDIO_READ, 267 /* data0 = dev_addr (PMA/PMD or PCS ...), data1 = register/addr */ ··· 286 /* Return data = NIC memory offset of mcp_vpump_public_global */ 287 MXGEFW_CMD_RESET_VPUMP, 288 /* Resets the VPUMP state */ 289 + 290 + MXGEFW_CMD_SET_RSS_MCP_SLOT_TYPE, 291 + /* data0 = mcp_slot type to use. 292 + * 0 = the default 4B mcp_slot 293 + * 1 = 8B mcp_slot_8 294 + */ 295 + #define MXGEFW_RSS_MCP_SLOT_TYPE_MIN 0 296 + #define MXGEFW_RSS_MCP_SLOT_TYPE_WITH_HASH 1 297 + 298 + MXGEFW_CMD_SET_THROTTLE_FACTOR, 299 + /* set the throttle factor for ethp_z8e 300 + * data0 = throttle_factor 301 + * throttle_factor = 256 * pcie-raw-speed / tx_speed 302 + * tx_speed = 256 * pcie-raw-speed / throttle_factor 303 + * 304 + * For PCI-E x8: pcie-raw-speed == 16Gb/s 305 + * For PCI-E x4: pcie-raw-speed == 8Gb/s 306 + * 307 + * ex1: throttle_factor == 0x1a0 (416), tx_speed == 1.23GB/s == 9.846 Gb/s 308 + * ex2: throttle_factor == 0x200 (512), tx_speed == 1.0GB/s == 8 Gb/s 309 + * 310 + * with tx_boundary == 2048, max-throttle-factor == 8191 => min-speed == 500Mb/s 311 + * with tx_boundary == 4096, max-throttle-factor == 4095 => min-speed == 1Gb/s 312 + */ 313 + 314 + MXGEFW_CMD_VPUMP_UP, 315 + /* Allocates VPump Connection, Send Request and Zero copy buffer address tables */ 316 + MXGEFW_CMD_GET_VPUMP_CLK, 317 + /* Get the lanai clock */ 318 + 319 + MXGEFW_CMD_GET_DCA_OFFSET, 320 + /* offset of dca control for WDMAs */ 321 }; 322 323 enum myri10ge_mcp_cmd_status { ··· 302 MXGEFW_CMD_ERROR_UNALIGNED, 303 MXGEFW_CMD_ERROR_NO_MDIO, 304 MXGEFW_CMD_ERROR_XFP_FAILURE, 305 + MXGEFW_CMD_ERROR_XFP_ABSENT, 306 + MXGEFW_CMD_ERROR_BAD_PCIE_LINK 307 }; 308 309 #define MXGEFW_OLD_IRQ_DATA_LEN 40
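The throttle-factor relation documented for MXGEFW_CMD_SET_THROTTLE_FACTOR is a fixed-point ratio, so the two worked examples in the comment can be checked directly. The helper below is an illustration only (throttle_to_mbps is not a firmware or driver function); it assumes the raw PCIe rates stated above, 16 Gb/s for x8 and 8 Gb/s for x4.

static unsigned int throttle_to_mbps(unsigned int pcie_raw_gbps,
                                     unsigned int throttle_factor)
{
        /* tx_speed = 256 * pcie-raw-speed / throttle_factor:
         * 256 * 16 Gb/s / 0x1a0 (416) -> 9846 Mb/s (~9.846 Gb/s)
         * 256 * 16 Gb/s / 0x200 (512) -> 8000 Mb/s (8 Gb/s)
         */
        return 256 * pcie_raw_gbps * 1000 / throttle_factor;
}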
+14 -25
drivers/net/myri10ge/myri10ge_mcp_gen_header.h
··· 1 #ifndef __MYRI10GE_MCP_GEN_HEADER_H__ 2 #define __MYRI10GE_MCP_GEN_HEADER_H__ 3 4 - /* this file define a standard header used as a first entry point to 5 - * exchange information between firmware/driver and driver. The 6 - * header structure can be anywhere in the mcp. It will usually be in 7 - * the .data section, because some fields needs to be initialized at 8 - * compile time. 9 - * The 32bit word at offset MX_HEADER_PTR_OFFSET in the mcp must 10 - * contains the location of the header. 11 - * 12 - * Typically a MCP will start with the following: 13 - * .text 14 - * .space 52 ! to help catch MEMORY_INT errors 15 - * bt start ! jump to real code 16 - * nop 17 - * .long _gen_mcp_header 18 - * 19 - * The source will have a definition like: 20 - * 21 - * mcp_gen_header_t gen_mcp_header = { 22 - * .header_length = sizeof(mcp_gen_header_t), 23 - * .mcp_type = MCP_TYPE_XXX, 24 - * .version = "something $Id: mcp_gen_header.h,v 1.2 2006/05/13 10:04:35 bgoglin Exp $", 25 - * .mcp_globals = (unsigned)&Globals 26 - * }; 27 - */ 28 29 #define MCP_HEADER_PTR_OFFSET 0x3c 30 ··· 8 #define MCP_TYPE_PCIE 0x70636965 /* "PCIE" pcie-only MCP */ 9 #define MCP_TYPE_ETH 0x45544820 /* "ETH " */ 10 #define MCP_TYPE_MCP0 0x4d435030 /* "MCP0" */ 11 12 struct mcp_gen_header { 13 /* the first 4 fields are filled at compile time */ 14 unsigned header_length; 15 __be32 mcp_type; 16 char version[128]; 17 - unsigned mcp_globals; /* pointer to mcp-type specific structure */ 18 19 /* filled by the MCP at run-time */ 20 unsigned sram_size; ··· 30 * 31 * Never remove any field. Keep everything naturally align. 32 */ 33 }; 34 35 #endif /* __MYRI10GE_MCP_GEN_HEADER_H__ */
··· 1 #ifndef __MYRI10GE_MCP_GEN_HEADER_H__ 2 #define __MYRI10GE_MCP_GEN_HEADER_H__ 3 4 5 #define MCP_HEADER_PTR_OFFSET 0x3c 6 ··· 32 #define MCP_TYPE_PCIE 0x70636965 /* "PCIE" pcie-only MCP */ 33 #define MCP_TYPE_ETH 0x45544820 /* "ETH " */ 34 #define MCP_TYPE_MCP0 0x4d435030 /* "MCP0" */ 35 + #define MCP_TYPE_DFLT 0x20202020 /* " " */ 36 37 struct mcp_gen_header { 38 /* the first 4 fields are filled at compile time */ 39 unsigned header_length; 40 __be32 mcp_type; 41 char version[128]; 42 + unsigned mcp_private; /* pointer to mcp-type specific structure */ 43 44 /* filled by the MCP at run-time */ 45 unsigned sram_size; ··· 53 * 54 * Never remove any field. Keep everything naturally align. 55 */ 56 + 57 + /* Specifies if the running mcp is mcp0, 1, or 2. */ 58 + unsigned char mcp_index; 59 + unsigned char disable_rabbit; 60 + unsigned char unaligned_tlp; 61 + unsigned char pad1; 62 + unsigned counters_addr; 63 + unsigned copy_block_info; /* for small mcps loaded with "lload -d" */ 64 + unsigned short handoff_id_major; /* must be equal */ 65 + unsigned short handoff_id_caps; /* bitfield: new mcp must have superset */ 66 + unsigned msix_table_addr; /* start address of msix table in firmware */ 67 + /* 8 */ 68 }; 69 70 #endif /* __MYRI10GE_MCP_GEN_HEADER_H__ */
+21 -32
drivers/net/niu.c
··· 865 return 0; 866 } 867 868 - 869 static int link_status_10g_serdes(struct niu *np, int *link_up_p) 870 { 871 unsigned long flags; ··· 898 *link_up_p = link_up; 899 return 0; 900 } 901 - 902 903 static int link_status_1g_rgmii(struct niu *np, int *link_up_p) 904 { ··· 954 *link_up_p = link_up; 955 return err; 956 } 957 - 958 959 static int bcm8704_reset(struct niu *np) 960 { ··· 1354 return 0; 1355 } 1356 1357 - 1358 - 1359 static int xcvr_init_1g_rgmii(struct niu *np) 1360 { 1361 int err; ··· 1413 1414 return 0; 1415 } 1416 - 1417 1418 static int mii_init_common(struct niu *np) 1419 { ··· 7002 return 0; 7003 } 7004 7005 - /* niu board models have a trailing dash version incremented 7006 - * with HW rev change. Need to ingnore the dash version while 7007 - * checking for match 7008 - * 7009 - * for example, for the 10G card the current vpd.board_model 7010 - * is 501-5283-04, of which -04 is the dash version and have 7011 - * to be ignored 7012 - */ 7013 - static int niu_board_model_match(struct niu *np, const char *model) 7014 - { 7015 - return !strncmp(np->vpd.board_model, model, strlen(model)); 7016 - } 7017 - 7018 static int niu_pci_vpd_get_nports(struct niu *np) 7019 { 7020 int ports = 0; 7021 7022 - if ((niu_board_model_match(np, NIU_QGC_LP_BM_STR)) || 7023 - (niu_board_model_match(np, NIU_QGC_PEM_BM_STR)) || 7024 - (niu_board_model_match(np, NIU_ALONSO_BM_STR))) { 7025 ports = 4; 7026 - } else if ((niu_board_model_match(np, NIU_2XGF_LP_BM_STR)) || 7027 - (niu_board_model_match(np, NIU_2XGF_PEM_BM_STR)) || 7028 - (niu_board_model_match(np, NIU_FOXXY_BM_STR)) || 7029 - (niu_board_model_match(np, NIU_2XGF_MRVL_BM_STR))) { 7030 ports = 2; 7031 } 7032 ··· 7036 return; 7037 } 7038 7039 - if (!strcmp(np->vpd.model, "SUNW,CP3220") || 7040 - !strcmp(np->vpd.model, "SUNW,CP3260")) { 7041 np->flags |= NIU_FLAGS_10G; 7042 np->flags &= ~NIU_FLAGS_FIBER; 7043 np->flags |= NIU_FLAGS_XCVR_SERDES; ··· 7048 } 7049 if (np->flags & NIU_FLAGS_10G) 7050 np->mac_xcvr = MAC_XCVR_XPCS; 7051 - } else if (niu_board_model_match(np, NIU_FOXXY_BM_STR)) { 7052 np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | 7053 NIU_FLAGS_HOTPLUG_PHY); 7054 } else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { ··· 7524 u32 val; 7525 int err; 7526 7527 - if (!strcmp(np->vpd.model, "SUNW,CP3220") || 7528 - !strcmp(np->vpd.model, "SUNW,CP3260")) { 7529 num_10g = 0; 7530 num_1g = 2; 7531 parent->plat_type = PLAT_TYPE_ATCA_CP3220; ··· 7534 phy_encode(PORT_TYPE_1G, 1) | 7535 phy_encode(PORT_TYPE_1G, 2) | 7536 phy_encode(PORT_TYPE_1G, 3)); 7537 - } else if (niu_board_model_match(np, NIU_FOXXY_BM_STR)) { 7538 num_10g = 2; 7539 num_1g = 0; 7540 parent->num_ports = 2; ··· 7929 struct device_node *dp; 7930 const char *phy_type; 7931 const u8 *mac_addr; 7932 int prop_len; 7933 7934 if (np->parent->plat_type == PLAT_TYPE_NIU) ··· 7983 } 7984 7985 memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); 7986 7987 return 0; 7988 #else
··· 865 return 0; 866 } 867 868 static int link_status_10g_serdes(struct niu *np, int *link_up_p) 869 { 870 unsigned long flags; ··· 899 *link_up_p = link_up; 900 return 0; 901 } 902 903 static int link_status_1g_rgmii(struct niu *np, int *link_up_p) 904 { ··· 956 *link_up_p = link_up; 957 return err; 958 } 959 960 static int bcm8704_reset(struct niu *np) 961 { ··· 1357 return 0; 1358 } 1359 1360 static int xcvr_init_1g_rgmii(struct niu *np) 1361 { 1362 int err; ··· 1418 1419 return 0; 1420 } 1421 1422 static int mii_init_common(struct niu *np) 1423 { ··· 7008 return 0; 7009 } 7010 7011 static int niu_pci_vpd_get_nports(struct niu *np) 7012 { 7013 int ports = 0; 7014 7015 + if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) || 7016 + (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) || 7017 + (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) || 7018 + (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) || 7019 + (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) { 7020 ports = 4; 7021 + } else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) || 7022 + (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) || 7023 + (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) || 7024 + (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) { 7025 ports = 2; 7026 } 7027 ··· 7053 return; 7054 } 7055 7056 + if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || 7057 + !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { 7058 np->flags |= NIU_FLAGS_10G; 7059 np->flags &= ~NIU_FLAGS_FIBER; 7060 np->flags |= NIU_FLAGS_XCVR_SERDES; ··· 7065 } 7066 if (np->flags & NIU_FLAGS_10G) 7067 np->mac_xcvr = MAC_XCVR_XPCS; 7068 + } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { 7069 np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | 7070 NIU_FLAGS_HOTPLUG_PHY); 7071 } else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { ··· 7541 u32 val; 7542 int err; 7543 7544 + if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || 7545 + !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { 7546 num_10g = 0; 7547 num_1g = 2; 7548 parent->plat_type = PLAT_TYPE_ATCA_CP3220; ··· 7551 phy_encode(PORT_TYPE_1G, 1) | 7552 phy_encode(PORT_TYPE_1G, 2) | 7553 phy_encode(PORT_TYPE_1G, 3)); 7554 + } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { 7555 num_10g = 2; 7556 num_1g = 0; 7557 parent->num_ports = 2; ··· 7946 struct device_node *dp; 7947 const char *phy_type; 7948 const u8 *mac_addr; 7949 + const char *model; 7950 int prop_len; 7951 7952 if (np->parent->plat_type == PLAT_TYPE_NIU) ··· 7999 } 8000 8001 memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); 8002 + 8003 + model = of_get_property(dp, "model", &prop_len); 8004 + 8005 + if (model) 8006 + strcpy(np->vpd.model, model); 8007 8008 return 0; 8009 #else
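The niu.c change above replaces prefix matching on vpd.board_model, which had to tolerate a trailing dash revision such as "-04", with exact matching on vpd.model strings (on SPARC, copied from the firmware "model" property as shown above). The two helpers below are only a sketch of that difference; neither function exists in the driver, and <linux/string.h> is assumed.

/* old approach: prefix match, so "501-5283-04" still matches "501-5283" */
static int match_board_model(const char *board_model, const char *prefix)
{
        return !strncmp(board_model, prefix, strlen(prefix));
}

/* new approach: the model string is compared exactly */
static int match_model(const char *model, const char *mdl_str)
{
        return !strcmp(model, mdl_str);
}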
+9
drivers/net/niu.h
··· 2946 #define NIU_ALONSO_BM_STR "373-0202" 2947 #define NIU_FOXXY_BM_STR "501-7961" 2948 #define NIU_2XGF_MRVL_BM_STR "SK-6E82" 2949 2950 #define NIU_VPD_MIN_MAJOR 3 2951 #define NIU_VPD_MIN_MINOR 4
··· 2946 #define NIU_ALONSO_BM_STR "373-0202" 2947 #define NIU_FOXXY_BM_STR "501-7961" 2948 #define NIU_2XGF_MRVL_BM_STR "SK-6E82" 2949 + #define NIU_QGC_LP_MDL_STR "SUNW,pcie-qgc" 2950 + #define NIU_2XGF_LP_MDL_STR "SUNW,pcie-2xgf" 2951 + #define NIU_QGC_PEM_MDL_STR "SUNW,pcie-qgc-pem" 2952 + #define NIU_2XGF_PEM_MDL_STR "SUNW,pcie-2xgf-pem" 2953 + #define NIU_ALONSO_MDL_STR "SUNW,CP3220" 2954 + #define NIU_KIMI_MDL_STR "SUNW,CP3260" 2955 + #define NIU_MARAMBA_MDL_STR "SUNW,pcie-neptune" 2956 + #define NIU_FOXXY_MDL_STR "SUNW,pcie-rfem" 2957 + #define NIU_2XGF_MRVL_MDL_STR "SysKonnect,pcie-2xgf" 2958 2959 #define NIU_VPD_MIN_MAJOR 3 2960 #define NIU_VPD_MIN_MINOR 4
+1
drivers/net/ppp_generic.c
··· 2458 2459 out3: 2460 atomic_dec(&ppp_unit_count); 2461 out2: 2462 mutex_unlock(&all_ppp_mutex); 2463 free_netdev(dev);
··· 2458 2459 out3: 2460 atomic_dec(&ppp_unit_count); 2461 + unregister_netdev(dev); 2462 out2: 2463 mutex_unlock(&all_ppp_mutex); 2464 free_netdev(dev);
+10 -3
drivers/net/pppol2tp.c
··· 1621 end: 1622 release_sock(sk); 1623 1624 - if (error != 0) 1625 - PRINTK(session ? session->debug : -1, PPPOL2TP_MSG_CONTROL, KERN_WARNING, 1626 - "%s: connect failed: %d\n", session->name, error); 1627 1628 return error; 1629 }
··· 1621 end: 1622 release_sock(sk); 1623 1624 + if (error != 0) { 1625 + if (session) 1626 + PRINTK(session->debug, 1627 + PPPOL2TP_MSG_CONTROL, KERN_WARNING, 1628 + "%s: connect failed: %d\n", 1629 + session->name, error); 1630 + else 1631 + PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_WARNING, 1632 + "connect failed: %d\n", error); 1633 + } 1634 1635 return error; 1636 }
+2
drivers/net/ps3_gelic_wireless.c
··· 2474 2475 pr_debug("%s: <-\n", __func__); 2476 2477 pr_debug("%s: destroy queues\n", __func__); 2478 destroy_workqueue(wl->eurus_cmd_queue); 2479 destroy_workqueue(wl->event_queue);
··· 2474 2475 pr_debug("%s: <-\n", __func__); 2476 2477 + free_page((unsigned long)wl->buf); 2478 + 2479 pr_debug("%s: destroy queues\n", __func__); 2480 destroy_workqueue(wl->eurus_cmd_queue); 2481 destroy_workqueue(wl->event_queue);
+2 -2
drivers/net/sfc/Makefile
··· 1 sfc-y += efx.o falcon.o tx.o rx.o falcon_xmac.o \ 2 - i2c-direct.o ethtool.o xfp_phy.o mdio_10g.o \ 3 - tenxpress.o boards.o sfe4001.o 4 5 obj-$(CONFIG_SFC) += sfc.o
··· 1 sfc-y += efx.o falcon.o tx.o rx.o falcon_xmac.o \ 2 + i2c-direct.o selftest.o ethtool.o xfp_phy.o \ 3 + mdio_10g.o tenxpress.o boards.o sfe4001.o 4 5 obj-$(CONFIG_SFC) += sfc.o
+2
drivers/net/sfc/boards.h
··· 22 extern int efx_set_board_info(struct efx_nic *efx, u16 revision_info); 23 extern int sfe4001_poweron(struct efx_nic *efx); 24 extern void sfe4001_poweroff(struct efx_nic *efx); 25 26 #endif
··· 22 extern int efx_set_board_info(struct efx_nic *efx, u16 revision_info); 23 extern int sfe4001_poweron(struct efx_nic *efx); 24 extern void sfe4001_poweroff(struct efx_nic *efx); 25 + /* Are we putting the PHY into flash config mode */ 26 + extern unsigned int sfe4001_phy_flash_cfg; 27 28 #endif
+3 -1
drivers/net/sfc/efx.c
··· 1873 tx_queue->queue = i; 1874 tx_queue->buffer = NULL; 1875 tx_queue->channel = &efx->channel[0]; /* for safety */ 1876 } 1877 for (i = 0; i < EFX_MAX_RX_QUEUES; i++) { 1878 rx_queue = &efx->rx_queue[i]; ··· 2072 net_dev = alloc_etherdev(sizeof(*efx)); 2073 if (!net_dev) 2074 return -ENOMEM; 2075 - net_dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA; 2076 if (lro) 2077 net_dev->features |= NETIF_F_LRO; 2078 efx = net_dev->priv;
··· 1873 tx_queue->queue = i; 1874 tx_queue->buffer = NULL; 1875 tx_queue->channel = &efx->channel[0]; /* for safety */ 1876 + tx_queue->tso_headers_free = NULL; 1877 } 1878 for (i = 0; i < EFX_MAX_RX_QUEUES; i++) { 1879 rx_queue = &efx->rx_queue[i]; ··· 2071 net_dev = alloc_etherdev(sizeof(*efx)); 2072 if (!net_dev) 2073 return -ENOMEM; 2074 + net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG | 2075 + NETIF_F_HIGHDMA | NETIF_F_TSO); 2076 if (lro) 2077 net_dev->features |= NETIF_F_LRO; 2078 efx = net_dev->priv;
+49
drivers/net/sfc/enum.h
··· 10 #ifndef EFX_ENUM_H 11 #define EFX_ENUM_H 12 13 /*****************************************************************************/ 14 15 /**
··· 10 #ifndef EFX_ENUM_H 11 #define EFX_ENUM_H 12 13 + /** 14 + * enum efx_loopback_mode - loopback modes 15 + * @LOOPBACK_NONE: no loopback 16 + * @LOOPBACK_XGMII: loopback within MAC at XGMII level 17 + * @LOOPBACK_XGXS: loopback within MAC at XGXS level 18 + * @LOOPBACK_XAUI: loopback within MAC at XAUI level 19 + * @LOOPBACK_PHYXS: loopback within PHY at PHYXS level 20 + * @LOOPBACK_PCS: loopback within PHY at PCS level 21 + * @LOOPBACK_PMAPMD: loopback within PHY at PMAPMD level 22 + * @LOOPBACK_NETWORK: reflecting loopback (even further than furthest!) 23 + */ 24 + /* Please keep in order and up-to-date w.r.t the following two #defines */ 25 + enum efx_loopback_mode { 26 + LOOPBACK_NONE = 0, 27 + LOOPBACK_MAC = 1, 28 + LOOPBACK_XGMII = 2, 29 + LOOPBACK_XGXS = 3, 30 + LOOPBACK_XAUI = 4, 31 + LOOPBACK_PHY = 5, 32 + LOOPBACK_PHYXS = 6, 33 + LOOPBACK_PCS = 7, 34 + LOOPBACK_PMAPMD = 8, 35 + LOOPBACK_NETWORK = 9, 36 + LOOPBACK_MAX 37 + }; 38 + 39 + #define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD 40 + 41 + extern const char *efx_loopback_mode_names[]; 42 + #define LOOPBACK_MODE_NAME(mode) \ 43 + STRING_TABLE_LOOKUP(mode, efx_loopback_mode) 44 + #define LOOPBACK_MODE(efx) \ 45 + LOOPBACK_MODE_NAME(efx->loopback_mode) 46 + 47 + /* These loopbacks occur within the controller */ 48 + #define LOOPBACKS_10G_INTERNAL ((1 << LOOPBACK_XGMII)| \ 49 + (1 << LOOPBACK_XGXS) | \ 50 + (1 << LOOPBACK_XAUI)) 51 + 52 + #define LOOPBACK_MASK(_efx) \ 53 + (1 << (_efx)->loopback_mode) 54 + 55 + #define LOOPBACK_INTERNAL(_efx) \ 56 + ((LOOPBACKS_10G_INTERNAL & LOOPBACK_MASK(_efx)) ? 1 : 0) 57 + 58 + #define LOOPBACK_OUT_OF(_from, _to, _mask) \ 59 + (((LOOPBACK_MASK(_from) & (_mask)) && \ 60 + ((LOOPBACK_MASK(_to) & (_mask)) == 0)) ? 1 : 0) 61 + 62 /*****************************************************************************/ 63 64 /**
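The new loopback macros reduce to simple bit tests against the current mode. As an illustration only (is_internal_loopback is not part of the driver), the equivalent open-coded check would be:

static int is_internal_loopback(enum efx_loopback_mode mode)
{
        /* same test as LOOPBACK_INTERNAL(), taking the mode directly:
         * LOOPBACK_XAUI -> 1, LOOPBACK_PMAPMD -> 0, LOOPBACK_NONE -> 0 */
        return (LOOPBACKS_10G_INTERNAL & (1 << mode)) ? 1 : 0;
}

Only the XGMII, XGXS and XAUI modes loop back inside the controller; the PHY-level and network modes do not.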
+258 -1
drivers/net/sfc/ethtool.c
··· 12 #include <linux/ethtool.h> 13 #include <linux/rtnetlink.h> 14 #include "net_driver.h" 15 #include "efx.h" 16 #include "ethtool.h" 17 #include "falcon.h" 18 #include "gmii.h" 19 #include "mac.h" 20 21 static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable); 22 ··· 231 strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info)); 232 } 233 234 static int efx_ethtool_get_stats_count(struct net_device *net_dev) 235 { 236 return EFX_ETHTOOL_NUM_STATS; 237 } 238 239 static void efx_ethtool_get_strings(struct net_device *net_dev, 240 u32 string_set, u8 *strings) 241 { 242 struct ethtool_string *ethtool_strings = 243 (struct ethtool_string *)strings; 244 int i; 245 246 - if (string_set == ETH_SS_STATS) 247 for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) 248 strncpy(ethtool_strings[i].name, 249 efx_ethtool_stats[i].name, 250 sizeof(ethtool_strings[i].name)); 251 } 252 253 static void efx_ethtool_get_stats(struct net_device *net_dev, ··· 442 } 443 } 444 445 static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable) 446 { 447 struct efx_nic *efx = net_dev->priv; ··· 468 return rc; 469 470 efx_flush_queues(efx); 471 472 return 0; 473 } ··· 498 struct efx_nic *efx = net_dev->priv; 499 500 return efx->rx_checksum_enabled; 501 } 502 503 /* Restart autonegotiation */ ··· 704 .set_tx_csum = efx_ethtool_set_tx_csum, 705 .get_sg = ethtool_op_get_sg, 706 .set_sg = ethtool_op_set_sg, 707 .get_flags = ethtool_op_get_flags, 708 .set_flags = ethtool_op_set_flags, 709 .get_strings = efx_ethtool_get_strings, 710 .phys_id = efx_ethtool_phys_id, 711 .get_stats_count = efx_ethtool_get_stats_count,
··· 12 #include <linux/ethtool.h> 13 #include <linux/rtnetlink.h> 14 #include "net_driver.h" 15 + #include "selftest.h" 16 #include "efx.h" 17 #include "ethtool.h" 18 #include "falcon.h" 19 #include "gmii.h" 20 #include "mac.h" 21 + 22 + const char *efx_loopback_mode_names[] = { 23 + [LOOPBACK_NONE] = "NONE", 24 + [LOOPBACK_MAC] = "MAC", 25 + [LOOPBACK_XGMII] = "XGMII", 26 + [LOOPBACK_XGXS] = "XGXS", 27 + [LOOPBACK_XAUI] = "XAUI", 28 + [LOOPBACK_PHY] = "PHY", 29 + [LOOPBACK_PHYXS] = "PHY(XS)", 30 + [LOOPBACK_PCS] = "PHY(PCS)", 31 + [LOOPBACK_PMAPMD] = "PHY(PMAPMD)", 32 + [LOOPBACK_NETWORK] = "NETWORK", 33 + }; 34 35 static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable); 36 ··· 217 strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info)); 218 } 219 220 + /** 221 + * efx_fill_test - fill in an individual self-test entry 222 + * @test_index: Index of the test 223 + * @strings: Ethtool strings, or %NULL 224 + * @data: Ethtool test results, or %NULL 225 + * @test: Pointer to test result (used only if data != %NULL) 226 + * @unit_format: Unit name format (e.g. "channel\%d") 227 + * @unit_id: Unit id (e.g. 0 for "channel0") 228 + * @test_format: Test name format (e.g. "loopback.\%s.tx.sent") 229 + * @test_id: Test id (e.g. "PHY" for "loopback.PHY.tx_sent") 230 + * 231 + * Fill in an individual self-test entry. 232 + */ 233 + static void efx_fill_test(unsigned int test_index, 234 + struct ethtool_string *strings, u64 *data, 235 + int *test, const char *unit_format, int unit_id, 236 + const char *test_format, const char *test_id) 237 + { 238 + struct ethtool_string unit_str, test_str; 239 + 240 + /* Fill data value, if applicable */ 241 + if (data) 242 + data[test_index] = *test; 243 + 244 + /* Fill string, if applicable */ 245 + if (strings) { 246 + snprintf(unit_str.name, sizeof(unit_str.name), 247 + unit_format, unit_id); 248 + snprintf(test_str.name, sizeof(test_str.name), 249 + test_format, test_id); 250 + snprintf(strings[test_index].name, 251 + sizeof(strings[test_index].name), 252 + "%-9s%-17s", unit_str.name, test_str.name); 253 + } 254 + } 255 + 256 + #define EFX_PORT_NAME "port%d", 0 257 + #define EFX_CHANNEL_NAME(_channel) "channel%d", _channel->channel 258 + #define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue 259 + #define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue 260 + #define EFX_LOOPBACK_NAME(_mode, _counter) \ 261 + "loopback.%s." 
_counter, LOOPBACK_MODE_NAME(mode) 262 + 263 + /** 264 + * efx_fill_loopback_test - fill in a block of loopback self-test entries 265 + * @efx: Efx NIC 266 + * @lb_tests: Efx loopback self-test results structure 267 + * @mode: Loopback test mode 268 + * @test_index: Starting index of the test 269 + * @strings: Ethtool strings, or %NULL 270 + * @data: Ethtool test results, or %NULL 271 + */ 272 + static int efx_fill_loopback_test(struct efx_nic *efx, 273 + struct efx_loopback_self_tests *lb_tests, 274 + enum efx_loopback_mode mode, 275 + unsigned int test_index, 276 + struct ethtool_string *strings, u64 *data) 277 + { 278 + struct efx_tx_queue *tx_queue; 279 + 280 + efx_for_each_tx_queue(tx_queue, efx) { 281 + efx_fill_test(test_index++, strings, data, 282 + &lb_tests->tx_sent[tx_queue->queue], 283 + EFX_TX_QUEUE_NAME(tx_queue), 284 + EFX_LOOPBACK_NAME(mode, "tx_sent")); 285 + efx_fill_test(test_index++, strings, data, 286 + &lb_tests->tx_done[tx_queue->queue], 287 + EFX_TX_QUEUE_NAME(tx_queue), 288 + EFX_LOOPBACK_NAME(mode, "tx_done")); 289 + } 290 + efx_fill_test(test_index++, strings, data, 291 + &lb_tests->rx_good, 292 + EFX_PORT_NAME, 293 + EFX_LOOPBACK_NAME(mode, "rx_good")); 294 + efx_fill_test(test_index++, strings, data, 295 + &lb_tests->rx_bad, 296 + EFX_PORT_NAME, 297 + EFX_LOOPBACK_NAME(mode, "rx_bad")); 298 + 299 + return test_index; 300 + } 301 + 302 + /** 303 + * efx_ethtool_fill_self_tests - get self-test details 304 + * @efx: Efx NIC 305 + * @tests: Efx self-test results structure, or %NULL 306 + * @strings: Ethtool strings, or %NULL 307 + * @data: Ethtool test results, or %NULL 308 + */ 309 + static int efx_ethtool_fill_self_tests(struct efx_nic *efx, 310 + struct efx_self_tests *tests, 311 + struct ethtool_string *strings, 312 + u64 *data) 313 + { 314 + struct efx_channel *channel; 315 + unsigned int n = 0; 316 + enum efx_loopback_mode mode; 317 + 318 + /* Interrupt */ 319 + efx_fill_test(n++, strings, data, &tests->interrupt, 320 + "core", 0, "interrupt", NULL); 321 + 322 + /* Event queues */ 323 + efx_for_each_channel(channel, efx) { 324 + efx_fill_test(n++, strings, data, 325 + &tests->eventq_dma[channel->channel], 326 + EFX_CHANNEL_NAME(channel), 327 + "eventq.dma", NULL); 328 + efx_fill_test(n++, strings, data, 329 + &tests->eventq_int[channel->channel], 330 + EFX_CHANNEL_NAME(channel), 331 + "eventq.int", NULL); 332 + efx_fill_test(n++, strings, data, 333 + &tests->eventq_poll[channel->channel], 334 + EFX_CHANNEL_NAME(channel), 335 + "eventq.poll", NULL); 336 + } 337 + 338 + /* PHY presence */ 339 + efx_fill_test(n++, strings, data, &tests->phy_ok, 340 + EFX_PORT_NAME, "phy_ok", NULL); 341 + 342 + /* Loopback tests */ 343 + efx_fill_test(n++, strings, data, &tests->loopback_speed, 344 + EFX_PORT_NAME, "loopback.speed", NULL); 345 + efx_fill_test(n++, strings, data, &tests->loopback_full_duplex, 346 + EFX_PORT_NAME, "loopback.full_duplex", NULL); 347 + for (mode = LOOPBACK_NONE; mode < LOOPBACK_TEST_MAX; mode++) { 348 + if (!(efx->loopback_modes & (1 << mode))) 349 + continue; 350 + n = efx_fill_loopback_test(efx, 351 + &tests->loopback[mode], mode, n, 352 + strings, data); 353 + } 354 + 355 + return n; 356 + } 357 + 358 static int efx_ethtool_get_stats_count(struct net_device *net_dev) 359 { 360 return EFX_ETHTOOL_NUM_STATS; 361 } 362 363 + static int efx_ethtool_self_test_count(struct net_device *net_dev) 364 + { 365 + struct efx_nic *efx = net_dev->priv; 366 + 367 + return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL); 368 + } 369 + 370 static void 
efx_ethtool_get_strings(struct net_device *net_dev, 371 u32 string_set, u8 *strings) 372 { 373 + struct efx_nic *efx = net_dev->priv; 374 struct ethtool_string *ethtool_strings = 375 (struct ethtool_string *)strings; 376 int i; 377 378 + switch (string_set) { 379 + case ETH_SS_STATS: 380 for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) 381 strncpy(ethtool_strings[i].name, 382 efx_ethtool_stats[i].name, 383 sizeof(ethtool_strings[i].name)); 384 + break; 385 + case ETH_SS_TEST: 386 + efx_ethtool_fill_self_tests(efx, NULL, 387 + ethtool_strings, NULL); 388 + break; 389 + default: 390 + /* No other string sets */ 391 + break; 392 + } 393 } 394 395 static void efx_ethtool_get_stats(struct net_device *net_dev, ··· 272 } 273 } 274 275 + static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable) 276 + { 277 + int rc; 278 + 279 + /* Our TSO requires TX checksumming, so force TX checksumming 280 + * on when TSO is enabled. 281 + */ 282 + if (enable) { 283 + rc = efx_ethtool_set_tx_csum(net_dev, 1); 284 + if (rc) 285 + return rc; 286 + } 287 + 288 + return ethtool_op_set_tso(net_dev, enable); 289 + } 290 + 291 static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable) 292 { 293 struct efx_nic *efx = net_dev->priv; ··· 282 return rc; 283 284 efx_flush_queues(efx); 285 + 286 + /* Our TSO requires TX checksumming, so disable TSO when 287 + * checksumming is disabled 288 + */ 289 + if (!enable) { 290 + rc = efx_ethtool_set_tso(net_dev, 0); 291 + if (rc) 292 + return rc; 293 + } 294 295 return 0; 296 } ··· 303 struct efx_nic *efx = net_dev->priv; 304 305 return efx->rx_checksum_enabled; 306 + } 307 + 308 + static void efx_ethtool_self_test(struct net_device *net_dev, 309 + struct ethtool_test *test, u64 *data) 310 + { 311 + struct efx_nic *efx = net_dev->priv; 312 + struct efx_self_tests efx_tests; 313 + int offline, already_up; 314 + int rc; 315 + 316 + ASSERT_RTNL(); 317 + if (efx->state != STATE_RUNNING) { 318 + rc = -EIO; 319 + goto fail1; 320 + } 321 + 322 + /* We need rx buffers and interrupts. */ 323 + already_up = (efx->net_dev->flags & IFF_UP); 324 + if (!already_up) { 325 + rc = dev_open(efx->net_dev); 326 + if (rc) { 327 + EFX_ERR(efx, "failed opening device.\n"); 328 + goto fail2; 329 + } 330 + } 331 + 332 + memset(&efx_tests, 0, sizeof(efx_tests)); 333 + offline = (test->flags & ETH_TEST_FL_OFFLINE); 334 + 335 + /* Perform online self tests first */ 336 + rc = efx_online_test(efx, &efx_tests); 337 + if (rc) 338 + goto out; 339 + 340 + /* Perform offline tests only if online tests passed */ 341 + if (offline) { 342 + /* Stop the kernel from sending packets during the test. */ 343 + efx_stop_queue(efx); 344 + rc = efx_flush_queues(efx); 345 + if (!rc) 346 + rc = efx_offline_test(efx, &efx_tests, 347 + efx->loopback_modes); 348 + efx_wake_queue(efx); 349 + } 350 + 351 + out: 352 + if (!already_up) 353 + dev_close(efx->net_dev); 354 + 355 + EFX_LOG(efx, "%s all %sline self-tests\n", 356 + rc == 0 ? "passed" : "failed", offline ? 
"off" : "on"); 357 + 358 + fail2: 359 + fail1: 360 + /* Fill ethtool results structures */ 361 + efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data); 362 + if (rc) 363 + test->flags |= ETH_TEST_FL_FAILED; 364 } 365 366 /* Restart autonegotiation */ ··· 451 .set_tx_csum = efx_ethtool_set_tx_csum, 452 .get_sg = ethtool_op_get_sg, 453 .set_sg = ethtool_op_set_sg, 454 + .get_tso = ethtool_op_get_tso, 455 + .set_tso = efx_ethtool_set_tso, 456 .get_flags = ethtool_op_get_flags, 457 .set_flags = ethtool_op_set_flags, 458 + .self_test_count = efx_ethtool_self_test_count, 459 + .self_test = efx_ethtool_self_test, 460 .get_strings = efx_ethtool_get_strings, 461 .phys_id = efx_ethtool_phys_id, 462 .get_stats_count = efx_ethtool_get_stats_count,
+5 -3
drivers/net/sfc/falcon.c
··· 1129 case RX_RECOVERY_EV_DECODE: 1130 EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. " 1131 "Resetting.\n", channel->channel); 1132 efx_schedule_reset(efx, 1133 EFX_WORKAROUND_6555(efx) ? 1134 RESET_TYPE_RX_RECOVERY : ··· 1732 efx_oword_t temp; 1733 int count; 1734 1735 - if (FALCON_REV(efx) < FALCON_REV_B0) 1736 return; 1737 1738 falcon_read(efx, &temp, MAC0_CTRL_REG_KER); ··· 2093 efx->phy_type); 2094 return -1; 2095 } 2096 return 0; 2097 } 2098 ··· 2472 fail5: 2473 falcon_free_buffer(efx, &efx->irq_status); 2474 fail4: 2475 - /* fall-thru */ 2476 fail3: 2477 if (nic_data->pci_dev2) { 2478 pci_dev_put(nic_data->pci_dev2); 2479 nic_data->pci_dev2 = NULL; 2480 } 2481 fail2: 2482 - /* fall-thru */ 2483 fail1: 2484 kfree(efx->nic_data); 2485 return rc;
··· 1129 case RX_RECOVERY_EV_DECODE: 1130 EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. " 1131 "Resetting.\n", channel->channel); 1132 + atomic_inc(&efx->rx_reset); 1133 efx_schedule_reset(efx, 1134 EFX_WORKAROUND_6555(efx) ? 1135 RESET_TYPE_RX_RECOVERY : ··· 1731 efx_oword_t temp; 1732 int count; 1733 1734 + if ((FALCON_REV(efx) < FALCON_REV_B0) || 1735 + (efx->loopback_mode != LOOPBACK_NONE)) 1736 return; 1737 1738 falcon_read(efx, &temp, MAC0_CTRL_REG_KER); ··· 2091 efx->phy_type); 2092 return -1; 2093 } 2094 + 2095 + efx->loopback_modes = LOOPBACKS_10G_INTERNAL | efx->phy_op->loopbacks; 2096 return 0; 2097 } 2098 ··· 2468 fail5: 2469 falcon_free_buffer(efx, &efx->irq_status); 2470 fail4: 2471 fail3: 2472 if (nic_data->pci_dev2) { 2473 pci_dev_put(nic_data->pci_dev2); 2474 nic_data->pci_dev2 = NULL; 2475 } 2476 fail2: 2477 fail1: 2478 kfree(efx->nic_data); 2479 return rc;
+15 -1
drivers/net/sfc/falcon_hwdefs.h
··· 636 #define XX_HIDRVA_WIDTH 1 637 #define XX_LODRVA_LBN 8 638 #define XX_LODRVA_WIDTH 1 639 640 #define XX_TXDRV_CTL_REG_MAC 0x12 641 #define XX_DEQD_LBN 28 ··· 664 #define XX_DTXA_WIDTH 4 665 666 /* XAUI XGXS core status register */ 667 - #define XX_FORCE_SIG_DECODE_FORCED 0xff 668 #define XX_CORE_STAT_REG_MAC 0x16 669 #define XX_ALIGN_DONE_LBN 20 670 #define XX_ALIGN_DONE_WIDTH 1 671 #define XX_SYNC_STAT_LBN 16
··· 636 #define XX_HIDRVA_WIDTH 1 637 #define XX_LODRVA_LBN 8 638 #define XX_LODRVA_WIDTH 1 639 + #define XX_LPBKD_LBN 3 640 + #define XX_LPBKD_WIDTH 1 641 + #define XX_LPBKC_LBN 2 642 + #define XX_LPBKC_WIDTH 1 643 + #define XX_LPBKB_LBN 1 644 + #define XX_LPBKB_WIDTH 1 645 + #define XX_LPBKA_LBN 0 646 + #define XX_LPBKA_WIDTH 1 647 648 #define XX_TXDRV_CTL_REG_MAC 0x12 649 #define XX_DEQD_LBN 28 ··· 656 #define XX_DTXA_WIDTH 4 657 658 /* XAUI XGXS core status register */ 659 #define XX_CORE_STAT_REG_MAC 0x16 660 + #define XX_FORCE_SIG_LBN 24 661 + #define XX_FORCE_SIG_WIDTH 8 662 + #define XX_FORCE_SIG_DECODE_FORCED 0xff 663 + #define XX_XGXS_LB_EN_LBN 23 664 + #define XX_XGXS_LB_EN_WIDTH 1 665 + #define XX_XGMII_LB_EN_LBN 22 666 + #define XX_XGMII_LB_EN_WIDTH 1 667 #define XX_ALIGN_DONE_LBN 20 668 #define XX_ALIGN_DONE_WIDTH 1 669 #define XX_SYNC_STAT_LBN 16
+76 -6
drivers/net/sfc/falcon_xmac.c
··· 32 (FALCON_XMAC_REGBANK + ((mac_reg) * FALCON_XMAC_REG_SIZE)) 33 34 void falcon_xmac_writel(struct efx_nic *efx, 35 - efx_dword_t *value, unsigned int mac_reg) 36 { 37 efx_oword_t temp; 38 ··· 68 return 0; 69 udelay(10); 70 } 71 72 EFX_ERR(efx, "timed out waiting for XMAC core reset\n"); 73 return -ETIMEDOUT; ··· 227 /* The ISR latches, so clear it and re-read */ 228 falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0); 229 falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0); 230 - 231 if (EFX_DWORD_FIELD(reg, XM_LCLFLT) || 232 EFX_DWORD_FIELD(reg, XM_RMTFLT)) { 233 EFX_INFO(efx, "MGT_INT: "EFX_DWORD_FMT"\n", EFX_DWORD_VAL(reg)); ··· 241 { 242 efx_dword_t reg; 243 244 - if (FALCON_REV(efx) < FALCON_REV_B0) 245 return; 246 247 /* Flush the ISR */ ··· 287 { 288 efx_dword_t reg; 289 int align_done, sync_status, link_ok = 0; 290 291 /* Read link status */ 292 falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC); ··· 381 falcon_xmac_writel(efx, &reg, XM_ADR_HI_REG_MAC); 382 } 383 384 /* Try and bring the Falcon side of the Falcon-Phy XAUI link fails 385 * to come back up. Bash it until it comes back up */ 386 static int falcon_check_xaui_link_up(struct efx_nic *efx) ··· 444 tries = EFX_WORKAROUND_5147(efx) ? 5 : 1; 445 max_tries = tries; 446 447 - if (efx->phy_type == PHY_TYPE_NONE) 448 return 0; 449 450 while (tries) { ··· 471 falcon_mask_status_intr(efx, 0); 472 473 falcon_deconfigure_mac_wrapper(efx); 474 efx->phy_op->reconfigure(efx); 475 falcon_reconfigure_xmac_core(efx); 476 falcon_reconfigure_mac_wrapper(efx); 477 478 /* Ensure XAUI link is up */ ··· 559 (mac_stats->rx_bytes - mac_stats->rx_good_bytes); 560 } 561 562 - #define EFX_XAUI_RETRAIN_MAX 8 563 - 564 int falcon_check_xmac(struct efx_nic *efx) 565 { 566 unsigned xaui_link_ok; 567 int rc; 568 569 falcon_mask_status_intr(efx, 0); 570 xaui_link_ok = falcon_xaui_link_ok(efx);
··· 32 (FALCON_XMAC_REGBANK + ((mac_reg) * FALCON_XMAC_REG_SIZE)) 33 34 void falcon_xmac_writel(struct efx_nic *efx, 35 + efx_dword_t *value, unsigned int mac_reg) 36 { 37 efx_oword_t temp; 38 ··· 68 return 0; 69 udelay(10); 70 } 71 + 72 + /* This often fails when DSP is disabled, ignore it */ 73 + if (sfe4001_phy_flash_cfg != 0) 74 + return 0; 75 76 EFX_ERR(efx, "timed out waiting for XMAC core reset\n"); 77 return -ETIMEDOUT; ··· 223 /* The ISR latches, so clear it and re-read */ 224 falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0); 225 falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0); 226 + 227 if (EFX_DWORD_FIELD(reg, XM_LCLFLT) || 228 EFX_DWORD_FIELD(reg, XM_RMTFLT)) { 229 EFX_INFO(efx, "MGT_INT: "EFX_DWORD_FMT"\n", EFX_DWORD_VAL(reg)); ··· 237 { 238 efx_dword_t reg; 239 240 + if ((FALCON_REV(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx)) 241 return; 242 243 /* Flush the ISR */ ··· 283 { 284 efx_dword_t reg; 285 int align_done, sync_status, link_ok = 0; 286 + 287 + if (LOOPBACK_INTERNAL(efx)) 288 + return 1; 289 290 /* Read link status */ 291 falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC); ··· 374 falcon_xmac_writel(efx, &reg, XM_ADR_HI_REG_MAC); 375 } 376 377 + static void falcon_reconfigure_xgxs_core(struct efx_nic *efx) 378 + { 379 + efx_dword_t reg; 380 + int xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS) ? 1 : 0; 381 + int xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI) ? 1 : 0; 382 + int xgmii_loopback = 383 + (efx->loopback_mode == LOOPBACK_XGMII) ? 1 : 0; 384 + 385 + /* XGXS block is flaky and will need to be reset if moving 386 + * into our out of XGMII, XGXS or XAUI loopbacks. */ 387 + if (EFX_WORKAROUND_5147(efx)) { 388 + int old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback; 389 + int reset_xgxs; 390 + 391 + falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC); 392 + old_xgxs_loopback = EFX_DWORD_FIELD(reg, XX_XGXS_LB_EN); 393 + old_xgmii_loopback = EFX_DWORD_FIELD(reg, XX_XGMII_LB_EN); 394 + 395 + falcon_xmac_readl(efx, &reg, XX_SD_CTL_REG_MAC); 396 + old_xaui_loopback = EFX_DWORD_FIELD(reg, XX_LPBKA); 397 + 398 + /* The PHY driver may have turned XAUI off */ 399 + reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) || 400 + (xaui_loopback != old_xaui_loopback) || 401 + (xgmii_loopback != old_xgmii_loopback)); 402 + if (reset_xgxs) { 403 + falcon_xmac_readl(efx, &reg, XX_PWR_RST_REG_MAC); 404 + EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 1); 405 + EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 1); 406 + falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC); 407 + udelay(1); 408 + EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 0); 409 + EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 0); 410 + falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC); 411 + udelay(1); 412 + } 413 + } 414 + 415 + falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC); 416 + EFX_SET_DWORD_FIELD(reg, XX_FORCE_SIG, 417 + (xgxs_loopback || xaui_loopback) ? 
418 + XX_FORCE_SIG_DECODE_FORCED : 0); 419 + EFX_SET_DWORD_FIELD(reg, XX_XGXS_LB_EN, xgxs_loopback); 420 + EFX_SET_DWORD_FIELD(reg, XX_XGMII_LB_EN, xgmii_loopback); 421 + falcon_xmac_writel(efx, &reg, XX_CORE_STAT_REG_MAC); 422 + 423 + falcon_xmac_readl(efx, &reg, XX_SD_CTL_REG_MAC); 424 + EFX_SET_DWORD_FIELD(reg, XX_LPBKD, xaui_loopback); 425 + EFX_SET_DWORD_FIELD(reg, XX_LPBKC, xaui_loopback); 426 + EFX_SET_DWORD_FIELD(reg, XX_LPBKB, xaui_loopback); 427 + EFX_SET_DWORD_FIELD(reg, XX_LPBKA, xaui_loopback); 428 + falcon_xmac_writel(efx, &reg, XX_SD_CTL_REG_MAC); 429 + } 430 + 431 + 432 /* Try and bring the Falcon side of the Falcon-Phy XAUI link fails 433 * to come back up. Bash it until it comes back up */ 434 static int falcon_check_xaui_link_up(struct efx_nic *efx) ··· 382 tries = EFX_WORKAROUND_5147(efx) ? 5 : 1; 383 max_tries = tries; 384 385 + if ((efx->loopback_mode == LOOPBACK_NETWORK) || 386 + (efx->phy_type == PHY_TYPE_NONE)) 387 return 0; 388 389 while (tries) { ··· 408 falcon_mask_status_intr(efx, 0); 409 410 falcon_deconfigure_mac_wrapper(efx); 411 + 412 + efx->tx_disabled = LOOPBACK_INTERNAL(efx); 413 efx->phy_op->reconfigure(efx); 414 + 415 + falcon_reconfigure_xgxs_core(efx); 416 falcon_reconfigure_xmac_core(efx); 417 + 418 falcon_reconfigure_mac_wrapper(efx); 419 420 /* Ensure XAUI link is up */ ··· 491 (mac_stats->rx_bytes - mac_stats->rx_good_bytes); 492 } 493 494 int falcon_check_xmac(struct efx_nic *efx) 495 { 496 unsigned xaui_link_ok; 497 int rc; 498 + 499 + if ((efx->loopback_mode == LOOPBACK_NETWORK) || 500 + (efx->phy_type == PHY_TYPE_NONE)) 501 + return 0; 502 503 falcon_mask_status_intr(efx, 0); 504 xaui_link_ok = falcon_xaui_link_ok(efx);
+78
drivers/net/sfc/mdio_10g.c
··· 44 int status; 45 int phy_id = efx->mii.phy_id; 46 47 /* Read MMD STATUS2 to check it is responding. */ 48 status = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_STAT2); 49 if (((status >> MDIO_MMDREG_STAT2_PRESENT_LBN) & ··· 167 int mmd = 0; 168 int good; 169 170 while (mmd_mask) { 171 if (mmd_mask & 1) { 172 /* Double reads because link state is latched, and a ··· 199 mmd++; 200 } 201 return ok; 202 } 203 204 /**
··· 44 int status; 45 int phy_id = efx->mii.phy_id; 46 47 + if (LOOPBACK_INTERNAL(efx)) 48 + return 0; 49 + 50 /* Read MMD STATUS2 to check it is responding. */ 51 status = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_STAT2); 52 if (((status >> MDIO_MMDREG_STAT2_PRESENT_LBN) & ··· 164 int mmd = 0; 165 int good; 166 167 + /* If the port is in loopback, then we should only consider a subset 168 + * of mmd's */ 169 + if (LOOPBACK_INTERNAL(efx)) 170 + return 1; 171 + else if (efx->loopback_mode == LOOPBACK_NETWORK) 172 + return 0; 173 + else if (efx->loopback_mode == LOOPBACK_PHYXS) 174 + mmd_mask &= ~(MDIO_MMDREG_DEVS0_PHYXS | 175 + MDIO_MMDREG_DEVS0_PCS | 176 + MDIO_MMDREG_DEVS0_PMAPMD); 177 + else if (efx->loopback_mode == LOOPBACK_PCS) 178 + mmd_mask &= ~(MDIO_MMDREG_DEVS0_PCS | 179 + MDIO_MMDREG_DEVS0_PMAPMD); 180 + else if (efx->loopback_mode == LOOPBACK_PMAPMD) 181 + mmd_mask &= ~MDIO_MMDREG_DEVS0_PMAPMD; 182 + 183 while (mmd_mask) { 184 if (mmd_mask & 1) { 185 /* Double reads because link state is latched, and a ··· 180 mmd++; 181 } 182 return ok; 183 + } 184 + 185 + void mdio_clause45_transmit_disable(struct efx_nic *efx) 186 + { 187 + int phy_id = efx->mii.phy_id; 188 + int ctrl1, ctrl2; 189 + 190 + ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD, 191 + MDIO_MMDREG_TXDIS); 192 + if (efx->tx_disabled) 193 + ctrl2 |= (1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN); 194 + else 195 + ctrl1 &= ~(1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN); 196 + if (ctrl1 != ctrl2) 197 + mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD, 198 + MDIO_MMDREG_TXDIS, ctrl2); 199 + } 200 + 201 + void mdio_clause45_phy_reconfigure(struct efx_nic *efx) 202 + { 203 + int phy_id = efx->mii.phy_id; 204 + int ctrl1, ctrl2; 205 + 206 + /* Handle (with debouncing) PMA/PMD loopback */ 207 + ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD, 208 + MDIO_MMDREG_CTRL1); 209 + 210 + if (efx->loopback_mode == LOOPBACK_PMAPMD) 211 + ctrl2 |= (1 << MDIO_PMAPMD_CTRL1_LBACK_LBN); 212 + else 213 + ctrl2 &= ~(1 << MDIO_PMAPMD_CTRL1_LBACK_LBN); 214 + 215 + if (ctrl1 != ctrl2) 216 + mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD, 217 + MDIO_MMDREG_CTRL1, ctrl2); 218 + 219 + /* Handle (with debouncing) PCS loopback */ 220 + ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PCS, 221 + MDIO_MMDREG_CTRL1); 222 + if (efx->loopback_mode == LOOPBACK_PCS) 223 + ctrl2 |= (1 << MDIO_MMDREG_CTRL1_LBACK_LBN); 224 + else 225 + ctrl2 &= ~(1 << MDIO_MMDREG_CTRL1_LBACK_LBN); 226 + 227 + if (ctrl1 != ctrl2) 228 + mdio_clause45_write(efx, phy_id, MDIO_MMD_PCS, 229 + MDIO_MMDREG_CTRL1, ctrl2); 230 + 231 + /* Handle (with debouncing) PHYXS network loopback */ 232 + ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS, 233 + MDIO_MMDREG_CTRL1); 234 + if (efx->loopback_mode == LOOPBACK_NETWORK) 235 + ctrl2 |= (1 << MDIO_MMDREG_CTRL1_LBACK_LBN); 236 + else 237 + ctrl2 &= ~(1 << MDIO_MMDREG_CTRL1_LBACK_LBN); 238 + 239 + if (ctrl1 != ctrl2) 240 + mdio_clause45_write(efx, phy_id, MDIO_MMD_PHYXS, 241 + MDIO_MMDREG_CTRL1, ctrl2); 242 } 243 244 /**
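The three branches of mdio_clause45_phy_reconfigure above all follow the same "debouncing" idiom noted in its comments: read the register, set or clear the loopback bit in a copy, and write back only when the value actually changed, so repeated reconfigures do not issue redundant MDIO writes. The helper below is a generic sketch of that idiom only; set_bit_debounced is an illustrative name and operates on a plain int rather than an MDIO register.

static void set_bit_debounced(int *reg, int bit, int enable)
{
        int old_val = *reg;
        int new_val = old_val;

        if (enable)
                new_val |= (1 << bit);
        else
                new_val &= ~(1 << bit);

        /* write back only when the bit actually changes */
        if (new_val != old_val)
                *reg = new_val;
}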
+23 -1
drivers/net/sfc/mdio_10g.h
··· 44 #define MDIO_MMDREG_DEVS1 (6) 45 #define MDIO_MMDREG_CTRL2 (7) 46 #define MDIO_MMDREG_STAT2 (8) 47 48 /* Bits in MMDREG_CTRL1 */ 49 /* Reset */ 50 #define MDIO_MMDREG_CTRL1_RESET_LBN (15) 51 #define MDIO_MMDREG_CTRL1_RESET_WIDTH (1) 52 53 /* Bits in MMDREG_STAT1 */ 54 #define MDIO_MMDREG_STAT1_FAULT_LBN (7) ··· 61 /* Link state */ 62 #define MDIO_MMDREG_STAT1_LINK_LBN (2) 63 #define MDIO_MMDREG_STAT1_LINK_WIDTH (1) 64 65 /* Bits in ID reg */ 66 #define MDIO_ID_REV(_id32) (_id32 & 0xf) ··· 84 #define MDIO_MMDREG_STAT2_PRESENT_LBN (14) 85 #define MDIO_MMDREG_STAT2_PRESENT_WIDTH (2) 86 87 /* PMA type (4 bits) */ 88 #define MDIO_PMAPMD_CTRL2_10G_CX4 (0x0) 89 #define MDIO_PMAPMD_CTRL2_10G_EW (0x1) ··· 111 #define MDIO_PMAPMD_CTRL2_10_BT (0xf) 112 #define MDIO_PMAPMD_CTRL2_TYPE_MASK (0xf) 113 114 - /* /\* PHY XGXS lane state *\/ */ 115 #define MDIO_PHYXS_LANE_STATE (0x18) 116 #define MDIO_PHYXS_LANE_ALIGNED_LBN (12) 117 ··· 232 /* Check the link status of specified mmds in bit mask */ 233 extern int mdio_clause45_links_ok(struct efx_nic *efx, 234 unsigned int mmd_mask); 235 236 /* Read (some of) the PHY settings over MDIO */ 237 extern void mdio_clause45_get_settings(struct efx_nic *efx,
··· 44 #define MDIO_MMDREG_DEVS1 (6) 45 #define MDIO_MMDREG_CTRL2 (7) 46 #define MDIO_MMDREG_STAT2 (8) 47 + #define MDIO_MMDREG_TXDIS (9) 48 49 /* Bits in MMDREG_CTRL1 */ 50 /* Reset */ 51 #define MDIO_MMDREG_CTRL1_RESET_LBN (15) 52 #define MDIO_MMDREG_CTRL1_RESET_WIDTH (1) 53 + /* Loopback */ 54 + /* Loopback bit for WIS, PCS, PHYSX and DTEXS */ 55 + #define MDIO_MMDREG_CTRL1_LBACK_LBN (14) 56 + #define MDIO_MMDREG_CTRL1_LBACK_WIDTH (1) 57 58 /* Bits in MMDREG_STAT1 */ 59 #define MDIO_MMDREG_STAT1_FAULT_LBN (7) ··· 56 /* Link state */ 57 #define MDIO_MMDREG_STAT1_LINK_LBN (2) 58 #define MDIO_MMDREG_STAT1_LINK_WIDTH (1) 59 + /* Low power ability */ 60 + #define MDIO_MMDREG_STAT1_LPABLE_LBN (1) 61 + #define MDIO_MMDREG_STAT1_LPABLE_WIDTH (1) 62 63 /* Bits in ID reg */ 64 #define MDIO_ID_REV(_id32) (_id32 & 0xf) ··· 76 #define MDIO_MMDREG_STAT2_PRESENT_LBN (14) 77 #define MDIO_MMDREG_STAT2_PRESENT_WIDTH (2) 78 79 + /* Bits in MMDREG_TXDIS */ 80 + #define MDIO_MMDREG_TXDIS_GLOBAL_LBN (0) 81 + #define MDIO_MMDREG_TXDIS_GLOBAL_WIDTH (1) 82 + 83 + /* MMD-specific bits, ordered by MMD, then register */ 84 + #define MDIO_PMAPMD_CTRL1_LBACK_LBN (0) 85 + #define MDIO_PMAPMD_CTRL1_LBACK_WIDTH (1) 86 + 87 /* PMA type (4 bits) */ 88 #define MDIO_PMAPMD_CTRL2_10G_CX4 (0x0) 89 #define MDIO_PMAPMD_CTRL2_10G_EW (0x1) ··· 95 #define MDIO_PMAPMD_CTRL2_10_BT (0xf) 96 #define MDIO_PMAPMD_CTRL2_TYPE_MASK (0xf) 97 98 + /* PHY XGXS lane state */ 99 #define MDIO_PHYXS_LANE_STATE (0x18) 100 #define MDIO_PHYXS_LANE_ALIGNED_LBN (12) 101 ··· 216 /* Check the link status of specified mmds in bit mask */ 217 extern int mdio_clause45_links_ok(struct efx_nic *efx, 218 unsigned int mmd_mask); 219 + 220 + /* Generic transmit disable support though PMAPMD */ 221 + extern void mdio_clause45_transmit_disable(struct efx_nic *efx); 222 + 223 + /* Generic part of reconfigure: set/clear loopback bits */ 224 + extern void mdio_clause45_phy_reconfigure(struct efx_nic *efx); 225 226 /* Read (some of) the PHY settings over MDIO */ 227 extern void mdio_clause45_get_settings(struct efx_nic *efx,
+27 -1
drivers/net/sfc/net_driver.h
··· 134 * Set only on the final fragment of a packet; %NULL for all other 135 * fragments. When this fragment completes, then we can free this 136 * skb. 137 * @dma_addr: DMA address of the fragment. 138 * @len: Length of this fragment. 139 * This field is zero when the queue slot is empty. ··· 146 */ 147 struct efx_tx_buffer { 148 const struct sk_buff *skb; 149 dma_addr_t dma_addr; 150 unsigned short len; 151 unsigned char continuation; ··· 190 * variable indicates that the queue is full. This is to 191 * avoid cache-line ping-pong between the xmit path and the 192 * completion path. 193 */ 194 struct efx_tx_queue { 195 /* Members which don't change on the fast path */ ··· 216 unsigned int insert_count ____cacheline_aligned_in_smp; 217 unsigned int write_count; 218 unsigned int old_read_count; 219 }; 220 221 /** ··· 448 struct efx_blinker blinker; 449 }; 450 451 enum efx_int_mode { 452 /* Be careful if altering to correct macro below */ 453 EFX_INT_MODE_MSIX = 0, ··· 523 * @check_hw: Check hardware 524 * @reset_xaui: Reset XAUI side of PHY for (software sequenced reset) 525 * @mmds: MMD presence mask 526 */ 527 struct efx_phy_operations { 528 int (*init) (struct efx_nic *efx); ··· 533 int (*check_hw) (struct efx_nic *efx); 534 void (*reset_xaui) (struct efx_nic *efx); 535 int mmds; 536 }; 537 538 /* ··· 672 * @phy_op: PHY interface 673 * @phy_data: PHY private data (including PHY-specific stats) 674 * @mii: PHY interface 675 - * @phy_powered: PHY power state 676 * @tx_disabled: PHY transmitter turned off 677 * @link_up: Link status 678 * @link_options: Link options (MII/GMII format) ··· 680 * @multicast_hash: Multicast hash table 681 * @flow_control: Flow control flags - separate RX/TX so can't use link_options 682 * @reconfigure_work: work item for dealing with PHY events 683 * 684 * The @priv field of the corresponding &struct net_device points to 685 * this. ··· 742 struct efx_phy_operations *phy_op; 743 void *phy_data; 744 struct mii_if_info mii; 745 746 int link_up; 747 unsigned int link_options; ··· 754 struct work_struct reconfigure_work; 755 756 atomic_t rx_reset; 757 }; 758 759 /**
··· 134 * Set only on the final fragment of a packet; %NULL for all other 135 * fragments. When this fragment completes, then we can free this 136 * skb. 137 + * @tsoh: The associated TSO header structure, or %NULL if this 138 + * buffer is not a TSO header. 139 * @dma_addr: DMA address of the fragment. 140 * @len: Length of this fragment. 141 * This field is zero when the queue slot is empty. ··· 144 */ 145 struct efx_tx_buffer { 146 const struct sk_buff *skb; 147 + struct efx_tso_header *tsoh; 148 dma_addr_t dma_addr; 149 unsigned short len; 150 unsigned char continuation; ··· 187 * variable indicates that the queue is full. This is to 188 * avoid cache-line ping-pong between the xmit path and the 189 * completion path. 190 + * @tso_headers_free: A list of TSO headers allocated for this TX queue 191 + * that are not in use, and so available for new TSO sends. The list 192 + * is protected by the TX queue lock. 193 + * @tso_bursts: Number of times TSO xmit invoked by kernel 194 + * @tso_long_headers: Number of packets with headers too long for standard 195 + * blocks 196 + * @tso_packets: Number of packets via the TSO xmit path 197 */ 198 struct efx_tx_queue { 199 /* Members which don't change on the fast path */ ··· 206 unsigned int insert_count ____cacheline_aligned_in_smp; 207 unsigned int write_count; 208 unsigned int old_read_count; 209 + struct efx_tso_header *tso_headers_free; 210 + unsigned int tso_bursts; 211 + unsigned int tso_long_headers; 212 + unsigned int tso_packets; 213 }; 214 215 /** ··· 434 struct efx_blinker blinker; 435 }; 436 437 + #define STRING_TABLE_LOOKUP(val, member) \ 438 + member ## _names[val] 439 + 440 enum efx_int_mode { 441 /* Be careful if altering to correct macro below */ 442 EFX_INT_MODE_MSIX = 0, ··· 506 * @check_hw: Check hardware 507 * @reset_xaui: Reset XAUI side of PHY for (software sequenced reset) 508 * @mmds: MMD presence mask 509 + * @loopbacks: Supported loopback modes mask 510 */ 511 struct efx_phy_operations { 512 int (*init) (struct efx_nic *efx); ··· 515 int (*check_hw) (struct efx_nic *efx); 516 void (*reset_xaui) (struct efx_nic *efx); 517 int mmds; 518 + unsigned loopbacks; 519 }; 520 521 /* ··· 653 * @phy_op: PHY interface 654 * @phy_data: PHY private data (including PHY-specific stats) 655 * @mii: PHY interface 656 * @tx_disabled: PHY transmitter turned off 657 * @link_up: Link status 658 * @link_options: Link options (MII/GMII format) ··· 662 * @multicast_hash: Multicast hash table 663 * @flow_control: Flow control flags - separate RX/TX so can't use link_options 664 * @reconfigure_work: work item for dealing with PHY events 665 + * @loopback_mode: Loopback status 666 + * @loopback_modes: Supported loopback mode bitmask 667 + * @loopback_selftest: Offline self-test private state 668 * 669 * The @priv field of the corresponding &struct net_device points to 670 * this. ··· 721 struct efx_phy_operations *phy_op; 722 void *phy_data; 723 struct mii_if_info mii; 724 + unsigned tx_disabled; 725 726 int link_up; 727 unsigned int link_options; ··· 732 struct work_struct reconfigure_work; 733 734 atomic_t rx_reset; 735 + enum efx_loopback_mode loopback_mode; 736 + unsigned int loopback_modes; 737 + 738 + void *loopback_selftest; 739 }; 740 741 /**
+10 -1
drivers/net/sfc/rx.c
··· 19 #include "rx.h" 20 #include "efx.h" 21 #include "falcon.h" 22 #include "workarounds.h" 23 24 /* Number of RX descriptors pushed at once. */ ··· 684 struct sk_buff *skb; 685 int lro = efx->net_dev->features & NETIF_F_LRO; 686 687 if (rx_buf->skb) { 688 prefetch(skb_shinfo(rx_buf->skb)); 689 ··· 746 /* Update allocation strategy method */ 747 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; 748 749 - /* fall-thru */ 750 done: 751 efx->net_dev->last_rx = jiffies; 752 }
··· 19 #include "rx.h" 20 #include "efx.h" 21 #include "falcon.h" 22 + #include "selftest.h" 23 #include "workarounds.h" 24 25 /* Number of RX descriptors pushed at once. */ ··· 683 struct sk_buff *skb; 684 int lro = efx->net_dev->features & NETIF_F_LRO; 685 686 + /* If we're in loopback test, then pass the packet directly to the 687 + * loopback layer, and free the rx_buf here 688 + */ 689 + if (unlikely(efx->loopback_selftest)) { 690 + efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len); 691 + efx_free_rx_buffer(efx, rx_buf); 692 + goto done; 693 + } 694 + 695 if (rx_buf->skb) { 696 prefetch(skb_shinfo(rx_buf->skb)); 697 ··· 736 /* Update allocation strategy method */ 737 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; 738 739 done: 740 efx->net_dev->last_rx = jiffies; 741 }
+717
drivers/net/sfc/selftest.c
···
··· 1 + /**************************************************************************** 2 + * Driver for Solarflare Solarstorm network controllers and boards 3 + * Copyright 2005-2006 Fen Systems Ltd. 4 + * Copyright 2006-2008 Solarflare Communications Inc. 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms of the GNU General Public License version 2 as published 8 + * by the Free Software Foundation, incorporated herein by reference. 9 + */ 10 + 11 + #include <linux/netdevice.h> 12 + #include <linux/module.h> 13 + #include <linux/delay.h> 14 + #include <linux/kernel_stat.h> 15 + #include <linux/pci.h> 16 + #include <linux/ethtool.h> 17 + #include <linux/ip.h> 18 + #include <linux/in.h> 19 + #include <linux/udp.h> 20 + #include <linux/rtnetlink.h> 21 + #include <asm/io.h> 22 + #include "net_driver.h" 23 + #include "ethtool.h" 24 + #include "efx.h" 25 + #include "falcon.h" 26 + #include "selftest.h" 27 + #include "boards.h" 28 + #include "workarounds.h" 29 + #include "mac.h" 30 + 31 + /* 32 + * Loopback test packet structure 33 + * 34 + * The self-test should stress every RSS vector, and unfortunately 35 + * Falcon only performs RSS on TCP/UDP packets. 36 + */ 37 + struct efx_loopback_payload { 38 + struct ethhdr header; 39 + struct iphdr ip; 40 + struct udphdr udp; 41 + __be16 iteration; 42 + const char msg[64]; 43 + } __attribute__ ((packed)); 44 + 45 + /* Loopback test source MAC address */ 46 + static const unsigned char payload_source[ETH_ALEN] = { 47 + 0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b, 48 + }; 49 + 50 + static const char *payload_msg = 51 + "Hello world! This is an Efx loopback test in progress!"; 52 + 53 + /** 54 + * efx_selftest_state - persistent state during a selftest 55 + * @flush: Drop all packets in efx_loopback_rx_packet 56 + * @packet_count: Number of packets being used in this test 57 + * @skbs: An array of skbs transmitted 58 + * @rx_good: RX good packet count 59 + * @rx_bad: RX bad packet count 60 + * @payload: Payload used in tests 61 + */ 62 + struct efx_selftest_state { 63 + int flush; 64 + int packet_count; 65 + struct sk_buff **skbs; 66 + atomic_t rx_good; 67 + atomic_t rx_bad; 68 + struct efx_loopback_payload payload; 69 + }; 70 + 71 + /************************************************************************** 72 + * 73 + * Configurable values 74 + * 75 + **************************************************************************/ 76 + 77 + /* Level of loopback testing 78 + * 79 + * The maximum packet burst length is 16**(n-1), i.e. 80 + * 81 + * - Level 0 : no packets 82 + * - Level 1 : 1 packet 83 + * - Level 2 : 17 packets (1 * 1 packet, 1 * 16 packets) 84 + * - Level 3 : 273 packets (1 * 1 packet, 1 * 16 packet, 1 * 256 packets) 85 + * 86 + */ 87 + static unsigned int loopback_test_level = 3; 88 + 89 + /************************************************************************** 90 + * 91 + * Interrupt and event queue testing 92 + * 93 + **************************************************************************/ 94 + 95 + /* Test generation and receipt of interrupts */ 96 + static int efx_test_interrupts(struct efx_nic *efx, 97 + struct efx_self_tests *tests) 98 + { 99 + struct efx_channel *channel; 100 + 101 + EFX_LOG(efx, "testing interrupts\n"); 102 + tests->interrupt = -1; 103 + 104 + /* Reset interrupt flag */ 105 + efx->last_irq_cpu = -1; 106 + smp_wmb(); 107 + 108 + /* ACK each interrupting event queue. 
Receiving an interrupt due to 109 + * traffic before a test event is raised is considered a pass */ 110 + efx_for_each_channel_with_interrupt(channel, efx) { 111 + if (channel->work_pending) 112 + efx_process_channel_now(channel); 113 + if (efx->last_irq_cpu >= 0) 114 + goto success; 115 + } 116 + 117 + falcon_generate_interrupt(efx); 118 + 119 + /* Wait for arrival of test interrupt. */ 120 + EFX_LOG(efx, "waiting for test interrupt\n"); 121 + schedule_timeout_uninterruptible(HZ / 10); 122 + if (efx->last_irq_cpu >= 0) 123 + goto success; 124 + 125 + EFX_ERR(efx, "timed out waiting for interrupt\n"); 126 + return -ETIMEDOUT; 127 + 128 + success: 129 + EFX_LOG(efx, "test interrupt (mode %d) seen on CPU%d\n", 130 + efx->interrupt_mode, efx->last_irq_cpu); 131 + tests->interrupt = 1; 132 + return 0; 133 + } 134 + 135 + /* Test generation and receipt of non-interrupting events */ 136 + static int efx_test_eventq(struct efx_channel *channel, 137 + struct efx_self_tests *tests) 138 + { 139 + unsigned int magic; 140 + 141 + /* Channel specific code, limited to 20 bits */ 142 + magic = (0x00010150 + channel->channel); 143 + EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n", 144 + channel->channel, magic); 145 + 146 + tests->eventq_dma[channel->channel] = -1; 147 + tests->eventq_int[channel->channel] = 1; /* fake pass */ 148 + tests->eventq_poll[channel->channel] = 1; /* fake pass */ 149 + 150 + /* Reset flag and zero magic word */ 151 + channel->efx->last_irq_cpu = -1; 152 + channel->eventq_magic = 0; 153 + smp_wmb(); 154 + 155 + falcon_generate_test_event(channel, magic); 156 + udelay(1); 157 + 158 + efx_process_channel_now(channel); 159 + if (channel->eventq_magic != magic) { 160 + EFX_ERR(channel->efx, "channel %d failed to see test event\n", 161 + channel->channel); 162 + return -ETIMEDOUT; 163 + } else { 164 + tests->eventq_dma[channel->channel] = 1; 165 + } 166 + 167 + return 0; 168 + } 169 + 170 + /* Test generation and receipt of interrupting events */ 171 + static int efx_test_eventq_irq(struct efx_channel *channel, 172 + struct efx_self_tests *tests) 173 + { 174 + unsigned int magic, count; 175 + 176 + /* Channel specific code, limited to 20 bits */ 177 + magic = (0x00010150 + channel->channel); 178 + EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n", 179 + channel->channel, magic); 180 + 181 + tests->eventq_dma[channel->channel] = -1; 182 + tests->eventq_int[channel->channel] = -1; 183 + tests->eventq_poll[channel->channel] = -1; 184 + 185 + /* Reset flag and zero magic word */ 186 + channel->efx->last_irq_cpu = -1; 187 + channel->eventq_magic = 0; 188 + smp_wmb(); 189 + 190 + falcon_generate_test_event(channel, magic); 191 + 192 + /* Wait for arrival of interrupt */ 193 + count = 0; 194 + do { 195 + schedule_timeout_uninterruptible(HZ / 100); 196 + 197 + if (channel->work_pending) 198 + efx_process_channel_now(channel); 199 + 200 + if (channel->eventq_magic == magic) 201 + goto eventq_ok; 202 + } while (++count < 2); 203 + 204 + EFX_ERR(channel->efx, "channel %d timed out waiting for event queue\n", 205 + channel->channel); 206 + 207 + /* See if interrupt arrived */ 208 + if (channel->efx->last_irq_cpu >= 0) { 209 + EFX_ERR(channel->efx, "channel %d saw interrupt on CPU%d " 210 + "during event queue test\n", channel->channel, 211 + raw_smp_processor_id()); 212 + tests->eventq_int[channel->channel] = 1; 213 + } 214 + 215 + /* Check to see if event was received even if interrupt wasn't */ 216 + efx_process_channel_now(channel); 217 + if 
(channel->eventq_magic == magic) { 218 + EFX_ERR(channel->efx, "channel %d event was generated, but " 219 + "failed to trigger an interrupt\n", channel->channel); 220 + tests->eventq_dma[channel->channel] = 1; 221 + } 222 + 223 + return -ETIMEDOUT; 224 + eventq_ok: 225 + EFX_LOG(channel->efx, "channel %d event queue passed\n", 226 + channel->channel); 227 + tests->eventq_dma[channel->channel] = 1; 228 + tests->eventq_int[channel->channel] = 1; 229 + tests->eventq_poll[channel->channel] = 1; 230 + return 0; 231 + } 232 + 233 + /************************************************************************** 234 + * 235 + * PHY testing 236 + * 237 + **************************************************************************/ 238 + 239 + /* Check PHY presence by reading the PHY ID registers */ 240 + static int efx_test_phy(struct efx_nic *efx, 241 + struct efx_self_tests *tests) 242 + { 243 + u16 physid1, physid2; 244 + struct mii_if_info *mii = &efx->mii; 245 + struct net_device *net_dev = efx->net_dev; 246 + 247 + if (efx->phy_type == PHY_TYPE_NONE) 248 + return 0; 249 + 250 + EFX_LOG(efx, "testing PHY presence\n"); 251 + tests->phy_ok = -1; 252 + 253 + physid1 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID1); 254 + physid2 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID2); 255 + 256 + if ((physid1 != 0x0000) && (physid1 != 0xffff) && 257 + (physid2 != 0x0000) && (physid2 != 0xffff)) { 258 + EFX_LOG(efx, "found MII PHY %d ID 0x%x:%x\n", 259 + mii->phy_id, physid1, physid2); 260 + tests->phy_ok = 1; 261 + return 0; 262 + } 263 + 264 + EFX_ERR(efx, "no MII PHY present with ID %d\n", mii->phy_id); 265 + return -ENODEV; 266 + } 267 + 268 + /************************************************************************** 269 + * 270 + * Loopback testing 271 + * NB Only one loopback test can be executing concurrently. 272 + * 273 + **************************************************************************/ 274 + 275 + /* Loopback test RX callback 276 + * This is called for each received packet during loopback testing. 
277 + */ 278 + void efx_loopback_rx_packet(struct efx_nic *efx, 279 + const char *buf_ptr, int pkt_len) 280 + { 281 + struct efx_selftest_state *state = efx->loopback_selftest; 282 + struct efx_loopback_payload *received; 283 + struct efx_loopback_payload *payload; 284 + 285 + BUG_ON(!buf_ptr); 286 + 287 + /* If we are just flushing, then drop the packet */ 288 + if ((state == NULL) || state->flush) 289 + return; 290 + 291 + payload = &state->payload; 292 + 293 + received = (struct efx_loopback_payload *)(char *) buf_ptr; 294 + received->ip.saddr = payload->ip.saddr; 295 + received->ip.check = payload->ip.check; 296 + 297 + /* Check that header exists */ 298 + if (pkt_len < sizeof(received->header)) { 299 + EFX_ERR(efx, "saw runt RX packet (length %d) in %s loopback " 300 + "test\n", pkt_len, LOOPBACK_MODE(efx)); 301 + goto err; 302 + } 303 + 304 + /* Check that the ethernet header exists */ 305 + if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) { 306 + EFX_ERR(efx, "saw non-loopback RX packet in %s loopback test\n", 307 + LOOPBACK_MODE(efx)); 308 + goto err; 309 + } 310 + 311 + /* Check packet length */ 312 + if (pkt_len != sizeof(*payload)) { 313 + EFX_ERR(efx, "saw incorrect RX packet length %d (wanted %d) in " 314 + "%s loopback test\n", pkt_len, (int)sizeof(*payload), 315 + LOOPBACK_MODE(efx)); 316 + goto err; 317 + } 318 + 319 + /* Check that IP header matches */ 320 + if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) { 321 + EFX_ERR(efx, "saw corrupted IP header in %s loopback test\n", 322 + LOOPBACK_MODE(efx)); 323 + goto err; 324 + } 325 + 326 + /* Check that msg and padding matches */ 327 + if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) { 328 + EFX_ERR(efx, "saw corrupted RX packet in %s loopback test\n", 329 + LOOPBACK_MODE(efx)); 330 + goto err; 331 + } 332 + 333 + /* Check that iteration matches */ 334 + if (received->iteration != payload->iteration) { 335 + EFX_ERR(efx, "saw RX packet from iteration %d (wanted %d) in " 336 + "%s loopback test\n", ntohs(received->iteration), 337 + ntohs(payload->iteration), LOOPBACK_MODE(efx)); 338 + goto err; 339 + } 340 + 341 + /* Increase correct RX count */ 342 + EFX_TRACE(efx, "got loopback RX in %s loopback test\n", 343 + LOOPBACK_MODE(efx)); 344 + 345 + atomic_inc(&state->rx_good); 346 + return; 347 + 348 + err: 349 + #ifdef EFX_ENABLE_DEBUG 350 + if (atomic_read(&state->rx_bad) == 0) { 351 + EFX_ERR(efx, "received packet:\n"); 352 + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, 353 + buf_ptr, pkt_len, 0); 354 + EFX_ERR(efx, "expected packet:\n"); 355 + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, 356 + &state->payload, sizeof(state->payload), 0); 357 + } 358 + #endif 359 + atomic_inc(&state->rx_bad); 360 + } 361 + 362 + /* Initialise an efx_selftest_state for a new iteration */ 363 + static void efx_iterate_state(struct efx_nic *efx) 364 + { 365 + struct efx_selftest_state *state = efx->loopback_selftest; 366 + struct net_device *net_dev = efx->net_dev; 367 + struct efx_loopback_payload *payload = &state->payload; 368 + 369 + /* Initialise the layerII header */ 370 + memcpy(&payload->header.h_dest, net_dev->dev_addr, ETH_ALEN); 371 + memcpy(&payload->header.h_source, &payload_source, ETH_ALEN); 372 + payload->header.h_proto = htons(ETH_P_IP); 373 + 374 + /* saddr set later and used as incrementing count */ 375 + payload->ip.daddr = htonl(INADDR_LOOPBACK); 376 + payload->ip.ihl = 5; 377 + payload->ip.check = htons(0xdead); 378 + payload->ip.tot_len = 
htons(sizeof(*payload) - sizeof(struct ethhdr)); 379 + payload->ip.version = IPVERSION; 380 + payload->ip.protocol = IPPROTO_UDP; 381 + 382 + /* Initialise udp header */ 383 + payload->udp.source = 0; 384 + payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) - 385 + sizeof(struct iphdr)); 386 + payload->udp.check = 0; /* checksum ignored */ 387 + 388 + /* Fill out payload */ 389 + payload->iteration = htons(ntohs(payload->iteration) + 1); 390 + memcpy(&payload->msg, payload_msg, sizeof(payload_msg)); 391 + 392 + /* Fill out remaining state members */ 393 + atomic_set(&state->rx_good, 0); 394 + atomic_set(&state->rx_bad, 0); 395 + smp_wmb(); 396 + } 397 + 398 + static int efx_tx_loopback(struct efx_tx_queue *tx_queue) 399 + { 400 + struct efx_nic *efx = tx_queue->efx; 401 + struct efx_selftest_state *state = efx->loopback_selftest; 402 + struct efx_loopback_payload *payload; 403 + struct sk_buff *skb; 404 + int i, rc; 405 + 406 + /* Transmit N copies of buffer */ 407 + for (i = 0; i < state->packet_count; i++) { 408 + /* Allocate an skb, holding an extra reference for 409 + * transmit completion counting */ 410 + skb = alloc_skb(sizeof(state->payload), GFP_KERNEL); 411 + if (!skb) 412 + return -ENOMEM; 413 + state->skbs[i] = skb; 414 + skb_get(skb); 415 + 416 + /* Copy the payload in, incrementing the source address to 417 + * exercise the rss vectors */ 418 + payload = ((struct efx_loopback_payload *) 419 + skb_put(skb, sizeof(state->payload))); 420 + memcpy(payload, &state->payload, sizeof(state->payload)); 421 + payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2)); 422 + 423 + /* Ensure everything we've written is visible to the 424 + * interrupt handler. */ 425 + smp_wmb(); 426 + 427 + if (NET_DEV_REGISTERED(efx)) 428 + netif_tx_lock_bh(efx->net_dev); 429 + rc = efx_xmit(efx, tx_queue, skb); 430 + if (NET_DEV_REGISTERED(efx)) 431 + netif_tx_unlock_bh(efx->net_dev); 432 + 433 + if (rc != NETDEV_TX_OK) { 434 + EFX_ERR(efx, "TX queue %d could not transmit packet %d " 435 + "of %d in %s loopback test\n", tx_queue->queue, 436 + i + 1, state->packet_count, LOOPBACK_MODE(efx)); 437 + 438 + /* Defer cleaning up the other skbs for the caller */ 439 + kfree_skb(skb); 440 + return -EPIPE; 441 + } 442 + } 443 + 444 + return 0; 445 + } 446 + 447 + static int efx_rx_loopback(struct efx_tx_queue *tx_queue, 448 + struct efx_loopback_self_tests *lb_tests) 449 + { 450 + struct efx_nic *efx = tx_queue->efx; 451 + struct efx_selftest_state *state = efx->loopback_selftest; 452 + struct sk_buff *skb; 453 + int tx_done = 0, rx_good, rx_bad; 454 + int i, rc = 0; 455 + 456 + if (NET_DEV_REGISTERED(efx)) 457 + netif_tx_lock_bh(efx->net_dev); 458 + 459 + /* Count the number of tx completions, and decrement the refcnt. Any 460 + * skbs not already completed will be free'd when the queue is flushed */ 461 + for (i=0; i < state->packet_count; i++) { 462 + skb = state->skbs[i]; 463 + if (skb && !skb_shared(skb)) 464 + ++tx_done; 465 + dev_kfree_skb_any(skb); 466 + } 467 + 468 + if (NET_DEV_REGISTERED(efx)) 469 + netif_tx_unlock_bh(efx->net_dev); 470 + 471 + /* Check TX completion and received packet counts */ 472 + rx_good = atomic_read(&state->rx_good); 473 + rx_bad = atomic_read(&state->rx_bad); 474 + if (tx_done != state->packet_count) { 475 + /* Don't free the skbs; they will be picked up on TX 476 + * overflow or channel teardown. 
477 + */ 478 + EFX_ERR(efx, "TX queue %d saw only %d out of an expected %d " 479 + "TX completion events in %s loopback test\n", 480 + tx_queue->queue, tx_done, state->packet_count, 481 + LOOPBACK_MODE(efx)); 482 + rc = -ETIMEDOUT; 483 + /* Allow to fall through so we see the RX errors as well */ 484 + } 485 + 486 + /* We may always be up to a flush away from our desired packet total */ 487 + if (rx_good != state->packet_count) { 488 + EFX_LOG(efx, "TX queue %d saw only %d out of an expected %d " 489 + "received packets in %s loopback test\n", 490 + tx_queue->queue, rx_good, state->packet_count, 491 + LOOPBACK_MODE(efx)); 492 + rc = -ETIMEDOUT; 493 + /* Fall through */ 494 + } 495 + 496 + /* Update loopback test structure */ 497 + lb_tests->tx_sent[tx_queue->queue] += state->packet_count; 498 + lb_tests->tx_done[tx_queue->queue] += tx_done; 499 + lb_tests->rx_good += rx_good; 500 + lb_tests->rx_bad += rx_bad; 501 + 502 + return rc; 503 + } 504 + 505 + static int 506 + efx_test_loopback(struct efx_tx_queue *tx_queue, 507 + struct efx_loopback_self_tests *lb_tests) 508 + { 509 + struct efx_nic *efx = tx_queue->efx; 510 + struct efx_selftest_state *state = efx->loopback_selftest; 511 + struct efx_channel *channel; 512 + int i, rc = 0; 513 + 514 + for (i = 0; i < loopback_test_level; i++) { 515 + /* Determine how many packets to send */ 516 + state->packet_count = (efx->type->txd_ring_mask + 1) / 3; 517 + state->packet_count = min(1 << (i << 2), state->packet_count); 518 + state->skbs = kzalloc(sizeof(state->skbs[0]) * 519 + state->packet_count, GFP_KERNEL); 520 + state->flush = 0; 521 + 522 + EFX_LOG(efx, "TX queue %d testing %s loopback with %d " 523 + "packets\n", tx_queue->queue, LOOPBACK_MODE(efx), 524 + state->packet_count); 525 + 526 + efx_iterate_state(efx); 527 + rc = efx_tx_loopback(tx_queue); 528 + 529 + /* NAPI polling is not enabled, so process channels synchronously */ 530 + schedule_timeout_uninterruptible(HZ / 50); 531 + efx_for_each_channel_with_interrupt(channel, efx) { 532 + if (channel->work_pending) 533 + efx_process_channel_now(channel); 534 + } 535 + 536 + rc |= efx_rx_loopback(tx_queue, lb_tests); 537 + kfree(state->skbs); 538 + 539 + if (rc) { 540 + /* Wait a while to ensure there are no packets 541 + * floating around after a failure. 
*/ 542 + schedule_timeout_uninterruptible(HZ / 10); 543 + return rc; 544 + } 545 + } 546 + 547 + EFX_LOG(efx, "TX queue %d passed %s loopback test with a burst length " 548 + "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx), 549 + state->packet_count); 550 + 551 + return rc; 552 + } 553 + 554 + static int efx_test_loopbacks(struct efx_nic *efx, 555 + struct efx_self_tests *tests, 556 + unsigned int loopback_modes) 557 + { 558 + struct efx_selftest_state *state = efx->loopback_selftest; 559 + struct ethtool_cmd ecmd, ecmd_loopback; 560 + struct efx_tx_queue *tx_queue; 561 + enum efx_loopback_mode old_mode, mode; 562 + int count, rc = 0, link_up; 563 + 564 + rc = efx_ethtool_get_settings(efx->net_dev, &ecmd); 565 + if (rc) { 566 + EFX_ERR(efx, "could not get GMII settings\n"); 567 + return rc; 568 + } 569 + old_mode = efx->loopback_mode; 570 + 571 + /* Disable autonegotiation for the purposes of loopback */ 572 + memcpy(&ecmd_loopback, &ecmd, sizeof(ecmd_loopback)); 573 + if (ecmd_loopback.autoneg == AUTONEG_ENABLE) { 574 + ecmd_loopback.autoneg = AUTONEG_DISABLE; 575 + ecmd_loopback.duplex = DUPLEX_FULL; 576 + ecmd_loopback.speed = SPEED_10000; 577 + } 578 + 579 + rc = efx_ethtool_set_settings(efx->net_dev, &ecmd_loopback); 580 + if (rc) { 581 + EFX_ERR(efx, "could not disable autonegotiation\n"); 582 + goto out; 583 + } 584 + tests->loopback_speed = ecmd_loopback.speed; 585 + tests->loopback_full_duplex = ecmd_loopback.duplex; 586 + 587 + /* Test all supported loopback modes */ 588 + for (mode = LOOPBACK_NONE; mode < LOOPBACK_TEST_MAX; mode++) { 589 + if (!(loopback_modes & (1 << mode))) 590 + continue; 591 + 592 + /* Move the port into the specified loopback mode. */ 593 + state->flush = 1; 594 + efx->loopback_mode = mode; 595 + efx_reconfigure_port(efx); 596 + 597 + /* Wait for the PHY to signal the link is up */ 598 + count = 0; 599 + do { 600 + struct efx_channel *channel = &efx->channel[0]; 601 + 602 + falcon_check_xmac(efx); 603 + schedule_timeout_uninterruptible(HZ / 10); 604 + if (channel->work_pending) 605 + efx_process_channel_now(channel); 606 + /* Wait for PHY events to be processed */ 607 + flush_workqueue(efx->workqueue); 608 + rmb(); 609 + 610 + /* efx->link_up can be 1 even if the XAUI link is down, 611 + * (bug5762). Usually, it's not worth bothering with the 612 + * difference, but for selftests, we need that extra 613 + * guarantee that the link is really, really, up. 614 + */ 615 + link_up = efx->link_up; 616 + if (!falcon_xaui_link_ok(efx)) 617 + link_up = 0; 618 + 619 + } while ((++count < 20) && !link_up); 620 + 621 + /* The link should now be up. 
If it isn't, there is no point 622 + * in attempting a loopback test */ 623 + if (!link_up) { 624 + EFX_ERR(efx, "loopback %s never came up\n", 625 + LOOPBACK_MODE(efx)); 626 + rc = -EIO; 627 + goto out; 628 + } 629 + 630 + EFX_LOG(efx, "link came up in %s loopback in %d iterations\n", 631 + LOOPBACK_MODE(efx), count); 632 + 633 + /* Test every TX queue */ 634 + efx_for_each_tx_queue(tx_queue, efx) { 635 + rc |= efx_test_loopback(tx_queue, 636 + &tests->loopback[mode]); 637 + if (rc) 638 + goto out; 639 + } 640 + } 641 + 642 + out: 643 + /* Take out of loopback and restore PHY settings */ 644 + state->flush = 1; 645 + efx->loopback_mode = old_mode; 646 + efx_ethtool_set_settings(efx->net_dev, &ecmd); 647 + 648 + return rc; 649 + } 650 + 651 + /************************************************************************** 652 + * 653 + * Entry points 654 + * 655 + *************************************************************************/ 656 + 657 + /* Online (i.e. non-disruptive) testing 658 + * This checks interrupt generation, event delivery and PHY presence. */ 659 + int efx_online_test(struct efx_nic *efx, struct efx_self_tests *tests) 660 + { 661 + struct efx_channel *channel; 662 + int rc = 0; 663 + 664 + EFX_LOG(efx, "performing online self-tests\n"); 665 + 666 + rc |= efx_test_interrupts(efx, tests); 667 + efx_for_each_channel(channel, efx) { 668 + if (channel->has_interrupt) 669 + rc |= efx_test_eventq_irq(channel, tests); 670 + else 671 + rc |= efx_test_eventq(channel, tests); 672 + } 673 + rc |= efx_test_phy(efx, tests); 674 + 675 + if (rc) 676 + EFX_ERR(efx, "failed online self-tests\n"); 677 + 678 + return rc; 679 + } 680 + 681 + /* Offline (i.e. disruptive) testing 682 + * This checks MAC and PHY loopback on the specified port. */ 683 + int efx_offline_test(struct efx_nic *efx, 684 + struct efx_self_tests *tests, unsigned int loopback_modes) 685 + { 686 + struct efx_selftest_state *state; 687 + int rc = 0; 688 + 689 + EFX_LOG(efx, "performing offline self-tests\n"); 690 + 691 + /* Create a selftest_state structure to hold state for the test */ 692 + state = kzalloc(sizeof(*state), GFP_KERNEL); 693 + if (state == NULL) { 694 + rc = -ENOMEM; 695 + goto out; 696 + } 697 + 698 + /* Set the port loopback_selftest member. From this point on 699 + * all received packets will be dropped. Mark the state as 700 + * "flushing" so all inflight packets are dropped */ 701 + BUG_ON(efx->loopback_selftest); 702 + state->flush = 1; 703 + efx->loopback_selftest = (void *)state; 704 + 705 + rc = efx_test_loopbacks(efx, tests, loopback_modes); 706 + 707 + efx->loopback_selftest = NULL; 708 + wmb(); 709 + kfree(state); 710 + 711 + out: 712 + if (rc) 713 + EFX_ERR(efx, "failed offline self-tests\n"); 714 + 715 + return rc; 716 + } 717 +
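The two entry points above are meant to be driven from the driver's ethtool hooks: efx_online_test() for the non-disruptive checks and efx_offline_test() for the loopback sweep. The sketch below is a hypothetical caller, not the ethtool.c change from this merge; the wrapper name, the use of net_dev->priv and the packing of results into data[] are illustrative assumptions, while the entry points, struct efx_self_tests and efx->loopback_modes come from the patch itself.

/* Hypothetical .self_test handler sketch; only the efx_* entry points and
 * struct efx_self_tests are taken from this patch. */
static void efx_ethtool_self_test_sketch(struct net_device *net_dev,
					 struct ethtool_test *test, u64 *data)
{
	struct efx_nic *efx = net_dev->priv;	/* @priv points at the efx_nic */
	struct efx_self_tests tests = { 0 };
	int rc;

	/* Non-disruptive tests: interrupts, event queues, PHY presence */
	rc = efx_online_test(efx, &tests);

	/* Disruptive loopback tests only if the user asked for them */
	if (test->flags & ETH_TEST_FL_OFFLINE)
		rc |= efx_offline_test(efx, &tests, efx->loopback_modes);

	if (rc)
		test->flags |= ETH_TEST_FL_FAILED;

	/* Illustrative result packing; the real layout is driver-defined */
	data[0] = tests.interrupt;
	data[1] = tests.phy_ok;
}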
+50
drivers/net/sfc/selftest.h
···
··· 1 + /**************************************************************************** 2 + * Driver for Solarflare Solarstorm network controllers and boards 3 + * Copyright 2005-2006 Fen Systems Ltd. 4 + * Copyright 2006-2008 Solarflare Communications Inc. 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms of the GNU General Public License version 2 as published 8 + * by the Free Software Foundation, incorporated herein by reference. 9 + */ 10 + 11 + #ifndef EFX_SELFTEST_H 12 + #define EFX_SELFTEST_H 13 + 14 + #include "net_driver.h" 15 + 16 + /* 17 + * Self tests 18 + */ 19 + 20 + struct efx_loopback_self_tests { 21 + int tx_sent[EFX_MAX_TX_QUEUES]; 22 + int tx_done[EFX_MAX_TX_QUEUES]; 23 + int rx_good; 24 + int rx_bad; 25 + }; 26 + 27 + /* Efx self test results 28 + * For fields which are not counters, 1 indicates success and -1 29 + * indicates failure. 30 + */ 31 + struct efx_self_tests { 32 + int interrupt; 33 + int eventq_dma[EFX_MAX_CHANNELS]; 34 + int eventq_int[EFX_MAX_CHANNELS]; 35 + int eventq_poll[EFX_MAX_CHANNELS]; 36 + int phy_ok; 37 + int loopback_speed; 38 + int loopback_full_duplex; 39 + struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX]; 40 + }; 41 + 42 + extern void efx_loopback_rx_packet(struct efx_nic *efx, 43 + const char *buf_ptr, int pkt_len); 44 + extern int efx_online_test(struct efx_nic *efx, 45 + struct efx_self_tests *tests); 46 + extern int efx_offline_test(struct efx_nic *efx, 47 + struct efx_self_tests *tests, 48 + unsigned int loopback_modes); 49 + 50 + #endif /* EFX_SELFTEST_H */
+14
drivers/net/sfc/sfe4001.c
··· 130 (void) efx_i2c_read(i2c, MAX6647, RSL, &in, 1); 131 } 132 133 /* This board uses an I2C expander to provider power to the PHY, which needs to 134 * be turned on before the PHY can be used. 135 * Context: Process context, rtnl lock held ··· 212 out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) | 213 (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) | 214 (1 << P0_X_TRST_LBN)); 215 216 rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); 217 if (rc) ··· 237 if (in & (1 << P1_AFE_PWD_LBN)) 238 goto done; 239 240 } while (++count < 20); 241 242 EFX_INFO(efx, "timed out waiting for power\n");
··· 130 (void) efx_i2c_read(i2c, MAX6647, RSL, &in, 1); 131 } 132 133 + /* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected 134 + * to the FLASH_CFG_1 input on the DSP. We must keep it high at power- 135 + * up to allow writing the flash (done through MDIO from userland). 136 + */ 137 + unsigned int sfe4001_phy_flash_cfg; 138 + module_param_named(phy_flash_cfg, sfe4001_phy_flash_cfg, uint, 0444); 139 + MODULE_PARM_DESC(phy_flash_cfg, 140 + "Force PHY to enter flash configuration mode"); 141 + 142 /* This board uses an I2C expander to provider power to the PHY, which needs to 143 * be turned on before the PHY can be used. 144 * Context: Process context, rtnl lock held ··· 203 out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) | 204 (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) | 205 (1 << P0_X_TRST_LBN)); 206 + if (sfe4001_phy_flash_cfg) 207 + out |= 1 << P0_EN_3V3X_LBN; 208 209 rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); 210 if (rc) ··· 226 if (in & (1 << P1_AFE_PWD_LBN)) 227 goto done; 228 229 + /* DSP doesn't look powered in flash config mode */ 230 + if (sfe4001_phy_flash_cfg) 231 + goto done; 232 } while (++count < 20); 233 234 EFX_INFO(efx, "timed out waiting for power\n");
+87 -4
drivers/net/sfc/tenxpress.c
··· 24 MDIO_MMDREG_DEVS0_PCS | \ 25 MDIO_MMDREG_DEVS0_PHYXS) 26 27 /* We complain if we fail to see the link partner as 10G capable this many 28 * times in a row (must be > 1 as sampling the autoneg. registers is racy) 29 */ ··· 77 #define PMA_PMD_BIST_RXD_LBN (1) 78 #define PMA_PMD_BIST_AFE_LBN (0) 79 80 #define BIST_MAX_DELAY (1000) 81 #define BIST_POLL_DELAY (10) 82 ··· 94 95 #define PCS_TEST_SELECT_REG 0xd807 /* PRM 10.5.8 */ 96 #define CLK312_EN_LBN 3 97 98 /* Boot status register */ 99 #define PCS_BOOT_STATUS_REG (0xd000) ··· 120 121 struct tenxpress_phy_data { 122 enum tenxpress_state state; 123 atomic_t bad_crc_count; 124 int bad_lp_tries; 125 }; 126 ··· 215 216 tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL); 217 218 - rc = mdio_clause45_wait_reset_mmds(efx, 219 - TENXPRESS_REQUIRED_DEVS); 220 - if (rc < 0) 221 - goto fail; 222 223 rc = mdio_clause45_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0); 224 if (rc < 0) ··· 241 kfree(efx->phy_data); 242 efx->phy_data = NULL; 243 return rc; 244 } 245 246 static void tenxpress_set_bad_lp(struct efx_nic *efx, int bad_lp) ··· 346 return ok; 347 } 348 349 static void tenxpress_phy_reconfigure(struct efx_nic *efx) 350 { 351 if (!tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL)) 352 return; 353 354 efx->link_up = tenxpress_link_ok(efx, 0); 355 efx->link_options = GM_LPA_10000FULL; 356 } ··· 513 .clear_interrupt = tenxpress_phy_clear_interrupt, 514 .reset_xaui = tenxpress_reset_xaui, 515 .mmds = TENXPRESS_REQUIRED_DEVS, 516 };
··· 24 MDIO_MMDREG_DEVS0_PCS | \ 25 MDIO_MMDREG_DEVS0_PHYXS) 26 27 + #define TENXPRESS_LOOPBACKS ((1 << LOOPBACK_PHYXS) | \ 28 + (1 << LOOPBACK_PCS) | \ 29 + (1 << LOOPBACK_PMAPMD) | \ 30 + (1 << LOOPBACK_NETWORK)) 31 + 32 /* We complain if we fail to see the link partner as 10G capable this many 33 * times in a row (must be > 1 as sampling the autoneg. registers is racy) 34 */ ··· 72 #define PMA_PMD_BIST_RXD_LBN (1) 73 #define PMA_PMD_BIST_AFE_LBN (0) 74 75 + /* Special Software reset register */ 76 + #define PMA_PMD_EXT_CTRL_REG 49152 77 + #define PMA_PMD_EXT_SSR_LBN 15 78 + 79 #define BIST_MAX_DELAY (1000) 80 #define BIST_POLL_DELAY (10) 81 ··· 85 86 #define PCS_TEST_SELECT_REG 0xd807 /* PRM 10.5.8 */ 87 #define CLK312_EN_LBN 3 88 + 89 + /* PHYXS registers */ 90 + #define PHYXS_TEST1 (49162) 91 + #define LOOPBACK_NEAR_LBN (8) 92 + #define LOOPBACK_NEAR_WIDTH (1) 93 94 /* Boot status register */ 95 #define PCS_BOOT_STATUS_REG (0xd000) ··· 106 107 struct tenxpress_phy_data { 108 enum tenxpress_state state; 109 + enum efx_loopback_mode loopback_mode; 110 atomic_t bad_crc_count; 111 + int tx_disabled; 112 int bad_lp_tries; 113 }; 114 ··· 199 200 tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL); 201 202 + if (!sfe4001_phy_flash_cfg) { 203 + rc = mdio_clause45_wait_reset_mmds(efx, 204 + TENXPRESS_REQUIRED_DEVS); 205 + if (rc < 0) 206 + goto fail; 207 + } 208 209 rc = mdio_clause45_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0); 210 if (rc < 0) ··· 223 kfree(efx->phy_data); 224 efx->phy_data = NULL; 225 return rc; 226 + } 227 + 228 + static int tenxpress_special_reset(struct efx_nic *efx) 229 + { 230 + int rc, reg; 231 + 232 + EFX_TRACE(efx, "%s\n", __func__); 233 + 234 + /* Initiate reset */ 235 + reg = mdio_clause45_read(efx, efx->mii.phy_id, 236 + MDIO_MMD_PMAPMD, PMA_PMD_EXT_CTRL_REG); 237 + reg |= (1 << PMA_PMD_EXT_SSR_LBN); 238 + mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD, 239 + PMA_PMD_EXT_CTRL_REG, reg); 240 + 241 + msleep(200); 242 + 243 + /* Wait for the blocks to come out of reset */ 244 + rc = mdio_clause45_wait_reset_mmds(efx, 245 + TENXPRESS_REQUIRED_DEVS); 246 + if (rc < 0) 247 + return rc; 248 + 249 + /* Try and reconfigure the device */ 250 + rc = tenxpress_init(efx); 251 + if (rc < 0) 252 + return rc; 253 + 254 + return 0; 255 } 256 257 static void tenxpress_set_bad_lp(struct efx_nic *efx, int bad_lp) ··· 299 return ok; 300 } 301 302 + static void tenxpress_phyxs_loopback(struct efx_nic *efx) 303 + { 304 + int phy_id = efx->mii.phy_id; 305 + int ctrl1, ctrl2; 306 + 307 + ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS, 308 + PHYXS_TEST1); 309 + if (efx->loopback_mode == LOOPBACK_PHYXS) 310 + ctrl2 |= (1 << LOOPBACK_NEAR_LBN); 311 + else 312 + ctrl2 &= ~(1 << LOOPBACK_NEAR_LBN); 313 + if (ctrl1 != ctrl2) 314 + mdio_clause45_write(efx, phy_id, MDIO_MMD_PHYXS, 315 + PHYXS_TEST1, ctrl2); 316 + } 317 + 318 static void tenxpress_phy_reconfigure(struct efx_nic *efx) 319 { 320 + struct tenxpress_phy_data *phy_data = efx->phy_data; 321 + int loop_change = LOOPBACK_OUT_OF(phy_data, efx, 322 + TENXPRESS_LOOPBACKS); 323 + 324 if (!tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL)) 325 return; 326 327 + /* When coming out of transmit disable, coming out of low power 328 + * mode, or moving out of any PHY internal loopback mode, 329 + * perform a special software reset */ 330 + if ((phy_data->tx_disabled && !efx->tx_disabled) || 331 + loop_change) { 332 + (void) tenxpress_special_reset(efx); 333 + falcon_reset_xaui(efx); 334 + } 335 + 336 + 
mdio_clause45_transmit_disable(efx); 337 + mdio_clause45_phy_reconfigure(efx); 338 + tenxpress_phyxs_loopback(efx); 339 + 340 + phy_data->tx_disabled = efx->tx_disabled; 341 + phy_data->loopback_mode = efx->loopback_mode; 342 efx->link_up = tenxpress_link_ok(efx, 0); 343 efx->link_options = GM_LPA_10000FULL; 344 } ··· 431 .clear_interrupt = tenxpress_phy_clear_interrupt, 432 .reset_xaui = tenxpress_reset_xaui, 433 .mmds = TENXPRESS_REQUIRED_DEVS, 434 + .loopbacks = TENXPRESS_LOOPBACKS, 435 };
+664
drivers/net/sfc/tx.c
··· 82 } 83 } 84 85 86 /* 87 * Add a socket buffer to a TX queue ··· 153 int rc = NETDEV_TX_OK; 154 155 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); 156 157 /* Get size of the initial fragment */ 158 len = skb_headlen(skb); ··· 209 insert_ptr = (tx_queue->insert_count & 210 efx->type->txd_ring_mask); 211 buffer = &tx_queue->buffer[insert_ptr]; 212 EFX_BUG_ON_PARANOID(buffer->skb); 213 EFX_BUG_ON_PARANOID(buffer->len); 214 EFX_BUG_ON_PARANOID(buffer->continuation != 1); ··· 477 478 efx_release_tx_buffers(tx_queue); 479 480 /* Release queue's stop on port, if any */ 481 if (tx_queue->stopped) { 482 tx_queue->stopped = 0; ··· 498 } 499 500
··· 82 } 83 } 84 85 + /** 86 + * struct efx_tso_header - a DMA mapped buffer for packet headers 87 + * @next: Linked list of free ones. 88 + * The list is protected by the TX queue lock. 89 + * @dma_unmap_len: Length to unmap for an oversize buffer, or 0. 90 + * @dma_addr: The DMA address of the header below. 91 + * 92 + * This controls the memory used for a TSO header. Use TSOH_DATA() 93 + * to find the packet header data. Use TSOH_SIZE() to calculate the 94 + * total size required for a given packet header length. TSO headers 95 + * in the free list are exactly %TSOH_STD_SIZE bytes in size. 96 + */ 97 + struct efx_tso_header { 98 + union { 99 + struct efx_tso_header *next; 100 + size_t unmap_len; 101 + }; 102 + dma_addr_t dma_addr; 103 + }; 104 + 105 + static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, 106 + const struct sk_buff *skb); 107 + static void efx_fini_tso(struct efx_tx_queue *tx_queue); 108 + static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, 109 + struct efx_tso_header *tsoh); 110 + 111 + static inline void efx_tsoh_free(struct efx_tx_queue *tx_queue, 112 + struct efx_tx_buffer *buffer) 113 + { 114 + if (buffer->tsoh) { 115 + if (likely(!buffer->tsoh->unmap_len)) { 116 + buffer->tsoh->next = tx_queue->tso_headers_free; 117 + tx_queue->tso_headers_free = buffer->tsoh; 118 + } else { 119 + efx_tsoh_heap_free(tx_queue, buffer->tsoh); 120 + } 121 + buffer->tsoh = NULL; 122 + } 123 + } 124 + 125 126 /* 127 * Add a socket buffer to a TX queue ··· 113 int rc = NETDEV_TX_OK; 114 115 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); 116 + 117 + if (skb_shinfo((struct sk_buff *)skb)->gso_size) 118 + return efx_enqueue_skb_tso(tx_queue, skb); 119 120 /* Get size of the initial fragment */ 121 len = skb_headlen(skb); ··· 166 insert_ptr = (tx_queue->insert_count & 167 efx->type->txd_ring_mask); 168 buffer = &tx_queue->buffer[insert_ptr]; 169 + efx_tsoh_free(tx_queue, buffer); 170 + EFX_BUG_ON_PARANOID(buffer->tsoh); 171 EFX_BUG_ON_PARANOID(buffer->skb); 172 EFX_BUG_ON_PARANOID(buffer->len); 173 EFX_BUG_ON_PARANOID(buffer->continuation != 1); ··· 432 433 efx_release_tx_buffers(tx_queue); 434 435 + /* Free up TSO header cache */ 436 + efx_fini_tso(tx_queue); 437 + 438 /* Release queue's stop on port, if any */ 439 if (tx_queue->stopped) { 440 tx_queue->stopped = 0; ··· 450 } 451 452 453 + /* Efx TCP segmentation acceleration. 454 + * 455 + * Why? Because by doing it here in the driver we can go significantly 456 + * faster than the GSO. 457 + * 458 + * Requires TX checksum offload support. 459 + */ 460 + 461 + /* Number of bytes inserted at the start of a TSO header buffer, 462 + * similar to NET_IP_ALIGN. 463 + */ 464 + #if defined(__i386__) || defined(__x86_64__) 465 + #define TSOH_OFFSET 0 466 + #else 467 + #define TSOH_OFFSET NET_IP_ALIGN 468 + #endif 469 + 470 + #define TSOH_BUFFER(tsoh) ((u8 *)(tsoh + 1) + TSOH_OFFSET) 471 + 472 + /* Total size of struct efx_tso_header, buffer and padding */ 473 + #define TSOH_SIZE(hdr_len) \ 474 + (sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len) 475 + 476 + /* Size of blocks on free list. Larger blocks must be allocated from 477 + * the heap. 
478 + */ 479 + #define TSOH_STD_SIZE 128 480 + 481 + #define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2)) 482 + #define ETH_HDR_LEN(skb) (skb_network_header(skb) - (skb)->data) 483 + #define SKB_TCP_OFF(skb) PTR_DIFF(tcp_hdr(skb), (skb)->data) 484 + #define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data) 485 + 486 + /** 487 + * struct tso_state - TSO state for an SKB 488 + * @remaining_len: Bytes of data we've yet to segment 489 + * @seqnum: Current sequence number 490 + * @packet_space: Remaining space in current packet 491 + * @ifc: Input fragment cursor. 492 + * Where we are in the current fragment of the incoming SKB. These 493 + * values get updated in place when we split a fragment over 494 + * multiple packets. 495 + * @p: Parameters. 496 + * These values are set once at the start of the TSO send and do 497 + * not get changed as the routine progresses. 498 + * 499 + * The state used during segmentation. It is put into this data structure 500 + * just to make it easy to pass into inline functions. 501 + */ 502 + struct tso_state { 503 + unsigned remaining_len; 504 + unsigned seqnum; 505 + unsigned packet_space; 506 + 507 + struct { 508 + /* DMA address of current position */ 509 + dma_addr_t dma_addr; 510 + /* Remaining length */ 511 + unsigned int len; 512 + /* DMA address and length of the whole fragment */ 513 + unsigned int unmap_len; 514 + dma_addr_t unmap_addr; 515 + struct page *page; 516 + unsigned page_off; 517 + } ifc; 518 + 519 + struct { 520 + /* The number of bytes of header */ 521 + unsigned int header_length; 522 + 523 + /* The number of bytes to put in each outgoing segment. */ 524 + int full_packet_size; 525 + 526 + /* Current IPv4 ID, host endian. */ 527 + unsigned ipv4_id; 528 + } p; 529 + }; 530 + 531 + 532 + /* 533 + * Verify that our various assumptions about sk_buffs and the conditions 534 + * under which TSO will be attempted hold true. 535 + */ 536 + static inline void efx_tso_check_safe(const struct sk_buff *skb) 537 + { 538 + EFX_BUG_ON_PARANOID(skb->protocol != htons(ETH_P_IP)); 539 + EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto != 540 + skb->protocol); 541 + EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP); 542 + EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data) 543 + + (tcp_hdr(skb)->doff << 2u)) > 544 + skb_headlen(skb)); 545 + } 546 + 547 + 548 + /* 549 + * Allocate a page worth of efx_tso_header structures, and string them 550 + * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM. 551 + */ 552 + static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue) 553 + { 554 + 555 + struct pci_dev *pci_dev = tx_queue->efx->pci_dev; 556 + struct efx_tso_header *tsoh; 557 + dma_addr_t dma_addr; 558 + u8 *base_kva, *kva; 559 + 560 + base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr); 561 + if (base_kva == NULL) { 562 + EFX_ERR(tx_queue->efx, "Unable to allocate page for TSO" 563 + " headers\n"); 564 + return -ENOMEM; 565 + } 566 + 567 + /* pci_alloc_consistent() allocates pages. */ 568 + EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u)); 569 + 570 + for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) { 571 + tsoh = (struct efx_tso_header *)kva; 572 + tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva); 573 + tsoh->next = tx_queue->tso_headers_free; 574 + tx_queue->tso_headers_free = tsoh; 575 + } 576 + 577 + return 0; 578 + } 579 + 580 + 581 + /* Free up a TSO header, and all others in the same page. 
*/ 582 + static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue, 583 + struct efx_tso_header *tsoh, 584 + struct pci_dev *pci_dev) 585 + { 586 + struct efx_tso_header **p; 587 + unsigned long base_kva; 588 + dma_addr_t base_dma; 589 + 590 + base_kva = (unsigned long)tsoh & PAGE_MASK; 591 + base_dma = tsoh->dma_addr & PAGE_MASK; 592 + 593 + p = &tx_queue->tso_headers_free; 594 + while (*p != NULL) 595 + if (((unsigned long)*p & PAGE_MASK) == base_kva) 596 + *p = (*p)->next; 597 + else 598 + p = &(*p)->next; 599 + 600 + pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma); 601 + } 602 + 603 + static struct efx_tso_header * 604 + efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len) 605 + { 606 + struct efx_tso_header *tsoh; 607 + 608 + tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA); 609 + if (unlikely(!tsoh)) 610 + return NULL; 611 + 612 + tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev, 613 + TSOH_BUFFER(tsoh), header_len, 614 + PCI_DMA_TODEVICE); 615 + if (unlikely(pci_dma_mapping_error(tsoh->dma_addr))) { 616 + kfree(tsoh); 617 + return NULL; 618 + } 619 + 620 + tsoh->unmap_len = header_len; 621 + return tsoh; 622 + } 623 + 624 + static void 625 + efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh) 626 + { 627 + pci_unmap_single(tx_queue->efx->pci_dev, 628 + tsoh->dma_addr, tsoh->unmap_len, 629 + PCI_DMA_TODEVICE); 630 + kfree(tsoh); 631 + } 632 + 633 + /** 634 + * efx_tx_queue_insert - push descriptors onto the TX queue 635 + * @tx_queue: Efx TX queue 636 + * @dma_addr: DMA address of fragment 637 + * @len: Length of fragment 638 + * @skb: Only non-null for end of last segment 639 + * @end_of_packet: True if last fragment in a packet 640 + * @unmap_addr: DMA address of fragment for unmapping 641 + * @unmap_len: Only set this in last segment of a fragment 642 + * 643 + * Push descriptors onto the TX queue. Return 0 on success or 1 if 644 + * @tx_queue full. 645 + */ 646 + static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, 647 + dma_addr_t dma_addr, unsigned len, 648 + const struct sk_buff *skb, int end_of_packet, 649 + dma_addr_t unmap_addr, unsigned unmap_len) 650 + { 651 + struct efx_tx_buffer *buffer; 652 + struct efx_nic *efx = tx_queue->efx; 653 + unsigned dma_len, fill_level, insert_ptr, misalign; 654 + int q_space; 655 + 656 + EFX_BUG_ON_PARANOID(len <= 0); 657 + 658 + fill_level = tx_queue->insert_count - tx_queue->old_read_count; 659 + /* -1 as there is no way to represent all descriptors used */ 660 + q_space = efx->type->txd_ring_mask - 1 - fill_level; 661 + 662 + while (1) { 663 + if (unlikely(q_space-- <= 0)) { 664 + /* It might be that completions have happened 665 + * since the xmit path last checked. Update 666 + * the xmit path's copy of read_count. 667 + */ 668 + ++tx_queue->stopped; 669 + /* This memory barrier protects the change of 670 + * stopped from the access of read_count. 
*/ 671 + smp_mb(); 672 + tx_queue->old_read_count = 673 + *(volatile unsigned *)&tx_queue->read_count; 674 + fill_level = (tx_queue->insert_count 675 + - tx_queue->old_read_count); 676 + q_space = efx->type->txd_ring_mask - 1 - fill_level; 677 + if (unlikely(q_space-- <= 0)) 678 + return 1; 679 + smp_mb(); 680 + --tx_queue->stopped; 681 + } 682 + 683 + insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask; 684 + buffer = &tx_queue->buffer[insert_ptr]; 685 + ++tx_queue->insert_count; 686 + 687 + EFX_BUG_ON_PARANOID(tx_queue->insert_count - 688 + tx_queue->read_count > 689 + efx->type->txd_ring_mask); 690 + 691 + efx_tsoh_free(tx_queue, buffer); 692 + EFX_BUG_ON_PARANOID(buffer->len); 693 + EFX_BUG_ON_PARANOID(buffer->unmap_len); 694 + EFX_BUG_ON_PARANOID(buffer->skb); 695 + EFX_BUG_ON_PARANOID(buffer->continuation != 1); 696 + EFX_BUG_ON_PARANOID(buffer->tsoh); 697 + 698 + buffer->dma_addr = dma_addr; 699 + 700 + /* Ensure we do not cross a boundary unsupported by H/W */ 701 + dma_len = (~dma_addr & efx->type->tx_dma_mask) + 1; 702 + 703 + misalign = (unsigned)dma_addr & efx->type->bug5391_mask; 704 + if (misalign && dma_len + misalign > 512) 705 + dma_len = 512 - misalign; 706 + 707 + /* If there is enough space to send then do so */ 708 + if (dma_len >= len) 709 + break; 710 + 711 + buffer->len = dma_len; /* Don't set the other members */ 712 + dma_addr += dma_len; 713 + len -= dma_len; 714 + } 715 + 716 + EFX_BUG_ON_PARANOID(!len); 717 + buffer->len = len; 718 + buffer->skb = skb; 719 + buffer->continuation = !end_of_packet; 720 + buffer->unmap_addr = unmap_addr; 721 + buffer->unmap_len = unmap_len; 722 + return 0; 723 + } 724 + 725 + 726 + /* 727 + * Put a TSO header into the TX queue. 728 + * 729 + * This is special-cased because we know that it is small enough to fit in 730 + * a single fragment, and we know it doesn't cross a page boundary. It 731 + * also allows us to not worry about end-of-packet etc. 732 + */ 733 + static inline void efx_tso_put_header(struct efx_tx_queue *tx_queue, 734 + struct efx_tso_header *tsoh, unsigned len) 735 + { 736 + struct efx_tx_buffer *buffer; 737 + 738 + buffer = &tx_queue->buffer[tx_queue->insert_count & 739 + tx_queue->efx->type->txd_ring_mask]; 740 + efx_tsoh_free(tx_queue, buffer); 741 + EFX_BUG_ON_PARANOID(buffer->len); 742 + EFX_BUG_ON_PARANOID(buffer->unmap_len); 743 + EFX_BUG_ON_PARANOID(buffer->skb); 744 + EFX_BUG_ON_PARANOID(buffer->continuation != 1); 745 + EFX_BUG_ON_PARANOID(buffer->tsoh); 746 + buffer->len = len; 747 + buffer->dma_addr = tsoh->dma_addr; 748 + buffer->tsoh = tsoh; 749 + 750 + ++tx_queue->insert_count; 751 + } 752 + 753 + 754 + /* Remove descriptors put into a tx_queue. */ 755 + static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) 756 + { 757 + struct efx_tx_buffer *buffer; 758 + 759 + /* Work backwards until we hit the original insert pointer value */ 760 + while (tx_queue->insert_count != tx_queue->write_count) { 761 + --tx_queue->insert_count; 762 + buffer = &tx_queue->buffer[tx_queue->insert_count & 763 + tx_queue->efx->type->txd_ring_mask]; 764 + efx_tsoh_free(tx_queue, buffer); 765 + EFX_BUG_ON_PARANOID(buffer->skb); 766 + buffer->len = 0; 767 + buffer->continuation = 1; 768 + if (buffer->unmap_len) { 769 + pci_unmap_page(tx_queue->efx->pci_dev, 770 + buffer->unmap_addr, 771 + buffer->unmap_len, PCI_DMA_TODEVICE); 772 + buffer->unmap_len = 0; 773 + } 774 + } 775 + } 776 + 777 + 778 + /* Parse the SKB header and initialise state. 
*/ 779 + static inline void tso_start(struct tso_state *st, const struct sk_buff *skb) 780 + { 781 + /* All ethernet/IP/TCP headers combined size is TCP header size 782 + * plus offset of TCP header relative to start of packet. 783 + */ 784 + st->p.header_length = ((tcp_hdr(skb)->doff << 2u) 785 + + PTR_DIFF(tcp_hdr(skb), skb->data)); 786 + st->p.full_packet_size = (st->p.header_length 787 + + skb_shinfo(skb)->gso_size); 788 + 789 + st->p.ipv4_id = ntohs(ip_hdr(skb)->id); 790 + st->seqnum = ntohl(tcp_hdr(skb)->seq); 791 + 792 + EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg); 793 + EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn); 794 + EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst); 795 + 796 + st->packet_space = st->p.full_packet_size; 797 + st->remaining_len = skb->len - st->p.header_length; 798 + } 799 + 800 + 801 + /** 802 + * tso_get_fragment - record fragment details and map for DMA 803 + * @st: TSO state 804 + * @efx: Efx NIC 805 + * @data: Pointer to fragment data 806 + * @len: Length of fragment 807 + * 808 + * Record fragment details and map for DMA. Return 0 on success, or 809 + * -%ENOMEM if DMA mapping fails. 810 + */ 811 + static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, 812 + int len, struct page *page, int page_off) 813 + { 814 + 815 + st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off, 816 + len, PCI_DMA_TODEVICE); 817 + if (likely(!pci_dma_mapping_error(st->ifc.unmap_addr))) { 818 + st->ifc.unmap_len = len; 819 + st->ifc.len = len; 820 + st->ifc.dma_addr = st->ifc.unmap_addr; 821 + st->ifc.page = page; 822 + st->ifc.page_off = page_off; 823 + return 0; 824 + } 825 + return -ENOMEM; 826 + } 827 + 828 + 829 + /** 830 + * tso_fill_packet_with_fragment - form descriptors for the current fragment 831 + * @tx_queue: Efx TX queue 832 + * @skb: Socket buffer 833 + * @st: TSO state 834 + * 835 + * Form descriptors for the current fragment, until we reach the end 836 + * of fragment or end-of-packet. Return 0 on success, 1 if not enough 837 + * space in @tx_queue. 838 + */ 839 + static inline int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, 840 + const struct sk_buff *skb, 841 + struct tso_state *st) 842 + { 843 + 844 + int n, end_of_packet, rc; 845 + 846 + if (st->ifc.len == 0) 847 + return 0; 848 + if (st->packet_space == 0) 849 + return 0; 850 + 851 + EFX_BUG_ON_PARANOID(st->ifc.len <= 0); 852 + EFX_BUG_ON_PARANOID(st->packet_space <= 0); 853 + 854 + n = min(st->ifc.len, st->packet_space); 855 + 856 + st->packet_space -= n; 857 + st->remaining_len -= n; 858 + st->ifc.len -= n; 859 + st->ifc.page_off += n; 860 + end_of_packet = st->remaining_len == 0 || st->packet_space == 0; 861 + 862 + rc = efx_tx_queue_insert(tx_queue, st->ifc.dma_addr, n, 863 + st->remaining_len ? NULL : skb, 864 + end_of_packet, st->ifc.unmap_addr, 865 + st->ifc.len ? 0 : st->ifc.unmap_len); 866 + 867 + st->ifc.dma_addr += n; 868 + 869 + return rc; 870 + } 871 + 872 + 873 + /** 874 + * tso_start_new_packet - generate a new header and prepare for the new packet 875 + * @tx_queue: Efx TX queue 876 + * @skb: Socket buffer 877 + * @st: TSO state 878 + * 879 + * Generate a new header and prepare for the new packet. Return 0 on 880 + * success, or -1 if failed to alloc header. 
881 + */ 882 + static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue, 883 + const struct sk_buff *skb, 884 + struct tso_state *st) 885 + { 886 + struct efx_tso_header *tsoh; 887 + struct iphdr *tsoh_iph; 888 + struct tcphdr *tsoh_th; 889 + unsigned ip_length; 890 + u8 *header; 891 + 892 + /* Allocate a DMA-mapped header buffer. */ 893 + if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) { 894 + if (tx_queue->tso_headers_free == NULL) 895 + if (efx_tsoh_block_alloc(tx_queue)) 896 + return -1; 897 + EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free); 898 + tsoh = tx_queue->tso_headers_free; 899 + tx_queue->tso_headers_free = tsoh->next; 900 + tsoh->unmap_len = 0; 901 + } else { 902 + tx_queue->tso_long_headers++; 903 + tsoh = efx_tsoh_heap_alloc(tx_queue, st->p.header_length); 904 + if (unlikely(!tsoh)) 905 + return -1; 906 + } 907 + 908 + header = TSOH_BUFFER(tsoh); 909 + tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb)); 910 + tsoh_iph = (struct iphdr *)(header + SKB_IPV4_OFF(skb)); 911 + 912 + /* Copy and update the headers. */ 913 + memcpy(header, skb->data, st->p.header_length); 914 + 915 + tsoh_th->seq = htonl(st->seqnum); 916 + st->seqnum += skb_shinfo(skb)->gso_size; 917 + if (st->remaining_len > skb_shinfo(skb)->gso_size) { 918 + /* This packet will not finish the TSO burst. */ 919 + ip_length = st->p.full_packet_size - ETH_HDR_LEN(skb); 920 + tsoh_th->fin = 0; 921 + tsoh_th->psh = 0; 922 + } else { 923 + /* This packet will be the last in the TSO burst. */ 924 + ip_length = (st->p.header_length - ETH_HDR_LEN(skb) 925 + + st->remaining_len); 926 + tsoh_th->fin = tcp_hdr(skb)->fin; 927 + tsoh_th->psh = tcp_hdr(skb)->psh; 928 + } 929 + tsoh_iph->tot_len = htons(ip_length); 930 + 931 + /* Linux leaves suitable gaps in the IP ID space for us to fill. */ 932 + tsoh_iph->id = htons(st->p.ipv4_id); 933 + st->p.ipv4_id++; 934 + 935 + st->packet_space = skb_shinfo(skb)->gso_size; 936 + ++tx_queue->tso_packets; 937 + 938 + /* Form a descriptor for this header. */ 939 + efx_tso_put_header(tx_queue, tsoh, st->p.header_length); 940 + 941 + return 0; 942 + } 943 + 944 + 945 + /** 946 + * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer 947 + * @tx_queue: Efx TX queue 948 + * @skb: Socket buffer 949 + * 950 + * Context: You must hold netif_tx_lock() to call this function. 951 + * 952 + * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if 953 + * @skb was not enqueued. In all cases @skb is consumed. Return 954 + * %NETDEV_TX_OK or %NETDEV_TX_BUSY. 955 + */ 956 + static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, 957 + const struct sk_buff *skb) 958 + { 959 + int frag_i, rc, rc2 = NETDEV_TX_OK; 960 + struct tso_state state; 961 + skb_frag_t *f; 962 + 963 + /* Verify TSO is safe - these checks should never fail. */ 964 + efx_tso_check_safe(skb); 965 + 966 + EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); 967 + 968 + tso_start(&state, skb); 969 + 970 + /* Assume that skb header area contains exactly the headers, and 971 + * all payload is in the frag list. 972 + */ 973 + if (skb_headlen(skb) == state.p.header_length) { 974 + /* Grab the first payload fragment. 
*/ 975 + EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1); 976 + frag_i = 0; 977 + f = &skb_shinfo(skb)->frags[frag_i]; 978 + rc = tso_get_fragment(&state, tx_queue->efx, 979 + f->size, f->page, f->page_offset); 980 + if (rc) 981 + goto mem_err; 982 + } else { 983 + /* It may look like this code fragment assumes that the 984 + * skb->data portion does not cross a page boundary, but 985 + * that is not the case. It is guaranteed to be direct 986 + * mapped memory, and therefore is physically contiguous, 987 + * and so DMA will work fine. kmap_atomic() on this region 988 + * will just return the direct mapping, so that will work 989 + * too. 990 + */ 991 + int page_off = (unsigned long)skb->data & (PAGE_SIZE - 1); 992 + int hl = state.p.header_length; 993 + rc = tso_get_fragment(&state, tx_queue->efx, 994 + skb_headlen(skb) - hl, 995 + virt_to_page(skb->data), page_off + hl); 996 + if (rc) 997 + goto mem_err; 998 + frag_i = -1; 999 + } 1000 + 1001 + if (tso_start_new_packet(tx_queue, skb, &state) < 0) 1002 + goto mem_err; 1003 + 1004 + while (1) { 1005 + rc = tso_fill_packet_with_fragment(tx_queue, skb, &state); 1006 + if (unlikely(rc)) 1007 + goto stop; 1008 + 1009 + /* Move onto the next fragment? */ 1010 + if (state.ifc.len == 0) { 1011 + if (++frag_i >= skb_shinfo(skb)->nr_frags) 1012 + /* End of payload reached. */ 1013 + break; 1014 + f = &skb_shinfo(skb)->frags[frag_i]; 1015 + rc = tso_get_fragment(&state, tx_queue->efx, 1016 + f->size, f->page, f->page_offset); 1017 + if (rc) 1018 + goto mem_err; 1019 + } 1020 + 1021 + /* Start at new packet? */ 1022 + if (state.packet_space == 0 && 1023 + tso_start_new_packet(tx_queue, skb, &state) < 0) 1024 + goto mem_err; 1025 + } 1026 + 1027 + /* Pass off to hardware */ 1028 + falcon_push_buffers(tx_queue); 1029 + 1030 + tx_queue->tso_bursts++; 1031 + return NETDEV_TX_OK; 1032 + 1033 + mem_err: 1034 + EFX_ERR(tx_queue->efx, "Out of memory for TSO headers, or PCI mapping" 1035 + " error\n"); 1036 + dev_kfree_skb_any((struct sk_buff *)skb); 1037 + goto unwind; 1038 + 1039 + stop: 1040 + rc2 = NETDEV_TX_BUSY; 1041 + 1042 + /* Stop the queue if it wasn't stopped before. */ 1043 + if (tx_queue->stopped == 1) 1044 + efx_stop_queue(tx_queue->efx); 1045 + 1046 + unwind: 1047 + efx_enqueue_unwind(tx_queue); 1048 + return rc2; 1049 + } 1050 + 1051 + 1052 + /* 1053 + * Free up all TSO datastructures associated with tx_queue. This 1054 + * routine should be called only once the tx_queue is both empty and 1055 + * will no longer be used. 1056 + */ 1057 + static void efx_fini_tso(struct efx_tx_queue *tx_queue) 1058 + { 1059 + unsigned i; 1060 + 1061 + if (tx_queue->buffer) 1062 + for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i) 1063 + efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); 1064 + 1065 + while (tx_queue->tso_headers_free != NULL) 1066 + efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free, 1067 + tx_queue->efx->pci_dev); 1068 + }
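The free-list blocks handed out by efx_tsoh_block_alloc() are TSOH_STD_SIZE (128) bytes each, which comfortably covers an option-free Ethernet/IPv4/TCP header, so the efx_tsoh_heap_alloc() fallback (counted in tso_long_headers) should only fire for unusually long headers. The stand-alone arithmetic check below is user-space C rather than driver code; the dma_addr_t stand-in and the resulting structure size are build-dependent assumptions.

/* User-space check of the TSO header sizing, not driver code. */
#include <stdio.h>
#include <stddef.h>

/* Mirrors struct efx_tso_header from the patch; dma_addr_t is replaced by
 * a 64-bit integer so this compiles outside the kernel. */
struct efx_tso_header {
	union {
		struct efx_tso_header *next;
		size_t unmap_len;
	};
	unsigned long long dma_addr;
};

#define TSOH_OFFSET	0	/* 0 on x86; NET_IP_ALIGN elsewhere */
#define TSOH_SIZE(hdr_len) \
	(sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)
#define TSOH_STD_SIZE	128

int main(void)
{
	unsigned hdr_len = 14 + 20 + 20;	/* Ethernet + IPv4 + TCP, no options */

	printf("TSOH_SIZE(%u) = %zu bytes, standard 128-byte block %s\n",
	       hdr_len, TSOH_SIZE(hdr_len),
	       TSOH_SIZE(hdr_len) <= TSOH_STD_SIZE ? "suffices" : "is too small");
	return 0;
}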
+36
drivers/net/sfc/xfp_phy.c
··· 24 MDIO_MMDREG_DEVS0_PMAPMD | \ 25 MDIO_MMDREG_DEVS0_PHYXS) 26 27 /****************************************************************************/ 28 /* Quake-specific MDIO registers */ 29 #define MDIO_QUAKE_LED0_REG (0xD006) ··· 38 mdio_clause45_write(p, p->mii.phy_id, MDIO_MMD_PMAPMD, addr, 39 mode); 40 } 41 42 #define XFP_MAX_RESET_TIME 500 43 #define XFP_RESET_WAIT 10 ··· 80 81 static int xfp_phy_init(struct efx_nic *efx) 82 { 83 u32 devid = mdio_clause45_read_id(efx, MDIO_MMD_PHYXS); 84 int rc; 85 86 EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision" 87 " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid), 88 MDIO_ID_REV(devid)); 89 90 rc = xfp_reset_phy(efx); 91 92 EFX_INFO(efx, "XFP: PHY init %s.\n", 93 rc ? "failed" : "successful"); 94 95 return rc; 96 } 97 ··· 131 132 static void xfp_phy_reconfigure(struct efx_nic *efx) 133 { 134 efx->link_up = xfp_link_ok(efx); 135 efx->link_options = GM_LPA_10000FULL; 136 } ··· 150 { 151 /* Clobber the LED if it was blinking */ 152 efx->board_info.blink(efx, 0); 153 } 154 155 struct efx_phy_operations falcon_xfp_phy_ops = { ··· 164 .clear_interrupt = xfp_phy_clear_interrupt, 165 .reset_xaui = efx_port_dummy_op_void, 166 .mmds = XFP_REQUIRED_DEVS, 167 };
··· 24 MDIO_MMDREG_DEVS0_PMAPMD | \ 25 MDIO_MMDREG_DEVS0_PHYXS) 26 27 + #define XFP_LOOPBACKS ((1 << LOOPBACK_PCS) | \ 28 + (1 << LOOPBACK_PMAPMD) | \ 29 + (1 << LOOPBACK_NETWORK)) 30 + 31 /****************************************************************************/ 32 /* Quake-specific MDIO registers */ 33 #define MDIO_QUAKE_LED0_REG (0xD006) ··· 34 mdio_clause45_write(p, p->mii.phy_id, MDIO_MMD_PMAPMD, addr, 35 mode); 36 } 37 + 38 + struct xfp_phy_data { 39 + int tx_disabled; 40 + }; 41 42 #define XFP_MAX_RESET_TIME 500 43 #define XFP_RESET_WAIT 10 ··· 72 73 static int xfp_phy_init(struct efx_nic *efx) 74 { 75 + struct xfp_phy_data *phy_data; 76 u32 devid = mdio_clause45_read_id(efx, MDIO_MMD_PHYXS); 77 int rc; 78 + 79 + phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL); 80 + efx->phy_data = (void *) phy_data; 81 82 EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision" 83 " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid), 84 MDIO_ID_REV(devid)); 85 86 + phy_data->tx_disabled = efx->tx_disabled; 87 + 88 rc = xfp_reset_phy(efx); 89 90 EFX_INFO(efx, "XFP: PHY init %s.\n", 91 rc ? "failed" : "successful"); 92 + if (rc < 0) 93 + goto fail; 94 95 + return 0; 96 + 97 + fail: 98 + kfree(efx->phy_data); 99 + efx->phy_data = NULL; 100 return rc; 101 } 102 ··· 110 111 static void xfp_phy_reconfigure(struct efx_nic *efx) 112 { 113 + struct xfp_phy_data *phy_data = efx->phy_data; 114 + 115 + /* Reset the PHY when moving from tx off to tx on */ 116 + if (phy_data->tx_disabled && !efx->tx_disabled) 117 + xfp_reset_phy(efx); 118 + 119 + mdio_clause45_transmit_disable(efx); 120 + mdio_clause45_phy_reconfigure(efx); 121 + 122 + phy_data->tx_disabled = efx->tx_disabled; 123 efx->link_up = xfp_link_ok(efx); 124 efx->link_options = GM_LPA_10000FULL; 125 } ··· 119 { 120 /* Clobber the LED if it was blinking */ 121 efx->board_info.blink(efx, 0); 122 + 123 + /* Free the context block */ 124 + kfree(efx->phy_data); 125 + efx->phy_data = NULL; 126 } 127 128 struct efx_phy_operations falcon_xfp_phy_ops = { ··· 129 .clear_interrupt = xfp_phy_clear_interrupt, 130 .reset_xaui = efx_port_dummy_op_void, 131 .mmds = XFP_REQUIRED_DEVS, 132 + .loopbacks = XFP_LOOPBACKS, 133 };
+2 -2
drivers/net/sky2.h
··· 1966 struct tx_ring_info { 1967 struct sk_buff *skb; 1968 DECLARE_PCI_UNMAP_ADDR(mapaddr); 1969 - DECLARE_PCI_UNMAP_ADDR(maplen); 1970 }; 1971 1972 struct rx_ring_info { 1973 struct sk_buff *skb; 1974 dma_addr_t data_addr; 1975 - DECLARE_PCI_UNMAP_ADDR(data_size); 1976 dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT]; 1977 }; 1978
··· 1966 struct tx_ring_info { 1967 struct sk_buff *skb; 1968 DECLARE_PCI_UNMAP_ADDR(mapaddr); 1969 + DECLARE_PCI_UNMAP_LEN(maplen); 1970 }; 1971 1972 struct rx_ring_info { 1973 struct sk_buff *skb; 1974 dma_addr_t data_addr; 1975 + DECLARE_PCI_UNMAP_LEN(data_size); 1976 dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT]; 1977 }; 1978
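The corrected declarations matter because the two macro families pair with different accessors: fields declared with DECLARE_PCI_UNMAP_ADDR() are accessed through pci_unmap_addr()/pci_unmap_addr_set(), while fields declared with DECLARE_PCI_UNMAP_LEN() go through pci_unmap_len()/pci_unmap_len_set(). A minimal hedged sketch of the usual pattern (illustrative ring-entry code, not taken from sky2.c; kernel context with <linux/pci.h> assumed):

/* Record a streaming DMA mapping in a ring entry so it can be
 * unmapped later without relying on the skb still being around. */
struct ring_entry_sketch {
	struct sk_buff *skb;
	DECLARE_PCI_UNMAP_ADDR(mapaddr);
	DECLARE_PCI_UNMAP_LEN(maplen);
};

static void sketch_map_tx(struct pci_dev *pdev, struct ring_entry_sketch *re,
			  struct sk_buff *skb)
{
	dma_addr_t map = pci_map_single(pdev, skb->data, skb->len,
					PCI_DMA_TODEVICE);

	re->skb = skb;
	pci_unmap_addr_set(re, mapaddr, map);
	pci_unmap_len_set(re, maplen, skb->len);
}

static void sketch_unmap_tx(struct pci_dev *pdev, struct ring_entry_sketch *re)
{
	pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr),
			 pci_unmap_len(re, maplen), PCI_DMA_TODEVICE);
}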
+1 -3
drivers/net/wan/Kconfig
··· 150 151 config HDLC_PPP 152 tristate "Synchronous Point-to-Point Protocol (PPP) support" 153 - depends on HDLC && BROKEN 154 help 155 Generic HDLC driver supporting PPP over WAN connections. 156 - This module is currently broken and will cause a kernel panic 157 - when a device configured in PPP mode is activated. 158 159 It will be replaced by new PPP implementation in Linux 2.6.26. 160
··· 150 151 config HDLC_PPP 152 tristate "Synchronous Point-to-Point Protocol (PPP) support" 153 + depends on HDLC 154 help 155 Generic HDLC driver supporting PPP over WAN connections. 156 157 It will be replaced by new PPP implementation in Linux 2.6.26. 158
+7 -7
drivers/net/wan/cosa.c
··· 629 d->base_addr = chan->cosa->datareg; 630 d->irq = chan->cosa->irq; 631 d->dma = chan->cosa->dma; 632 - d->priv = chan; 633 sppp_attach(&chan->pppdev); 634 if (register_netdev(d)) { 635 printk(KERN_WARNING "%s: register_netdev failed.\n", d->name); ··· 650 651 static int cosa_sppp_open(struct net_device *d) 652 { 653 - struct channel_data *chan = d->priv; 654 int err; 655 unsigned long flags; 656 ··· 690 691 static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *dev) 692 { 693 - struct channel_data *chan = dev->priv; 694 695 netif_stop_queue(dev); 696 ··· 701 702 static void cosa_sppp_timeout(struct net_device *dev) 703 { 704 - struct channel_data *chan = dev->priv; 705 706 if (test_bit(RXBIT, &chan->cosa->rxtx)) { 707 chan->stats.rx_errors++; ··· 720 721 static int cosa_sppp_close(struct net_device *d) 722 { 723 - struct channel_data *chan = d->priv; 724 unsigned long flags; 725 726 netif_stop_queue(d); ··· 800 801 static struct net_device_stats *cosa_net_stats(struct net_device *dev) 802 { 803 - struct channel_data *chan = dev->priv; 804 return &chan->stats; 805 } 806 ··· 1217 int cmd) 1218 { 1219 int rv; 1220 - struct channel_data *chan = dev->priv; 1221 rv = cosa_ioctl_common(chan->cosa, chan, cmd, (unsigned long)ifr->ifr_data); 1222 if (rv == -ENOIOCTLCMD) { 1223 return sppp_do_ioctl(dev, ifr, cmd);
··· 629 d->base_addr = chan->cosa->datareg; 630 d->irq = chan->cosa->irq; 631 d->dma = chan->cosa->dma; 632 + d->ml_priv = chan; 633 sppp_attach(&chan->pppdev); 634 if (register_netdev(d)) { 635 printk(KERN_WARNING "%s: register_netdev failed.\n", d->name); ··· 650 651 static int cosa_sppp_open(struct net_device *d) 652 { 653 + struct channel_data *chan = d->ml_priv; 654 int err; 655 unsigned long flags; 656 ··· 690 691 static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *dev) 692 { 693 + struct channel_data *chan = dev->ml_priv; 694 695 netif_stop_queue(dev); 696 ··· 701 702 static void cosa_sppp_timeout(struct net_device *dev) 703 { 704 + struct channel_data *chan = dev->ml_priv; 705 706 if (test_bit(RXBIT, &chan->cosa->rxtx)) { 707 chan->stats.rx_errors++; ··· 720 721 static int cosa_sppp_close(struct net_device *d) 722 { 723 + struct channel_data *chan = d->ml_priv; 724 unsigned long flags; 725 726 netif_stop_queue(d); ··· 800 801 static struct net_device_stats *cosa_net_stats(struct net_device *dev) 802 { 803 + struct channel_data *chan = dev->ml_priv; 804 return &chan->stats; 805 } 806 ··· 1217 int cmd) 1218 { 1219 int rv; 1220 + struct channel_data *chan = dev->ml_priv; 1221 rv = cosa_ioctl_common(chan->cosa, chan, cmd, (unsigned long)ifr->ifr_data); 1222 if (rv == -ENOIOCTLCMD) { 1223 return sppp_do_ioctl(dev, ifr, cmd);
+1 -1
drivers/net/wan/hdlc_ppp.c
··· 45 int (*old_ioctl)(struct net_device *, struct ifreq *, int); 46 int result; 47 48 - dev->priv = &state(hdlc)->syncppp_ptr; 49 state(hdlc)->syncppp_ptr = &state(hdlc)->pppdev; 50 state(hdlc)->pppdev.dev = dev; 51
··· 45 int (*old_ioctl)(struct net_device *, struct ifreq *, int); 46 int result; 47 48 + dev->ml_priv = &state(hdlc)->syncppp_ptr; 49 state(hdlc)->syncppp_ptr = &state(hdlc)->pppdev; 50 state(hdlc)->pppdev.dev = dev; 51
+6 -6
drivers/net/wan/hostess_sv11.c
··· 75 76 static int hostess_open(struct net_device *d) 77 { 78 - struct sv11_device *sv11=d->priv; 79 int err = -1; 80 81 /* ··· 128 129 static int hostess_close(struct net_device *d) 130 { 131 - struct sv11_device *sv11=d->priv; 132 /* 133 * Discard new frames 134 */ ··· 159 160 static int hostess_ioctl(struct net_device *d, struct ifreq *ifr, int cmd) 161 { 162 - /* struct sv11_device *sv11=d->priv; 163 z8530_ioctl(d,&sv11->sync.chanA,ifr,cmd) */ 164 return sppp_do_ioctl(d, ifr,cmd); 165 } 166 167 static struct net_device_stats *hostess_get_stats(struct net_device *d) 168 { 169 - struct sv11_device *sv11=d->priv; 170 if(sv11) 171 return z8530_get_stats(&sv11->sync.chanA); 172 else ··· 179 180 static int hostess_queue_xmit(struct sk_buff *skb, struct net_device *d) 181 { 182 - struct sv11_device *sv11=d->priv; 183 return z8530_queue_xmit(&sv11->sync.chanA, skb); 184 } 185 ··· 325 /* 326 * Initialise the PPP components 327 */ 328 sppp_attach(&sv->netdev); 329 330 /* ··· 334 335 d->base_addr = iobase; 336 d->irq = irq; 337 - d->priv = sv; 338 339 if(register_netdev(d)) 340 {
··· 75 76 static int hostess_open(struct net_device *d) 77 { 78 + struct sv11_device *sv11=d->ml_priv; 79 int err = -1; 80 81 /* ··· 128 129 static int hostess_close(struct net_device *d) 130 { 131 + struct sv11_device *sv11=d->ml_priv; 132 /* 133 * Discard new frames 134 */ ··· 159 160 static int hostess_ioctl(struct net_device *d, struct ifreq *ifr, int cmd) 161 { 162 + /* struct sv11_device *sv11=d->ml_priv; 163 z8530_ioctl(d,&sv11->sync.chanA,ifr,cmd) */ 164 return sppp_do_ioctl(d, ifr,cmd); 165 } 166 167 static struct net_device_stats *hostess_get_stats(struct net_device *d) 168 { 169 + struct sv11_device *sv11=d->ml_priv; 170 if(sv11) 171 return z8530_get_stats(&sv11->sync.chanA); 172 else ··· 179 180 static int hostess_queue_xmit(struct sk_buff *skb, struct net_device *d) 181 { 182 + struct sv11_device *sv11=d->ml_priv; 183 return z8530_queue_xmit(&sv11->sync.chanA, skb); 184 } 185 ··· 325 /* 326 * Initialise the PPP components 327 */ 328 + d->ml_priv = sv; 329 sppp_attach(&sv->netdev); 330 331 /* ··· 333 334 d->base_addr = iobase; 335 d->irq = irq; 336 337 if(register_netdev(d)) 338 {
+1
drivers/net/wan/lmc/lmc_main.c
··· 891 892 /* Initialize the sppp layer */ 893 /* An ioctl can cause a subsequent detach for raw frame interface */ 894 sc->if_type = LMC_PPP; 895 sc->check = 0xBEAFCAFE; 896 dev->base_addr = pci_resource_start(pdev, 0);
··· 891 892 /* Initialize the sppp layer */ 893 /* An ioctl can cause a subsequent detach for raw frame interface */ 894 + dev->ml_priv = sc; 895 sc->if_type = LMC_PPP; 896 sc->check = 0xBEAFCAFE; 897 dev->base_addr = pci_resource_start(pdev, 0);
+1
drivers/net/wan/sealevel.c
··· 241 return NULL; 242 243 sv = d->priv; 244 sv->if_ptr = &sv->pppdev; 245 sv->pppdev.dev = d; 246 d->base_addr = iobase;
··· 241 return NULL; 242 243 sv = d->priv; 244 + d->ml_priv = sv; 245 sv->if_ptr = &sv->pppdev; 246 sv->pppdev.dev = d; 247 d->base_addr = iobase;
+1 -1
drivers/net/wireless/iwlwifi/iwl-3945.c
··· 666 rx_status.flag = 0; 667 rx_status.mactime = le64_to_cpu(rx_end->timestamp); 668 rx_status.freq = 669 - ieee80211_frequency_to_channel(le16_to_cpu(rx_hdr->channel)); 670 rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? 671 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; 672
··· 666 rx_status.flag = 0; 667 rx_status.mactime = le64_to_cpu(rx_end->timestamp); 668 rx_status.freq = 669 + ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel)); 670 rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? 671 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; 672
+1 -1
drivers/net/wireless/iwlwifi/iwl-4965-rs.c
··· 163 struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file; 164 #endif 165 struct iwl4965_rate dbg_fixed; 166 - struct iwl_priv *drv; 167 #endif 168 }; 169 170 static void rs_rate_scale_perform(struct iwl_priv *priv,
··· 163 struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file; 164 #endif 165 struct iwl4965_rate dbg_fixed; 166 #endif 167 + struct iwl_priv *drv; 168 }; 169 170 static void rs_rate_scale_perform(struct iwl_priv *priv,
+1 -1
drivers/net/wireless/iwlwifi/iwl-4965.c
··· 3978 3979 rx_status.mactime = le64_to_cpu(rx_start->timestamp); 3980 rx_status.freq = 3981 - ieee80211_frequency_to_channel(le16_to_cpu(rx_start->channel)); 3982 rx_status.band = (rx_start->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? 3983 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; 3984 rx_status.rate_idx =
··· 3978 3979 rx_status.mactime = le64_to_cpu(rx_start->timestamp); 3980 rx_status.freq = 3981 + ieee80211_channel_to_frequency(le16_to_cpu(rx_start->channel)); 3982 rx_status.band = (rx_start->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? 3983 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; 3984 rx_status.rate_idx =
+9 -2
drivers/net/wireless/prism54/islpci_dev.c
··· 388 389 netif_start_queue(ndev); 390 391 - /* Turn off carrier unless we know we have associated */ 392 - netif_carrier_off(ndev); 393 394 return 0; 395 }
··· 388 389 netif_start_queue(ndev); 390 391 + /* Turn off carrier if in STA or Ad-hoc mode. It will be turned on 392 + * once the firmware receives a trap of being associated 393 + * (GEN_OID_LINKSTATE). In other modes (AP or WDS or monitor) we 394 + * should just leave the carrier on as its expected the firmware 395 + * won't send us a trigger. */ 396 + if (priv->iw_mode == IW_MODE_INFRA || priv->iw_mode == IW_MODE_ADHOC) 397 + netif_carrier_off(ndev); 398 + else 399 + netif_carrier_on(ndev); 400 401 return 0; 402 }
+4 -7
drivers/net/wireless/rt2x00/rt2x00dev.c
··· 1032 * Initialize the device. 1033 */ 1034 status = rt2x00dev->ops->lib->initialize(rt2x00dev); 1035 - if (status) 1036 - goto exit; 1037 1038 __set_bit(DEVICE_INITIALIZED, &rt2x00dev->flags); 1039 ··· 1045 rt2x00rfkill_register(rt2x00dev); 1046 1047 return 0; 1048 - 1049 - exit: 1050 - rt2x00lib_uninitialize(rt2x00dev); 1051 - 1052 - return status; 1053 } 1054 1055 int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
··· 1032 * Initialize the device. 1033 */ 1034 status = rt2x00dev->ops->lib->initialize(rt2x00dev); 1035 + if (status) { 1036 + rt2x00queue_uninitialize(rt2x00dev); 1037 + return status; 1038 + } 1039 1040 __set_bit(DEVICE_INITIALIZED, &rt2x00dev->flags); 1041 ··· 1043 rt2x00rfkill_register(rt2x00dev); 1044 1045 return 0; 1046 } 1047 1048 int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
+3 -2
drivers/net/wireless/rt2x00/rt2x00pci.c
··· 314 if (status) { 315 ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n", 316 pci_dev->irq, status); 317 - return status; 318 } 319 320 return 0; 321 322 exit: 323 - rt2x00pci_uninitialize(rt2x00dev); 324 325 return status; 326 }
··· 314 if (status) { 315 ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n", 316 pci_dev->irq, status); 317 + goto exit; 318 } 319 320 return 0; 321 322 exit: 323 + queue_for_each(rt2x00dev, queue) 324 + rt2x00pci_free_queue_dma(rt2x00dev, queue); 325 326 return status; 327 }
+11 -20
drivers/net/wireless/rt2x00/rt61pci.c
··· 2366 { 2367 struct rt2x00_dev *rt2x00dev = hw->priv; 2368 struct rt2x00_intf *intf = vif_to_intf(control->vif); 2369 struct skb_frame_desc *skbdesc; 2370 unsigned int beacon_base; 2371 u32 reg; ··· 2374 if (unlikely(!intf->beacon)) 2375 return -ENOBUFS; 2376 2377 - /* 2378 - * We need to append the descriptor in front of the 2379 - * beacon frame. 2380 - */ 2381 - if (skb_headroom(skb) < intf->beacon->queue->desc_size) { 2382 - if (pskb_expand_head(skb, intf->beacon->queue->desc_size, 2383 - 0, GFP_ATOMIC)) 2384 - return -ENOMEM; 2385 - } 2386 - 2387 - /* 2388 - * Add the descriptor in front of the skb. 2389 - */ 2390 - skb_push(skb, intf->beacon->queue->desc_size); 2391 - memset(skb->data, 0, intf->beacon->queue->desc_size); 2392 2393 /* 2394 * Fill in skb descriptor ··· 2383 skbdesc = get_skb_frame_desc(skb); 2384 memset(skbdesc, 0, sizeof(*skbdesc)); 2385 skbdesc->flags |= FRAME_DESC_DRIVER_GENERATED; 2386 - skbdesc->data = skb->data + intf->beacon->queue->desc_size; 2387 - skbdesc->data_len = skb->len - intf->beacon->queue->desc_size; 2388 - skbdesc->desc = skb->data; 2389 skbdesc->desc_len = intf->beacon->queue->desc_size; 2390 skbdesc->entry = intf->beacon; 2391 ··· 2413 */ 2414 beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx); 2415 rt2x00pci_register_multiwrite(rt2x00dev, beacon_base, 2416 - skb->data, skb->len); 2417 rt61pci_kick_tx_queue(rt2x00dev, control->queue); 2418 2419 return 0; ··· 2481 2482 static const struct data_queue_desc rt61pci_queue_bcn = { 2483 .entry_num = 4 * BEACON_ENTRIES, 2484 - .data_size = MGMT_FRAME_SIZE, 2485 .desc_size = TXINFO_SIZE, 2486 .priv_size = sizeof(struct queue_entry_priv_pci_tx), 2487 };
··· 2366 { 2367 struct rt2x00_dev *rt2x00dev = hw->priv; 2368 struct rt2x00_intf *intf = vif_to_intf(control->vif); 2369 + struct queue_entry_priv_pci_tx *priv_tx; 2370 struct skb_frame_desc *skbdesc; 2371 unsigned int beacon_base; 2372 u32 reg; ··· 2373 if (unlikely(!intf->beacon)) 2374 return -ENOBUFS; 2375 2376 + priv_tx = intf->beacon->priv_data; 2377 + memset(priv_tx->desc, 0, intf->beacon->queue->desc_size); 2378 2379 /* 2380 * Fill in skb descriptor ··· 2395 skbdesc = get_skb_frame_desc(skb); 2396 memset(skbdesc, 0, sizeof(*skbdesc)); 2397 skbdesc->flags |= FRAME_DESC_DRIVER_GENERATED; 2398 + skbdesc->data = skb->data; 2399 + skbdesc->data_len = skb->len; 2400 + skbdesc->desc = priv_tx->desc; 2401 skbdesc->desc_len = intf->beacon->queue->desc_size; 2402 skbdesc->entry = intf->beacon; 2403 ··· 2425 */ 2426 beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx); 2427 rt2x00pci_register_multiwrite(rt2x00dev, beacon_base, 2428 + skbdesc->desc, skbdesc->desc_len); 2429 + rt2x00pci_register_multiwrite(rt2x00dev, 2430 + beacon_base + skbdesc->desc_len, 2431 + skbdesc->data, skbdesc->data_len); 2432 rt61pci_kick_tx_queue(rt2x00dev, control->queue); 2433 2434 return 0; ··· 2490 2491 static const struct data_queue_desc rt61pci_queue_bcn = { 2492 .entry_num = 4 * BEACON_ENTRIES, 2493 + .data_size = 0, /* No DMA required for beacons */ 2494 .desc_size = TXINFO_SIZE, 2495 .priv_size = sizeof(struct queue_entry_priv_pci_tx), 2496 };
+2 -2
drivers/net/wireless/wavelan.c
··· 908 p->psa_call_code[3], p->psa_call_code[4], p->psa_call_code[5], 909 p->psa_call_code[6], p->psa_call_code[7]); 910 #ifdef DEBUG_SHOW_UNUSED 911 - printk(KERN_DEBUG "psa_reserved[]: %02X:%02X:%02X:%02X\n", 912 p->psa_reserved[0], 913 - p->psa_reserved[1], p->psa_reserved[2], p->psa_reserved[3]); 914 #endif /* DEBUG_SHOW_UNUSED */ 915 printk(KERN_DEBUG "psa_conf_status: %d, ", p->psa_conf_status); 916 printk("psa_crc: 0x%02x%02x, ", p->psa_crc[0], p->psa_crc[1]);
··· 908 p->psa_call_code[3], p->psa_call_code[4], p->psa_call_code[5], 909 p->psa_call_code[6], p->psa_call_code[7]); 910 #ifdef DEBUG_SHOW_UNUSED 911 + printk(KERN_DEBUG "psa_reserved[]: %02X:%02X\n", 912 p->psa_reserved[0], 913 + p->psa_reserved[1]); 914 #endif /* DEBUG_SHOW_UNUSED */ 915 printk(KERN_DEBUG "psa_conf_status: %d, ", p->psa_conf_status); 916 printk("psa_crc: 0x%02x%02x, ", p->psa_crc[0], p->psa_crc[1]);
+2 -4
drivers/net/wireless/wavelan_cs.c
··· 1074 p->psa_call_code[6], 1075 p->psa_call_code[7]); 1076 #ifdef DEBUG_SHOW_UNUSED 1077 - printk(KERN_DEBUG "psa_reserved[]: %02X:%02X:%02X:%02X\n", 1078 p->psa_reserved[0], 1079 - p->psa_reserved[1], 1080 - p->psa_reserved[2], 1081 - p->psa_reserved[3]); 1082 #endif /* DEBUG_SHOW_UNUSED */ 1083 printk(KERN_DEBUG "psa_conf_status: %d, ", p->psa_conf_status); 1084 printk("psa_crc: 0x%02x%02x, ", p->psa_crc[0], p->psa_crc[1]);
··· 1074 p->psa_call_code[6], 1075 p->psa_call_code[7]); 1076 #ifdef DEBUG_SHOW_UNUSED 1077 + printk(KERN_DEBUG "psa_reserved[]: %02X:%02X\n", 1078 p->psa_reserved[0], 1079 + p->psa_reserved[1]); 1080 #endif /* DEBUG_SHOW_UNUSED */ 1081 printk(KERN_DEBUG "psa_conf_status: %d, ", p->psa_conf_status); 1082 printk("psa_crc: 0x%02x%02x, ", p->psa_crc[0], p->psa_crc[1]);
+5 -1
drivers/net/wireless/zd1211rw/zd_usb.c
··· 889 } 890 free_urb: 891 skb = (struct sk_buff *)urb->context; 892 - zd_mac_tx_to_dev(skb, urb->status); 893 cb = (struct zd_tx_skb_control_block *)skb->cb; 894 usb = &zd_hw_mac(cb->hw)->chip.usb; 895 free_tx_urb(usb, urb); 896 tx_dec_submitted_urbs(usb); 897 return;
··· 889 } 890 free_urb: 891 skb = (struct sk_buff *)urb->context; 892 + /* 893 + * grab 'usb' pointer before handing off the skb (since 894 + * it might be freed by zd_mac_tx_to_dev or mac80211) 895 + */ 896 cb = (struct zd_tx_skb_control_block *)skb->cb; 897 usb = &zd_hw_mac(cb->hw)->chip.usb; 898 + zd_mac_tx_to_dev(skb, urb->status); 899 free_tx_urb(usb, urb); 900 tx_dec_submitted_urbs(usb); 901 return;
+26 -9
include/linux/netdevice.h
··· 93 * used. 94 */ 95 96 - #if !defined(CONFIG_AX25) && !defined(CONFIG_AX25_MODULE) && !defined(CONFIG_TR) 97 - #define LL_MAX_HEADER 32 98 #else 99 - #if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) 100 - #define LL_MAX_HEADER 96 101 - #else 102 - #define LL_MAX_HEADER 48 103 - #endif 104 #endif 105 106 #if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \ ··· 246 * 247 * We could use other alignment values, but we must maintain the 248 * relationship HH alignment <= LL alignment. 249 */ 250 #define LL_RESERVED_SPACE(dev) \ 251 - (((dev)->hard_header_len&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) 252 #define LL_RESERVED_SPACE_EXTRA(dev,extra) \ 253 - ((((dev)->hard_header_len+extra)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) 254 255 struct header_ops { 256 int (*create) (struct sk_buff *skb, struct net_device *dev, ··· 574 unsigned short type; /* interface hardware type */ 575 unsigned short hard_header_len; /* hardware hdr length */ 576 577 struct net_device *master; /* Pointer to master device of a group, 578 * which this device is member of. 579 */ ··· 728 /* Network namespace this network device is inside */ 729 struct net *nd_net; 730 #endif 731 732 /* bridge stuff */ 733 struct net_bridge_port *br_port;
··· 93 * used. 94 */ 95 96 + #if defined(CONFIG_WLAN_80211) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) 97 + # if defined(CONFIG_MAC80211_MESH) 98 + # define LL_MAX_HEADER 128 99 + # else 100 + # define LL_MAX_HEADER 96 101 + # endif 102 + #elif defined(CONFIG_TR) 103 + # define LL_MAX_HEADER 48 104 #else 105 + # define LL_MAX_HEADER 32 106 #endif 107 108 #if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \ ··· 244 * 245 * We could use other alignment values, but we must maintain the 246 * relationship HH alignment <= LL alignment. 247 + * 248 + * LL_ALLOCATED_SPACE also takes into account the tailroom the device 249 + * may need. 250 */ 251 #define LL_RESERVED_SPACE(dev) \ 252 + ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) 253 #define LL_RESERVED_SPACE_EXTRA(dev,extra) \ 254 + ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) 255 + #define LL_ALLOCATED_SPACE(dev) \ 256 + ((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) 257 258 struct header_ops { 259 int (*create) (struct sk_buff *skb, struct net_device *dev, ··· 567 unsigned short type; /* interface hardware type */ 568 unsigned short hard_header_len; /* hardware hdr length */ 569 570 + /* extra head- and tailroom the hardware may need, but not in all cases 571 + * can this be guaranteed, especially tailroom. Some cases also use 572 + * LL_MAX_HEADER instead to allocate the skb. 573 + */ 574 + unsigned short needed_headroom; 575 + unsigned short needed_tailroom; 576 + 577 struct net_device *master; /* Pointer to master device of a group, 578 * which this device is member of. 579 */ ··· 714 /* Network namespace this network device is inside */ 715 struct net *nd_net; 716 #endif 717 + 718 + /* mid-layer private */ 719 + void *ml_priv; 720 721 /* bridge stuff */ 722 struct net_bridge_port *br_port;
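The intended use of the new macro, which the call-site conversions further down follow, is to size the allocation with LL_ALLOCATED_SPACE() (headroom plus tailroom) while still reserving only LL_RESERVED_SPACE() worth of headroom before building the frame. A minimal hedged sketch (hypothetical send-path helper, kernel context assumed):

/* Allocate so the device's needed headroom and tailroom both fit, but
 * reserve only the headroom; whatever the device needs at the tail is
 * left as tailroom behind the payload. */
static struct sk_buff *sketch_alloc_for_dev(struct net_device *dev,
					    unsigned int payload_len)
{
	struct sk_buff *skb;

	skb = alloc_skb(payload_len + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (!skb)
		return NULL;
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_put(skb, payload_len);
	return skb;
}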
-3
include/net/irda/discovery.h
··· 57 __u8 byte[2]; 58 } __u16_host_order; 59 60 - /* Same purpose, different application */ 61 - #define u16ho(array) (* ((__u16 *) array)) 62 - 63 /* Types of discovery */ 64 typedef enum { 65 DISCOVERY_LOG, /* What's in our discovery log */
··· 57 __u8 byte[2]; 58 } __u16_host_order; 59 60 /* Types of discovery */ 61 typedef enum { 62 DISCOVERY_LOG, /* What's in our discovery log */
+1 -1
include/net/syncppp.h
··· 59 60 static inline struct sppp *sppp_of(struct net_device *dev) 61 { 62 - struct ppp_device **ppp = dev->priv; 63 BUG_ON((*ppp)->dev != dev); 64 return &(*ppp)->sppp; 65 }
··· 59 60 static inline struct sppp *sppp_of(struct net_device *dev) 61 { 62 + struct ppp_device **ppp = dev->ml_priv; 63 BUG_ON((*ppp)->dev != dev); 64 return &(*ppp)->sppp; 65 }
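Because sppp_of() now follows dev->ml_priv and BUG()s if the pointer it finds does not lead back to the device, each syncppp user has to publish that pointer before the device is used; the sealevel hunk above shows the canonical ordering. A hedged sketch of the same layout (illustrative driver fields, not taken from any of the converted drivers):

/* ml_priv must point at storage whose first word is the struct
 * ppp_device pointer that sppp_of() dereferences. */
struct sketch_priv {
	void *if_ptr;			/* must remain the first member */
	struct ppp_device pppdev;
	/* ... driver-specific state ... */
};

static void sketch_attach(struct net_device *dev, struct sketch_priv *priv)
{
	priv->if_ptr = &priv->pppdev;
	priv->pppdev.dev = dev;
	dev->ml_priv = priv;		/* set before sppp_attach()/register_netdev() */
	sppp_attach(&priv->pppdev);
}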
+1 -1
net/core/netpoll.c
··· 419 return; 420 421 size = arp_hdr_len(skb->dev); 422 - send_skb = find_skb(np, size + LL_RESERVED_SPACE(np->dev), 423 LL_RESERVED_SPACE(np->dev)); 424 425 if (!send_skb)
··· 419 return; 420 421 size = arp_hdr_len(skb->dev); 422 + send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev), 423 LL_RESERVED_SPACE(np->dev)); 424 425 if (!send_skb)
+1 -1
net/core/sock.c
··· 270 int err = 0; 271 int skb_len; 272 273 - /* Cast skb->rcvbuf to unsigned... It's pointless, but reduces 274 number of warnings when compiling with -W --ANK 275 */ 276 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
··· 270 int err = 0; 271 int skb_len; 272 273 + /* Cast sk->rcvbuf to unsigned... It's pointless, but reduces 274 number of warnings when compiling with -W --ANK 275 */ 276 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+1 -1
net/econet/af_econet.c
··· 340 341 dev_hold(dev); 342 343 - skb = sock_alloc_send_skb(sk, len+LL_RESERVED_SPACE(dev), 344 msg->msg_flags & MSG_DONTWAIT, &err); 345 if (skb==NULL) 346 goto out_unlock;
··· 340 341 dev_hold(dev); 342 343 + skb = sock_alloc_send_skb(sk, len+LL_ALLOCATED_SPACE(dev), 344 msg->msg_flags & MSG_DONTWAIT, &err); 345 if (skb==NULL) 346 goto out_unlock;
+1 -1
net/ipv4/arp.c
··· 570 * Allocate a buffer 571 */ 572 573 - skb = alloc_skb(arp_hdr_len(dev) + LL_RESERVED_SPACE(dev), GFP_ATOMIC); 574 if (skb == NULL) 575 return NULL; 576
··· 570 * Allocate a buffer 571 */ 572 573 + skb = alloc_skb(arp_hdr_len(dev) + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC); 574 if (skb == NULL) 575 return NULL; 576
+2 -2
net/ipv4/cipso_ipv4.c
··· 338 return -ENOENT; 339 340 hash = cipso_v4_map_cache_hash(key, key_len); 341 - bkt = hash & (CIPSO_V4_CACHE_BUCKETBITS - 1); 342 spin_lock_bh(&cipso_v4_cache[bkt].lock); 343 list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) { 344 if (entry->hash == hash && ··· 417 atomic_inc(&secattr->cache->refcount); 418 entry->lsm_data = secattr->cache; 419 420 - bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETBITS - 1); 421 spin_lock_bh(&cipso_v4_cache[bkt].lock); 422 if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) { 423 list_add(&entry->list, &cipso_v4_cache[bkt].list);
··· 338 return -ENOENT; 339 340 hash = cipso_v4_map_cache_hash(key, key_len); 341 + bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1); 342 spin_lock_bh(&cipso_v4_cache[bkt].lock); 343 list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) { 344 if (entry->hash == hash && ··· 417 atomic_inc(&secattr->cache->refcount); 418 entry->lsm_data = secattr->cache; 419 420 + bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1); 421 spin_lock_bh(&cipso_v4_cache[bkt].lock); 422 if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) { 423 list_add(&entry->list, &cipso_v4_cache[bkt].list);
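The fix matters because masking with the bit count instead of the bucket count collapses the table: any hash ANDed with (CIPSO_V4_CACHE_BUCKETBITS - 1) can only land in a handful of even-numbered slots, while ANDing with (CIPSO_V4_CACHE_BUCKETS - 1) indexes the whole table. A small worked example in plain userspace C; the two defines are assumed to be 7 and 1 << 7 since they are not shown in this hunk:

#include <stdio.h>

/* Assumed values; the real defines live elsewhere in cipso_ipv4.c. */
#define CIPSO_V4_CACHE_BUCKETBITS 7
#define CIPSO_V4_CACHE_BUCKETS (1 << CIPSO_V4_CACHE_BUCKETBITS)

int main(void)
{
	unsigned int hash = 0x9e3779b9;	/* arbitrary hash value */

	/* Old mask: hash & 6, so only buckets 0, 2, 4 and 6 are reachable. */
	printf("old bucket: %u\n", hash & (CIPSO_V4_CACHE_BUCKETBITS - 1));
	/* New mask: hash & 127, so all 128 buckets are reachable. */
	printf("new bucket: %u\n", hash & (CIPSO_V4_CACHE_BUCKETS - 1));
	return 0;
}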
+2 -2
net/ipv4/igmp.c
··· 292 struct iphdr *pip; 293 struct igmpv3_report *pig; 294 295 - skb = alloc_skb(size + LL_RESERVED_SPACE(dev), GFP_ATOMIC); 296 if (skb == NULL) 297 return NULL; 298 ··· 653 return -1; 654 } 655 656 - skb=alloc_skb(IGMP_SIZE+LL_RESERVED_SPACE(dev), GFP_ATOMIC); 657 if (skb == NULL) { 658 ip_rt_put(rt); 659 return -1;
··· 292 struct iphdr *pip; 293 struct igmpv3_report *pig; 294 295 + skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC); 296 if (skb == NULL) 297 return NULL; 298 ··· 653 return -1; 654 } 655 656 + skb=alloc_skb(IGMP_SIZE+LL_ALLOCATED_SPACE(dev), GFP_ATOMIC); 657 if (skb == NULL) { 658 ip_rt_put(rt); 659 return -1;
+3 -3
net/ipv4/ipconfig.c
··· 710 struct net_device *dev = d->dev; 711 struct sk_buff *skb; 712 struct bootp_pkt *b; 713 - int hh_len = LL_RESERVED_SPACE(dev); 714 struct iphdr *h; 715 716 /* Allocate packet */ 717 - skb = alloc_skb(sizeof(struct bootp_pkt) + hh_len + 15, GFP_KERNEL); 718 if (!skb) 719 return; 720 - skb_reserve(skb, hh_len); 721 b = (struct bootp_pkt *) skb_put(skb, sizeof(struct bootp_pkt)); 722 memset(b, 0, sizeof(struct bootp_pkt)); 723
··· 710 struct net_device *dev = d->dev; 711 struct sk_buff *skb; 712 struct bootp_pkt *b; 713 struct iphdr *h; 714 715 /* Allocate packet */ 716 + skb = alloc_skb(sizeof(struct bootp_pkt) + LL_ALLOCATED_SPACE(dev) + 15, 717 + GFP_KERNEL); 718 if (!skb) 719 return; 720 + skb_reserve(skb, LL_RESERVED_SPACE(dev)); 721 b = (struct bootp_pkt *) skb_put(skb, sizeof(struct bootp_pkt)); 722 memset(b, 0, sizeof(struct bootp_pkt)); 723
+4 -6
net/ipv4/raw.c
··· 322 unsigned int flags) 323 { 324 struct inet_sock *inet = inet_sk(sk); 325 - int hh_len; 326 struct iphdr *iph; 327 struct sk_buff *skb; 328 unsigned int iphlen; ··· 335 if (flags&MSG_PROBE) 336 goto out; 337 338 - hh_len = LL_RESERVED_SPACE(rt->u.dst.dev); 339 - 340 - skb = sock_alloc_send_skb(sk, length+hh_len+15, 341 - flags&MSG_DONTWAIT, &err); 342 if (skb == NULL) 343 goto error; 344 - skb_reserve(skb, hh_len); 345 346 skb->priority = sk->sk_priority; 347 skb->mark = sk->sk_mark;
··· 322 unsigned int flags) 323 { 324 struct inet_sock *inet = inet_sk(sk); 325 struct iphdr *iph; 326 struct sk_buff *skb; 327 unsigned int iphlen; ··· 336 if (flags&MSG_PROBE) 337 goto out; 338 339 + skb = sock_alloc_send_skb(sk, 340 + length + LL_ALLOCATED_SPACE(rt->u.dst.dev) + 15, 341 + flags & MSG_DONTWAIT, &err); 342 if (skb == NULL) 343 goto error; 344 + skb_reserve(skb, LL_RESERVED_SPACE(rt->u.dst.dev)); 345 346 skb->priority = sk->sk_priority; 347 skb->mark = sk->sk_mark;
+12 -5
net/ipv4/tcp_input.c
··· 1842 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 1843 } 1844 1845 - /* Don't lost mark skbs that were fwd transmitted after RTO */ 1846 - if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) && 1847 - !after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark)) { 1848 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 1849 tp->lost_out += tcp_skb_pcount(skb); 1850 } ··· 1867 tp->reordering = min_t(unsigned int, tp->reordering, 1868 sysctl_tcp_reordering); 1869 tcp_set_ca_state(sk, TCP_CA_Loss); 1870 - tp->high_seq = tp->frto_highmark; 1871 TCP_ECN_queue_cwr(tp); 1872 1873 tcp_clear_retrans_hints_partial(tp); ··· 2489 2490 tcp_verify_left_out(tp); 2491 2492 - if (tp->retrans_out == 0) 2493 tp->retrans_stamp = 0; 2494 2495 if (flag & FLAG_ECE)
··· 1842 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 1843 } 1844 1845 + /* Marking forward transmissions that were made after RTO lost 1846 + * can cause unnecessary retransmissions in some scenarios, 1847 + * SACK blocks will mitigate that in some but not in all cases. 1848 + * We used to not mark them but it was causing break-ups with 1849 + * receivers that do only in-order receival. 1850 + * 1851 + * TODO: we could detect presence of such receiver and select 1852 + * different behavior per flow. 1853 + */ 1854 + if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) { 1855 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 1856 tp->lost_out += tcp_skb_pcount(skb); 1857 } ··· 1860 tp->reordering = min_t(unsigned int, tp->reordering, 1861 sysctl_tcp_reordering); 1862 tcp_set_ca_state(sk, TCP_CA_Loss); 1863 + tp->high_seq = tp->snd_nxt; 1864 TCP_ECN_queue_cwr(tp); 1865 1866 tcp_clear_retrans_hints_partial(tp); ··· 2482 2483 tcp_verify_left_out(tp); 2484 2485 + if (!tp->frto_counter && tp->retrans_out == 0) 2486 tp->retrans_stamp = 0; 2487 2488 if (flag & FLAG_ECE)
+1 -1
net/ipv6/ip6_output.c
··· 780 * Allocate buffer. 781 */ 782 783 - if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_RESERVED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) { 784 NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n"); 785 IP6_INC_STATS(ip6_dst_idev(skb->dst), 786 IPSTATS_MIB_FRAGFAILS);
··· 780 * Allocate buffer. 781 */ 782 783 + if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) { 784 NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n"); 785 IP6_INC_STATS(ip6_dst_idev(skb->dst), 786 IPSTATS_MIB_FRAGFAILS);
+2 -2
net/ipv6/mcast.c
··· 1411 IPV6_TLV_PADN, 0 }; 1412 1413 /* we assume size > sizeof(ra) here */ 1414 - skb = sock_alloc_send_skb(sk, size + LL_RESERVED_SPACE(dev), 1, &err); 1415 1416 if (!skb) 1417 return NULL; ··· 1790 payload_len = len + sizeof(ra); 1791 full_len = sizeof(struct ipv6hdr) + payload_len; 1792 1793 - skb = sock_alloc_send_skb(sk, LL_RESERVED_SPACE(dev) + full_len, 1, &err); 1794 1795 if (skb == NULL) { 1796 rcu_read_lock();
··· 1411 IPV6_TLV_PADN, 0 }; 1412 1413 /* we assume size > sizeof(ra) here */ 1414 + skb = sock_alloc_send_skb(sk, size + LL_ALLOCATED_SPACE(dev), 1, &err); 1415 1416 if (!skb) 1417 return NULL; ··· 1790 payload_len = len + sizeof(ra); 1791 full_len = sizeof(struct ipv6hdr) + payload_len; 1792 1793 + skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + full_len, 1, &err); 1794 1795 if (skb == NULL) { 1796 rcu_read_lock();
+2 -2
net/ipv6/ndisc.c
··· 479 480 skb = sock_alloc_send_skb(sk, 481 (MAX_HEADER + sizeof(struct ipv6hdr) + 482 - len + LL_RESERVED_SPACE(dev)), 483 1, &err); 484 if (!skb) { 485 ND_PRINTK0(KERN_ERR ··· 1521 1522 buff = sock_alloc_send_skb(sk, 1523 (MAX_HEADER + sizeof(struct ipv6hdr) + 1524 - len + LL_RESERVED_SPACE(dev)), 1525 1, &err); 1526 if (buff == NULL) { 1527 ND_PRINTK0(KERN_ERR
··· 479 480 skb = sock_alloc_send_skb(sk, 481 (MAX_HEADER + sizeof(struct ipv6hdr) + 482 + len + LL_ALLOCATED_SPACE(dev)), 483 1, &err); 484 if (!skb) { 485 ND_PRINTK0(KERN_ERR ··· 1521 1522 buff = sock_alloc_send_skb(sk, 1523 (MAX_HEADER + sizeof(struct ipv6hdr) + 1524 + len + LL_ALLOCATED_SPACE(dev)), 1525 1, &err); 1526 if (buff == NULL) { 1527 ND_PRINTK0(KERN_ERR
+4 -6
net/ipv6/raw.c
··· 609 struct ipv6_pinfo *np = inet6_sk(sk); 610 struct ipv6hdr *iph; 611 struct sk_buff *skb; 612 - unsigned int hh_len; 613 int err; 614 615 if (length > rt->u.dst.dev->mtu) { ··· 618 if (flags&MSG_PROBE) 619 goto out; 620 621 - hh_len = LL_RESERVED_SPACE(rt->u.dst.dev); 622 - 623 - skb = sock_alloc_send_skb(sk, length+hh_len+15, 624 - flags&MSG_DONTWAIT, &err); 625 if (skb == NULL) 626 goto error; 627 - skb_reserve(skb, hh_len); 628 629 skb->priority = sk->sk_priority; 630 skb->mark = sk->sk_mark;
··· 609 struct ipv6_pinfo *np = inet6_sk(sk); 610 struct ipv6hdr *iph; 611 struct sk_buff *skb; 612 int err; 613 614 if (length > rt->u.dst.dev->mtu) { ··· 619 if (flags&MSG_PROBE) 620 goto out; 621 622 + skb = sock_alloc_send_skb(sk, 623 + length + LL_ALLOCATED_SPACE(rt->u.dst.dev) + 15, 624 + flags & MSG_DONTWAIT, &err); 625 if (skb == NULL) 626 goto error; 627 + skb_reserve(skb, LL_RESERVED_SPACE(rt->u.dst.dev)); 628 629 skb->priority = sk->sk_priority; 630 skb->mark = sk->sk_mark;
+5 -3
net/irda/discovery.c
··· 40 41 #include <net/irda/discovery.h> 42 43 /* 44 * Function irlmp_add_discovery (cachelog, discovery) 45 * ··· 89 */ 90 hashbin_remove_this(cachelog, (irda_queue_t *) node); 91 /* Check if hints bits are unchanged */ 92 - if(u16ho(node->data.hints) == u16ho(new->data.hints)) 93 /* Set time of first discovery for this node */ 94 new->firststamp = node->firststamp; 95 kfree(node); ··· 283 /* Mask out the ones we don't want : 284 * We want to match the discovery mask, and to get only 285 * the most recent one (unless we want old ones) */ 286 - if ((u16ho(discovery->data.hints) & mask) && 287 ((old_entries) || 288 - ((jiffies - discovery->firststamp) < j_timeout)) ) { 289 /* Create buffer as needed. 290 * As this function get called a lot and most time 291 * we don't have anything to put in the log (we are
··· 40 41 #include <net/irda/discovery.h> 42 43 + #include <asm/unaligned.h> 44 + 45 /* 46 * Function irlmp_add_discovery (cachelog, discovery) 47 * ··· 87 */ 88 hashbin_remove_this(cachelog, (irda_queue_t *) node); 89 /* Check if hints bits are unchanged */ 90 + if (get_unaligned((__u16 *)node->data.hints) == get_unaligned((__u16 *)new->data.hints)) 91 /* Set time of first discovery for this node */ 92 new->firststamp = node->firststamp; 93 kfree(node); ··· 281 /* Mask out the ones we don't want : 282 * We want to match the discovery mask, and to get only 283 * the most recent one (unless we want old ones) */ 284 + if ((get_unaligned((__u16 *)discovery->data.hints) & mask) && 285 ((old_entries) || 286 + ((jiffies - discovery->firststamp) < j_timeout))) { 287 /* Create buffer as needed. 288 * As this function get called a lot and most time 289 * we don't have anything to put in the log (we are
+3 -2
net/irda/irlmp.c
··· 1062 for(i = 0; i < number; i++) { 1063 /* Check if we should notify client */ 1064 if ((client->expir_callback) && 1065 - (client->hint_mask.word & u16ho(expiries[i].hints) 1066 & 0x7f7f) ) 1067 client->expir_callback(&(expiries[i]), 1068 EXPIRY_TIMEOUT, ··· 1087 1088 IRDA_ASSERT(irlmp != NULL, return NULL;); 1089 1090 - u16ho(irlmp->discovery_rsp.data.hints) = irlmp->hints.word; 1091 1092 /* 1093 * Set character set for device name (we use ASCII), and
··· 1062 for(i = 0; i < number; i++) { 1063 /* Check if we should notify client */ 1064 if ((client->expir_callback) && 1065 + (client->hint_mask.word & 1066 + get_unaligned((__u16 *)expiries[i].hints) 1067 & 0x7f7f) ) 1068 client->expir_callback(&(expiries[i]), 1069 EXPIRY_TIMEOUT, ··· 1086 1087 IRDA_ASSERT(irlmp != NULL, return NULL;); 1088 1089 + put_unaligned(irlmp->hints.word, (__u16 *)irlmp->discovery_rsp.data.hints); 1090 1091 /* 1092 * Set character set for device name (we use ASCII), and
+2 -2
net/irda/irnet/irnet_irda.c
··· 1673 /* Notify the control channel */ 1674 irnet_post_event(NULL, IRNET_DISCOVER, 1675 discovery->saddr, discovery->daddr, discovery->info, 1676 - u16ho(discovery->hints)); 1677 1678 DEXIT(IRDA_OCB_TRACE, "\n"); 1679 } ··· 1704 /* Notify the control channel */ 1705 irnet_post_event(NULL, IRNET_EXPIRE, 1706 expiry->saddr, expiry->daddr, expiry->info, 1707 - u16ho(expiry->hints)); 1708 1709 DEXIT(IRDA_OCB_TRACE, "\n"); 1710 }
··· 1673 /* Notify the control channel */ 1674 irnet_post_event(NULL, IRNET_DISCOVER, 1675 discovery->saddr, discovery->daddr, discovery->info, 1676 + get_unaligned((__u16 *)discovery->hints)); 1677 1678 DEXIT(IRDA_OCB_TRACE, "\n"); 1679 } ··· 1704 /* Notify the control channel */ 1705 irnet_post_event(NULL, IRNET_EXPIRE, 1706 expiry->saddr, expiry->daddr, expiry->info, 1707 + get_unaligned((__u16 *)expiry->hints)); 1708 1709 DEXIT(IRDA_OCB_TRACE, "\n"); 1710 }
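The u16ho() macro removed from discovery.h above cast the two-byte hints array straight to a __u16, an unaligned access whenever the array sits at an odd offset; get_unaligned()/put_unaligned() perform the same 16-bit load and store safely on strict-alignment machines. A hedged sketch of the substitution (illustrative structure, kernel context with <asm/unaligned.h> assumed):

#include <asm/unaligned.h>

struct sketch_discovery {
	__u8 charset;		/* forces hints[] onto an odd offset */
	__u8 hints[2];		/* service hint bits */
};

static inline __u16 sketch_get_hints(struct sketch_discovery *d)
{
	return get_unaligned((__u16 *)d->hints);	/* was: u16ho(d->hints) */
}

static inline void sketch_set_hints(struct sketch_discovery *d, __u16 word)
{
	put_unaligned(word, (__u16 *)d->hints);	/* was: u16ho(d->hints) = word */
}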
+12 -3
net/mac80211/debugfs_key.c
··· 255 void ieee80211_debugfs_key_add_default(struct ieee80211_sub_if_data *sdata) 256 { 257 char buf[50]; 258 259 if (!sdata->debugfsdir) 260 return; 261 262 - sprintf(buf, "../keys/%d", sdata->default_key->debugfs.cnt); 263 - sdata->debugfs.default_key = 264 - debugfs_create_symlink("default_key", sdata->debugfsdir, buf); 265 } 266 void ieee80211_debugfs_key_remove_default(struct ieee80211_sub_if_data *sdata) 267 { 268 if (!sdata)
··· 255 void ieee80211_debugfs_key_add_default(struct ieee80211_sub_if_data *sdata) 256 { 257 char buf[50]; 258 + struct ieee80211_key *key; 259 260 if (!sdata->debugfsdir) 261 return; 262 263 + /* this is running under the key lock */ 264 + 265 + key = sdata->default_key; 266 + if (key) { 267 + sprintf(buf, "../keys/%d", key->debugfs.cnt); 268 + sdata->debugfs.default_key = 269 + debugfs_create_symlink("default_key", 270 + sdata->debugfsdir, buf); 271 + } else 272 + ieee80211_debugfs_key_remove_default(sdata); 273 } 274 + 275 void ieee80211_debugfs_key_remove_default(struct ieee80211_sub_if_data *sdata) 276 { 277 if (!sdata)
+9
net/mac80211/iface.c
··· 54 if (!ndev) 55 return -ENOMEM; 56 57 ret = dev_alloc_name(ndev, ndev->name); 58 if (ret < 0) 59 goto fail;
··· 54 if (!ndev) 55 return -ENOMEM; 56 57 + ndev->needed_headroom = local->tx_headroom + 58 + 4*6 /* four MAC addresses */ 59 + + 2 + 2 + 2 + 2 /* ctl, dur, seq, qos */ 60 + + 6 /* mesh */ 61 + + 8 /* rfc1042/bridge tunnel */ 62 + - ETH_HLEN /* ethernet hard_header_len */ 63 + + IEEE80211_ENCRYPT_HEADROOM; 64 + ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM; 65 + 66 ret = dev_alloc_name(ndev, ndev->name); 67 if (ret < 0) 68 goto fail;
+1 -1
net/mac80211/mesh.c
··· 397 put_unaligned(cpu_to_le32(sdata->u.sta.mesh_seqnum), &meshhdr->seqnum); 398 sdata->u.sta.mesh_seqnum++; 399 400 - return 5; 401 } 402 403 void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
··· 397 put_unaligned(cpu_to_le32(sdata->u.sta.mesh_seqnum), &meshhdr->seqnum); 398 sdata->u.sta.mesh_seqnum++; 399 400 + return 6; 401 } 402 403 void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
+1 -1
net/mac80211/mesh_hwmp.c
··· 120 *pos++ = WLAN_EID_PREP; 121 break; 122 default: 123 - kfree(skb); 124 return -ENOTSUPP; 125 break; 126 }
··· 120 *pos++ = WLAN_EID_PREP; 121 break; 122 default: 123 + kfree_skb(skb); 124 return -ENOTSUPP; 125 break; 126 }
+12 -5
net/mac80211/mesh_pathtbl.c
··· 158 if (atomic_add_unless(&sdata->u.sta.mpaths, 1, MESH_MAX_MPATHS) == 0) 159 return -ENOSPC; 160 161 - read_lock(&pathtbl_resize_lock); 162 - 163 new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL); 164 if (!new_mpath) { 165 atomic_dec(&sdata->u.sta.mpaths); 166 err = -ENOMEM; 167 goto endadd2; 168 } 169 memcpy(new_mpath->dst, dst, ETH_ALEN); 170 new_mpath->dev = dev; 171 new_mpath->flags = 0; 172 skb_queue_head_init(&new_mpath->frame_queue); 173 - new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL); 174 new_node->mpath = new_mpath; 175 new_mpath->timer.data = (unsigned long) new_mpath; 176 new_mpath->timer.function = mesh_path_timer; ··· 208 209 endadd: 210 spin_unlock(&mesh_paths->hashwlock[hash_idx]); 211 - endadd2: 212 read_unlock(&pathtbl_resize_lock); 213 if (!err && grow) { 214 struct mesh_table *oldtbl, *newtbl; ··· 220 return -ENOMEM; 221 } 222 rcu_assign_pointer(mesh_paths, newtbl); 223 synchronize_rcu(); 224 mesh_table_free(oldtbl, false); 225 - write_unlock(&pathtbl_resize_lock); 226 } 227 return err; 228 } 229
··· 158 if (atomic_add_unless(&sdata->u.sta.mpaths, 1, MESH_MAX_MPATHS) == 0) 159 return -ENOSPC; 160 161 new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL); 162 if (!new_mpath) { 163 atomic_dec(&sdata->u.sta.mpaths); 164 err = -ENOMEM; 165 goto endadd2; 166 } 167 + new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL); 168 + if (!new_node) { 169 + kfree(new_mpath); 170 + atomic_dec(&sdata->u.sta.mpaths); 171 + err = -ENOMEM; 172 + goto endadd2; 173 + } 174 + 175 + read_lock(&pathtbl_resize_lock); 176 memcpy(new_mpath->dst, dst, ETH_ALEN); 177 new_mpath->dev = dev; 178 new_mpath->flags = 0; 179 skb_queue_head_init(&new_mpath->frame_queue); 180 new_node->mpath = new_mpath; 181 new_mpath->timer.data = (unsigned long) new_mpath; 182 new_mpath->timer.function = mesh_path_timer; ··· 202 203 endadd: 204 spin_unlock(&mesh_paths->hashwlock[hash_idx]); 205 read_unlock(&pathtbl_resize_lock); 206 if (!err && grow) { 207 struct mesh_table *oldtbl, *newtbl; ··· 215 return -ENOMEM; 216 } 217 rcu_assign_pointer(mesh_paths, newtbl); 218 + write_unlock(&pathtbl_resize_lock); 219 + 220 synchronize_rcu(); 221 mesh_table_free(oldtbl, false); 222 } 223 + endadd2: 224 return err; 225 } 226
+52 -16
net/mac80211/mlme.c
··· 665 mod_timer(&ifsta->timer, jiffies + IEEE80211_AUTH_TIMEOUT); 666 } 667 668 669 static void ieee80211_send_assoc(struct net_device *dev, 670 struct ieee80211_if_sta *ifsta) ··· 693 struct sk_buff *skb; 694 struct ieee80211_mgmt *mgmt; 695 u8 *pos, *ies; 696 - int i, len; 697 u16 capab; 698 struct ieee80211_sta_bss *bss; 699 int wmm = 0; 700 struct ieee80211_supported_band *sband; 701 702 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 703 sizeof(*mgmt) + 200 + ifsta->extra_ie_len + ··· 761 *pos++ = ifsta->ssid_len; 762 memcpy(pos, ifsta->ssid, ifsta->ssid_len); 763 764 - len = sband->n_bitrates; 765 - if (len > 8) 766 - len = 8; 767 - pos = skb_put(skb, len + 2); 768 - *pos++ = WLAN_EID_SUPP_RATES; 769 - *pos++ = len; 770 - for (i = 0; i < len; i++) { 771 - int rate = sband->bitrates[i].bitrate; 772 - *pos++ = (u8) (rate / 5); 773 - } 774 775 - if (sband->n_bitrates > len) { 776 - pos = skb_put(skb, sband->n_bitrates - len + 2); 777 - *pos++ = WLAN_EID_EXT_SUPP_RATES; 778 - *pos++ = sband->n_bitrates - len; 779 - for (i = len; i < sband->n_bitrates; i++) { 780 int rate = sband->bitrates[i].bitrate; 781 *pos++ = (u8) (rate / 5); 782 } 783 } 784
··· 665 mod_timer(&ifsta->timer, jiffies + IEEE80211_AUTH_TIMEOUT); 666 } 667 668 + static int ieee80211_compatible_rates(struct ieee80211_sta_bss *bss, 669 + struct ieee80211_supported_band *sband, 670 + u64 *rates) 671 + { 672 + int i, j, count; 673 + *rates = 0; 674 + count = 0; 675 + for (i = 0; i < bss->supp_rates_len; i++) { 676 + int rate = (bss->supp_rates[i] & 0x7F) * 5; 677 + 678 + for (j = 0; j < sband->n_bitrates; j++) 679 + if (sband->bitrates[j].bitrate == rate) { 680 + *rates |= BIT(j); 681 + count++; 682 + break; 683 + } 684 + } 685 + 686 + return count; 687 + } 688 689 static void ieee80211_send_assoc(struct net_device *dev, 690 struct ieee80211_if_sta *ifsta) ··· 673 struct sk_buff *skb; 674 struct ieee80211_mgmt *mgmt; 675 u8 *pos, *ies; 676 + int i, len, count, rates_len, supp_rates_len; 677 u16 capab; 678 struct ieee80211_sta_bss *bss; 679 int wmm = 0; 680 struct ieee80211_supported_band *sband; 681 + u64 rates = 0; 682 683 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 684 sizeof(*mgmt) + 200 + ifsta->extra_ie_len + ··· 740 *pos++ = ifsta->ssid_len; 741 memcpy(pos, ifsta->ssid, ifsta->ssid_len); 742 743 + /* all supported rates should be added here but some APs 744 + * (e.g. D-Link DAP 1353 in b-only mode) don't like that 745 + * Therefore only add rates the AP supports */ 746 + rates_len = ieee80211_compatible_rates(bss, sband, &rates); 747 + supp_rates_len = rates_len; 748 + if (supp_rates_len > 8) 749 + supp_rates_len = 8; 750 751 + len = sband->n_bitrates; 752 + pos = skb_put(skb, supp_rates_len + 2); 753 + *pos++ = WLAN_EID_SUPP_RATES; 754 + *pos++ = supp_rates_len; 755 + 756 + count = 0; 757 + for (i = 0; i < sband->n_bitrates; i++) { 758 + if (BIT(i) & rates) { 759 int rate = sband->bitrates[i].bitrate; 760 *pos++ = (u8) (rate / 5); 761 + if (++count == 8) 762 + break; 763 + } 764 + } 765 + 766 + if (count == 8) { 767 + pos = skb_put(skb, rates_len - count + 2); 768 + *pos++ = WLAN_EID_EXT_SUPP_RATES; 769 + *pos++ = rates_len - count; 770 + 771 + for (i++; i < sband->n_bitrates; i++) { 772 + if (BIT(i) & rates) { 773 + int rate = sband->bitrates[i].bitrate; 774 + *pos++ = (u8) (rate / 5); 775 + } 776 } 777 } 778
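A short worked example of the new rate handling (hypothetical numbers): if the local band offers twelve bitrates but the AP advertises only 1, 2, 5.5 and 11 Mbit/s, ieee80211_compatible_rates() returns 4 with those four bits set in the mask, the Supported Rates element carries exactly those entries encoded in 500 kbit/s units (11 Mbit/s becomes 22), and since count never reaches eight no Extended Supported Rates element is appended; only when more than eight rates survive the intersection does the remainder spill into the extended element.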
+6 -6
net/mac80211/rx.c
··· 1305 if (is_multicast_ether_addr(skb->data)) { 1306 if (*mesh_ttl > 0) { 1307 xmit_skb = skb_copy(skb, GFP_ATOMIC); 1308 - if (!xmit_skb && net_ratelimit()) 1309 printk(KERN_DEBUG "%s: failed to clone " 1310 "multicast frame\n", dev->name); 1311 - else 1312 - xmit_skb->pkt_type = PACKET_OTHERHOST; 1313 } else 1314 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.sta, 1315 dropped_frames_ttl); ··· 1395 padding = ((4 - subframe_len) & 0x3); 1396 /* the last MSDU has no padding */ 1397 if (subframe_len > remaining) { 1398 - printk(KERN_DEBUG "%s: wrong buffer size", dev->name); 1399 return RX_DROP_UNUSABLE; 1400 } 1401 ··· 1418 eth = (struct ethhdr *) skb_pull(skb, ntohs(len) + 1419 padding); 1420 if (!eth) { 1421 - printk(KERN_DEBUG "%s: wrong buffer size ", 1422 dev->name); 1423 dev_kfree_skb(frame); 1424 return RX_DROP_UNUSABLE; ··· 1952 if (!skb_new) { 1953 if (net_ratelimit()) 1954 printk(KERN_DEBUG "%s: failed to copy " 1955 - "multicast frame for %s", 1956 wiphy_name(local->hw.wiphy), 1957 prev->dev->name); 1958 continue;
··· 1305 if (is_multicast_ether_addr(skb->data)) { 1306 if (*mesh_ttl > 0) { 1307 xmit_skb = skb_copy(skb, GFP_ATOMIC); 1308 + if (xmit_skb) 1309 + xmit_skb->pkt_type = PACKET_OTHERHOST; 1310 + else if (net_ratelimit()) 1311 printk(KERN_DEBUG "%s: failed to clone " 1312 "multicast frame\n", dev->name); 1313 } else 1314 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.sta, 1315 dropped_frames_ttl); ··· 1395 padding = ((4 - subframe_len) & 0x3); 1396 /* the last MSDU has no padding */ 1397 if (subframe_len > remaining) { 1398 + printk(KERN_DEBUG "%s: wrong buffer size\n", dev->name); 1399 return RX_DROP_UNUSABLE; 1400 } 1401 ··· 1418 eth = (struct ethhdr *) skb_pull(skb, ntohs(len) + 1419 padding); 1420 if (!eth) { 1421 + printk(KERN_DEBUG "%s: wrong buffer size\n", 1422 dev->name); 1423 dev_kfree_skb(frame); 1424 return RX_DROP_UNUSABLE; ··· 1952 if (!skb_new) { 1953 if (net_ratelimit()) 1954 printk(KERN_DEBUG "%s: failed to copy " 1955 + "multicast frame for %s\n", 1956 wiphy_name(local->hw.wiphy), 1957 prev->dev->name); 1958 continue;
+3 -2
net/mac80211/tx.c
··· 1562 * be cloned. This could happen, e.g., with Linux bridge code passing 1563 * us broadcast frames. */ 1564 1565 - if (head_need > 0 || skb_cloned(skb)) { 1566 #if 0 1567 printk(KERN_DEBUG "%s: need to reallocate buffer for %d bytes " 1568 "of headroom\n", dev->name, head_need); 1569 #endif 1570 1571 - if (skb_cloned(skb)) 1572 I802_DEBUG_INC(local->tx_expand_skb_head_cloned); 1573 else 1574 I802_DEBUG_INC(local->tx_expand_skb_head); ··· 1898 control->flags |= IEEE80211_TXCTL_SHORT_PREAMBLE; 1899 control->antenna_sel_tx = local->hw.conf.antenna_sel_tx; 1900 control->flags |= IEEE80211_TXCTL_NO_ACK; 1901 control->retry_limit = 1; 1902 control->flags |= IEEE80211_TXCTL_CLEAR_PS_FILT; 1903 }
··· 1562 * be cloned. This could happen, e.g., with Linux bridge code passing 1563 * us broadcast frames. */ 1564 1565 + if (head_need > 0 || skb_header_cloned(skb)) { 1566 #if 0 1567 printk(KERN_DEBUG "%s: need to reallocate buffer for %d bytes " 1568 "of headroom\n", dev->name, head_need); 1569 #endif 1570 1571 + if (skb_header_cloned(skb)) 1572 I802_DEBUG_INC(local->tx_expand_skb_head_cloned); 1573 else 1574 I802_DEBUG_INC(local->tx_expand_skb_head); ··· 1898 control->flags |= IEEE80211_TXCTL_SHORT_PREAMBLE; 1899 control->antenna_sel_tx = local->hw.conf.antenna_sel_tx; 1900 control->flags |= IEEE80211_TXCTL_NO_ACK; 1901 + control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT; 1902 control->retry_limit = 1; 1903 control->flags |= IEEE80211_TXCTL_CLEAR_PS_FILT; 1904 }
+5 -5
net/mac80211/util.c
··· 153 /* 7.1.3.5a.2 */ 154 switch (ae) { 155 case 0: 156 - return 5; 157 case 1: 158 - return 11; 159 case 2: 160 - return 17; 161 case 3: 162 - return 23; 163 default: 164 - return 5; 165 } 166 } 167
··· 153 /* 7.1.3.5a.2 */ 154 switch (ae) { 155 case 0: 156 + return 6; 157 case 1: 158 + return 12; 159 case 2: 160 + return 18; 161 case 3: 162 + return 24; 163 default: 164 + return 6; 165 } 166 } 167
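The corrected return values here, like the return 6 in the mesh.c hunk earlier, follow from the mesh header layout: flags (1 byte) plus TTL (1 byte) plus sequence number (4 bytes) gives a 6-byte base, and each address-extension mode adds whole 6-byte MAC addresses, so AE values 0 through 3 map to 6, 12, 18 and 24 bytes; the previous values were one byte short across the board.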
+2 -1
net/mac80211/wme.c
··· 394 qd->handle); 395 if (!q->queues[i]) { 396 q->queues[i] = &noop_qdisc; 397 - printk(KERN_ERR "%s child qdisc %i creation failed", dev->name, i); 398 } 399 } 400
··· 394 qd->handle); 395 if (!q->queues[i]) { 396 q->queues[i] = &noop_qdisc; 397 + printk(KERN_ERR "%s child qdisc %i creation failed\n", 398 + dev->name, i); 399 } 400 } 401
+3
net/netfilter/nf_conntrack_netlink.c
··· 472 goto nla_put_failure; 473 nla_nest_end(skb, nest_parms); 474 475 if (events & IPCT_DESTROY) { 476 if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 || 477 ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0)
··· 472 goto nla_put_failure; 473 nla_nest_end(skb, nest_parms); 474 475 + if (ctnetlink_dump_id(skb, ct) < 0) 476 + goto nla_put_failure; 477 + 478 if (events & IPCT_DESTROY) { 479 if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 || 480 ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0)
+2
net/netfilter/xt_iprange.c
··· 179 MODULE_LICENSE("GPL"); 180 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>, Jan Engelhardt <jengelh@computergmbh.de>"); 181 MODULE_DESCRIPTION("Xtables: arbitrary IPv4 range matching");
··· 179 MODULE_LICENSE("GPL"); 180 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>, Jan Engelhardt <jengelh@computergmbh.de>"); 181 MODULE_DESCRIPTION("Xtables: arbitrary IPv4 range matching"); 182 + MODULE_ALIAS("ipt_iprange"); 183 + MODULE_ALIAS("ip6t_iprange");
+1 -1
net/packet/af_packet.c
··· 743 if (len > dev->mtu+reserve) 744 goto out_unlock; 745 746 - skb = sock_alloc_send_skb(sk, len + LL_RESERVED_SPACE(dev), 747 msg->msg_flags & MSG_DONTWAIT, &err); 748 if (skb==NULL) 749 goto out_unlock;
··· 743 if (len > dev->mtu+reserve) 744 goto out_unlock; 745 746 + skb = sock_alloc_send_skb(sk, len + LL_ALLOCATED_SPACE(dev), 747 msg->msg_flags & MSG_DONTWAIT, &err); 748 if (skb==NULL) 749 goto out_unlock;
+15 -1
net/sctp/sm_make_chunk.c
··· 2418 break; 2419 2420 case SCTP_PARAM_IPV6_ADDRESS: 2421 - asoc->peer.ipv6_address = 1; 2422 break; 2423 2424 case SCTP_PARAM_HOST_NAME_ADDRESS: ··· 2829 2830 addr_param = (union sctp_addr_param *) 2831 ((void *)asconf_param + sizeof(sctp_addip_param_t)); 2832 2833 af = sctp_get_af_specific(param_type2af(addr_param->v4.param_hdr.type)); 2834 if (unlikely(!af))
··· 2418 break; 2419 2420 case SCTP_PARAM_IPV6_ADDRESS: 2421 + if (PF_INET6 == asoc->base.sk->sk_family) 2422 + asoc->peer.ipv6_address = 1; 2423 break; 2424 2425 case SCTP_PARAM_HOST_NAME_ADDRESS: ··· 2828 2829 addr_param = (union sctp_addr_param *) 2830 ((void *)asconf_param + sizeof(sctp_addip_param_t)); 2831 + 2832 + switch (addr_param->v4.param_hdr.type) { 2833 + case SCTP_PARAM_IPV6_ADDRESS: 2834 + if (!asoc->peer.ipv6_address) 2835 + return SCTP_ERROR_INV_PARAM; 2836 + break; 2837 + case SCTP_PARAM_IPV4_ADDRESS: 2838 + if (!asoc->peer.ipv4_address) 2839 + return SCTP_ERROR_INV_PARAM; 2840 + break; 2841 + default: 2842 + return SCTP_ERROR_INV_PARAM; 2843 + } 2844 2845 af = sctp_get_af_specific(param_type2af(addr_param->v4.param_hdr.type)); 2846 if (unlikely(!af))
+3 -3
net/xfrm/xfrm_output.c
··· 25 struct dst_entry *dst = skb->dst; 26 int nhead = dst->header_len + LL_RESERVED_SPACE(dst->dev) 27 - skb_headroom(skb); 28 29 - if (nhead > 0) 30 - return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC); 31 32 - /* Check tail too... */ 33 return 0; 34 } 35
··· 25 struct dst_entry *dst = skb->dst; 26 int nhead = dst->header_len + LL_RESERVED_SPACE(dst->dev) 27 - skb_headroom(skb); 28 + int ntail = dst->dev->needed_tailroom - skb_tailroom(skb); 29 30 + if (nhead > 0 || ntail > 0) 31 + return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC); 32 33 return 0; 34 } 35