Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (73 commits)
net: Fix typo in net/core/sock.c.
ppp: Do not free not yet unregistered net device.
netfilter: xt_iprange: module aliases for xt_iprange
netfilter: ctnetlink: dump conntrack ID in event messages
irda: Fix a misalign access issue. (v2)
sctp: Fix use of uninitialized pointer
cipso: Relax too much careful cipso hash function.
tcp FRTO: work-around inorder receivers
tcp FRTO: Fix fallback to conventional recovery
New maintainer for Intel ethernet adapters
DM9000: Use delayed work to update MII PHY state
DM9000: Update and fix driver debugging messages
DM9000: Add __devinit and __devexit attributes to probe and remove
sky2: fix simple define thinko
[netdrvr] sfc: Add self-test support
[netdrvr] sfc: Increment rx_reset when reported as driver event
[netdrvr] sfc: Remove unused macro EFX_XAUI_RETRAIN_MAX
[netdrvr] sfc: Fix code formatting
[netdrvr] sfc: Remove kernel-doc comments for removed members of struct efx_nic
[netdrvr] sfc: Remove garbage from comment
...

+3378 -734
+2
drivers/base/memory.c
···
  {
  	return blocking_notifier_chain_register(&memory_chain, nb);
  }
+ EXPORT_SYMBOL(register_memory_notifier);
 
  void unregister_memory_notifier(struct notifier_block *nb)
  {
  	blocking_notifier_chain_unregister(&memory_chain, nb);
  }
+ EXPORT_SYMBOL(unregister_memory_notifier);
 
  /*
   * register_memory - Setup a sysfs device for a memory block
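register_memory_notifier() and unregister_memory_notifier() are exported so that modules can react to memory hotplug events (the eHEA driver starts doing exactly that further down in this merge). A minimal, hypothetical module-side user could look like this (all names below are illustrative, not part of this merge):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/memory.h>

/* Hypothetical example of a module using the newly exported symbols. */
static int my_mem_event(struct notifier_block *nb,
			unsigned long action, void *data)
{
	if (action == MEM_OFFLINE)		/* a memory section was removed */
		pr_info("memory section went offline\n");
	return NOTIFY_OK;
}

static struct notifier_block my_mem_nb = {
	.notifier_call = my_mem_event,
};

static int __init my_init(void)
{
	/* legal from a module now that the symbol is exported */
	return register_memory_notifier(&my_mem_nb);
}

static void __exit my_exit(void)
{
	unregister_memory_notifier(&my_mem_nb);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");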
+1 -1
drivers/net/Kconfig
···
 
  config EHEA
  	tristate "eHEA Ethernet support"
- 	depends on IBMEBUS && INET && SPARSEMEM
+ 	depends on IBMEBUS && INET && SPARSEMEM && MEMORY_HOTPLUG
  	select INET_LRO
  	---help---
  	  This driver supports the IBM pSeries eHEA ethernet adapter.
+97 -58
drivers/net/atlx/atl1.c
Wake-on-LAN rework (plus a copyright bump to 2006 - 2008 for Jay Cliburn):
- atl1_phy_enter_power_saving() gets a real implementation: it becomes void and forces the PHY into power saving mode through the vendor MII debug registers (MII_DBG_ADDR / MII_DBG_DATA), replacing the old commented-out TODO stub; "Wake on LAN" is dropped from the driver's TODO list.
- atl1_suspend() now saves the PCI state up front (and checks the result), then handles three cases. With link up and WOL requested, it reads the negotiated speed/duplex, enables magic-packet WOL, programs MAC_CTRL for that speed, duplex, preamble length, VLAN stripping and broadcast reception, forces PCIe PHY receiver detection and enables wake for the state chosen by pci_choose_state(). With link down and WOL requested, it enables link-change WOL only. Otherwise it disables WOL and puts the PHY into power saving. Register writes are flushed with read-backs, MSI is disabled if the interface was running, and the device enters the pci_choose_state() result instead of a hard-coded PCI_D3hot.
- atl1_resume() checks the pci_enable_device() return value, sets bus mastering, clears the WOL control register, and calls atl1_reset_hw() and clears the CMB interrupt statistics instead of atl1_reset(); the atl1_via_workaround() call is removed.
- A new atl1_shutdown() (which calls atl1_suspend() when CONFIG_PM is set) is wired up as the PCI driver's .shutdown handler.
+1 -1
drivers/net/atlx/atl1.h
···
  /*
   * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
   * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
- * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
+ * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com>
   *
   * Derived from Intel e1000 driver
   * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+1 -1
drivers/net/atlx/atlx.c
···
   *
   * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
   * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
- * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
+ * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com>
   * Copyright(c) 2007 Atheros Corporation. All rights reserved.
   *
   * Derived from Intel e1000 driver
+5 -2
drivers/net/atlx/atlx.h
···
   *
   * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
   * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
- * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
+ * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com>
   * Copyright(c) 2007 Atheros Corporation. All rights reserved.
   *
   * Derived from Intel e1000 driver
···
  #include <linux/module.h>
  #include <linux/types.h>
 
- #define ATLX_DRIVER_VERSION "2.1.1"
+ #define ATLX_DRIVER_VERSION "2.1.3"
  MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \
  	Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
  MODULE_LICENSE("GPL");
···
  #define MII_ATLX_PSSR_10MBS	0x0000	/* 00=10Mbs */
  #define MII_ATLX_PSSR_100MBS	0x4000	/* 01=100Mbs */
  #define MII_ATLX_PSSR_1000MBS	0x8000	/* 10=1000Mbs */
+
+ #define MII_DBG_ADDR		0x1D
+ #define MII_DBG_DATA		0x1E
 
  /* PCI Command Register Bit Definitions */
  #define PCI_REG_COMMAND	0x04	/* PCI Command Register */
+1
drivers/net/cxgb3/adapter.h
···
  	USING_MSIX = (1 << 2),
  	QUEUES_BOUND = (1 << 3),
  	TP_PARITY_INIT = (1 << 4),
+ 	NAPI_INIT = (1 << 5),
  };
 
  struct fl_pg_chunk {
+1
drivers/net/cxgb3/common.h
···
  void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
  int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
  		    int reset);
+ int t3_replay_prep_adapter(struct adapter *adapter);
  void t3_led_ready(struct adapter *adapter);
  void t3_fatal_err(struct adapter *adapter);
  void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
+22 -18
drivers/net/cxgb3/cxgb3_main.c
PCI error recovery (EEH) fixes:
- init_napi() now sets a new NAPI_INIT adapter flag, with a comment explaining that netif_napi_add() adds each napi_struct to a list and therefore must not be called a second time (for example during EEH recovery); cxgb_up() only calls init_napi() when the flag is not yet set.
- offload_open() exits through its cleanup label when cxgb_up() fails, and cxgb_open() no longer calls quiesce_rx() in that error path.
- t3_io_error_detected() no longer frees the SGE resources; that now happens in t3_io_slot_reset(), which also restores the PCI state and reinitializes the adapter with the new t3_replay_prep_adapter() instead of t3_prep_adapter(), returning PCI_ERS_RESULT_DISCONNECT through an error label if anything fails. t3_io_resume() no longer tries to reopen the offload capabilities, and a "slot slot reset" comment typo is fixed.
- init_one() saves the PCI state after enabling bus mastering so it can be restored after a slot reset.
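The NAPI guard itself is small; condensed from the hunks above (the surrounding ring and firmware setup in cxgb_up() is elided):

/* Condensed from the cxgb3 change above: netif_napi_add() links each
 * napi_struct into the device's napi_list, so set up the NAPI contexts
 * exactly once, even if the rings are rebuilt during EEH recovery. */
static int cxgb_up(struct adapter *adap)
{
	/* ... allocate SGE resources, load firmware, set up RSS ... */
	if (!(adap->flags & NAPI_INIT))
		init_napi(adap);	/* init_napi() sets NAPI_INIT when done */
	adap->flags |= FULL_INIT_DONE;
	/* ... request IRQs, enable interrupts ... */
	return 0;
}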
+8
drivers/net/cxgb3/regs.h
···
 
  #define A_PCIE_CFG 0x88
 
+ #define S_ENABLELINKDWNDRST    21
+ #define V_ENABLELINKDWNDRST(x) ((x) << S_ENABLELINKDWNDRST)
+ #define F_ENABLELINKDWNDRST    V_ENABLELINKDWNDRST(1U)
+
+ #define S_ENABLELINKDOWNRST    20
+ #define V_ENABLELINKDOWNRST(x) ((x) << S_ENABLELINKDOWNRST)
+ #define F_ENABLELINKDOWNRST    V_ENABLELINKDOWNRST(1U)
+
  #define S_PCIE_CLIDECEN 16
  #define V_PCIE_CLIDECEN(x) ((x) << S_PCIE_CLIDECEN)
  #define F_PCIE_CLIDECEN V_PCIE_CLIDECEN(1U)
+27 -2
drivers/net/cxgb3/sge.c
- A new t3_reset_qset() helper resets an SGE queue set while preserving its NAPI state, so the qset can be reincarnated (for example during EEH recovery); free_qset() now calls it instead of memset()ing the whole structure. A stray indentation in t3_mgmt_tx() is also cleaned up.
+28
drivers/net/cxgb3/t3_hw.c
- The PCIe setup additionally sets F_ENABLELINKDWNDRST and F_ENABLELINKDOWNRST in A_PCIE_CFG, and a new t3_replay_prep_adapter() re-runs early_hw_init() and init_parity() and re-preps (then powers down) each port's PHY, so the adapter can be brought back up after a PCI slot reset.
+33 -4
drivers/net/dm9000.c
- The MII PHY state is now refreshed from a delayed work item: struct board_info gains a phy_poll delayed_work and an ndev back-pointer, and dm9000_poll_work() calls mii_check_media() and reschedules itself through dm9000_schedule_poll() every two seconds while the interface is running. Polling is started in dm9000_open() and cancelled in dm9000_stop().
- dm9000_probe() and dm9000_drv_remove() are annotated __devinit/__devexit and the .remove hook is wrapped in __devexit_p().
- Debug output is tidied up: the dev_dbg() in dm9000_probe() gains its missing newline, and the PHY read/write paths gain debug traces.
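The polling follows the usual delayed-work pattern; condensed from the dm9000 changes above (the dm9000_schedule_poll() wrapper is inlined and error handling is omitted):

/* Condensed from the dm9000 changes above: poll the MII PHY every
 * two seconds for as long as the interface is running. */
static void dm9000_poll_work(struct work_struct *w)
{
	struct delayed_work *dw = container_of(w, struct delayed_work, work);
	board_info_t *db = container_of(dw, board_info_t, phy_poll);

	mii_check_media(&db->mii, netif_msg_link(db), 0);

	if (netif_running(db->ndev))
		schedule_delayed_work(&db->phy_poll, HZ * 2);
}

/* probe(): INIT_DELAYED_WORK(&db->phy_poll, dm9000_poll_work);
 * open():  schedule_delayed_work(&db->phy_poll, HZ * 2);
 * stop():  cancel_delayed_work(&db->phy_poll);               */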
+22 -5
drivers/net/ehea/ehea.h
- The driver version moves from EHEA_0090 to EHEA_0091.
- The flat struct ehea_busmap (entries / valid_sections / vaddr array) is replaced by a two-level map: struct ehea_bmap holds pointers to struct ehea_top_bmap entries, each of which holds pointers to struct ehea_dir_bmap blocks containing the per-section bus addresses. New constants describe the layout: EHEA_INVAL_ADDR, EHEA_DIR_INDEX_SHIFT (13, i.e. 8k entries per 64k block), EHEA_TOP_INDEX_SHIFT, EHEA_MAP_ENTRIES, EHEA_MAP_SIZE and EHEA_INDEX_MASK.
+25
drivers/net/ehea/ehea_main.c
- The driver includes <linux/memory.h> and registers a memory hotplug notifier (ehea_mem_nb): on MEM_OFFLINE it logs the removal and re-registers its memory regions via ehea_rereg_mrs(). The notifier is registered in module init (a failure only logs a message) and unregistered in the init error path and on module exit.
+202 -74
drivers/net/ehea/ehea_qmr.c
- The global flat busmap is replaced by the dynamically allocated three-level ehea_bmap. ehea_create_busmap() walks system memory with walk_memory_resource(0, 1ULL << MAX_PHYSMEM_BITS, ...) and ehea_create_busmap_callback(), allocating top and dir levels on demand, assigning each present section a bus address and accumulating the total length in ehea_mr_len; creation and destruction are serialized by ehea_busmap_mutex, and ehea_destroy_busmap() frees every level of the map.
- ehea_map_vaddr() walks the top/dir/idx levels and returns EHEA_INVAL_ADDR for unmapped sections instead of scheduling the re-registration work itself.
- ehea_reg_kernel_mr() is split into helpers (ehea_calc_sectbase(), ehea_reg_mr_section(), ehea_reg_mr_sections(), ehea_reg_mr_dir_sections()) that register every mapped section with the hypervisor, and it now fails with -EIO when no busmap is available.
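For reference, the lookup through the new map is a three-level index walk; the sketch below follows the ehea.h layout above, but bmap_lookup() is an illustrative helper name (the driver does this inside ehea_map_vaddr(), deriving the section index from the virtual address):

#include <linux/types.h>

#define EHEA_DIR_INDEX_SHIFT	13		/* 8k entries per 64k block */
#define EHEA_TOP_INDEX_SHIFT	(EHEA_DIR_INDEX_SHIFT * 2)
#define EHEA_MAP_ENTRIES	(1 << EHEA_DIR_INDEX_SHIFT)
#define EHEA_INDEX_MASK		(EHEA_MAP_ENTRIES - 1)
#define EHEA_INVAL_ADDR		0xFFFFFFFFFFFFFFFFULL

struct ehea_dir_bmap { u64 ent[EHEA_MAP_ENTRIES]; };
struct ehea_top_bmap { struct ehea_dir_bmap *dir[EHEA_MAP_ENTRIES]; };
struct ehea_bmap     { struct ehea_top_bmap *top[EHEA_MAP_ENTRIES]; };

/* Illustrative lookup: map a memory-section index to its bus address. */
static u64 bmap_lookup(struct ehea_bmap *bmap, unsigned long index)
{
	int top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
	int dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
	int idx = index & EHEA_INDEX_MASK;

	if (!bmap->top[top] || !bmap->top[top]->dir[dir] ||
	    !bmap->top[top]->dir[dir]->ent[idx])
		return EHEA_INVAL_ADDR;		/* section not mapped */

	return bmap->top[top]->dir[dir]->ent[idx];
}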
+2
drivers/net/gianfar.c
···
  			dev_kfree_skb_any(priv->tx_skbuff[i]);
  			priv->tx_skbuff[i] = NULL;
  		}
+
+ 		txbdp++;
  	}
 
  	kfree(priv->tx_skbuff);
+419 -323
drivers/net/myri10ge/myri10ge.c
- The per-NIC state is reorganized in preparation for multi-slice operation: a new struct myri10ge_slice_state groups the transmit ring, both receive rings, the rx_done queue, the NAPI context, per-slice netstats, the irq_claim pointer, the firmware stats block and the watchdog counters, and struct myri10ge_priv embeds one such slice (ss) plus tx_boundary, small_bytes, big_bytes, max_intr_slots, product_code_string and a stats_lock. The stop_queue/wake_queue/linearized counters move into the tx ring, and the interrupt, NAPI poll, rx/tx completion and ring allocation paths now operate on a slice.
- The myri10ge_max_intr_slots module parameter is removed; the value is now derived from MXGEFW_CMD_GET_RX_RING_SIZE in the new myri10ge_get_firmware_capabilities(), which also probes for TSO6 and is called for adopted as well as freshly loaded firmware.
- The on-stack buffers used for the dummy RDMA and firmware handoff are 8-byte aligned, command polling adds an mb(), the firmware handoff wait backs off exponentially, and the "handoff confirmed" message is dropped.
- ethtool get_settings parses the product code string cached from the EEPROM and reports PORT_FIBRE for XFP and quad ribbon fiber boards; ethtool statistics are split into main (per-NIC) and per-slice groups.
- Trailing "\n" characters are removed from the MODULE_PARM_DESC strings and the description for myri10ge_lro_max_pkts is attached to the correct parameter.
MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0); 1655 1559 tx_ring_size = cmd.data0; 1656 1560 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0); ··· 1658 1566 1659 1567 tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send); 1660 1568 rx_ring_entries = rx_ring_size / sizeof(struct mcp_dma_addr); 1661 - mgp->tx.mask = tx_ring_entries - 1; 1662 - mgp->rx_small.mask = mgp->rx_big.mask = rx_ring_entries - 1; 1569 + ss->tx.mask = tx_ring_entries - 1; 1570 + ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1; 1663 1571 1664 1572 status = -ENOMEM; 1665 1573 1666 1574 /* allocate the host shadow rings */ 1667 1575 1668 1576 bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4) 1669 - * sizeof(*mgp->tx.req_list); 1670 - mgp->tx.req_bytes = kzalloc(bytes, GFP_KERNEL); 1671 - if (mgp->tx.req_bytes == NULL) 1577 + * sizeof(*ss->tx.req_list); 1578 + ss->tx.req_bytes = kzalloc(bytes, GFP_KERNEL); 1579 + if (ss->tx.req_bytes == NULL) 1672 1580 goto abort_with_nothing; 1673 1581 1674 1582 /* ensure req_list entries are aligned to 8 bytes */ 1675 - mgp->tx.req_list = (struct mcp_kreq_ether_send *) 1676 - ALIGN((unsigned long)mgp->tx.req_bytes, 8); 1583 + ss->tx.req_list = (struct mcp_kreq_ether_send *) 1584 + ALIGN((unsigned long)ss->tx.req_bytes, 8); 1677 1585 1678 - bytes = rx_ring_entries * sizeof(*mgp->rx_small.shadow); 1679 - mgp->rx_small.shadow = kzalloc(bytes, GFP_KERNEL); 1680 - if (mgp->rx_small.shadow == NULL) 1586 + bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow); 1587 + ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL); 1588 + if (ss->rx_small.shadow == NULL) 1681 1589 goto abort_with_tx_req_bytes; 1682 1590 1683 - bytes = rx_ring_entries * sizeof(*mgp->rx_big.shadow); 1684 - mgp->rx_big.shadow = kzalloc(bytes, GFP_KERNEL); 1685 - if (mgp->rx_big.shadow == NULL) 1591 + bytes = rx_ring_entries * sizeof(*ss->rx_big.shadow); 1592 + ss->rx_big.shadow = kzalloc(bytes, GFP_KERNEL); 1593 + if (ss->rx_big.shadow == NULL) 1686 1594 goto abort_with_rx_small_shadow; 1687 1595 1688 1596 /* allocate the host info rings */ 1689 1597 1690 - bytes = tx_ring_entries * sizeof(*mgp->tx.info); 1691 - mgp->tx.info = kzalloc(bytes, GFP_KERNEL); 1692 - if (mgp->tx.info == NULL) 1598 + bytes = tx_ring_entries * sizeof(*ss->tx.info); 1599 + ss->tx.info = kzalloc(bytes, GFP_KERNEL); 1600 + if (ss->tx.info == NULL) 1693 1601 goto abort_with_rx_big_shadow; 1694 1602 1695 - bytes = rx_ring_entries * sizeof(*mgp->rx_small.info); 1696 - mgp->rx_small.info = kzalloc(bytes, GFP_KERNEL); 1697 - if (mgp->rx_small.info == NULL) 1603 + bytes = rx_ring_entries * sizeof(*ss->rx_small.info); 1604 + ss->rx_small.info = kzalloc(bytes, GFP_KERNEL); 1605 + if (ss->rx_small.info == NULL) 1698 1606 goto abort_with_tx_info; 1699 1607 1700 - bytes = rx_ring_entries * sizeof(*mgp->rx_big.info); 1701 - mgp->rx_big.info = kzalloc(bytes, GFP_KERNEL); 1702 - if (mgp->rx_big.info == NULL) 1608 + bytes = rx_ring_entries * sizeof(*ss->rx_big.info); 1609 + ss->rx_big.info = kzalloc(bytes, GFP_KERNEL); 1610 + if (ss->rx_big.info == NULL) 1703 1611 goto abort_with_rx_small_info; 1704 1612 1705 1613 /* Fill the receive rings */ 1706 - mgp->rx_big.cnt = 0; 1707 - mgp->rx_small.cnt = 0; 1708 - mgp->rx_big.fill_cnt = 0; 1709 - mgp->rx_small.fill_cnt = 0; 1710 - mgp->rx_small.page_offset = MYRI10GE_ALLOC_SIZE; 1711 - mgp->rx_big.page_offset = MYRI10GE_ALLOC_SIZE; 1712 - mgp->rx_small.watchdog_needed = 0; 1713 - mgp->rx_big.watchdog_needed = 0; 1714 - myri10ge_alloc_rx_pages(mgp, &mgp->rx_small, 1614 + ss->rx_big.cnt = 0; 1615 
+ ss->rx_small.cnt = 0; 1616 + ss->rx_big.fill_cnt = 0; 1617 + ss->rx_small.fill_cnt = 0; 1618 + ss->rx_small.page_offset = MYRI10GE_ALLOC_SIZE; 1619 + ss->rx_big.page_offset = MYRI10GE_ALLOC_SIZE; 1620 + ss->rx_small.watchdog_needed = 0; 1621 + ss->rx_big.watchdog_needed = 0; 1622 + myri10ge_alloc_rx_pages(mgp, &ss->rx_small, 1715 1623 mgp->small_bytes + MXGEFW_PAD, 0); 1716 1624 1717 - if (mgp->rx_small.fill_cnt < mgp->rx_small.mask + 1) { 1625 + if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) { 1718 1626 printk(KERN_ERR "myri10ge: %s: alloced only %d small bufs\n", 1719 - dev->name, mgp->rx_small.fill_cnt); 1627 + dev->name, ss->rx_small.fill_cnt); 1720 1628 goto abort_with_rx_small_ring; 1721 1629 } 1722 1630 1723 - myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 0); 1724 - if (mgp->rx_big.fill_cnt < mgp->rx_big.mask + 1) { 1631 + myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0); 1632 + if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) { 1725 1633 printk(KERN_ERR "myri10ge: %s: alloced only %d big bufs\n", 1726 - dev->name, mgp->rx_big.fill_cnt); 1634 + dev->name, ss->rx_big.fill_cnt); 1727 1635 goto abort_with_rx_big_ring; 1728 1636 } 1729 1637 1730 1638 return 0; 1731 1639 1732 1640 abort_with_rx_big_ring: 1733 - for (i = mgp->rx_big.cnt; i < mgp->rx_big.fill_cnt; i++) { 1734 - int idx = i & mgp->rx_big.mask; 1735 - myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_big.info[idx], 1641 + for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) { 1642 + int idx = i & ss->rx_big.mask; 1643 + myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx], 1736 1644 mgp->big_bytes); 1737 - put_page(mgp->rx_big.info[idx].page); 1645 + put_page(ss->rx_big.info[idx].page); 1738 1646 } 1739 1647 1740 1648 abort_with_rx_small_ring: 1741 - for (i = mgp->rx_small.cnt; i < mgp->rx_small.fill_cnt; i++) { 1742 - int idx = i & mgp->rx_small.mask; 1743 - myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_small.info[idx], 1649 + for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) { 1650 + int idx = i & ss->rx_small.mask; 1651 + myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx], 1744 1652 mgp->small_bytes + MXGEFW_PAD); 1745 - put_page(mgp->rx_small.info[idx].page); 1653 + put_page(ss->rx_small.info[idx].page); 1746 1654 } 1747 1655 1748 - kfree(mgp->rx_big.info); 1656 + kfree(ss->rx_big.info); 1749 1657 1750 1658 abort_with_rx_small_info: 1751 - kfree(mgp->rx_small.info); 1659 + kfree(ss->rx_small.info); 1752 1660 1753 1661 abort_with_tx_info: 1754 - kfree(mgp->tx.info); 1662 + kfree(ss->tx.info); 1755 1663 1756 1664 abort_with_rx_big_shadow: 1757 - kfree(mgp->rx_big.shadow); 1665 + kfree(ss->rx_big.shadow); 1758 1666 1759 1667 abort_with_rx_small_shadow: 1760 - kfree(mgp->rx_small.shadow); 1668 + kfree(ss->rx_small.shadow); 1761 1669 1762 1670 abort_with_tx_req_bytes: 1763 - kfree(mgp->tx.req_bytes); 1764 - mgp->tx.req_bytes = NULL; 1765 - mgp->tx.req_list = NULL; 1671 + kfree(ss->tx.req_bytes); 1672 + ss->tx.req_bytes = NULL; 1673 + ss->tx.req_list = NULL; 1766 1674 1767 1675 abort_with_nothing: 1768 1676 return status; 1769 1677 } 1770 1678 1771 - static void myri10ge_free_rings(struct net_device *dev) 1679 + static void myri10ge_free_rings(struct myri10ge_slice_state *ss) 1772 1680 { 1773 - struct myri10ge_priv *mgp; 1681 + struct myri10ge_priv *mgp = ss->mgp; 1774 1682 struct sk_buff *skb; 1775 1683 struct myri10ge_tx_buf *tx; 1776 1684 int i, len, idx; 1777 1685 1778 - mgp = netdev_priv(dev); 1779 - 1780 - for (i = mgp->rx_big.cnt; i < mgp->rx_big.fill_cnt; i++) { 1781 - idx = i & 
mgp->rx_big.mask; 1782 - if (i == mgp->rx_big.fill_cnt - 1) 1783 - mgp->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE; 1784 - myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_big.info[idx], 1686 + for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) { 1687 + idx = i & ss->rx_big.mask; 1688 + if (i == ss->rx_big.fill_cnt - 1) 1689 + ss->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE; 1690 + myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx], 1785 1691 mgp->big_bytes); 1786 - put_page(mgp->rx_big.info[idx].page); 1692 + put_page(ss->rx_big.info[idx].page); 1787 1693 } 1788 1694 1789 - for (i = mgp->rx_small.cnt; i < mgp->rx_small.fill_cnt; i++) { 1790 - idx = i & mgp->rx_small.mask; 1791 - if (i == mgp->rx_small.fill_cnt - 1) 1792 - mgp->rx_small.info[idx].page_offset = 1695 + for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) { 1696 + idx = i & ss->rx_small.mask; 1697 + if (i == ss->rx_small.fill_cnt - 1) 1698 + ss->rx_small.info[idx].page_offset = 1793 1699 MYRI10GE_ALLOC_SIZE; 1794 - myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_small.info[idx], 1700 + myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx], 1795 1701 mgp->small_bytes + MXGEFW_PAD); 1796 - put_page(mgp->rx_small.info[idx].page); 1702 + put_page(ss->rx_small.info[idx].page); 1797 1703 } 1798 - tx = &mgp->tx; 1704 + tx = &ss->tx; 1799 1705 while (tx->done != tx->req) { 1800 1706 idx = tx->done & tx->mask; 1801 1707 skb = tx->info[idx].skb; ··· 1804 1714 len = pci_unmap_len(&tx->info[idx], len); 1805 1715 pci_unmap_len_set(&tx->info[idx], len, 0); 1806 1716 if (skb) { 1807 - mgp->stats.tx_dropped++; 1717 + ss->stats.tx_dropped++; 1808 1718 dev_kfree_skb_any(skb); 1809 1719 if (len) 1810 1720 pci_unmap_single(mgp->pdev, ··· 1819 1729 PCI_DMA_TODEVICE); 1820 1730 } 1821 1731 } 1822 - kfree(mgp->rx_big.info); 1732 + kfree(ss->rx_big.info); 1823 1733 1824 - kfree(mgp->rx_small.info); 1734 + kfree(ss->rx_small.info); 1825 1735 1826 - kfree(mgp->tx.info); 1736 + kfree(ss->tx.info); 1827 1737 1828 - kfree(mgp->rx_big.shadow); 1738 + kfree(ss->rx_big.shadow); 1829 1739 1830 - kfree(mgp->rx_small.shadow); 1740 + kfree(ss->rx_small.shadow); 1831 1741 1832 - kfree(mgp->tx.req_bytes); 1833 - mgp->tx.req_bytes = NULL; 1834 - mgp->tx.req_list = NULL; 1742 + kfree(ss->tx.req_bytes); 1743 + ss->tx.req_bytes = NULL; 1744 + ss->tx.req_list = NULL; 1835 1745 } 1836 1746 1837 1747 static int myri10ge_request_irq(struct myri10ge_priv *mgp) ··· 1930 1840 1931 1841 static int myri10ge_open(struct net_device *dev) 1932 1842 { 1933 - struct myri10ge_priv *mgp; 1843 + struct myri10ge_priv *mgp = netdev_priv(dev); 1934 1844 struct myri10ge_cmd cmd; 1935 1845 struct net_lro_mgr *lro_mgr; 1936 1846 int status, big_pow2; 1937 - 1938 - mgp = netdev_priv(dev); 1939 1847 1940 1848 if (mgp->running != MYRI10GE_ETH_STOPPED) 1941 1849 return -EBUSY; ··· 1971 1883 /* get the lanai pointers to the send and receive rings */ 1972 1884 1973 1885 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET, &cmd, 0); 1974 - mgp->tx.lanai = 1886 + mgp->ss.tx.lanai = 1975 1887 (struct mcp_kreq_ether_send __iomem *)(mgp->sram + cmd.data0); 1976 1888 1977 1889 status |= 1978 1890 myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET, &cmd, 0); 1979 - mgp->rx_small.lanai = 1891 + mgp->ss.rx_small.lanai = 1980 1892 (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0); 1981 1893 1982 1894 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0); 1983 - mgp->rx_big.lanai = 1895 + mgp->ss.rx_big.lanai = 1984 1896 (struct 
mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0); 1985 1897 1986 1898 if (status != 0) { ··· 1992 1904 } 1993 1905 1994 1906 if (myri10ge_wcfifo && mgp->wc_enabled) { 1995 - mgp->tx.wc_fifo = (u8 __iomem *) mgp->sram + MXGEFW_ETH_SEND_4; 1996 - mgp->rx_small.wc_fifo = 1907 + mgp->ss.tx.wc_fifo = (u8 __iomem *) mgp->sram + MXGEFW_ETH_SEND_4; 1908 + mgp->ss.rx_small.wc_fifo = 1997 1909 (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_SMALL; 1998 - mgp->rx_big.wc_fifo = 1910 + mgp->ss.rx_big.wc_fifo = 1999 1911 (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_BIG; 2000 1912 } else { 2001 - mgp->tx.wc_fifo = NULL; 2002 - mgp->rx_small.wc_fifo = NULL; 2003 - mgp->rx_big.wc_fifo = NULL; 1913 + mgp->ss.tx.wc_fifo = NULL; 1914 + mgp->ss.rx_small.wc_fifo = NULL; 1915 + mgp->ss.rx_big.wc_fifo = NULL; 2004 1916 } 2005 1917 2006 1918 /* Firmware needs the big buff size as a power of 2. Lie and ··· 2017 1929 mgp->big_bytes = big_pow2; 2018 1930 } 2019 1931 2020 - status = myri10ge_allocate_rings(dev); 1932 + status = myri10ge_allocate_rings(&mgp->ss); 2021 1933 if (status != 0) 2022 1934 goto abort_with_irq; 2023 1935 ··· 2036 1948 goto abort_with_rings; 2037 1949 } 2038 1950 2039 - cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->fw_stats_bus); 2040 - cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->fw_stats_bus); 1951 + cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->ss.fw_stats_bus); 1952 + cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->ss.fw_stats_bus); 2041 1953 cmd.data2 = sizeof(struct mcp_irq_data); 2042 1954 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0); 2043 1955 if (status == -ENOSYS) { 2044 - dma_addr_t bus = mgp->fw_stats_bus; 1956 + dma_addr_t bus = mgp->ss.fw_stats_bus; 2045 1957 bus += offsetof(struct mcp_irq_data, send_done_count); 2046 1958 cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus); 2047 1959 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus); ··· 2062 1974 mgp->link_state = ~0U; 2063 1975 mgp->rdma_tags_available = 15; 2064 1976 2065 - lro_mgr = &mgp->rx_done.lro_mgr; 1977 + lro_mgr = &mgp->ss.rx_done.lro_mgr; 2066 1978 lro_mgr->dev = dev; 2067 1979 lro_mgr->features = LRO_F_NAPI; 2068 1980 lro_mgr->ip_summed = CHECKSUM_COMPLETE; 2069 1981 lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY; 2070 1982 lro_mgr->max_desc = MYRI10GE_MAX_LRO_DESCRIPTORS; 2071 - lro_mgr->lro_arr = mgp->rx_done.lro_desc; 1983 + lro_mgr->lro_arr = mgp->ss.rx_done.lro_desc; 2072 1984 lro_mgr->get_frag_header = myri10ge_get_frag_header; 2073 1985 lro_mgr->max_aggr = myri10ge_lro_max_pkts; 2074 1986 lro_mgr->frag_align_pad = 2; 2075 1987 if (lro_mgr->max_aggr > MAX_SKB_FRAGS) 2076 1988 lro_mgr->max_aggr = MAX_SKB_FRAGS; 2077 1989 2078 - napi_enable(&mgp->napi); /* must happen prior to any irq */ 1990 + napi_enable(&mgp->ss.napi); /* must happen prior to any irq */ 2079 1991 2080 1992 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0); 2081 1993 if (status) { ··· 2084 1996 goto abort_with_rings; 2085 1997 } 2086 1998 2087 - mgp->wake_queue = 0; 2088 - mgp->stop_queue = 0; 1999 + mgp->ss.tx.wake_queue = 0; 2000 + mgp->ss.tx.stop_queue = 0; 2089 2001 mgp->running = MYRI10GE_ETH_RUNNING; 2090 2002 mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ; 2091 2003 add_timer(&mgp->watchdog_timer); ··· 2093 2005 return 0; 2094 2006 2095 2007 abort_with_rings: 2096 - myri10ge_free_rings(dev); 2008 + myri10ge_free_rings(&mgp->ss); 2097 2009 2098 2010 abort_with_irq: 2099 2011 myri10ge_free_irq(mgp); ··· 2105 2017 2106 2018 static int myri10ge_close(struct net_device *dev) 2107 2019 { 2108 - struct myri10ge_priv 
*mgp; 2020 + struct myri10ge_priv *mgp = netdev_priv(dev); 2109 2021 struct myri10ge_cmd cmd; 2110 2022 int status, old_down_cnt; 2111 - 2112 - mgp = netdev_priv(dev); 2113 2023 2114 2024 if (mgp->running != MYRI10GE_ETH_RUNNING) 2115 2025 return 0; 2116 2026 2117 - if (mgp->tx.req_bytes == NULL) 2027 + if (mgp->ss.tx.req_bytes == NULL) 2118 2028 return 0; 2119 2029 2120 2030 del_timer_sync(&mgp->watchdog_timer); 2121 2031 mgp->running = MYRI10GE_ETH_STOPPING; 2122 - napi_disable(&mgp->napi); 2032 + napi_disable(&mgp->ss.napi); 2123 2033 netif_carrier_off(dev); 2124 2034 netif_stop_queue(dev); 2125 2035 old_down_cnt = mgp->down_cnt; ··· 2133 2047 2134 2048 netif_tx_disable(dev); 2135 2049 myri10ge_free_irq(mgp); 2136 - myri10ge_free_rings(dev); 2050 + myri10ge_free_rings(&mgp->ss); 2137 2051 2138 2052 mgp->running = MYRI10GE_ETH_STOPPED; 2139 2053 return 0; ··· 2229 2143 2230 2144 /* 2231 2145 * Transmit a packet. We need to split the packet so that a single 2232 - * segment does not cross myri10ge->tx.boundary, so this makes segment 2146 + * segment does not cross myri10ge->tx_boundary, so this makes segment 2233 2147 * counting tricky. So rather than try to count segments up front, we 2234 2148 * just give up if there are too few segments to hold a reasonably 2235 2149 * fragmented packet currently available. If we run ··· 2240 2154 static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev) 2241 2155 { 2242 2156 struct myri10ge_priv *mgp = netdev_priv(dev); 2157 + struct myri10ge_slice_state *ss; 2243 2158 struct mcp_kreq_ether_send *req; 2244 - struct myri10ge_tx_buf *tx = &mgp->tx; 2159 + struct myri10ge_tx_buf *tx; 2245 2160 struct skb_frag_struct *frag; 2246 2161 dma_addr_t bus; 2247 2162 u32 low; ··· 2253 2166 int cum_len, seglen, boundary, rdma_count; 2254 2167 u8 flags, odd_flag; 2255 2168 2169 + /* always transmit through slot 0 */ 2170 + ss = &mgp->ss; 2171 + tx = &ss->tx; 2256 2172 again: 2257 2173 req = tx->req_list; 2258 2174 avail = tx->mask - 1 - (tx->req - tx->done); ··· 2270 2180 2271 2181 if ((unlikely(avail < max_segments))) { 2272 2182 /* we are out of transmit resources */ 2273 - mgp->stop_queue++; 2183 + tx->stop_queue++; 2274 2184 netif_stop_queue(dev); 2275 2185 return 1; 2276 2186 } ··· 2332 2242 if (skb_padto(skb, ETH_ZLEN)) { 2333 2243 /* The packet is gone, so we must 2334 2244 * return 0 */ 2335 - mgp->stats.tx_dropped += 1; 2245 + ss->stats.tx_dropped += 1; 2336 2246 return 0; 2337 2247 } 2338 2248 /* adjust the len to account for the zero pad ··· 2374 2284 2375 2285 while (1) { 2376 2286 /* Break the SKB or Fragment up into pieces which 2377 - * do not cross mgp->tx.boundary */ 2287 + * do not cross mgp->tx_boundary */ 2378 2288 low = MYRI10GE_LOWPART_TO_U32(bus); 2379 2289 high_swapped = htonl(MYRI10GE_HIGHPART_TO_U32(bus)); 2380 2290 while (len) { ··· 2384 2294 if (unlikely(count == max_segments)) 2385 2295 goto abort_linearize; 2386 2296 2387 - boundary = (low + tx->boundary) & ~(tx->boundary - 1); 2297 + boundary = 2298 + (low + mgp->tx_boundary) & ~(mgp->tx_boundary - 1); 2388 2299 seglen = boundary - low; 2389 2300 if (seglen > len) 2390 2301 seglen = len; ··· 2469 2378 myri10ge_submit_req_wc(tx, tx->req_list, count); 2470 2379 tx->pkt_start++; 2471 2380 if ((avail - count) < MXGEFW_MAX_SEND_DESC) { 2472 - mgp->stop_queue++; 2381 + tx->stop_queue++; 2473 2382 netif_stop_queue(dev); 2474 2383 } 2475 2384 dev->trans_start = jiffies; ··· 2511 2420 if (skb_linearize(skb)) 2512 2421 goto drop; 2513 2422 2514 - mgp->tx_linearized++; 2423 + 
tx->linearized++; 2515 2424 goto again; 2516 2425 2517 2426 drop: 2518 2427 dev_kfree_skb_any(skb); 2519 - mgp->stats.tx_dropped += 1; 2428 + ss->stats.tx_dropped += 1; 2520 2429 return 0; 2521 2430 2522 2431 } ··· 2524 2433 static int myri10ge_sw_tso(struct sk_buff *skb, struct net_device *dev) 2525 2434 { 2526 2435 struct sk_buff *segs, *curr; 2527 - struct myri10ge_priv *mgp = dev->priv; 2436 + struct myri10ge_priv *mgp = netdev_priv(dev); 2528 2437 int status; 2529 2438 2530 2439 segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6); ··· 2564 2473 2565 2474 static void myri10ge_set_multicast_list(struct net_device *dev) 2566 2475 { 2476 + struct myri10ge_priv *mgp = netdev_priv(dev); 2567 2477 struct myri10ge_cmd cmd; 2568 - struct myri10ge_priv *mgp; 2569 2478 struct dev_mc_list *mc_list; 2570 2479 __be32 data[2] = { 0, 0 }; 2571 2480 int err; 2572 2481 DECLARE_MAC_BUF(mac); 2573 2482 2574 - mgp = netdev_priv(dev); 2575 2483 /* can be called from atomic contexts, 2576 2484 * pass 1 to force atomicity in myri10ge_send_cmd() */ 2577 2485 myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1); ··· 2706 2616 ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4; 2707 2617 if (ext_type != PCI_EXP_TYPE_ROOT_PORT) { 2708 2618 if (myri10ge_ecrc_enable > 1) { 2709 - struct pci_dev *old_bridge = bridge; 2619 + struct pci_dev *prev_bridge, *old_bridge = bridge; 2710 2620 2711 2621 /* Walk the hierarchy up to the root port 2712 2622 * where ECRC has to be enabled */ 2713 2623 do { 2624 + prev_bridge = bridge; 2714 2625 bridge = bridge->bus->self; 2715 - if (!bridge) { 2626 + if (!bridge || prev_bridge == bridge) { 2716 2627 dev_err(dev, 2717 2628 "Failed to find root port" 2718 2629 " to force ECRC\n"); ··· 2772 2681 * already been enabled, then it must use a firmware image which works 2773 2682 * around unaligned completion packets (myri10ge_ethp_z8e.dat), and it 2774 2683 * should also ensure that it never gives the device a Read-DMA which is 2775 - * larger than 2KB by setting the tx.boundary to 2KB. If ECRC is 2684 + * larger than 2KB by setting the tx_boundary to 2KB. If ECRC is 2776 2685 * enabled, then the driver should use the aligned (myri10ge_eth_z8e.dat) 2777 - * firmware image, and set tx.boundary to 4KB. 2686 + * firmware image, and set tx_boundary to 4KB. 2778 2687 */ 2779 2688 2780 2689 static void myri10ge_firmware_probe(struct myri10ge_priv *mgp) ··· 2783 2692 struct device *dev = &pdev->dev; 2784 2693 int status; 2785 2694 2786 - mgp->tx.boundary = 4096; 2695 + mgp->tx_boundary = 4096; 2787 2696 /* 2788 2697 * Verify the max read request size was set to 4KB 2789 2698 * before trying the test with 4KB. 
··· 2795 2704 } 2796 2705 if (status != 4096) { 2797 2706 dev_warn(dev, "Max Read Request size != 4096 (%d)\n", status); 2798 - mgp->tx.boundary = 2048; 2707 + mgp->tx_boundary = 2048; 2799 2708 } 2800 2709 /* 2801 2710 * load the optimized firmware (which assumes aligned PCIe ··· 2828 2737 "Please install up to date fw\n"); 2829 2738 abort: 2830 2739 /* fall back to using the unaligned firmware */ 2831 - mgp->tx.boundary = 2048; 2740 + mgp->tx_boundary = 2048; 2832 2741 mgp->fw_name = myri10ge_fw_unaligned; 2833 2742 2834 2743 } ··· 2849 2758 if (link_width < 8) { 2850 2759 dev_info(&mgp->pdev->dev, "PCIE x%d Link\n", 2851 2760 link_width); 2852 - mgp->tx.boundary = 4096; 2761 + mgp->tx_boundary = 4096; 2853 2762 mgp->fw_name = myri10ge_fw_aligned; 2854 2763 } else { 2855 2764 myri10ge_firmware_probe(mgp); ··· 2858 2767 if (myri10ge_force_firmware == 1) { 2859 2768 dev_info(&mgp->pdev->dev, 2860 2769 "Assuming aligned completions (forced)\n"); 2861 - mgp->tx.boundary = 4096; 2770 + mgp->tx_boundary = 4096; 2862 2771 mgp->fw_name = myri10ge_fw_aligned; 2863 2772 } else { 2864 2773 dev_info(&mgp->pdev->dev, 2865 2774 "Assuming unaligned completions (forced)\n"); 2866 - mgp->tx.boundary = 2048; 2775 + mgp->tx_boundary = 2048; 2867 2776 mgp->fw_name = myri10ge_fw_unaligned; 2868 2777 } 2869 2778 } ··· 2980 2889 { 2981 2890 struct myri10ge_priv *mgp = 2982 2891 container_of(work, struct myri10ge_priv, watchdog_work); 2892 + struct myri10ge_tx_buf *tx; 2983 2893 u32 reboot; 2984 2894 int status; 2985 2895 u16 cmd, vendor; ··· 3030 2938 3031 2939 printk(KERN_ERR "myri10ge: %s: device timeout, resetting\n", 3032 2940 mgp->dev->name); 2941 + tx = &mgp->ss.tx; 3033 2942 printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n", 3034 - mgp->dev->name, mgp->tx.req, mgp->tx.done, 3035 - mgp->tx.pkt_start, mgp->tx.pkt_done, 3036 - (int)ntohl(mgp->fw_stats->send_done_count)); 2943 + mgp->dev->name, tx->req, tx->done, 2944 + tx->pkt_start, tx->pkt_done, 2945 + (int)ntohl(mgp->ss.fw_stats->send_done_count)); 3037 2946 msleep(2000); 3038 2947 printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n", 3039 - mgp->dev->name, mgp->tx.req, mgp->tx.done, 3040 - mgp->tx.pkt_start, mgp->tx.pkt_done, 3041 - (int)ntohl(mgp->fw_stats->send_done_count)); 2948 + mgp->dev->name, tx->req, tx->done, 2949 + tx->pkt_start, tx->pkt_done, 2950 + (int)ntohl(mgp->ss.fw_stats->send_done_count)); 3042 2951 } 3043 2952 rtnl_lock(); 3044 2953 myri10ge_close(mgp->dev); ··· 3062 2969 static void myri10ge_watchdog_timer(unsigned long arg) 3063 2970 { 3064 2971 struct myri10ge_priv *mgp; 2972 + struct myri10ge_slice_state *ss; 3065 2973 u32 rx_pause_cnt; 3066 2974 3067 2975 mgp = (struct myri10ge_priv *)arg; 3068 2976 3069 - if (mgp->rx_small.watchdog_needed) { 3070 - myri10ge_alloc_rx_pages(mgp, &mgp->rx_small, 3071 - mgp->small_bytes + MXGEFW_PAD, 1); 3072 - if (mgp->rx_small.fill_cnt - mgp->rx_small.cnt >= 3073 - myri10ge_fill_thresh) 3074 - mgp->rx_small.watchdog_needed = 0; 3075 - } 3076 - if (mgp->rx_big.watchdog_needed) { 3077 - myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 1); 3078 - if (mgp->rx_big.fill_cnt - mgp->rx_big.cnt >= 3079 - myri10ge_fill_thresh) 3080 - mgp->rx_big.watchdog_needed = 0; 3081 - } 3082 - rx_pause_cnt = ntohl(mgp->fw_stats->dropped_pause); 2977 + rx_pause_cnt = ntohl(mgp->ss.fw_stats->dropped_pause); 3083 2978 3084 - if (mgp->tx.req != mgp->tx.done && 3085 - mgp->tx.done == mgp->watchdog_tx_done && 3086 - mgp->watchdog_tx_req != mgp->watchdog_tx_done) { 2979 + ss = &mgp->ss; 2980 + if 
(ss->rx_small.watchdog_needed) { 2981 + myri10ge_alloc_rx_pages(mgp, &ss->rx_small, 2982 + mgp->small_bytes + MXGEFW_PAD, 1); 2983 + if (ss->rx_small.fill_cnt - ss->rx_small.cnt >= 2984 + myri10ge_fill_thresh) 2985 + ss->rx_small.watchdog_needed = 0; 2986 + } 2987 + if (ss->rx_big.watchdog_needed) { 2988 + myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 1); 2989 + if (ss->rx_big.fill_cnt - ss->rx_big.cnt >= 2990 + myri10ge_fill_thresh) 2991 + ss->rx_big.watchdog_needed = 0; 2992 + } 2993 + 2994 + if (ss->tx.req != ss->tx.done && 2995 + ss->tx.done == ss->watchdog_tx_done && 2996 + ss->watchdog_tx_req != ss->watchdog_tx_done) { 3087 2997 /* nic seems like it might be stuck.. */ 3088 2998 if (rx_pause_cnt != mgp->watchdog_pause) { 3089 2999 if (net_ratelimit()) ··· 3101 3005 /* rearm timer */ 3102 3006 mod_timer(&mgp->watchdog_timer, 3103 3007 jiffies + myri10ge_watchdog_timeout * HZ); 3104 - mgp->watchdog_tx_done = mgp->tx.done; 3105 - mgp->watchdog_tx_req = mgp->tx.req; 3008 + ss->watchdog_tx_done = ss->tx.done; 3009 + ss->watchdog_tx_req = ss->tx.req; 3106 3010 mgp->watchdog_pause = rx_pause_cnt; 3107 3011 } 3108 3012 ··· 3126 3030 3127 3031 mgp = netdev_priv(netdev); 3128 3032 mgp->dev = netdev; 3129 - netif_napi_add(netdev, &mgp->napi, myri10ge_poll, myri10ge_napi_weight); 3033 + netif_napi_add(netdev, &mgp->ss.napi, myri10ge_poll, myri10ge_napi_weight); 3130 3034 mgp->pdev = pdev; 3131 3035 mgp->csum_flag = MXGEFW_FLAGS_CKSUM; 3132 3036 mgp->pause = myri10ge_flow_control; ··· 3172 3076 if (mgp->cmd == NULL) 3173 3077 goto abort_with_netdev; 3174 3078 3175 - mgp->fw_stats = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->fw_stats), 3176 - &mgp->fw_stats_bus, GFP_KERNEL); 3177 - if (mgp->fw_stats == NULL) 3079 + mgp->ss.fw_stats = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats), 3080 + &mgp->ss.fw_stats_bus, GFP_KERNEL); 3081 + if (mgp->ss.fw_stats == NULL) 3178 3082 goto abort_with_cmd; 3179 3083 3180 3084 mgp->board_span = pci_resource_len(pdev, 0); ··· 3214 3118 netdev->dev_addr[i] = mgp->mac_addr[i]; 3215 3119 3216 3120 /* allocate rx done ring */ 3217 - bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry); 3218 - mgp->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes, 3219 - &mgp->rx_done.bus, GFP_KERNEL); 3220 - if (mgp->rx_done.entry == NULL) 3121 + bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry); 3122 + mgp->ss.rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes, 3123 + &mgp->ss.rx_done.bus, GFP_KERNEL); 3124 + if (mgp->ss.rx_done.entry == NULL) 3221 3125 goto abort_with_ioremap; 3222 - memset(mgp->rx_done.entry, 0, bytes); 3126 + memset(mgp->ss.rx_done.entry, 0, bytes); 3223 3127 3224 3128 myri10ge_select_firmware(mgp); 3225 3129 ··· 3279 3183 } 3280 3184 dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n", 3281 3185 (mgp->msi_enabled ? "MSI" : "xPIC"), 3282 - netdev->irq, mgp->tx.boundary, mgp->fw_name, 3186 + netdev->irq, mgp->tx_boundary, mgp->fw_name, 3283 3187 (mgp->wc_enabled ? 
"Enabled" : "Disabled")); 3284 3188 3285 3189 return 0; ··· 3291 3195 myri10ge_dummy_rdma(mgp, 0); 3292 3196 3293 3197 abort_with_rx_done: 3294 - bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry); 3198 + bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry); 3295 3199 dma_free_coherent(&pdev->dev, bytes, 3296 - mgp->rx_done.entry, mgp->rx_done.bus); 3200 + mgp->ss.rx_done.entry, mgp->ss.rx_done.bus); 3297 3201 3298 3202 abort_with_ioremap: 3299 3203 iounmap(mgp->sram); ··· 3303 3207 if (mgp->mtrr >= 0) 3304 3208 mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); 3305 3209 #endif 3306 - dma_free_coherent(&pdev->dev, sizeof(*mgp->fw_stats), 3307 - mgp->fw_stats, mgp->fw_stats_bus); 3210 + dma_free_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats), 3211 + mgp->ss.fw_stats, mgp->ss.fw_stats_bus); 3308 3212 3309 3213 abort_with_cmd: 3310 3214 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), ··· 3342 3246 /* avoid a memory leak */ 3343 3247 pci_restore_state(pdev); 3344 3248 3345 - bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry); 3249 + bytes = mgp->max_intr_slots * sizeof(*mgp->ss.rx_done.entry); 3346 3250 dma_free_coherent(&pdev->dev, bytes, 3347 - mgp->rx_done.entry, mgp->rx_done.bus); 3251 + mgp->ss.rx_done.entry, mgp->ss.rx_done.bus); 3348 3252 3349 3253 iounmap(mgp->sram); 3350 3254 ··· 3352 3256 if (mgp->mtrr >= 0) 3353 3257 mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); 3354 3258 #endif 3355 - dma_free_coherent(&pdev->dev, sizeof(*mgp->fw_stats), 3356 - mgp->fw_stats, mgp->fw_stats_bus); 3259 + dma_free_coherent(&pdev->dev, sizeof(*mgp->ss.fw_stats), 3260 + mgp->ss.fw_stats, mgp->ss.fw_stats_bus); 3357 3261 3358 3262 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), 3359 3263 mgp->cmd, mgp->cmd_bus);
+50 -6
drivers/net/myri10ge/myri10ge_mcp.h
··· 10 10 __be32 low; 11 11 }; 12 12 13 - /* 4 Bytes. 8 Bytes for NDIS drivers. */ 13 + /* 4 Bytes */ 14 14 struct mcp_slot { 15 15 __sum16 checksum; 16 16 __be16 length; ··· 144 144 * a power of 2 number of entries. */ 145 145 146 146 MXGEFW_CMD_SET_INTRQ_SIZE, /* in bytes */ 147 + #define MXGEFW_CMD_SET_INTRQ_SIZE_FLAG_NO_STRICT_SIZE_CHECK (1 << 31) 147 148 148 149 /* command to bring ethernet interface up. Above parameters 149 150 * (plus mtu & mac address) must have been exchanged prior ··· 222 221 MXGEFW_CMD_GET_MAX_RSS_QUEUES, 223 222 MXGEFW_CMD_ENABLE_RSS_QUEUES, 224 223 /* data0 = number of slices n (0, 1, ..., n-1) to enable 225 - * data1 = interrupt mode. 0=share one INTx/MSI, 1=use one MSI-X per queue. 224 + * data1 = interrupt mode. 225 + * 0=share one INTx/MSI, 1=use one MSI-X per queue. 226 226 * If all queues share one interrupt, the driver must have set 227 227 * RSS_SHARED_INTERRUPT_DMA before enabling queues. 228 228 */ 229 + #define MXGEFW_SLICE_INTR_MODE_SHARED 0 230 + #define MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE 1 231 + 229 232 MXGEFW_CMD_GET_RSS_SHARED_INTERRUPT_MASK_OFFSET, 230 233 MXGEFW_CMD_SET_RSS_SHARED_INTERRUPT_DMA, 231 234 /* data0, data1 = bus address lsw, msw */ ··· 246 241 * 0: disable rss. nic does not distribute receive packets. 247 242 * 1: enable rss. nic distributes receive packets among queues. 248 243 * data1 = hash type 249 - * 1: IPV4 250 - * 2: TCP_IPV4 251 - * 3: IPV4 | TCP_IPV4 244 + * 1: IPV4 (required by RSS) 245 + * 2: TCP_IPV4 (required by RSS) 246 + * 3: IPV4 | TCP_IPV4 (required by RSS) 247 + * 4: source port 252 248 */ 249 + #define MXGEFW_RSS_HASH_TYPE_IPV4 0x1 250 + #define MXGEFW_RSS_HASH_TYPE_TCP_IPV4 0x2 251 + #define MXGEFW_RSS_HASH_TYPE_SRC_PORT 0x4 253 252 254 253 MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE, 255 254 /* Return data = the max. size of the entire headers of a IPv6 TSO packet. ··· 269 260 * 0: Linux/FreeBSD style (NIC default) 270 261 * 1: NDIS/NetBSD style 271 262 */ 263 + #define MXGEFW_TSO_MODE_LINUX 0 264 + #define MXGEFW_TSO_MODE_NDIS 1 272 265 273 266 MXGEFW_CMD_MDIO_READ, 274 267 /* data0 = dev_addr (PMA/PMD or PCS ...), data1 = register/addr */ ··· 297 286 /* Return data = NIC memory offset of mcp_vpump_public_global */ 298 287 MXGEFW_CMD_RESET_VPUMP, 299 288 /* Resets the VPUMP state */ 289 + 290 + MXGEFW_CMD_SET_RSS_MCP_SLOT_TYPE, 291 + /* data0 = mcp_slot type to use. 
292 + * 0 = the default 4B mcp_slot 293 + * 1 = 8B mcp_slot_8 294 + */ 295 + #define MXGEFW_RSS_MCP_SLOT_TYPE_MIN 0 296 + #define MXGEFW_RSS_MCP_SLOT_TYPE_WITH_HASH 1 297 + 298 + MXGEFW_CMD_SET_THROTTLE_FACTOR, 299 + /* set the throttle factor for ethp_z8e 300 + * data0 = throttle_factor 301 + * throttle_factor = 256 * pcie-raw-speed / tx_speed 302 + * tx_speed = 256 * pcie-raw-speed / throttle_factor 303 + * 304 + * For PCI-E x8: pcie-raw-speed == 16Gb/s 305 + * For PCI-E x4: pcie-raw-speed == 8Gb/s 306 + * 307 + * ex1: throttle_factor == 0x1a0 (416), tx_speed == 1.23GB/s == 9.846 Gb/s 308 + * ex2: throttle_factor == 0x200 (512), tx_speed == 1.0GB/s == 8 Gb/s 309 + * 310 + * with tx_boundary == 2048, max-throttle-factor == 8191 => min-speed == 500Mb/s 311 + * with tx_boundary == 4096, max-throttle-factor == 4095 => min-speed == 1Gb/s 312 + */ 313 + 314 + MXGEFW_CMD_VPUMP_UP, 315 + /* Allocates VPump Connection, Send Request and Zero copy buffer address tables */ 316 + MXGEFW_CMD_GET_VPUMP_CLK, 317 + /* Get the lanai clock */ 318 + 319 + MXGEFW_CMD_GET_DCA_OFFSET, 320 + /* offset of dca control for WDMAs */ 300 321 }; 301 322 302 323 enum myri10ge_mcp_cmd_status { ··· 345 302 MXGEFW_CMD_ERROR_UNALIGNED, 346 303 MXGEFW_CMD_ERROR_NO_MDIO, 347 304 MXGEFW_CMD_ERROR_XFP_FAILURE, 348 - MXGEFW_CMD_ERROR_XFP_ABSENT 305 + MXGEFW_CMD_ERROR_XFP_ABSENT, 306 + MXGEFW_CMD_ERROR_BAD_PCIE_LINK 349 307 }; 350 308 351 309 #define MXGEFW_OLD_IRQ_DATA_LEN 40
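The throttle-factor comment above reduces to tx_speed = 256 * pcie_raw_speed / throttle_factor. A quick check of the two examples it gives, assuming the stated raw link rate for PCI-E x8 (16 Gb/s):

    #include <stdio.h>

    int main(void)
    {
            double pcie_raw_gbps = 16.0;            /* PCI-E x8 raw speed */
            int factors[] = { 0x1a0, 0x200 };       /* 416 and 512 */
            int i;

            for (i = 0; i < 2; i++)
                    printf("throttle 0x%x -> %.3f Gb/s\n", factors[i],
                           256.0 * pcie_raw_gbps / factors[i]);
            /* prints ~9.846 Gb/s and 8.000 Gb/s, matching ex1 and ex2 above */
            return 0;
    }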
+14 -25
drivers/net/myri10ge/myri10ge_mcp_gen_header.h
··· 1 1 #ifndef __MYRI10GE_MCP_GEN_HEADER_H__ 2 2 #define __MYRI10GE_MCP_GEN_HEADER_H__ 3 3 4 - /* this file define a standard header used as a first entry point to 5 - * exchange information between firmware/driver and driver. The 6 - * header structure can be anywhere in the mcp. It will usually be in 7 - * the .data section, because some fields needs to be initialized at 8 - * compile time. 9 - * The 32bit word at offset MX_HEADER_PTR_OFFSET in the mcp must 10 - * contains the location of the header. 11 - * 12 - * Typically a MCP will start with the following: 13 - * .text 14 - * .space 52 ! to help catch MEMORY_INT errors 15 - * bt start ! jump to real code 16 - * nop 17 - * .long _gen_mcp_header 18 - * 19 - * The source will have a definition like: 20 - * 21 - * mcp_gen_header_t gen_mcp_header = { 22 - * .header_length = sizeof(mcp_gen_header_t), 23 - * .mcp_type = MCP_TYPE_XXX, 24 - * .version = "something $Id: mcp_gen_header.h,v 1.2 2006/05/13 10:04:35 bgoglin Exp $", 25 - * .mcp_globals = (unsigned)&Globals 26 - * }; 27 - */ 28 4 29 5 #define MCP_HEADER_PTR_OFFSET 0x3c 30 6 ··· 8 32 #define MCP_TYPE_PCIE 0x70636965 /* "PCIE" pcie-only MCP */ 9 33 #define MCP_TYPE_ETH 0x45544820 /* "ETH " */ 10 34 #define MCP_TYPE_MCP0 0x4d435030 /* "MCP0" */ 35 + #define MCP_TYPE_DFLT 0x20202020 /* " " */ 11 36 12 37 struct mcp_gen_header { 13 38 /* the first 4 fields are filled at compile time */ 14 39 unsigned header_length; 15 40 __be32 mcp_type; 16 41 char version[128]; 17 - unsigned mcp_globals; /* pointer to mcp-type specific structure */ 42 + unsigned mcp_private; /* pointer to mcp-type specific structure */ 18 43 19 44 /* filled by the MCP at run-time */ 20 45 unsigned sram_size; ··· 30 53 * 31 54 * Never remove any field. Keep everything naturally align. 32 55 */ 56 + 57 + /* Specifies if the running mcp is mcp0, 1, or 2. */ 58 + unsigned char mcp_index; 59 + unsigned char disable_rabbit; 60 + unsigned char unaligned_tlp; 61 + unsigned char pad1; 62 + unsigned counters_addr; 63 + unsigned copy_block_info; /* for small mcps loaded with "lload -d" */ 64 + unsigned short handoff_id_major; /* must be equal */ 65 + unsigned short handoff_id_caps; /* bitfield: new mcp must have superset */ 66 + unsigned msix_table_addr; /* start address of msix table in firmware */ 67 + /* 8 */ 33 68 }; 34 69 35 70 #endif /* __MYRI10GE_MCP_GEN_HEADER_H__ */
+21 -32
drivers/net/niu.c
··· 865 865 return 0; 866 866 } 867 867 868 - 869 868 static int link_status_10g_serdes(struct niu *np, int *link_up_p) 870 869 { 871 870 unsigned long flags; ··· 898 899 *link_up_p = link_up; 899 900 return 0; 900 901 } 901 - 902 902 903 903 static int link_status_1g_rgmii(struct niu *np, int *link_up_p) 904 904 { ··· 954 956 *link_up_p = link_up; 955 957 return err; 956 958 } 957 - 958 959 959 960 static int bcm8704_reset(struct niu *np) 960 961 { ··· 1354 1357 return 0; 1355 1358 } 1356 1359 1357 - 1358 - 1359 1360 static int xcvr_init_1g_rgmii(struct niu *np) 1360 1361 { 1361 1362 int err; ··· 1413 1418 1414 1419 return 0; 1415 1420 } 1416 - 1417 1421 1418 1422 static int mii_init_common(struct niu *np) 1419 1423 { ··· 7002 7008 return 0; 7003 7009 } 7004 7010 7005 - /* niu board models have a trailing dash version incremented 7006 - * with HW rev change. Need to ingnore the dash version while 7007 - * checking for match 7008 - * 7009 - * for example, for the 10G card the current vpd.board_model 7010 - * is 501-5283-04, of which -04 is the dash version and have 7011 - * to be ignored 7012 - */ 7013 - static int niu_board_model_match(struct niu *np, const char *model) 7014 - { 7015 - return !strncmp(np->vpd.board_model, model, strlen(model)); 7016 - } 7017 - 7018 7011 static int niu_pci_vpd_get_nports(struct niu *np) 7019 7012 { 7020 7013 int ports = 0; 7021 7014 7022 - if ((niu_board_model_match(np, NIU_QGC_LP_BM_STR)) || 7023 - (niu_board_model_match(np, NIU_QGC_PEM_BM_STR)) || 7024 - (niu_board_model_match(np, NIU_ALONSO_BM_STR))) { 7015 + if ((!strcmp(np->vpd.model, NIU_QGC_LP_MDL_STR)) || 7016 + (!strcmp(np->vpd.model, NIU_QGC_PEM_MDL_STR)) || 7017 + (!strcmp(np->vpd.model, NIU_MARAMBA_MDL_STR)) || 7018 + (!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) || 7019 + (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR))) { 7025 7020 ports = 4; 7026 - } else if ((niu_board_model_match(np, NIU_2XGF_LP_BM_STR)) || 7027 - (niu_board_model_match(np, NIU_2XGF_PEM_BM_STR)) || 7028 - (niu_board_model_match(np, NIU_FOXXY_BM_STR)) || 7029 - (niu_board_model_match(np, NIU_2XGF_MRVL_BM_STR))) { 7021 + } else if ((!strcmp(np->vpd.model, NIU_2XGF_LP_MDL_STR)) || 7022 + (!strcmp(np->vpd.model, NIU_2XGF_PEM_MDL_STR)) || 7023 + (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) || 7024 + (!strcmp(np->vpd.model, NIU_2XGF_MRVL_MDL_STR))) { 7030 7025 ports = 2; 7031 7026 } 7032 7027 ··· 7036 7053 return; 7037 7054 } 7038 7055 7039 - if (!strcmp(np->vpd.model, "SUNW,CP3220") || 7040 - !strcmp(np->vpd.model, "SUNW,CP3260")) { 7056 + if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || 7057 + !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { 7041 7058 np->flags |= NIU_FLAGS_10G; 7042 7059 np->flags &= ~NIU_FLAGS_FIBER; 7043 7060 np->flags |= NIU_FLAGS_XCVR_SERDES; ··· 7048 7065 } 7049 7066 if (np->flags & NIU_FLAGS_10G) 7050 7067 np->mac_xcvr = MAC_XCVR_XPCS; 7051 - } else if (niu_board_model_match(np, NIU_FOXXY_BM_STR)) { 7068 + } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { 7052 7069 np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | 7053 7070 NIU_FLAGS_HOTPLUG_PHY); 7054 7071 } else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { ··· 7524 7541 u32 val; 7525 7542 int err; 7526 7543 7527 - if (!strcmp(np->vpd.model, "SUNW,CP3220") || 7528 - !strcmp(np->vpd.model, "SUNW,CP3260")) { 7544 + if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) || 7545 + !strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) { 7529 7546 num_10g = 0; 7530 7547 num_1g = 2; 7531 7548 parent->plat_type = PLAT_TYPE_ATCA_CP3220; ··· 7534 7551 phy_encode(PORT_TYPE_1G, 1) | 
7535 7552 phy_encode(PORT_TYPE_1G, 2) | 7536 7553 phy_encode(PORT_TYPE_1G, 3)); 7537 - } else if (niu_board_model_match(np, NIU_FOXXY_BM_STR)) { 7554 + } else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) { 7538 7555 num_10g = 2; 7539 7556 num_1g = 0; 7540 7557 parent->num_ports = 2; ··· 7929 7946 struct device_node *dp; 7930 7947 const char *phy_type; 7931 7948 const u8 *mac_addr; 7949 + const char *model; 7932 7950 int prop_len; 7933 7951 7934 7952 if (np->parent->plat_type == PLAT_TYPE_NIU) ··· 7983 7999 } 7984 8000 7985 8001 memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); 8002 + 8003 + model = of_get_property(dp, "model", &prop_len); 8004 + 8005 + if (model) 8006 + strcpy(np->vpd.model, model); 7986 8007 7987 8008 return 0; 7988 8009 #else
+9
drivers/net/niu.h
··· 2946 2946 #define NIU_ALONSO_BM_STR "373-0202" 2947 2947 #define NIU_FOXXY_BM_STR "501-7961" 2948 2948 #define NIU_2XGF_MRVL_BM_STR "SK-6E82" 2949 + #define NIU_QGC_LP_MDL_STR "SUNW,pcie-qgc" 2950 + #define NIU_2XGF_LP_MDL_STR "SUNW,pcie-2xgf" 2951 + #define NIU_QGC_PEM_MDL_STR "SUNW,pcie-qgc-pem" 2952 + #define NIU_2XGF_PEM_MDL_STR "SUNW,pcie-2xgf-pem" 2953 + #define NIU_ALONSO_MDL_STR "SUNW,CP3220" 2954 + #define NIU_KIMI_MDL_STR "SUNW,CP3260" 2955 + #define NIU_MARAMBA_MDL_STR "SUNW,pcie-neptune" 2956 + #define NIU_FOXXY_MDL_STR "SUNW,pcie-rfem" 2957 + #define NIU_2XGF_MRVL_MDL_STR "SysKonnect,pcie-2xgf" 2949 2958 2950 2959 #define NIU_VPD_MIN_MAJOR 3 2951 2960 #define NIU_VPD_MIN_MINOR 4
+1
drivers/net/ppp_generic.c
··· 2458 2458 2459 2459 out3: 2460 2460 atomic_dec(&ppp_unit_count); 2461 + unregister_netdev(dev); 2461 2462 out2: 2462 2463 mutex_unlock(&all_ppp_mutex); 2463 2464 free_netdev(dev);
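The one-line ppp_generic fix restores the required teardown order: once a net_device has been registered it must be unregistered before free_netdev() may run. In sketch form (error handling trimmed, labels illustrative):

    err = register_netdev(dev);
    if (err)
            goto out_free;          /* never registered: freeing alone is fine */
    ...
    out_unregister:
            unregister_netdev(dev); /* undo the registration first */
    out_free:
            free_netdev(dev);       /* only then release the device */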
+10 -3
drivers/net/pppol2tp.c
··· 1621 1621 end: 1622 1622 release_sock(sk); 1623 1623 1624 - if (error != 0) 1625 - PRINTK(session ? session->debug : -1, PPPOL2TP_MSG_CONTROL, KERN_WARNING, 1626 - "%s: connect failed: %d\n", session->name, error); 1624 + if (error != 0) { 1625 + if (session) 1626 + PRINTK(session->debug, 1627 + PPPOL2TP_MSG_CONTROL, KERN_WARNING, 1628 + "%s: connect failed: %d\n", 1629 + session->name, error); 1630 + else 1631 + PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_WARNING, 1632 + "connect failed: %d\n", error); 1633 + } 1627 1634 1628 1635 return error; 1629 1636 }
+2
drivers/net/ps3_gelic_wireless.c
··· 2474 2474 2475 2475 pr_debug("%s: <-\n", __func__); 2476 2476 2477 + free_page((unsigned long)wl->buf); 2478 + 2477 2479 pr_debug("%s: destroy queues\n", __func__); 2478 2480 destroy_workqueue(wl->eurus_cmd_queue); 2479 2481 destroy_workqueue(wl->event_queue);
+2 -2
drivers/net/sfc/Makefile
··· 1 1 sfc-y += efx.o falcon.o tx.o rx.o falcon_xmac.o \ 2 - i2c-direct.o ethtool.o xfp_phy.o mdio_10g.o \ 3 - tenxpress.o boards.o sfe4001.o 2 + i2c-direct.o selftest.o ethtool.o xfp_phy.o \ 3 + mdio_10g.o tenxpress.o boards.o sfe4001.o 4 4 5 5 obj-$(CONFIG_SFC) += sfc.o
+2
drivers/net/sfc/boards.h
··· 22 22 extern int efx_set_board_info(struct efx_nic *efx, u16 revision_info); 23 23 extern int sfe4001_poweron(struct efx_nic *efx); 24 24 extern void sfe4001_poweroff(struct efx_nic *efx); 25 + /* Are we putting the PHY into flash config mode */ 26 + extern unsigned int sfe4001_phy_flash_cfg; 25 27 26 28 #endif
+3 -1
drivers/net/sfc/efx.c
··· 1873 1873 tx_queue->queue = i; 1874 1874 tx_queue->buffer = NULL; 1875 1875 tx_queue->channel = &efx->channel[0]; /* for safety */ 1876 + tx_queue->tso_headers_free = NULL; 1876 1877 } 1877 1878 for (i = 0; i < EFX_MAX_RX_QUEUES; i++) { 1878 1879 rx_queue = &efx->rx_queue[i]; ··· 2072 2071 net_dev = alloc_etherdev(sizeof(*efx)); 2073 2072 if (!net_dev) 2074 2073 return -ENOMEM; 2075 - net_dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA; 2074 + net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG | 2075 + NETIF_F_HIGHDMA | NETIF_F_TSO); 2076 2076 if (lro) 2077 2077 net_dev->features |= NETIF_F_LRO; 2078 2078 efx = net_dev->priv;
+49
drivers/net/sfc/enum.h
··· 10 10 #ifndef EFX_ENUM_H 11 11 #define EFX_ENUM_H 12 12 13 + /** 14 + * enum efx_loopback_mode - loopback modes 15 + * @LOOPBACK_NONE: no loopback 16 + * @LOOPBACK_XGMII: loopback within MAC at XGMII level 17 + * @LOOPBACK_XGXS: loopback within MAC at XGXS level 18 + * @LOOPBACK_XAUI: loopback within MAC at XAUI level 19 + * @LOOPBACK_PHYXS: loopback within PHY at PHYXS level 20 + * @LOOPBACK_PCS: loopback within PHY at PCS level 21 + * @LOOPBACK_PMAPMD: loopback within PHY at PMAPMD level 22 + * @LOOPBACK_NETWORK: reflecting loopback (even further than furthest!) 23 + */ 24 + /* Please keep in order and up-to-date w.r.t the following two #defines */ 25 + enum efx_loopback_mode { 26 + LOOPBACK_NONE = 0, 27 + LOOPBACK_MAC = 1, 28 + LOOPBACK_XGMII = 2, 29 + LOOPBACK_XGXS = 3, 30 + LOOPBACK_XAUI = 4, 31 + LOOPBACK_PHY = 5, 32 + LOOPBACK_PHYXS = 6, 33 + LOOPBACK_PCS = 7, 34 + LOOPBACK_PMAPMD = 8, 35 + LOOPBACK_NETWORK = 9, 36 + LOOPBACK_MAX 37 + }; 38 + 39 + #define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD 40 + 41 + extern const char *efx_loopback_mode_names[]; 42 + #define LOOPBACK_MODE_NAME(mode) \ 43 + STRING_TABLE_LOOKUP(mode, efx_loopback_mode) 44 + #define LOOPBACK_MODE(efx) \ 45 + LOOPBACK_MODE_NAME(efx->loopback_mode) 46 + 47 + /* These loopbacks occur within the controller */ 48 + #define LOOPBACKS_10G_INTERNAL ((1 << LOOPBACK_XGMII)| \ 49 + (1 << LOOPBACK_XGXS) | \ 50 + (1 << LOOPBACK_XAUI)) 51 + 52 + #define LOOPBACK_MASK(_efx) \ 53 + (1 << (_efx)->loopback_mode) 54 + 55 + #define LOOPBACK_INTERNAL(_efx) \ 56 + ((LOOPBACKS_10G_INTERNAL & LOOPBACK_MASK(_efx)) ? 1 : 0) 57 + 58 + #define LOOPBACK_OUT_OF(_from, _to, _mask) \ 59 + (((LOOPBACK_MASK(_from) & (_mask)) && \ 60 + ((LOOPBACK_MASK(_to) & (_mask)) == 0)) ? 1 : 0) 61 + 13 62 /*****************************************************************************/ 14 63 15 64 /**
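As a worked evaluation of the new loopback macros: for a NIC with loopback_mode == LOOPBACK_XAUI (value 4),

    LOOPBACK_MASK(efx)       = 1 << 4                        = 0x10
    LOOPBACKS_10G_INTERNAL   = (1 << 2) | (1 << 3) | (1 << 4) = 0x1c
    LOOPBACK_INTERNAL(efx)   = (0x1c & 0x10) ? 1 : 0          = 1

so XAUI (like XGMII and XGXS) counts as a loopback internal to the controller, whereas e.g. LOOPBACK_PHYXS (1 << 6 = 0x40) does not.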
+258 -1
drivers/net/sfc/ethtool.c
··· 12 12 #include <linux/ethtool.h> 13 13 #include <linux/rtnetlink.h> 14 14 #include "net_driver.h" 15 + #include "selftest.h" 15 16 #include "efx.h" 16 17 #include "ethtool.h" 17 18 #include "falcon.h" 18 19 #include "gmii.h" 19 20 #include "mac.h" 21 + 22 + const char *efx_loopback_mode_names[] = { 23 + [LOOPBACK_NONE] = "NONE", 24 + [LOOPBACK_MAC] = "MAC", 25 + [LOOPBACK_XGMII] = "XGMII", 26 + [LOOPBACK_XGXS] = "XGXS", 27 + [LOOPBACK_XAUI] = "XAUI", 28 + [LOOPBACK_PHY] = "PHY", 29 + [LOOPBACK_PHYXS] = "PHY(XS)", 30 + [LOOPBACK_PCS] = "PHY(PCS)", 31 + [LOOPBACK_PMAPMD] = "PHY(PMAPMD)", 32 + [LOOPBACK_NETWORK] = "NETWORK", 33 + }; 20 34 21 35 static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable); 22 36 ··· 231 217 strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info)); 232 218 } 233 219 220 + /** 221 + * efx_fill_test - fill in an individual self-test entry 222 + * @test_index: Index of the test 223 + * @strings: Ethtool strings, or %NULL 224 + * @data: Ethtool test results, or %NULL 225 + * @test: Pointer to test result (used only if data != %NULL) 226 + * @unit_format: Unit name format (e.g. "channel\%d") 227 + * @unit_id: Unit id (e.g. 0 for "channel0") 228 + * @test_format: Test name format (e.g. "loopback.\%s.tx.sent") 229 + * @test_id: Test id (e.g. "PHY" for "loopback.PHY.tx_sent") 230 + * 231 + * Fill in an individual self-test entry. 232 + */ 233 + static void efx_fill_test(unsigned int test_index, 234 + struct ethtool_string *strings, u64 *data, 235 + int *test, const char *unit_format, int unit_id, 236 + const char *test_format, const char *test_id) 237 + { 238 + struct ethtool_string unit_str, test_str; 239 + 240 + /* Fill data value, if applicable */ 241 + if (data) 242 + data[test_index] = *test; 243 + 244 + /* Fill string, if applicable */ 245 + if (strings) { 246 + snprintf(unit_str.name, sizeof(unit_str.name), 247 + unit_format, unit_id); 248 + snprintf(test_str.name, sizeof(test_str.name), 249 + test_format, test_id); 250 + snprintf(strings[test_index].name, 251 + sizeof(strings[test_index].name), 252 + "%-9s%-17s", unit_str.name, test_str.name); 253 + } 254 + } 255 + 256 + #define EFX_PORT_NAME "port%d", 0 257 + #define EFX_CHANNEL_NAME(_channel) "channel%d", _channel->channel 258 + #define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue 259 + #define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue 260 + #define EFX_LOOPBACK_NAME(_mode, _counter) \ 261 + "loopback.%s." 
_counter, LOOPBACK_MODE_NAME(mode) 262 + 263 + /** 264 + * efx_fill_loopback_test - fill in a block of loopback self-test entries 265 + * @efx: Efx NIC 266 + * @lb_tests: Efx loopback self-test results structure 267 + * @mode: Loopback test mode 268 + * @test_index: Starting index of the test 269 + * @strings: Ethtool strings, or %NULL 270 + * @data: Ethtool test results, or %NULL 271 + */ 272 + static int efx_fill_loopback_test(struct efx_nic *efx, 273 + struct efx_loopback_self_tests *lb_tests, 274 + enum efx_loopback_mode mode, 275 + unsigned int test_index, 276 + struct ethtool_string *strings, u64 *data) 277 + { 278 + struct efx_tx_queue *tx_queue; 279 + 280 + efx_for_each_tx_queue(tx_queue, efx) { 281 + efx_fill_test(test_index++, strings, data, 282 + &lb_tests->tx_sent[tx_queue->queue], 283 + EFX_TX_QUEUE_NAME(tx_queue), 284 + EFX_LOOPBACK_NAME(mode, "tx_sent")); 285 + efx_fill_test(test_index++, strings, data, 286 + &lb_tests->tx_done[tx_queue->queue], 287 + EFX_TX_QUEUE_NAME(tx_queue), 288 + EFX_LOOPBACK_NAME(mode, "tx_done")); 289 + } 290 + efx_fill_test(test_index++, strings, data, 291 + &lb_tests->rx_good, 292 + EFX_PORT_NAME, 293 + EFX_LOOPBACK_NAME(mode, "rx_good")); 294 + efx_fill_test(test_index++, strings, data, 295 + &lb_tests->rx_bad, 296 + EFX_PORT_NAME, 297 + EFX_LOOPBACK_NAME(mode, "rx_bad")); 298 + 299 + return test_index; 300 + } 301 + 302 + /** 303 + * efx_ethtool_fill_self_tests - get self-test details 304 + * @efx: Efx NIC 305 + * @tests: Efx self-test results structure, or %NULL 306 + * @strings: Ethtool strings, or %NULL 307 + * @data: Ethtool test results, or %NULL 308 + */ 309 + static int efx_ethtool_fill_self_tests(struct efx_nic *efx, 310 + struct efx_self_tests *tests, 311 + struct ethtool_string *strings, 312 + u64 *data) 313 + { 314 + struct efx_channel *channel; 315 + unsigned int n = 0; 316 + enum efx_loopback_mode mode; 317 + 318 + /* Interrupt */ 319 + efx_fill_test(n++, strings, data, &tests->interrupt, 320 + "core", 0, "interrupt", NULL); 321 + 322 + /* Event queues */ 323 + efx_for_each_channel(channel, efx) { 324 + efx_fill_test(n++, strings, data, 325 + &tests->eventq_dma[channel->channel], 326 + EFX_CHANNEL_NAME(channel), 327 + "eventq.dma", NULL); 328 + efx_fill_test(n++, strings, data, 329 + &tests->eventq_int[channel->channel], 330 + EFX_CHANNEL_NAME(channel), 331 + "eventq.int", NULL); 332 + efx_fill_test(n++, strings, data, 333 + &tests->eventq_poll[channel->channel], 334 + EFX_CHANNEL_NAME(channel), 335 + "eventq.poll", NULL); 336 + } 337 + 338 + /* PHY presence */ 339 + efx_fill_test(n++, strings, data, &tests->phy_ok, 340 + EFX_PORT_NAME, "phy_ok", NULL); 341 + 342 + /* Loopback tests */ 343 + efx_fill_test(n++, strings, data, &tests->loopback_speed, 344 + EFX_PORT_NAME, "loopback.speed", NULL); 345 + efx_fill_test(n++, strings, data, &tests->loopback_full_duplex, 346 + EFX_PORT_NAME, "loopback.full_duplex", NULL); 347 + for (mode = LOOPBACK_NONE; mode < LOOPBACK_TEST_MAX; mode++) { 348 + if (!(efx->loopback_modes & (1 << mode))) 349 + continue; 350 + n = efx_fill_loopback_test(efx, 351 + &tests->loopback[mode], mode, n, 352 + strings, data); 353 + } 354 + 355 + return n; 356 + } 357 + 234 358 static int efx_ethtool_get_stats_count(struct net_device *net_dev) 235 359 { 236 360 return EFX_ETHTOOL_NUM_STATS; 237 361 } 238 362 363 + static int efx_ethtool_self_test_count(struct net_device *net_dev) 364 + { 365 + struct efx_nic *efx = net_dev->priv; 366 + 367 + return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL); 368 + } 369 + 239 
370 static void efx_ethtool_get_strings(struct net_device *net_dev, 240 371 u32 string_set, u8 *strings) 241 372 { 373 + struct efx_nic *efx = net_dev->priv; 242 374 struct ethtool_string *ethtool_strings = 243 375 (struct ethtool_string *)strings; 244 376 int i; 245 377 246 - if (string_set == ETH_SS_STATS) 378 + switch (string_set) { 379 + case ETH_SS_STATS: 247 380 for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) 248 381 strncpy(ethtool_strings[i].name, 249 382 efx_ethtool_stats[i].name, 250 383 sizeof(ethtool_strings[i].name)); 384 + break; 385 + case ETH_SS_TEST: 386 + efx_ethtool_fill_self_tests(efx, NULL, 387 + ethtool_strings, NULL); 388 + break; 389 + default: 390 + /* No other string sets */ 391 + break; 392 + } 251 393 } 252 394 253 395 static void efx_ethtool_get_stats(struct net_device *net_dev, ··· 442 272 } 443 273 } 444 274 275 + static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable) 276 + { 277 + int rc; 278 + 279 + /* Our TSO requires TX checksumming, so force TX checksumming 280 + * on when TSO is enabled. 281 + */ 282 + if (enable) { 283 + rc = efx_ethtool_set_tx_csum(net_dev, 1); 284 + if (rc) 285 + return rc; 286 + } 287 + 288 + return ethtool_op_set_tso(net_dev, enable); 289 + } 290 + 445 291 static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable) 446 292 { 447 293 struct efx_nic *efx = net_dev->priv; ··· 468 282 return rc; 469 283 470 284 efx_flush_queues(efx); 285 + 286 + /* Our TSO requires TX checksumming, so disable TSO when 287 + * checksumming is disabled 288 + */ 289 + if (!enable) { 290 + rc = efx_ethtool_set_tso(net_dev, 0); 291 + if (rc) 292 + return rc; 293 + } 471 294 472 295 return 0; 473 296 } ··· 498 303 struct efx_nic *efx = net_dev->priv; 499 304 500 305 return efx->rx_checksum_enabled; 306 + } 307 + 308 + static void efx_ethtool_self_test(struct net_device *net_dev, 309 + struct ethtool_test *test, u64 *data) 310 + { 311 + struct efx_nic *efx = net_dev->priv; 312 + struct efx_self_tests efx_tests; 313 + int offline, already_up; 314 + int rc; 315 + 316 + ASSERT_RTNL(); 317 + if (efx->state != STATE_RUNNING) { 318 + rc = -EIO; 319 + goto fail1; 320 + } 321 + 322 + /* We need rx buffers and interrupts. */ 323 + already_up = (efx->net_dev->flags & IFF_UP); 324 + if (!already_up) { 325 + rc = dev_open(efx->net_dev); 326 + if (rc) { 327 + EFX_ERR(efx, "failed opening device.\n"); 328 + goto fail2; 329 + } 330 + } 331 + 332 + memset(&efx_tests, 0, sizeof(efx_tests)); 333 + offline = (test->flags & ETH_TEST_FL_OFFLINE); 334 + 335 + /* Perform online self tests first */ 336 + rc = efx_online_test(efx, &efx_tests); 337 + if (rc) 338 + goto out; 339 + 340 + /* Perform offline tests only if online tests passed */ 341 + if (offline) { 342 + /* Stop the kernel from sending packets during the test. */ 343 + efx_stop_queue(efx); 344 + rc = efx_flush_queues(efx); 345 + if (!rc) 346 + rc = efx_offline_test(efx, &efx_tests, 347 + efx->loopback_modes); 348 + efx_wake_queue(efx); 349 + } 350 + 351 + out: 352 + if (!already_up) 353 + dev_close(efx->net_dev); 354 + 355 + EFX_LOG(efx, "%s all %sline self-tests\n", 356 + rc == 0 ? "passed" : "failed", offline ? 
"off" : "on"); 357 + 358 + fail2: 359 + fail1: 360 + /* Fill ethtool results structures */ 361 + efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data); 362 + if (rc) 363 + test->flags |= ETH_TEST_FL_FAILED; 501 364 } 502 365 503 366 /* Restart autonegotiation */ ··· 704 451 .set_tx_csum = efx_ethtool_set_tx_csum, 705 452 .get_sg = ethtool_op_get_sg, 706 453 .set_sg = ethtool_op_set_sg, 454 + .get_tso = ethtool_op_get_tso, 455 + .set_tso = efx_ethtool_set_tso, 707 456 .get_flags = ethtool_op_get_flags, 708 457 .set_flags = ethtool_op_set_flags, 458 + .self_test_count = efx_ethtool_self_test_count, 459 + .self_test = efx_ethtool_self_test, 709 460 .get_strings = efx_ethtool_get_strings, 710 461 .phys_id = efx_ethtool_phys_id, 711 462 .get_stats_count = efx_ethtool_get_stats_count,
+5 -3
drivers/net/sfc/falcon.c
··· 1129 1129 case RX_RECOVERY_EV_DECODE: 1130 1130 EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. " 1131 1131 "Resetting.\n", channel->channel); 1132 + atomic_inc(&efx->rx_reset); 1132 1133 efx_schedule_reset(efx, 1133 1134 EFX_WORKAROUND_6555(efx) ? 1134 1135 RESET_TYPE_RX_RECOVERY : ··· 1732 1731 efx_oword_t temp; 1733 1732 int count; 1734 1733 1735 - if (FALCON_REV(efx) < FALCON_REV_B0) 1734 + if ((FALCON_REV(efx) < FALCON_REV_B0) || 1735 + (efx->loopback_mode != LOOPBACK_NONE)) 1736 1736 return; 1737 1737 1738 1738 falcon_read(efx, &temp, MAC0_CTRL_REG_KER); ··· 2093 2091 efx->phy_type); 2094 2092 return -1; 2095 2093 } 2094 + 2095 + efx->loopback_modes = LOOPBACKS_10G_INTERNAL | efx->phy_op->loopbacks; 2096 2096 return 0; 2097 2097 } 2098 2098 ··· 2472 2468 fail5: 2473 2469 falcon_free_buffer(efx, &efx->irq_status); 2474 2470 fail4: 2475 - /* fall-thru */ 2476 2471 fail3: 2477 2472 if (nic_data->pci_dev2) { 2478 2473 pci_dev_put(nic_data->pci_dev2); 2479 2474 nic_data->pci_dev2 = NULL; 2480 2475 } 2481 2476 fail2: 2482 - /* fall-thru */ 2483 2477 fail1: 2484 2478 kfree(efx->nic_data); 2485 2479 return rc;
+15 -1
drivers/net/sfc/falcon_hwdefs.h
··· 636 636 #define XX_HIDRVA_WIDTH 1 637 637 #define XX_LODRVA_LBN 8 638 638 #define XX_LODRVA_WIDTH 1 639 + #define XX_LPBKD_LBN 3 640 + #define XX_LPBKD_WIDTH 1 641 + #define XX_LPBKC_LBN 2 642 + #define XX_LPBKC_WIDTH 1 643 + #define XX_LPBKB_LBN 1 644 + #define XX_LPBKB_WIDTH 1 645 + #define XX_LPBKA_LBN 0 646 + #define XX_LPBKA_WIDTH 1 639 647 640 648 #define XX_TXDRV_CTL_REG_MAC 0x12 641 649 #define XX_DEQD_LBN 28 ··· 664 656 #define XX_DTXA_WIDTH 4 665 657 666 658 /* XAUI XGXS core status register */ 667 - #define XX_FORCE_SIG_DECODE_FORCED 0xff 668 659 #define XX_CORE_STAT_REG_MAC 0x16 660 + #define XX_FORCE_SIG_LBN 24 661 + #define XX_FORCE_SIG_WIDTH 8 662 + #define XX_FORCE_SIG_DECODE_FORCED 0xff 663 + #define XX_XGXS_LB_EN_LBN 23 664 + #define XX_XGXS_LB_EN_WIDTH 1 665 + #define XX_XGMII_LB_EN_LBN 22 666 + #define XX_XGMII_LB_EN_WIDTH 1 669 667 #define XX_ALIGN_DONE_LBN 20 670 668 #define XX_ALIGN_DONE_WIDTH 1 671 669 #define XX_SYNC_STAT_LBN 16
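The new XX_LPBK{A,B,C,D}, XX_XGXS_LB_EN and XX_XGMII_LB_EN definitions follow the header's usual _LBN/_WIDTH convention: the least significant bit number of a field and its width in bits. As a generic sketch of what such a pair turns into (the driver itself goes through its EFX_SET_DWORD_FIELD() macro, whose definition is not in this hunk):

#include <stdint.h>

/* Generic sketch only: update a WIDTH-bit field at bit LBN of a 32-bit
 * register image.  Not the driver's implementation. */
static inline uint32_t set_field(uint32_t reg, unsigned int lbn,
				 unsigned int width, uint32_t value)
{
	uint32_t mask = (width >= 32 ? ~0u : ((1u << width) - 1)) << lbn;

	return (reg & ~mask) | ((value << lbn) & mask);
}

/* e.g. enabling XGXS loopback would amount to
 * reg = set_field(reg, XX_XGXS_LB_EN_LBN, XX_XGXS_LB_EN_WIDTH, 1); */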
+76 -6
drivers/net/sfc/falcon_xmac.c
··· 32 32 (FALCON_XMAC_REGBANK + ((mac_reg) * FALCON_XMAC_REG_SIZE)) 33 33 34 34 void falcon_xmac_writel(struct efx_nic *efx, 35 - efx_dword_t *value, unsigned int mac_reg) 35 + efx_dword_t *value, unsigned int mac_reg) 36 36 { 37 37 efx_oword_t temp; 38 38 ··· 68 68 return 0; 69 69 udelay(10); 70 70 } 71 + 72 + /* This often fails when DSP is disabled, ignore it */ 73 + if (sfe4001_phy_flash_cfg != 0) 74 + return 0; 71 75 72 76 EFX_ERR(efx, "timed out waiting for XMAC core reset\n"); 73 77 return -ETIMEDOUT; ··· 227 223 /* The ISR latches, so clear it and re-read */ 228 224 falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0); 229 225 falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0); 230 - 226 + 231 227 if (EFX_DWORD_FIELD(reg, XM_LCLFLT) || 232 228 EFX_DWORD_FIELD(reg, XM_RMTFLT)) { 233 229 EFX_INFO(efx, "MGT_INT: "EFX_DWORD_FMT"\n", EFX_DWORD_VAL(reg)); ··· 241 237 { 242 238 efx_dword_t reg; 243 239 244 - if (FALCON_REV(efx) < FALCON_REV_B0) 240 + if ((FALCON_REV(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx)) 245 241 return; 246 242 247 243 /* Flush the ISR */ ··· 287 283 { 288 284 efx_dword_t reg; 289 285 int align_done, sync_status, link_ok = 0; 286 + 287 + if (LOOPBACK_INTERNAL(efx)) 288 + return 1; 290 289 291 290 /* Read link status */ 292 291 falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC); ··· 381 374 falcon_xmac_writel(efx, &reg, XM_ADR_HI_REG_MAC); 382 375 } 383 376 377 + static void falcon_reconfigure_xgxs_core(struct efx_nic *efx) 378 + { 379 + efx_dword_t reg; 380 + int xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS) ? 1 : 0; 381 + int xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI) ? 1 : 0; 382 + int xgmii_loopback = 383 + (efx->loopback_mode == LOOPBACK_XGMII) ? 1 : 0; 384 + 385 + /* XGXS block is flaky and will need to be reset if moving 386 + * into our out of XGMII, XGXS or XAUI loopbacks. */ 387 + if (EFX_WORKAROUND_5147(efx)) { 388 + int old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback; 389 + int reset_xgxs; 390 + 391 + falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC); 392 + old_xgxs_loopback = EFX_DWORD_FIELD(reg, XX_XGXS_LB_EN); 393 + old_xgmii_loopback = EFX_DWORD_FIELD(reg, XX_XGMII_LB_EN); 394 + 395 + falcon_xmac_readl(efx, &reg, XX_SD_CTL_REG_MAC); 396 + old_xaui_loopback = EFX_DWORD_FIELD(reg, XX_LPBKA); 397 + 398 + /* The PHY driver may have turned XAUI off */ 399 + reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) || 400 + (xaui_loopback != old_xaui_loopback) || 401 + (xgmii_loopback != old_xgmii_loopback)); 402 + if (reset_xgxs) { 403 + falcon_xmac_readl(efx, &reg, XX_PWR_RST_REG_MAC); 404 + EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 1); 405 + EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 1); 406 + falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC); 407 + udelay(1); 408 + EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 0); 409 + EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 0); 410 + falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC); 411 + udelay(1); 412 + } 413 + } 414 + 415 + falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC); 416 + EFX_SET_DWORD_FIELD(reg, XX_FORCE_SIG, 417 + (xgxs_loopback || xaui_loopback) ? 
418 + XX_FORCE_SIG_DECODE_FORCED : 0); 419 + EFX_SET_DWORD_FIELD(reg, XX_XGXS_LB_EN, xgxs_loopback); 420 + EFX_SET_DWORD_FIELD(reg, XX_XGMII_LB_EN, xgmii_loopback); 421 + falcon_xmac_writel(efx, &reg, XX_CORE_STAT_REG_MAC); 422 + 423 + falcon_xmac_readl(efx, &reg, XX_SD_CTL_REG_MAC); 424 + EFX_SET_DWORD_FIELD(reg, XX_LPBKD, xaui_loopback); 425 + EFX_SET_DWORD_FIELD(reg, XX_LPBKC, xaui_loopback); 426 + EFX_SET_DWORD_FIELD(reg, XX_LPBKB, xaui_loopback); 427 + EFX_SET_DWORD_FIELD(reg, XX_LPBKA, xaui_loopback); 428 + falcon_xmac_writel(efx, &reg, XX_SD_CTL_REG_MAC); 429 + } 430 + 431 + 384 432 /* Try and bring the Falcon side of the Falcon-Phy XAUI link fails 385 433 * to come back up. Bash it until it comes back up */ 386 434 static int falcon_check_xaui_link_up(struct efx_nic *efx) ··· 444 382 tries = EFX_WORKAROUND_5147(efx) ? 5 : 1; 445 383 max_tries = tries; 446 384 447 - if (efx->phy_type == PHY_TYPE_NONE) 385 + if ((efx->loopback_mode == LOOPBACK_NETWORK) || 386 + (efx->phy_type == PHY_TYPE_NONE)) 448 387 return 0; 449 388 450 389 while (tries) { ··· 471 408 falcon_mask_status_intr(efx, 0); 472 409 473 410 falcon_deconfigure_mac_wrapper(efx); 411 + 412 + efx->tx_disabled = LOOPBACK_INTERNAL(efx); 474 413 efx->phy_op->reconfigure(efx); 414 + 415 + falcon_reconfigure_xgxs_core(efx); 475 416 falcon_reconfigure_xmac_core(efx); 417 + 476 418 falcon_reconfigure_mac_wrapper(efx); 477 419 478 420 /* Ensure XAUI link is up */ ··· 559 491 (mac_stats->rx_bytes - mac_stats->rx_good_bytes); 560 492 } 561 493 562 - #define EFX_XAUI_RETRAIN_MAX 8 563 - 564 494 int falcon_check_xmac(struct efx_nic *efx) 565 495 { 566 496 unsigned xaui_link_ok; 567 497 int rc; 498 + 499 + if ((efx->loopback_mode == LOOPBACK_NETWORK) || 500 + (efx->phy_type == PHY_TYPE_NONE)) 501 + return 0; 568 502 569 503 falcon_mask_status_intr(efx, 0); 570 504 xaui_link_ok = falcon_xaui_link_ok(efx);
+78
drivers/net/sfc/mdio_10g.c
··· 44 44 int status; 45 45 int phy_id = efx->mii.phy_id; 46 46 47 + if (LOOPBACK_INTERNAL(efx)) 48 + return 0; 49 + 47 50 /* Read MMD STATUS2 to check it is responding. */ 48 51 status = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_STAT2); 49 52 if (((status >> MDIO_MMDREG_STAT2_PRESENT_LBN) & ··· 167 164 int mmd = 0; 168 165 int good; 169 166 167 + /* If the port is in loopback, then we should only consider a subset 168 + * of mmd's */ 169 + if (LOOPBACK_INTERNAL(efx)) 170 + return 1; 171 + else if (efx->loopback_mode == LOOPBACK_NETWORK) 172 + return 0; 173 + else if (efx->loopback_mode == LOOPBACK_PHYXS) 174 + mmd_mask &= ~(MDIO_MMDREG_DEVS0_PHYXS | 175 + MDIO_MMDREG_DEVS0_PCS | 176 + MDIO_MMDREG_DEVS0_PMAPMD); 177 + else if (efx->loopback_mode == LOOPBACK_PCS) 178 + mmd_mask &= ~(MDIO_MMDREG_DEVS0_PCS | 179 + MDIO_MMDREG_DEVS0_PMAPMD); 180 + else if (efx->loopback_mode == LOOPBACK_PMAPMD) 181 + mmd_mask &= ~MDIO_MMDREG_DEVS0_PMAPMD; 182 + 170 183 while (mmd_mask) { 171 184 if (mmd_mask & 1) { 172 185 /* Double reads because link state is latched, and a ··· 199 180 mmd++; 200 181 } 201 182 return ok; 183 + } 184 + 185 + void mdio_clause45_transmit_disable(struct efx_nic *efx) 186 + { 187 + int phy_id = efx->mii.phy_id; 188 + int ctrl1, ctrl2; 189 + 190 + ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD, 191 + MDIO_MMDREG_TXDIS); 192 + if (efx->tx_disabled) 193 + ctrl2 |= (1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN); 194 + else 195 + ctrl1 &= ~(1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN); 196 + if (ctrl1 != ctrl2) 197 + mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD, 198 + MDIO_MMDREG_TXDIS, ctrl2); 199 + } 200 + 201 + void mdio_clause45_phy_reconfigure(struct efx_nic *efx) 202 + { 203 + int phy_id = efx->mii.phy_id; 204 + int ctrl1, ctrl2; 205 + 206 + /* Handle (with debouncing) PMA/PMD loopback */ 207 + ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD, 208 + MDIO_MMDREG_CTRL1); 209 + 210 + if (efx->loopback_mode == LOOPBACK_PMAPMD) 211 + ctrl2 |= (1 << MDIO_PMAPMD_CTRL1_LBACK_LBN); 212 + else 213 + ctrl2 &= ~(1 << MDIO_PMAPMD_CTRL1_LBACK_LBN); 214 + 215 + if (ctrl1 != ctrl2) 216 + mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD, 217 + MDIO_MMDREG_CTRL1, ctrl2); 218 + 219 + /* Handle (with debouncing) PCS loopback */ 220 + ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PCS, 221 + MDIO_MMDREG_CTRL1); 222 + if (efx->loopback_mode == LOOPBACK_PCS) 223 + ctrl2 |= (1 << MDIO_MMDREG_CTRL1_LBACK_LBN); 224 + else 225 + ctrl2 &= ~(1 << MDIO_MMDREG_CTRL1_LBACK_LBN); 226 + 227 + if (ctrl1 != ctrl2) 228 + mdio_clause45_write(efx, phy_id, MDIO_MMD_PCS, 229 + MDIO_MMDREG_CTRL1, ctrl2); 230 + 231 + /* Handle (with debouncing) PHYXS network loopback */ 232 + ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS, 233 + MDIO_MMDREG_CTRL1); 234 + if (efx->loopback_mode == LOOPBACK_NETWORK) 235 + ctrl2 |= (1 << MDIO_MMDREG_CTRL1_LBACK_LBN); 236 + else 237 + ctrl2 &= ~(1 << MDIO_MMDREG_CTRL1_LBACK_LBN); 238 + 239 + if (ctrl1 != ctrl2) 240 + mdio_clause45_write(efx, phy_id, MDIO_MMD_PHYXS, 241 + MDIO_MMDREG_CTRL1, ctrl2); 202 242 } 203 243 204 244 /**
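mdio_clause45_phy_reconfigure() above applies the same debounced read-modify-write pattern to the PMA/PMD, PCS and PHYXS control registers: read once, compute the new value, and only issue the MDIO write when something actually changed. A stripped-down, self-contained sketch of that pattern (mdio_read()/mdio_write() here are stand-ins for the driver's clause 45 accessors, not real kernel API):

#include <stdio.h>

/* Stand-ins for the clause 45 accessors; illustrative only. */
static int fake_reg;
static int mdio_read(void) { return fake_reg; }
static void mdio_write(int val) { fake_reg = val; printf("wrote 0x%x\n", val); }

/* Debounced update of one loopback control bit, in the style of
 * mdio_clause45_phy_reconfigure(): write only when the value changes. */
static void update_loopback_bit(int enable, int lback_lbn)
{
	int ctrl1, ctrl2;

	ctrl1 = ctrl2 = mdio_read();
	if (enable)
		ctrl2 |= (1 << lback_lbn);
	else
		ctrl2 &= ~(1 << lback_lbn);

	if (ctrl1 != ctrl2)
		mdio_write(ctrl2);
}

int main(void)
{
	update_loopback_bit(1, 14);	/* bit set: one MDIO write */
	update_loopback_bit(1, 14);	/* no change: no write */
	return 0;
}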
+23 -1
drivers/net/sfc/mdio_10g.h
··· 44 44 #define MDIO_MMDREG_DEVS1 (6) 45 45 #define MDIO_MMDREG_CTRL2 (7) 46 46 #define MDIO_MMDREG_STAT2 (8) 47 + #define MDIO_MMDREG_TXDIS (9) 47 48 48 49 /* Bits in MMDREG_CTRL1 */ 49 50 /* Reset */ 50 51 #define MDIO_MMDREG_CTRL1_RESET_LBN (15) 51 52 #define MDIO_MMDREG_CTRL1_RESET_WIDTH (1) 53 + /* Loopback */ 54 + /* Loopback bit for WIS, PCS, PHYSX and DTEXS */ 55 + #define MDIO_MMDREG_CTRL1_LBACK_LBN (14) 56 + #define MDIO_MMDREG_CTRL1_LBACK_WIDTH (1) 52 57 53 58 /* Bits in MMDREG_STAT1 */ 54 59 #define MDIO_MMDREG_STAT1_FAULT_LBN (7) ··· 61 56 /* Link state */ 62 57 #define MDIO_MMDREG_STAT1_LINK_LBN (2) 63 58 #define MDIO_MMDREG_STAT1_LINK_WIDTH (1) 59 + /* Low power ability */ 60 + #define MDIO_MMDREG_STAT1_LPABLE_LBN (1) 61 + #define MDIO_MMDREG_STAT1_LPABLE_WIDTH (1) 64 62 65 63 /* Bits in ID reg */ 66 64 #define MDIO_ID_REV(_id32) (_id32 & 0xf) ··· 84 76 #define MDIO_MMDREG_STAT2_PRESENT_LBN (14) 85 77 #define MDIO_MMDREG_STAT2_PRESENT_WIDTH (2) 86 78 79 + /* Bits in MMDREG_TXDIS */ 80 + #define MDIO_MMDREG_TXDIS_GLOBAL_LBN (0) 81 + #define MDIO_MMDREG_TXDIS_GLOBAL_WIDTH (1) 82 + 83 + /* MMD-specific bits, ordered by MMD, then register */ 84 + #define MDIO_PMAPMD_CTRL1_LBACK_LBN (0) 85 + #define MDIO_PMAPMD_CTRL1_LBACK_WIDTH (1) 86 + 87 87 /* PMA type (4 bits) */ 88 88 #define MDIO_PMAPMD_CTRL2_10G_CX4 (0x0) 89 89 #define MDIO_PMAPMD_CTRL2_10G_EW (0x1) ··· 111 95 #define MDIO_PMAPMD_CTRL2_10_BT (0xf) 112 96 #define MDIO_PMAPMD_CTRL2_TYPE_MASK (0xf) 113 97 114 - /* /\* PHY XGXS lane state *\/ */ 98 + /* PHY XGXS lane state */ 115 99 #define MDIO_PHYXS_LANE_STATE (0x18) 116 100 #define MDIO_PHYXS_LANE_ALIGNED_LBN (12) 117 101 ··· 232 216 /* Check the link status of specified mmds in bit mask */ 233 217 extern int mdio_clause45_links_ok(struct efx_nic *efx, 234 218 unsigned int mmd_mask); 219 + 220 + /* Generic transmit disable support though PMAPMD */ 221 + extern void mdio_clause45_transmit_disable(struct efx_nic *efx); 222 + 223 + /* Generic part of reconfigure: set/clear loopback bits */ 224 + extern void mdio_clause45_phy_reconfigure(struct efx_nic *efx); 235 225 236 226 /* Read (some of) the PHY settings over MDIO */ 237 227 extern void mdio_clause45_get_settings(struct efx_nic *efx,
+27 -1
drivers/net/sfc/net_driver.h
··· 134 134 * Set only on the final fragment of a packet; %NULL for all other 135 135 * fragments. When this fragment completes, then we can free this 136 136 * skb. 137 + * @tsoh: The associated TSO header structure, or %NULL if this 138 + * buffer is not a TSO header. 137 139 * @dma_addr: DMA address of the fragment. 138 140 * @len: Length of this fragment. 139 141 * This field is zero when the queue slot is empty. ··· 146 144 */ 147 145 struct efx_tx_buffer { 148 146 const struct sk_buff *skb; 147 + struct efx_tso_header *tsoh; 149 148 dma_addr_t dma_addr; 150 149 unsigned short len; 151 150 unsigned char continuation; ··· 190 187 * variable indicates that the queue is full. This is to 191 188 * avoid cache-line ping-pong between the xmit path and the 192 189 * completion path. 190 + * @tso_headers_free: A list of TSO headers allocated for this TX queue 191 + * that are not in use, and so available for new TSO sends. The list 192 + * is protected by the TX queue lock. 193 + * @tso_bursts: Number of times TSO xmit invoked by kernel 194 + * @tso_long_headers: Number of packets with headers too long for standard 195 + * blocks 196 + * @tso_packets: Number of packets via the TSO xmit path 193 197 */ 194 198 struct efx_tx_queue { 195 199 /* Members which don't change on the fast path */ ··· 216 206 unsigned int insert_count ____cacheline_aligned_in_smp; 217 207 unsigned int write_count; 218 208 unsigned int old_read_count; 209 + struct efx_tso_header *tso_headers_free; 210 + unsigned int tso_bursts; 211 + unsigned int tso_long_headers; 212 + unsigned int tso_packets; 219 213 }; 220 214 221 215 /** ··· 448 434 struct efx_blinker blinker; 449 435 }; 450 436 437 + #define STRING_TABLE_LOOKUP(val, member) \ 438 + member ## _names[val] 439 + 451 440 enum efx_int_mode { 452 441 /* Be careful if altering to correct macro below */ 453 442 EFX_INT_MODE_MSIX = 0, ··· 523 506 * @check_hw: Check hardware 524 507 * @reset_xaui: Reset XAUI side of PHY for (software sequenced reset) 525 508 * @mmds: MMD presence mask 509 + * @loopbacks: Supported loopback modes mask 526 510 */ 527 511 struct efx_phy_operations { 528 512 int (*init) (struct efx_nic *efx); ··· 533 515 int (*check_hw) (struct efx_nic *efx); 534 516 void (*reset_xaui) (struct efx_nic *efx); 535 517 int mmds; 518 + unsigned loopbacks; 536 519 }; 537 520 538 521 /* ··· 672 653 * @phy_op: PHY interface 673 654 * @phy_data: PHY private data (including PHY-specific stats) 674 655 * @mii: PHY interface 675 - * @phy_powered: PHY power state 676 656 * @tx_disabled: PHY transmitter turned off 677 657 * @link_up: Link status 678 658 * @link_options: Link options (MII/GMII format) ··· 680 662 * @multicast_hash: Multicast hash table 681 663 * @flow_control: Flow control flags - separate RX/TX so can't use link_options 682 664 * @reconfigure_work: work item for dealing with PHY events 665 + * @loopback_mode: Loopback status 666 + * @loopback_modes: Supported loopback mode bitmask 667 + * @loopback_selftest: Offline self-test private state 683 668 * 684 669 * The @priv field of the corresponding &struct net_device points to 685 670 * this. ··· 742 721 struct efx_phy_operations *phy_op; 743 722 void *phy_data; 744 723 struct mii_if_info mii; 724 + unsigned tx_disabled; 745 725 746 726 int link_up; 747 727 unsigned int link_options; ··· 754 732 struct work_struct reconfigure_work; 755 733 756 734 atomic_t rx_reset; 735 + enum efx_loopback_mode loopback_mode; 736 + unsigned int loopback_modes; 737 + 738 + void *loopback_selftest; 757 739 }; 758 740 759 741 /**
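STRING_TABLE_LOOKUP() above is a token-pasting helper: STRING_TABLE_LOOKUP(mode, efx_loopback_mode) expands to efx_loopback_mode_names[mode], indexing the name table added in ethtool.c. (The LOOPBACK_MODE_NAME()/LOOPBACK_MODE() macros used by the self-test code are not shown in this hunk; they presumably build on this helper.) A tiny self-contained demonstration of the same trick, with a made-up table rather than the driver's:

#include <stdio.h>

/* Same token-pasting trick as STRING_TABLE_LOOKUP() in net_driver.h. */
#define STRING_TABLE_LOOKUP(val, member) member ## _names[val]

static const char *colour_names[] = { "red", "green", "blue" };

int main(void)
{
	/* Expands to colour_names[2] */
	printf("%s\n", STRING_TABLE_LOOKUP(2, colour));
	return 0;
}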
+10 -1
drivers/net/sfc/rx.c
··· 19 19 #include "rx.h" 20 20 #include "efx.h" 21 21 #include "falcon.h" 22 + #include "selftest.h" 22 23 #include "workarounds.h" 23 24 24 25 /* Number of RX descriptors pushed at once. */ ··· 684 683 struct sk_buff *skb; 685 684 int lro = efx->net_dev->features & NETIF_F_LRO; 686 685 686 + /* If we're in loopback test, then pass the packet directly to the 687 + * loopback layer, and free the rx_buf here 688 + */ 689 + if (unlikely(efx->loopback_selftest)) { 690 + efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len); 691 + efx_free_rx_buffer(efx, rx_buf); 692 + goto done; 693 + } 694 + 687 695 if (rx_buf->skb) { 688 696 prefetch(skb_shinfo(rx_buf->skb)); 689 697 ··· 746 736 /* Update allocation strategy method */ 747 737 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; 748 738 749 - /* fall-thru */ 750 739 done: 751 740 efx->net_dev->last_rx = jiffies; 752 741 }
+717
drivers/net/sfc/selftest.c
··· 1 + /**************************************************************************** 2 + * Driver for Solarflare Solarstorm network controllers and boards 3 + * Copyright 2005-2006 Fen Systems Ltd. 4 + * Copyright 2006-2008 Solarflare Communications Inc. 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms of the GNU General Public License version 2 as published 8 + * by the Free Software Foundation, incorporated herein by reference. 9 + */ 10 + 11 + #include <linux/netdevice.h> 12 + #include <linux/module.h> 13 + #include <linux/delay.h> 14 + #include <linux/kernel_stat.h> 15 + #include <linux/pci.h> 16 + #include <linux/ethtool.h> 17 + #include <linux/ip.h> 18 + #include <linux/in.h> 19 + #include <linux/udp.h> 20 + #include <linux/rtnetlink.h> 21 + #include <asm/io.h> 22 + #include "net_driver.h" 23 + #include "ethtool.h" 24 + #include "efx.h" 25 + #include "falcon.h" 26 + #include "selftest.h" 27 + #include "boards.h" 28 + #include "workarounds.h" 29 + #include "mac.h" 30 + 31 + /* 32 + * Loopback test packet structure 33 + * 34 + * The self-test should stress every RSS vector, and unfortunately 35 + * Falcon only performs RSS on TCP/UDP packets. 36 + */ 37 + struct efx_loopback_payload { 38 + struct ethhdr header; 39 + struct iphdr ip; 40 + struct udphdr udp; 41 + __be16 iteration; 42 + const char msg[64]; 43 + } __attribute__ ((packed)); 44 + 45 + /* Loopback test source MAC address */ 46 + static const unsigned char payload_source[ETH_ALEN] = { 47 + 0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b, 48 + }; 49 + 50 + static const char *payload_msg = 51 + "Hello world! This is an Efx loopback test in progress!"; 52 + 53 + /** 54 + * efx_selftest_state - persistent state during a selftest 55 + * @flush: Drop all packets in efx_loopback_rx_packet 56 + * @packet_count: Number of packets being used in this test 57 + * @skbs: An array of skbs transmitted 58 + * @rx_good: RX good packet count 59 + * @rx_bad: RX bad packet count 60 + * @payload: Payload used in tests 61 + */ 62 + struct efx_selftest_state { 63 + int flush; 64 + int packet_count; 65 + struct sk_buff **skbs; 66 + atomic_t rx_good; 67 + atomic_t rx_bad; 68 + struct efx_loopback_payload payload; 69 + }; 70 + 71 + /************************************************************************** 72 + * 73 + * Configurable values 74 + * 75 + **************************************************************************/ 76 + 77 + /* Level of loopback testing 78 + * 79 + * The maximum packet burst length is 16**(n-1), i.e. 80 + * 81 + * - Level 0 : no packets 82 + * - Level 1 : 1 packet 83 + * - Level 2 : 17 packets (1 * 1 packet, 1 * 16 packets) 84 + * - Level 3 : 273 packets (1 * 1 packet, 1 * 16 packet, 1 * 256 packets) 85 + * 86 + */ 87 + static unsigned int loopback_test_level = 3; 88 + 89 + /************************************************************************** 90 + * 91 + * Interrupt and event queue testing 92 + * 93 + **************************************************************************/ 94 + 95 + /* Test generation and receipt of interrupts */ 96 + static int efx_test_interrupts(struct efx_nic *efx, 97 + struct efx_self_tests *tests) 98 + { 99 + struct efx_channel *channel; 100 + 101 + EFX_LOG(efx, "testing interrupts\n"); 102 + tests->interrupt = -1; 103 + 104 + /* Reset interrupt flag */ 105 + efx->last_irq_cpu = -1; 106 + smp_wmb(); 107 + 108 + /* ACK each interrupting event queue. 
Receiving an interrupt due to 109 + * traffic before a test event is raised is considered a pass */ 110 + efx_for_each_channel_with_interrupt(channel, efx) { 111 + if (channel->work_pending) 112 + efx_process_channel_now(channel); 113 + if (efx->last_irq_cpu >= 0) 114 + goto success; 115 + } 116 + 117 + falcon_generate_interrupt(efx); 118 + 119 + /* Wait for arrival of test interrupt. */ 120 + EFX_LOG(efx, "waiting for test interrupt\n"); 121 + schedule_timeout_uninterruptible(HZ / 10); 122 + if (efx->last_irq_cpu >= 0) 123 + goto success; 124 + 125 + EFX_ERR(efx, "timed out waiting for interrupt\n"); 126 + return -ETIMEDOUT; 127 + 128 + success: 129 + EFX_LOG(efx, "test interrupt (mode %d) seen on CPU%d\n", 130 + efx->interrupt_mode, efx->last_irq_cpu); 131 + tests->interrupt = 1; 132 + return 0; 133 + } 134 + 135 + /* Test generation and receipt of non-interrupting events */ 136 + static int efx_test_eventq(struct efx_channel *channel, 137 + struct efx_self_tests *tests) 138 + { 139 + unsigned int magic; 140 + 141 + /* Channel specific code, limited to 20 bits */ 142 + magic = (0x00010150 + channel->channel); 143 + EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n", 144 + channel->channel, magic); 145 + 146 + tests->eventq_dma[channel->channel] = -1; 147 + tests->eventq_int[channel->channel] = 1; /* fake pass */ 148 + tests->eventq_poll[channel->channel] = 1; /* fake pass */ 149 + 150 + /* Reset flag and zero magic word */ 151 + channel->efx->last_irq_cpu = -1; 152 + channel->eventq_magic = 0; 153 + smp_wmb(); 154 + 155 + falcon_generate_test_event(channel, magic); 156 + udelay(1); 157 + 158 + efx_process_channel_now(channel); 159 + if (channel->eventq_magic != magic) { 160 + EFX_ERR(channel->efx, "channel %d failed to see test event\n", 161 + channel->channel); 162 + return -ETIMEDOUT; 163 + } else { 164 + tests->eventq_dma[channel->channel] = 1; 165 + } 166 + 167 + return 0; 168 + } 169 + 170 + /* Test generation and receipt of interrupting events */ 171 + static int efx_test_eventq_irq(struct efx_channel *channel, 172 + struct efx_self_tests *tests) 173 + { 174 + unsigned int magic, count; 175 + 176 + /* Channel specific code, limited to 20 bits */ 177 + magic = (0x00010150 + channel->channel); 178 + EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n", 179 + channel->channel, magic); 180 + 181 + tests->eventq_dma[channel->channel] = -1; 182 + tests->eventq_int[channel->channel] = -1; 183 + tests->eventq_poll[channel->channel] = -1; 184 + 185 + /* Reset flag and zero magic word */ 186 + channel->efx->last_irq_cpu = -1; 187 + channel->eventq_magic = 0; 188 + smp_wmb(); 189 + 190 + falcon_generate_test_event(channel, magic); 191 + 192 + /* Wait for arrival of interrupt */ 193 + count = 0; 194 + do { 195 + schedule_timeout_uninterruptible(HZ / 100); 196 + 197 + if (channel->work_pending) 198 + efx_process_channel_now(channel); 199 + 200 + if (channel->eventq_magic == magic) 201 + goto eventq_ok; 202 + } while (++count < 2); 203 + 204 + EFX_ERR(channel->efx, "channel %d timed out waiting for event queue\n", 205 + channel->channel); 206 + 207 + /* See if interrupt arrived */ 208 + if (channel->efx->last_irq_cpu >= 0) { 209 + EFX_ERR(channel->efx, "channel %d saw interrupt on CPU%d " 210 + "during event queue test\n", channel->channel, 211 + raw_smp_processor_id()); 212 + tests->eventq_int[channel->channel] = 1; 213 + } 214 + 215 + /* Check to see if event was received even if interrupt wasn't */ 216 + efx_process_channel_now(channel); 217 + if 
(channel->eventq_magic == magic) { 218 + EFX_ERR(channel->efx, "channel %d event was generated, but " 219 + "failed to trigger an interrupt\n", channel->channel); 220 + tests->eventq_dma[channel->channel] = 1; 221 + } 222 + 223 + return -ETIMEDOUT; 224 + eventq_ok: 225 + EFX_LOG(channel->efx, "channel %d event queue passed\n", 226 + channel->channel); 227 + tests->eventq_dma[channel->channel] = 1; 228 + tests->eventq_int[channel->channel] = 1; 229 + tests->eventq_poll[channel->channel] = 1; 230 + return 0; 231 + } 232 + 233 + /************************************************************************** 234 + * 235 + * PHY testing 236 + * 237 + **************************************************************************/ 238 + 239 + /* Check PHY presence by reading the PHY ID registers */ 240 + static int efx_test_phy(struct efx_nic *efx, 241 + struct efx_self_tests *tests) 242 + { 243 + u16 physid1, physid2; 244 + struct mii_if_info *mii = &efx->mii; 245 + struct net_device *net_dev = efx->net_dev; 246 + 247 + if (efx->phy_type == PHY_TYPE_NONE) 248 + return 0; 249 + 250 + EFX_LOG(efx, "testing PHY presence\n"); 251 + tests->phy_ok = -1; 252 + 253 + physid1 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID1); 254 + physid2 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID2); 255 + 256 + if ((physid1 != 0x0000) && (physid1 != 0xffff) && 257 + (physid2 != 0x0000) && (physid2 != 0xffff)) { 258 + EFX_LOG(efx, "found MII PHY %d ID 0x%x:%x\n", 259 + mii->phy_id, physid1, physid2); 260 + tests->phy_ok = 1; 261 + return 0; 262 + } 263 + 264 + EFX_ERR(efx, "no MII PHY present with ID %d\n", mii->phy_id); 265 + return -ENODEV; 266 + } 267 + 268 + /************************************************************************** 269 + * 270 + * Loopback testing 271 + * NB Only one loopback test can be executing concurrently. 272 + * 273 + **************************************************************************/ 274 + 275 + /* Loopback test RX callback 276 + * This is called for each received packet during loopback testing. 
277 + */ 278 + void efx_loopback_rx_packet(struct efx_nic *efx, 279 + const char *buf_ptr, int pkt_len) 280 + { 281 + struct efx_selftest_state *state = efx->loopback_selftest; 282 + struct efx_loopback_payload *received; 283 + struct efx_loopback_payload *payload; 284 + 285 + BUG_ON(!buf_ptr); 286 + 287 + /* If we are just flushing, then drop the packet */ 288 + if ((state == NULL) || state->flush) 289 + return; 290 + 291 + payload = &state->payload; 292 + 293 + received = (struct efx_loopback_payload *)(char *) buf_ptr; 294 + received->ip.saddr = payload->ip.saddr; 295 + received->ip.check = payload->ip.check; 296 + 297 + /* Check that header exists */ 298 + if (pkt_len < sizeof(received->header)) { 299 + EFX_ERR(efx, "saw runt RX packet (length %d) in %s loopback " 300 + "test\n", pkt_len, LOOPBACK_MODE(efx)); 301 + goto err; 302 + } 303 + 304 + /* Check that the ethernet header exists */ 305 + if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) { 306 + EFX_ERR(efx, "saw non-loopback RX packet in %s loopback test\n", 307 + LOOPBACK_MODE(efx)); 308 + goto err; 309 + } 310 + 311 + /* Check packet length */ 312 + if (pkt_len != sizeof(*payload)) { 313 + EFX_ERR(efx, "saw incorrect RX packet length %d (wanted %d) in " 314 + "%s loopback test\n", pkt_len, (int)sizeof(*payload), 315 + LOOPBACK_MODE(efx)); 316 + goto err; 317 + } 318 + 319 + /* Check that IP header matches */ 320 + if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) { 321 + EFX_ERR(efx, "saw corrupted IP header in %s loopback test\n", 322 + LOOPBACK_MODE(efx)); 323 + goto err; 324 + } 325 + 326 + /* Check that msg and padding matches */ 327 + if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) { 328 + EFX_ERR(efx, "saw corrupted RX packet in %s loopback test\n", 329 + LOOPBACK_MODE(efx)); 330 + goto err; 331 + } 332 + 333 + /* Check that iteration matches */ 334 + if (received->iteration != payload->iteration) { 335 + EFX_ERR(efx, "saw RX packet from iteration %d (wanted %d) in " 336 + "%s loopback test\n", ntohs(received->iteration), 337 + ntohs(payload->iteration), LOOPBACK_MODE(efx)); 338 + goto err; 339 + } 340 + 341 + /* Increase correct RX count */ 342 + EFX_TRACE(efx, "got loopback RX in %s loopback test\n", 343 + LOOPBACK_MODE(efx)); 344 + 345 + atomic_inc(&state->rx_good); 346 + return; 347 + 348 + err: 349 + #ifdef EFX_ENABLE_DEBUG 350 + if (atomic_read(&state->rx_bad) == 0) { 351 + EFX_ERR(efx, "received packet:\n"); 352 + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, 353 + buf_ptr, pkt_len, 0); 354 + EFX_ERR(efx, "expected packet:\n"); 355 + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, 356 + &state->payload, sizeof(state->payload), 0); 357 + } 358 + #endif 359 + atomic_inc(&state->rx_bad); 360 + } 361 + 362 + /* Initialise an efx_selftest_state for a new iteration */ 363 + static void efx_iterate_state(struct efx_nic *efx) 364 + { 365 + struct efx_selftest_state *state = efx->loopback_selftest; 366 + struct net_device *net_dev = efx->net_dev; 367 + struct efx_loopback_payload *payload = &state->payload; 368 + 369 + /* Initialise the layerII header */ 370 + memcpy(&payload->header.h_dest, net_dev->dev_addr, ETH_ALEN); 371 + memcpy(&payload->header.h_source, &payload_source, ETH_ALEN); 372 + payload->header.h_proto = htons(ETH_P_IP); 373 + 374 + /* saddr set later and used as incrementing count */ 375 + payload->ip.daddr = htonl(INADDR_LOOPBACK); 376 + payload->ip.ihl = 5; 377 + payload->ip.check = htons(0xdead); 378 + payload->ip.tot_len = 
htons(sizeof(*payload) - sizeof(struct ethhdr)); 379 + payload->ip.version = IPVERSION; 380 + payload->ip.protocol = IPPROTO_UDP; 381 + 382 + /* Initialise udp header */ 383 + payload->udp.source = 0; 384 + payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) - 385 + sizeof(struct iphdr)); 386 + payload->udp.check = 0; /* checksum ignored */ 387 + 388 + /* Fill out payload */ 389 + payload->iteration = htons(ntohs(payload->iteration) + 1); 390 + memcpy(&payload->msg, payload_msg, sizeof(payload_msg)); 391 + 392 + /* Fill out remaining state members */ 393 + atomic_set(&state->rx_good, 0); 394 + atomic_set(&state->rx_bad, 0); 395 + smp_wmb(); 396 + } 397 + 398 + static int efx_tx_loopback(struct efx_tx_queue *tx_queue) 399 + { 400 + struct efx_nic *efx = tx_queue->efx; 401 + struct efx_selftest_state *state = efx->loopback_selftest; 402 + struct efx_loopback_payload *payload; 403 + struct sk_buff *skb; 404 + int i, rc; 405 + 406 + /* Transmit N copies of buffer */ 407 + for (i = 0; i < state->packet_count; i++) { 408 + /* Allocate an skb, holding an extra reference for 409 + * transmit completion counting */ 410 + skb = alloc_skb(sizeof(state->payload), GFP_KERNEL); 411 + if (!skb) 412 + return -ENOMEM; 413 + state->skbs[i] = skb; 414 + skb_get(skb); 415 + 416 + /* Copy the payload in, incrementing the source address to 417 + * exercise the rss vectors */ 418 + payload = ((struct efx_loopback_payload *) 419 + skb_put(skb, sizeof(state->payload))); 420 + memcpy(payload, &state->payload, sizeof(state->payload)); 421 + payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2)); 422 + 423 + /* Ensure everything we've written is visible to the 424 + * interrupt handler. */ 425 + smp_wmb(); 426 + 427 + if (NET_DEV_REGISTERED(efx)) 428 + netif_tx_lock_bh(efx->net_dev); 429 + rc = efx_xmit(efx, tx_queue, skb); 430 + if (NET_DEV_REGISTERED(efx)) 431 + netif_tx_unlock_bh(efx->net_dev); 432 + 433 + if (rc != NETDEV_TX_OK) { 434 + EFX_ERR(efx, "TX queue %d could not transmit packet %d " 435 + "of %d in %s loopback test\n", tx_queue->queue, 436 + i + 1, state->packet_count, LOOPBACK_MODE(efx)); 437 + 438 + /* Defer cleaning up the other skbs for the caller */ 439 + kfree_skb(skb); 440 + return -EPIPE; 441 + } 442 + } 443 + 444 + return 0; 445 + } 446 + 447 + static int efx_rx_loopback(struct efx_tx_queue *tx_queue, 448 + struct efx_loopback_self_tests *lb_tests) 449 + { 450 + struct efx_nic *efx = tx_queue->efx; 451 + struct efx_selftest_state *state = efx->loopback_selftest; 452 + struct sk_buff *skb; 453 + int tx_done = 0, rx_good, rx_bad; 454 + int i, rc = 0; 455 + 456 + if (NET_DEV_REGISTERED(efx)) 457 + netif_tx_lock_bh(efx->net_dev); 458 + 459 + /* Count the number of tx completions, and decrement the refcnt. Any 460 + * skbs not already completed will be free'd when the queue is flushed */ 461 + for (i=0; i < state->packet_count; i++) { 462 + skb = state->skbs[i]; 463 + if (skb && !skb_shared(skb)) 464 + ++tx_done; 465 + dev_kfree_skb_any(skb); 466 + } 467 + 468 + if (NET_DEV_REGISTERED(efx)) 469 + netif_tx_unlock_bh(efx->net_dev); 470 + 471 + /* Check TX completion and received packet counts */ 472 + rx_good = atomic_read(&state->rx_good); 473 + rx_bad = atomic_read(&state->rx_bad); 474 + if (tx_done != state->packet_count) { 475 + /* Don't free the skbs; they will be picked up on TX 476 + * overflow or channel teardown. 
477 + */ 478 + EFX_ERR(efx, "TX queue %d saw only %d out of an expected %d " 479 + "TX completion events in %s loopback test\n", 480 + tx_queue->queue, tx_done, state->packet_count, 481 + LOOPBACK_MODE(efx)); 482 + rc = -ETIMEDOUT; 483 + /* Allow to fall through so we see the RX errors as well */ 484 + } 485 + 486 + /* We may always be up to a flush away from our desired packet total */ 487 + if (rx_good != state->packet_count) { 488 + EFX_LOG(efx, "TX queue %d saw only %d out of an expected %d " 489 + "received packets in %s loopback test\n", 490 + tx_queue->queue, rx_good, state->packet_count, 491 + LOOPBACK_MODE(efx)); 492 + rc = -ETIMEDOUT; 493 + /* Fall through */ 494 + } 495 + 496 + /* Update loopback test structure */ 497 + lb_tests->tx_sent[tx_queue->queue] += state->packet_count; 498 + lb_tests->tx_done[tx_queue->queue] += tx_done; 499 + lb_tests->rx_good += rx_good; 500 + lb_tests->rx_bad += rx_bad; 501 + 502 + return rc; 503 + } 504 + 505 + static int 506 + efx_test_loopback(struct efx_tx_queue *tx_queue, 507 + struct efx_loopback_self_tests *lb_tests) 508 + { 509 + struct efx_nic *efx = tx_queue->efx; 510 + struct efx_selftest_state *state = efx->loopback_selftest; 511 + struct efx_channel *channel; 512 + int i, rc = 0; 513 + 514 + for (i = 0; i < loopback_test_level; i++) { 515 + /* Determine how many packets to send */ 516 + state->packet_count = (efx->type->txd_ring_mask + 1) / 3; 517 + state->packet_count = min(1 << (i << 2), state->packet_count); 518 + state->skbs = kzalloc(sizeof(state->skbs[0]) * 519 + state->packet_count, GFP_KERNEL); 520 + state->flush = 0; 521 + 522 + EFX_LOG(efx, "TX queue %d testing %s loopback with %d " 523 + "packets\n", tx_queue->queue, LOOPBACK_MODE(efx), 524 + state->packet_count); 525 + 526 + efx_iterate_state(efx); 527 + rc = efx_tx_loopback(tx_queue); 528 + 529 + /* NAPI polling is not enabled, so process channels synchronously */ 530 + schedule_timeout_uninterruptible(HZ / 50); 531 + efx_for_each_channel_with_interrupt(channel, efx) { 532 + if (channel->work_pending) 533 + efx_process_channel_now(channel); 534 + } 535 + 536 + rc |= efx_rx_loopback(tx_queue, lb_tests); 537 + kfree(state->skbs); 538 + 539 + if (rc) { 540 + /* Wait a while to ensure there are no packets 541 + * floating around after a failure. 
*/ 542 + schedule_timeout_uninterruptible(HZ / 10); 543 + return rc; 544 + } 545 + } 546 + 547 + EFX_LOG(efx, "TX queue %d passed %s loopback test with a burst length " 548 + "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx), 549 + state->packet_count); 550 + 551 + return rc; 552 + } 553 + 554 + static int efx_test_loopbacks(struct efx_nic *efx, 555 + struct efx_self_tests *tests, 556 + unsigned int loopback_modes) 557 + { 558 + struct efx_selftest_state *state = efx->loopback_selftest; 559 + struct ethtool_cmd ecmd, ecmd_loopback; 560 + struct efx_tx_queue *tx_queue; 561 + enum efx_loopback_mode old_mode, mode; 562 + int count, rc = 0, link_up; 563 + 564 + rc = efx_ethtool_get_settings(efx->net_dev, &ecmd); 565 + if (rc) { 566 + EFX_ERR(efx, "could not get GMII settings\n"); 567 + return rc; 568 + } 569 + old_mode = efx->loopback_mode; 570 + 571 + /* Disable autonegotiation for the purposes of loopback */ 572 + memcpy(&ecmd_loopback, &ecmd, sizeof(ecmd_loopback)); 573 + if (ecmd_loopback.autoneg == AUTONEG_ENABLE) { 574 + ecmd_loopback.autoneg = AUTONEG_DISABLE; 575 + ecmd_loopback.duplex = DUPLEX_FULL; 576 + ecmd_loopback.speed = SPEED_10000; 577 + } 578 + 579 + rc = efx_ethtool_set_settings(efx->net_dev, &ecmd_loopback); 580 + if (rc) { 581 + EFX_ERR(efx, "could not disable autonegotiation\n"); 582 + goto out; 583 + } 584 + tests->loopback_speed = ecmd_loopback.speed; 585 + tests->loopback_full_duplex = ecmd_loopback.duplex; 586 + 587 + /* Test all supported loopback modes */ 588 + for (mode = LOOPBACK_NONE; mode < LOOPBACK_TEST_MAX; mode++) { 589 + if (!(loopback_modes & (1 << mode))) 590 + continue; 591 + 592 + /* Move the port into the specified loopback mode. */ 593 + state->flush = 1; 594 + efx->loopback_mode = mode; 595 + efx_reconfigure_port(efx); 596 + 597 + /* Wait for the PHY to signal the link is up */ 598 + count = 0; 599 + do { 600 + struct efx_channel *channel = &efx->channel[0]; 601 + 602 + falcon_check_xmac(efx); 603 + schedule_timeout_uninterruptible(HZ / 10); 604 + if (channel->work_pending) 605 + efx_process_channel_now(channel); 606 + /* Wait for PHY events to be processed */ 607 + flush_workqueue(efx->workqueue); 608 + rmb(); 609 + 610 + /* efx->link_up can be 1 even if the XAUI link is down, 611 + * (bug5762). Usually, it's not worth bothering with the 612 + * difference, but for selftests, we need that extra 613 + * guarantee that the link is really, really, up. 614 + */ 615 + link_up = efx->link_up; 616 + if (!falcon_xaui_link_ok(efx)) 617 + link_up = 0; 618 + 619 + } while ((++count < 20) && !link_up); 620 + 621 + /* The link should now be up. 
If it isn't, there is no point 622 + * in attempting a loopback test */ 623 + if (!link_up) { 624 + EFX_ERR(efx, "loopback %s never came up\n", 625 + LOOPBACK_MODE(efx)); 626 + rc = -EIO; 627 + goto out; 628 + } 629 + 630 + EFX_LOG(efx, "link came up in %s loopback in %d iterations\n", 631 + LOOPBACK_MODE(efx), count); 632 + 633 + /* Test every TX queue */ 634 + efx_for_each_tx_queue(tx_queue, efx) { 635 + rc |= efx_test_loopback(tx_queue, 636 + &tests->loopback[mode]); 637 + if (rc) 638 + goto out; 639 + } 640 + } 641 + 642 + out: 643 + /* Take out of loopback and restore PHY settings */ 644 + state->flush = 1; 645 + efx->loopback_mode = old_mode; 646 + efx_ethtool_set_settings(efx->net_dev, &ecmd); 647 + 648 + return rc; 649 + } 650 + 651 + /************************************************************************** 652 + * 653 + * Entry points 654 + * 655 + *************************************************************************/ 656 + 657 + /* Online (i.e. non-disruptive) testing 658 + * This checks interrupt generation, event delivery and PHY presence. */ 659 + int efx_online_test(struct efx_nic *efx, struct efx_self_tests *tests) 660 + { 661 + struct efx_channel *channel; 662 + int rc = 0; 663 + 664 + EFX_LOG(efx, "performing online self-tests\n"); 665 + 666 + rc |= efx_test_interrupts(efx, tests); 667 + efx_for_each_channel(channel, efx) { 668 + if (channel->has_interrupt) 669 + rc |= efx_test_eventq_irq(channel, tests); 670 + else 671 + rc |= efx_test_eventq(channel, tests); 672 + } 673 + rc |= efx_test_phy(efx, tests); 674 + 675 + if (rc) 676 + EFX_ERR(efx, "failed online self-tests\n"); 677 + 678 + return rc; 679 + } 680 + 681 + /* Offline (i.e. disruptive) testing 682 + * This checks MAC and PHY loopback on the specified port. */ 683 + int efx_offline_test(struct efx_nic *efx, 684 + struct efx_self_tests *tests, unsigned int loopback_modes) 685 + { 686 + struct efx_selftest_state *state; 687 + int rc = 0; 688 + 689 + EFX_LOG(efx, "performing offline self-tests\n"); 690 + 691 + /* Create a selftest_state structure to hold state for the test */ 692 + state = kzalloc(sizeof(*state), GFP_KERNEL); 693 + if (state == NULL) { 694 + rc = -ENOMEM; 695 + goto out; 696 + } 697 + 698 + /* Set the port loopback_selftest member. From this point on 699 + * all received packets will be dropped. Mark the state as 700 + * "flushing" so all inflight packets are dropped */ 701 + BUG_ON(efx->loopback_selftest); 702 + state->flush = 1; 703 + efx->loopback_selftest = (void *)state; 704 + 705 + rc = efx_test_loopbacks(efx, tests, loopback_modes); 706 + 707 + efx->loopback_selftest = NULL; 708 + wmb(); 709 + kfree(state); 710 + 711 + out: 712 + if (rc) 713 + EFX_ERR(efx, "failed offline self-tests\n"); 714 + 715 + return rc; 716 + } 717 +
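The burst sizing in efx_test_loopback() follows the 16**(n-1) scheme described at the top of the file: iteration i sends min(1 << (i << 2), ring_size / 3) packets, i.e. 16^i capped at a third of the TX ring. A worked example for the default loopback_test_level of 3 (the 4096-entry ring size here is only an assumed value, not taken from the patch):

#include <stdio.h>

int main(void)
{
	unsigned int ring_size = 4096;		/* assumed ring size */
	unsigned int cap = ring_size / 3;	/* 1365 */
	unsigned int level = 3;			/* default loopback_test_level */
	unsigned int i, count;

	for (i = 0; i < level; i++) {
		count = 1u << (i << 2);		/* 1, 16, 256 */
		if (count > cap)
			count = cap;
		printf("iteration %u: %u packets\n", i, count);
	}
	return 0;
}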
+50
drivers/net/sfc/selftest.h
··· 1 + /**************************************************************************** 2 + * Driver for Solarflare Solarstorm network controllers and boards 3 + * Copyright 2005-2006 Fen Systems Ltd. 4 + * Copyright 2006-2008 Solarflare Communications Inc. 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms of the GNU General Public License version 2 as published 8 + * by the Free Software Foundation, incorporated herein by reference. 9 + */ 10 + 11 + #ifndef EFX_SELFTEST_H 12 + #define EFX_SELFTEST_H 13 + 14 + #include "net_driver.h" 15 + 16 + /* 17 + * Self tests 18 + */ 19 + 20 + struct efx_loopback_self_tests { 21 + int tx_sent[EFX_MAX_TX_QUEUES]; 22 + int tx_done[EFX_MAX_TX_QUEUES]; 23 + int rx_good; 24 + int rx_bad; 25 + }; 26 + 27 + /* Efx self test results 28 + * For fields which are not counters, 1 indicates success and -1 29 + * indicates failure. 30 + */ 31 + struct efx_self_tests { 32 + int interrupt; 33 + int eventq_dma[EFX_MAX_CHANNELS]; 34 + int eventq_int[EFX_MAX_CHANNELS]; 35 + int eventq_poll[EFX_MAX_CHANNELS]; 36 + int phy_ok; 37 + int loopback_speed; 38 + int loopback_full_duplex; 39 + struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX]; 40 + }; 41 + 42 + extern void efx_loopback_rx_packet(struct efx_nic *efx, 43 + const char *buf_ptr, int pkt_len); 44 + extern int efx_online_test(struct efx_nic *efx, 45 + struct efx_self_tests *tests); 46 + extern int efx_offline_test(struct efx_nic *efx, 47 + struct efx_self_tests *tests, 48 + unsigned int loopback_modes); 49 + 50 + #endif /* EFX_SELFTEST_H */
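Per the comment above, non-counter fields in struct efx_self_tests use 1 for success and -1 for failure; since efx_ethtool_self_test() zeroes the structure before running, a field left at 0 corresponds to a test that was never attempted. A small decoding sketch, not part of the patch:

/* Sketch only: decode a non-counter self-test result field. */
static const char *selftest_result_str(int result)
{
	if (result > 0)
		return "pass";
	if (result < 0)
		return "FAIL";
	return "not run";	/* structure starts zeroed */
}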
+14
drivers/net/sfc/sfe4001.c
··· 130 130 (void) efx_i2c_read(i2c, MAX6647, RSL, &in, 1); 131 131 } 132 132 133 + /* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected 134 + * to the FLASH_CFG_1 input on the DSP. We must keep it high at power- 135 + * up to allow writing the flash (done through MDIO from userland). 136 + */ 137 + unsigned int sfe4001_phy_flash_cfg; 138 + module_param_named(phy_flash_cfg, sfe4001_phy_flash_cfg, uint, 0444); 139 + MODULE_PARM_DESC(phy_flash_cfg, 140 + "Force PHY to enter flash configuration mode"); 141 + 133 142 /* This board uses an I2C expander to provider power to the PHY, which needs to 134 143 * be turned on before the PHY can be used. 135 144 * Context: Process context, rtnl lock held ··· 212 203 out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) | 213 204 (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) | 214 205 (1 << P0_X_TRST_LBN)); 206 + if (sfe4001_phy_flash_cfg) 207 + out |= 1 << P0_EN_3V3X_LBN; 215 208 216 209 rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); 217 210 if (rc) ··· 237 226 if (in & (1 << P1_AFE_PWD_LBN)) 238 227 goto done; 239 228 229 + /* DSP doesn't look powered in flash config mode */ 230 + if (sfe4001_phy_flash_cfg) 231 + goto done; 240 232 } while (++count < 20); 241 233 242 234 EFX_INFO(efx, "timed out waiting for power\n");
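Usage note: because phy_flash_cfg is registered with module_param_named(..., 0444) it must be given at module load time and is read-only afterwards. Assuming the driver module is named sfc, a flash-programming session would load it with something like "modprobe sfc phy_flash_cfg=1", and the value can then be read back from /sys/module/sfc/parameters/phy_flash_cfg.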
+87 -4
drivers/net/sfc/tenxpress.c
··· 24 24 MDIO_MMDREG_DEVS0_PCS | \ 25 25 MDIO_MMDREG_DEVS0_PHYXS) 26 26 27 + #define TENXPRESS_LOOPBACKS ((1 << LOOPBACK_PHYXS) | \ 28 + (1 << LOOPBACK_PCS) | \ 29 + (1 << LOOPBACK_PMAPMD) | \ 30 + (1 << LOOPBACK_NETWORK)) 31 + 27 32 /* We complain if we fail to see the link partner as 10G capable this many 28 33 * times in a row (must be > 1 as sampling the autoneg. registers is racy) 29 34 */ ··· 77 72 #define PMA_PMD_BIST_RXD_LBN (1) 78 73 #define PMA_PMD_BIST_AFE_LBN (0) 79 74 75 + /* Special Software reset register */ 76 + #define PMA_PMD_EXT_CTRL_REG 49152 77 + #define PMA_PMD_EXT_SSR_LBN 15 78 + 80 79 #define BIST_MAX_DELAY (1000) 81 80 #define BIST_POLL_DELAY (10) 82 81 ··· 94 85 95 86 #define PCS_TEST_SELECT_REG 0xd807 /* PRM 10.5.8 */ 96 87 #define CLK312_EN_LBN 3 88 + 89 + /* PHYXS registers */ 90 + #define PHYXS_TEST1 (49162) 91 + #define LOOPBACK_NEAR_LBN (8) 92 + #define LOOPBACK_NEAR_WIDTH (1) 97 93 98 94 /* Boot status register */ 99 95 #define PCS_BOOT_STATUS_REG (0xd000) ··· 120 106 121 107 struct tenxpress_phy_data { 122 108 enum tenxpress_state state; 109 + enum efx_loopback_mode loopback_mode; 123 110 atomic_t bad_crc_count; 111 + int tx_disabled; 124 112 int bad_lp_tries; 125 113 }; 126 114 ··· 215 199 216 200 tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL); 217 201 218 - rc = mdio_clause45_wait_reset_mmds(efx, 219 - TENXPRESS_REQUIRED_DEVS); 220 - if (rc < 0) 221 - goto fail; 202 + if (!sfe4001_phy_flash_cfg) { 203 + rc = mdio_clause45_wait_reset_mmds(efx, 204 + TENXPRESS_REQUIRED_DEVS); 205 + if (rc < 0) 206 + goto fail; 207 + } 222 208 223 209 rc = mdio_clause45_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0); 224 210 if (rc < 0) ··· 241 223 kfree(efx->phy_data); 242 224 efx->phy_data = NULL; 243 225 return rc; 226 + } 227 + 228 + static int tenxpress_special_reset(struct efx_nic *efx) 229 + { 230 + int rc, reg; 231 + 232 + EFX_TRACE(efx, "%s\n", __func__); 233 + 234 + /* Initiate reset */ 235 + reg = mdio_clause45_read(efx, efx->mii.phy_id, 236 + MDIO_MMD_PMAPMD, PMA_PMD_EXT_CTRL_REG); 237 + reg |= (1 << PMA_PMD_EXT_SSR_LBN); 238 + mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD, 239 + PMA_PMD_EXT_CTRL_REG, reg); 240 + 241 + msleep(200); 242 + 243 + /* Wait for the blocks to come out of reset */ 244 + rc = mdio_clause45_wait_reset_mmds(efx, 245 + TENXPRESS_REQUIRED_DEVS); 246 + if (rc < 0) 247 + return rc; 248 + 249 + /* Try and reconfigure the device */ 250 + rc = tenxpress_init(efx); 251 + if (rc < 0) 252 + return rc; 253 + 254 + return 0; 244 255 } 245 256 246 257 static void tenxpress_set_bad_lp(struct efx_nic *efx, int bad_lp) ··· 346 299 return ok; 347 300 } 348 301 302 + static void tenxpress_phyxs_loopback(struct efx_nic *efx) 303 + { 304 + int phy_id = efx->mii.phy_id; 305 + int ctrl1, ctrl2; 306 + 307 + ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS, 308 + PHYXS_TEST1); 309 + if (efx->loopback_mode == LOOPBACK_PHYXS) 310 + ctrl2 |= (1 << LOOPBACK_NEAR_LBN); 311 + else 312 + ctrl2 &= ~(1 << LOOPBACK_NEAR_LBN); 313 + if (ctrl1 != ctrl2) 314 + mdio_clause45_write(efx, phy_id, MDIO_MMD_PHYXS, 315 + PHYXS_TEST1, ctrl2); 316 + } 317 + 349 318 static void tenxpress_phy_reconfigure(struct efx_nic *efx) 350 319 { 320 + struct tenxpress_phy_data *phy_data = efx->phy_data; 321 + int loop_change = LOOPBACK_OUT_OF(phy_data, efx, 322 + TENXPRESS_LOOPBACKS); 323 + 351 324 if (!tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL)) 352 325 return; 353 326 327 + /* When coming out of transmit disable, coming out of low power 328 + * mode, or moving 
out of any PHY internal loopback mode, 329 + * perform a special software reset */ 330 + if ((phy_data->tx_disabled && !efx->tx_disabled) || 331 + loop_change) { 332 + (void) tenxpress_special_reset(efx); 333 + falcon_reset_xaui(efx); 334 + } 335 + 336 + mdio_clause45_transmit_disable(efx); 337 + mdio_clause45_phy_reconfigure(efx); 338 + tenxpress_phyxs_loopback(efx); 339 + 340 + phy_data->tx_disabled = efx->tx_disabled; 341 + phy_data->loopback_mode = efx->loopback_mode; 354 342 efx->link_up = tenxpress_link_ok(efx, 0); 355 343 efx->link_options = GM_LPA_10000FULL; 356 344 } ··· 513 431 .clear_interrupt = tenxpress_phy_clear_interrupt, 514 432 .reset_xaui = tenxpress_reset_xaui, 515 433 .mmds = TENXPRESS_REQUIRED_DEVS, 434 + .loopbacks = TENXPRESS_LOOPBACKS, 516 435 };
+664
drivers/net/sfc/tx.c
··· 82 82 } 83 83 } 84 84 85 + /** 86 + * struct efx_tso_header - a DMA mapped buffer for packet headers 87 + * @next: Linked list of free ones. 88 + * The list is protected by the TX queue lock. 89 + * @dma_unmap_len: Length to unmap for an oversize buffer, or 0. 90 + * @dma_addr: The DMA address of the header below. 91 + * 92 + * This controls the memory used for a TSO header. Use TSOH_DATA() 93 + * to find the packet header data. Use TSOH_SIZE() to calculate the 94 + * total size required for a given packet header length. TSO headers 95 + * in the free list are exactly %TSOH_STD_SIZE bytes in size. 96 + */ 97 + struct efx_tso_header { 98 + union { 99 + struct efx_tso_header *next; 100 + size_t unmap_len; 101 + }; 102 + dma_addr_t dma_addr; 103 + }; 104 + 105 + static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, 106 + const struct sk_buff *skb); 107 + static void efx_fini_tso(struct efx_tx_queue *tx_queue); 108 + static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, 109 + struct efx_tso_header *tsoh); 110 + 111 + static inline void efx_tsoh_free(struct efx_tx_queue *tx_queue, 112 + struct efx_tx_buffer *buffer) 113 + { 114 + if (buffer->tsoh) { 115 + if (likely(!buffer->tsoh->unmap_len)) { 116 + buffer->tsoh->next = tx_queue->tso_headers_free; 117 + tx_queue->tso_headers_free = buffer->tsoh; 118 + } else { 119 + efx_tsoh_heap_free(tx_queue, buffer->tsoh); 120 + } 121 + buffer->tsoh = NULL; 122 + } 123 + } 124 + 85 125 86 126 /* 87 127 * Add a socket buffer to a TX queue ··· 153 113 int rc = NETDEV_TX_OK; 154 114 155 115 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); 116 + 117 + if (skb_shinfo((struct sk_buff *)skb)->gso_size) 118 + return efx_enqueue_skb_tso(tx_queue, skb); 156 119 157 120 /* Get size of the initial fragment */ 158 121 len = skb_headlen(skb); ··· 209 166 insert_ptr = (tx_queue->insert_count & 210 167 efx->type->txd_ring_mask); 211 168 buffer = &tx_queue->buffer[insert_ptr]; 169 + efx_tsoh_free(tx_queue, buffer); 170 + EFX_BUG_ON_PARANOID(buffer->tsoh); 212 171 EFX_BUG_ON_PARANOID(buffer->skb); 213 172 EFX_BUG_ON_PARANOID(buffer->len); 214 173 EFX_BUG_ON_PARANOID(buffer->continuation != 1); ··· 477 432 478 433 efx_release_tx_buffers(tx_queue); 479 434 435 + /* Free up TSO header cache */ 436 + efx_fini_tso(tx_queue); 437 + 480 438 /* Release queue's stop on port, if any */ 481 439 if (tx_queue->stopped) { 482 440 tx_queue->stopped = 0; ··· 498 450 } 499 451 500 452 453 + /* Efx TCP segmentation acceleration. 454 + * 455 + * Why? Because by doing it here in the driver we can go significantly 456 + * faster than the GSO. 457 + * 458 + * Requires TX checksum offload support. 459 + */ 460 + 461 + /* Number of bytes inserted at the start of a TSO header buffer, 462 + * similar to NET_IP_ALIGN. 463 + */ 464 + #if defined(__i386__) || defined(__x86_64__) 465 + #define TSOH_OFFSET 0 466 + #else 467 + #define TSOH_OFFSET NET_IP_ALIGN 468 + #endif 469 + 470 + #define TSOH_BUFFER(tsoh) ((u8 *)(tsoh + 1) + TSOH_OFFSET) 471 + 472 + /* Total size of struct efx_tso_header, buffer and padding */ 473 + #define TSOH_SIZE(hdr_len) \ 474 + (sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len) 475 + 476 + /* Size of blocks on free list. Larger blocks must be allocated from 477 + * the heap. 
478 + */ 479 + #define TSOH_STD_SIZE 128 480 + 481 + #define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2)) 482 + #define ETH_HDR_LEN(skb) (skb_network_header(skb) - (skb)->data) 483 + #define SKB_TCP_OFF(skb) PTR_DIFF(tcp_hdr(skb), (skb)->data) 484 + #define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data) 485 + 486 + /** 487 + * struct tso_state - TSO state for an SKB 488 + * @remaining_len: Bytes of data we've yet to segment 489 + * @seqnum: Current sequence number 490 + * @packet_space: Remaining space in current packet 491 + * @ifc: Input fragment cursor. 492 + * Where we are in the current fragment of the incoming SKB. These 493 + * values get updated in place when we split a fragment over 494 + * multiple packets. 495 + * @p: Parameters. 496 + * These values are set once at the start of the TSO send and do 497 + * not get changed as the routine progresses. 498 + * 499 + * The state used during segmentation. It is put into this data structure 500 + * just to make it easy to pass into inline functions. 501 + */ 502 + struct tso_state { 503 + unsigned remaining_len; 504 + unsigned seqnum; 505 + unsigned packet_space; 506 + 507 + struct { 508 + /* DMA address of current position */ 509 + dma_addr_t dma_addr; 510 + /* Remaining length */ 511 + unsigned int len; 512 + /* DMA address and length of the whole fragment */ 513 + unsigned int unmap_len; 514 + dma_addr_t unmap_addr; 515 + struct page *page; 516 + unsigned page_off; 517 + } ifc; 518 + 519 + struct { 520 + /* The number of bytes of header */ 521 + unsigned int header_length; 522 + 523 + /* The number of bytes to put in each outgoing segment. */ 524 + int full_packet_size; 525 + 526 + /* Current IPv4 ID, host endian. */ 527 + unsigned ipv4_id; 528 + } p; 529 + }; 530 + 531 + 532 + /* 533 + * Verify that our various assumptions about sk_buffs and the conditions 534 + * under which TSO will be attempted hold true. 535 + */ 536 + static inline void efx_tso_check_safe(const struct sk_buff *skb) 537 + { 538 + EFX_BUG_ON_PARANOID(skb->protocol != htons(ETH_P_IP)); 539 + EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto != 540 + skb->protocol); 541 + EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP); 542 + EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data) 543 + + (tcp_hdr(skb)->doff << 2u)) > 544 + skb_headlen(skb)); 545 + } 546 + 547 + 548 + /* 549 + * Allocate a page worth of efx_tso_header structures, and string them 550 + * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM. 551 + */ 552 + static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue) 553 + { 554 + 555 + struct pci_dev *pci_dev = tx_queue->efx->pci_dev; 556 + struct efx_tso_header *tsoh; 557 + dma_addr_t dma_addr; 558 + u8 *base_kva, *kva; 559 + 560 + base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr); 561 + if (base_kva == NULL) { 562 + EFX_ERR(tx_queue->efx, "Unable to allocate page for TSO" 563 + " headers\n"); 564 + return -ENOMEM; 565 + } 566 + 567 + /* pci_alloc_consistent() allocates pages. */ 568 + EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u)); 569 + 570 + for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) { 571 + tsoh = (struct efx_tso_header *)kva; 572 + tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva); 573 + tsoh->next = tx_queue->tso_headers_free; 574 + tx_queue->tso_headers_free = tsoh; 575 + } 576 + 577 + return 0; 578 + } 579 + 580 + 581 + /* Free up a TSO header, and all others in the same page. 
*/ 582 + static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue, 583 + struct efx_tso_header *tsoh, 584 + struct pci_dev *pci_dev) 585 + { 586 + struct efx_tso_header **p; 587 + unsigned long base_kva; 588 + dma_addr_t base_dma; 589 + 590 + base_kva = (unsigned long)tsoh & PAGE_MASK; 591 + base_dma = tsoh->dma_addr & PAGE_MASK; 592 + 593 + p = &tx_queue->tso_headers_free; 594 + while (*p != NULL) 595 + if (((unsigned long)*p & PAGE_MASK) == base_kva) 596 + *p = (*p)->next; 597 + else 598 + p = &(*p)->next; 599 + 600 + pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma); 601 + } 602 + 603 + static struct efx_tso_header * 604 + efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len) 605 + { 606 + struct efx_tso_header *tsoh; 607 + 608 + tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA); 609 + if (unlikely(!tsoh)) 610 + return NULL; 611 + 612 + tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev, 613 + TSOH_BUFFER(tsoh), header_len, 614 + PCI_DMA_TODEVICE); 615 + if (unlikely(pci_dma_mapping_error(tsoh->dma_addr))) { 616 + kfree(tsoh); 617 + return NULL; 618 + } 619 + 620 + tsoh->unmap_len = header_len; 621 + return tsoh; 622 + } 623 + 624 + static void 625 + efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh) 626 + { 627 + pci_unmap_single(tx_queue->efx->pci_dev, 628 + tsoh->dma_addr, tsoh->unmap_len, 629 + PCI_DMA_TODEVICE); 630 + kfree(tsoh); 631 + } 632 + 633 + /** 634 + * efx_tx_queue_insert - push descriptors onto the TX queue 635 + * @tx_queue: Efx TX queue 636 + * @dma_addr: DMA address of fragment 637 + * @len: Length of fragment 638 + * @skb: Only non-null for end of last segment 639 + * @end_of_packet: True if last fragment in a packet 640 + * @unmap_addr: DMA address of fragment for unmapping 641 + * @unmap_len: Only set this in last segment of a fragment 642 + * 643 + * Push descriptors onto the TX queue. Return 0 on success or 1 if 644 + * @tx_queue full. 645 + */ 646 + static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, 647 + dma_addr_t dma_addr, unsigned len, 648 + const struct sk_buff *skb, int end_of_packet, 649 + dma_addr_t unmap_addr, unsigned unmap_len) 650 + { 651 + struct efx_tx_buffer *buffer; 652 + struct efx_nic *efx = tx_queue->efx; 653 + unsigned dma_len, fill_level, insert_ptr, misalign; 654 + int q_space; 655 + 656 + EFX_BUG_ON_PARANOID(len <= 0); 657 + 658 + fill_level = tx_queue->insert_count - tx_queue->old_read_count; 659 + /* -1 as there is no way to represent all descriptors used */ 660 + q_space = efx->type->txd_ring_mask - 1 - fill_level; 661 + 662 + while (1) { 663 + if (unlikely(q_space-- <= 0)) { 664 + /* It might be that completions have happened 665 + * since the xmit path last checked. Update 666 + * the xmit path's copy of read_count. 667 + */ 668 + ++tx_queue->stopped; 669 + /* This memory barrier protects the change of 670 + * stopped from the access of read_count. 
*/ 671 + smp_mb(); 672 + tx_queue->old_read_count = 673 + *(volatile unsigned *)&tx_queue->read_count; 674 + fill_level = (tx_queue->insert_count 675 + - tx_queue->old_read_count); 676 + q_space = efx->type->txd_ring_mask - 1 - fill_level; 677 + if (unlikely(q_space-- <= 0)) 678 + return 1; 679 + smp_mb(); 680 + --tx_queue->stopped; 681 + } 682 + 683 + insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask; 684 + buffer = &tx_queue->buffer[insert_ptr]; 685 + ++tx_queue->insert_count; 686 + 687 + EFX_BUG_ON_PARANOID(tx_queue->insert_count - 688 + tx_queue->read_count > 689 + efx->type->txd_ring_mask); 690 + 691 + efx_tsoh_free(tx_queue, buffer); 692 + EFX_BUG_ON_PARANOID(buffer->len); 693 + EFX_BUG_ON_PARANOID(buffer->unmap_len); 694 + EFX_BUG_ON_PARANOID(buffer->skb); 695 + EFX_BUG_ON_PARANOID(buffer->continuation != 1); 696 + EFX_BUG_ON_PARANOID(buffer->tsoh); 697 + 698 + buffer->dma_addr = dma_addr; 699 + 700 + /* Ensure we do not cross a boundary unsupported by H/W */ 701 + dma_len = (~dma_addr & efx->type->tx_dma_mask) + 1; 702 + 703 + misalign = (unsigned)dma_addr & efx->type->bug5391_mask; 704 + if (misalign && dma_len + misalign > 512) 705 + dma_len = 512 - misalign; 706 + 707 + /* If there is enough space to send then do so */ 708 + if (dma_len >= len) 709 + break; 710 + 711 + buffer->len = dma_len; /* Don't set the other members */ 712 + dma_addr += dma_len; 713 + len -= dma_len; 714 + } 715 + 716 + EFX_BUG_ON_PARANOID(!len); 717 + buffer->len = len; 718 + buffer->skb = skb; 719 + buffer->continuation = !end_of_packet; 720 + buffer->unmap_addr = unmap_addr; 721 + buffer->unmap_len = unmap_len; 722 + return 0; 723 + } 724 + 725 + 726 + /* 727 + * Put a TSO header into the TX queue. 728 + * 729 + * This is special-cased because we know that it is small enough to fit in 730 + * a single fragment, and we know it doesn't cross a page boundary. It 731 + * also allows us to not worry about end-of-packet etc. 732 + */ 733 + static inline void efx_tso_put_header(struct efx_tx_queue *tx_queue, 734 + struct efx_tso_header *tsoh, unsigned len) 735 + { 736 + struct efx_tx_buffer *buffer; 737 + 738 + buffer = &tx_queue->buffer[tx_queue->insert_count & 739 + tx_queue->efx->type->txd_ring_mask]; 740 + efx_tsoh_free(tx_queue, buffer); 741 + EFX_BUG_ON_PARANOID(buffer->len); 742 + EFX_BUG_ON_PARANOID(buffer->unmap_len); 743 + EFX_BUG_ON_PARANOID(buffer->skb); 744 + EFX_BUG_ON_PARANOID(buffer->continuation != 1); 745 + EFX_BUG_ON_PARANOID(buffer->tsoh); 746 + buffer->len = len; 747 + buffer->dma_addr = tsoh->dma_addr; 748 + buffer->tsoh = tsoh; 749 + 750 + ++tx_queue->insert_count; 751 + } 752 + 753 + 754 + /* Remove descriptors put into a tx_queue. */ 755 + static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) 756 + { 757 + struct efx_tx_buffer *buffer; 758 + 759 + /* Work backwards until we hit the original insert pointer value */ 760 + while (tx_queue->insert_count != tx_queue->write_count) { 761 + --tx_queue->insert_count; 762 + buffer = &tx_queue->buffer[tx_queue->insert_count & 763 + tx_queue->efx->type->txd_ring_mask]; 764 + efx_tsoh_free(tx_queue, buffer); 765 + EFX_BUG_ON_PARANOID(buffer->skb); 766 + buffer->len = 0; 767 + buffer->continuation = 1; 768 + if (buffer->unmap_len) { 769 + pci_unmap_page(tx_queue->efx->pci_dev, 770 + buffer->unmap_addr, 771 + buffer->unmap_len, PCI_DMA_TODEVICE); 772 + buffer->unmap_len = 0; 773 + } 774 + } 775 + } 776 + 777 + 778 + /* Parse the SKB header and initialise state. 
*/ 779 + static inline void tso_start(struct tso_state *st, const struct sk_buff *skb) 780 + { 781 + /* All ethernet/IP/TCP headers combined size is TCP header size 782 + * plus offset of TCP header relative to start of packet. 783 + */ 784 + st->p.header_length = ((tcp_hdr(skb)->doff << 2u) 785 + + PTR_DIFF(tcp_hdr(skb), skb->data)); 786 + st->p.full_packet_size = (st->p.header_length 787 + + skb_shinfo(skb)->gso_size); 788 + 789 + st->p.ipv4_id = ntohs(ip_hdr(skb)->id); 790 + st->seqnum = ntohl(tcp_hdr(skb)->seq); 791 + 792 + EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg); 793 + EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn); 794 + EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst); 795 + 796 + st->packet_space = st->p.full_packet_size; 797 + st->remaining_len = skb->len - st->p.header_length; 798 + } 799 + 800 + 801 + /** 802 + * tso_get_fragment - record fragment details and map for DMA 803 + * @st: TSO state 804 + * @efx: Efx NIC 805 + * @data: Pointer to fragment data 806 + * @len: Length of fragment 807 + * 808 + * Record fragment details and map for DMA. Return 0 on success, or 809 + * -%ENOMEM if DMA mapping fails. 810 + */ 811 + static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, 812 + int len, struct page *page, int page_off) 813 + { 814 + 815 + st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off, 816 + len, PCI_DMA_TODEVICE); 817 + if (likely(!pci_dma_mapping_error(st->ifc.unmap_addr))) { 818 + st->ifc.unmap_len = len; 819 + st->ifc.len = len; 820 + st->ifc.dma_addr = st->ifc.unmap_addr; 821 + st->ifc.page = page; 822 + st->ifc.page_off = page_off; 823 + return 0; 824 + } 825 + return -ENOMEM; 826 + } 827 + 828 + 829 + /** 830 + * tso_fill_packet_with_fragment - form descriptors for the current fragment 831 + * @tx_queue: Efx TX queue 832 + * @skb: Socket buffer 833 + * @st: TSO state 834 + * 835 + * Form descriptors for the current fragment, until we reach the end 836 + * of fragment or end-of-packet. Return 0 on success, 1 if not enough 837 + * space in @tx_queue. 838 + */ 839 + static inline int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, 840 + const struct sk_buff *skb, 841 + struct tso_state *st) 842 + { 843 + 844 + int n, end_of_packet, rc; 845 + 846 + if (st->ifc.len == 0) 847 + return 0; 848 + if (st->packet_space == 0) 849 + return 0; 850 + 851 + EFX_BUG_ON_PARANOID(st->ifc.len <= 0); 852 + EFX_BUG_ON_PARANOID(st->packet_space <= 0); 853 + 854 + n = min(st->ifc.len, st->packet_space); 855 + 856 + st->packet_space -= n; 857 + st->remaining_len -= n; 858 + st->ifc.len -= n; 859 + st->ifc.page_off += n; 860 + end_of_packet = st->remaining_len == 0 || st->packet_space == 0; 861 + 862 + rc = efx_tx_queue_insert(tx_queue, st->ifc.dma_addr, n, 863 + st->remaining_len ? NULL : skb, 864 + end_of_packet, st->ifc.unmap_addr, 865 + st->ifc.len ? 0 : st->ifc.unmap_len); 866 + 867 + st->ifc.dma_addr += n; 868 + 869 + return rc; 870 + } 871 + 872 + 873 + /** 874 + * tso_start_new_packet - generate a new header and prepare for the new packet 875 + * @tx_queue: Efx TX queue 876 + * @skb: Socket buffer 877 + * @st: TSO state 878 + * 879 + * Generate a new header and prepare for the new packet. Return 0 on 880 + * success, or -1 if failed to alloc header. 
881 + */ 882 + static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue, 883 + const struct sk_buff *skb, 884 + struct tso_state *st) 885 + { 886 + struct efx_tso_header *tsoh; 887 + struct iphdr *tsoh_iph; 888 + struct tcphdr *tsoh_th; 889 + unsigned ip_length; 890 + u8 *header; 891 + 892 + /* Allocate a DMA-mapped header buffer. */ 893 + if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) { 894 + if (tx_queue->tso_headers_free == NULL) 895 + if (efx_tsoh_block_alloc(tx_queue)) 896 + return -1; 897 + EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free); 898 + tsoh = tx_queue->tso_headers_free; 899 + tx_queue->tso_headers_free = tsoh->next; 900 + tsoh->unmap_len = 0; 901 + } else { 902 + tx_queue->tso_long_headers++; 903 + tsoh = efx_tsoh_heap_alloc(tx_queue, st->p.header_length); 904 + if (unlikely(!tsoh)) 905 + return -1; 906 + } 907 + 908 + header = TSOH_BUFFER(tsoh); 909 + tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb)); 910 + tsoh_iph = (struct iphdr *)(header + SKB_IPV4_OFF(skb)); 911 + 912 + /* Copy and update the headers. */ 913 + memcpy(header, skb->data, st->p.header_length); 914 + 915 + tsoh_th->seq = htonl(st->seqnum); 916 + st->seqnum += skb_shinfo(skb)->gso_size; 917 + if (st->remaining_len > skb_shinfo(skb)->gso_size) { 918 + /* This packet will not finish the TSO burst. */ 919 + ip_length = st->p.full_packet_size - ETH_HDR_LEN(skb); 920 + tsoh_th->fin = 0; 921 + tsoh_th->psh = 0; 922 + } else { 923 + /* This packet will be the last in the TSO burst. */ 924 + ip_length = (st->p.header_length - ETH_HDR_LEN(skb) 925 + + st->remaining_len); 926 + tsoh_th->fin = tcp_hdr(skb)->fin; 927 + tsoh_th->psh = tcp_hdr(skb)->psh; 928 + } 929 + tsoh_iph->tot_len = htons(ip_length); 930 + 931 + /* Linux leaves suitable gaps in the IP ID space for us to fill. */ 932 + tsoh_iph->id = htons(st->p.ipv4_id); 933 + st->p.ipv4_id++; 934 + 935 + st->packet_space = skb_shinfo(skb)->gso_size; 936 + ++tx_queue->tso_packets; 937 + 938 + /* Form a descriptor for this header. */ 939 + efx_tso_put_header(tx_queue, tsoh, st->p.header_length); 940 + 941 + return 0; 942 + } 943 + 944 + 945 + /** 946 + * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer 947 + * @tx_queue: Efx TX queue 948 + * @skb: Socket buffer 949 + * 950 + * Context: You must hold netif_tx_lock() to call this function. 951 + * 952 + * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if 953 + * @skb was not enqueued. In all cases @skb is consumed. Return 954 + * %NETDEV_TX_OK or %NETDEV_TX_BUSY. 955 + */ 956 + static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, 957 + const struct sk_buff *skb) 958 + { 959 + int frag_i, rc, rc2 = NETDEV_TX_OK; 960 + struct tso_state state; 961 + skb_frag_t *f; 962 + 963 + /* Verify TSO is safe - these checks should never fail. */ 964 + efx_tso_check_safe(skb); 965 + 966 + EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); 967 + 968 + tso_start(&state, skb); 969 + 970 + /* Assume that skb header area contains exactly the headers, and 971 + * all payload is in the frag list. 972 + */ 973 + if (skb_headlen(skb) == state.p.header_length) { 974 + /* Grab the first payload fragment. 
*/ 975 + EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1); 976 + frag_i = 0; 977 + f = &skb_shinfo(skb)->frags[frag_i]; 978 + rc = tso_get_fragment(&state, tx_queue->efx, 979 + f->size, f->page, f->page_offset); 980 + if (rc) 981 + goto mem_err; 982 + } else { 983 + /* It may look like this code fragment assumes that the 984 + * skb->data portion does not cross a page boundary, but 985 + * that is not the case. It is guaranteed to be direct 986 + * mapped memory, and therefore is physically contiguous, 987 + * and so DMA will work fine. kmap_atomic() on this region 988 + * will just return the direct mapping, so that will work 989 + * too. 990 + */ 991 + int page_off = (unsigned long)skb->data & (PAGE_SIZE - 1); 992 + int hl = state.p.header_length; 993 + rc = tso_get_fragment(&state, tx_queue->efx, 994 + skb_headlen(skb) - hl, 995 + virt_to_page(skb->data), page_off + hl); 996 + if (rc) 997 + goto mem_err; 998 + frag_i = -1; 999 + } 1000 + 1001 + if (tso_start_new_packet(tx_queue, skb, &state) < 0) 1002 + goto mem_err; 1003 + 1004 + while (1) { 1005 + rc = tso_fill_packet_with_fragment(tx_queue, skb, &state); 1006 + if (unlikely(rc)) 1007 + goto stop; 1008 + 1009 + /* Move onto the next fragment? */ 1010 + if (state.ifc.len == 0) { 1011 + if (++frag_i >= skb_shinfo(skb)->nr_frags) 1012 + /* End of payload reached. */ 1013 + break; 1014 + f = &skb_shinfo(skb)->frags[frag_i]; 1015 + rc = tso_get_fragment(&state, tx_queue->efx, 1016 + f->size, f->page, f->page_offset); 1017 + if (rc) 1018 + goto mem_err; 1019 + } 1020 + 1021 + /* Start at new packet? */ 1022 + if (state.packet_space == 0 && 1023 + tso_start_new_packet(tx_queue, skb, &state) < 0) 1024 + goto mem_err; 1025 + } 1026 + 1027 + /* Pass off to hardware */ 1028 + falcon_push_buffers(tx_queue); 1029 + 1030 + tx_queue->tso_bursts++; 1031 + return NETDEV_TX_OK; 1032 + 1033 + mem_err: 1034 + EFX_ERR(tx_queue->efx, "Out of memory for TSO headers, or PCI mapping" 1035 + " error\n"); 1036 + dev_kfree_skb_any((struct sk_buff *)skb); 1037 + goto unwind; 1038 + 1039 + stop: 1040 + rc2 = NETDEV_TX_BUSY; 1041 + 1042 + /* Stop the queue if it wasn't stopped before. */ 1043 + if (tx_queue->stopped == 1) 1044 + efx_stop_queue(tx_queue->efx); 1045 + 1046 + unwind: 1047 + efx_enqueue_unwind(tx_queue); 1048 + return rc2; 1049 + } 1050 + 1051 + 1052 + /* 1053 + * Free up all TSO datastructures associated with tx_queue. This 1054 + * routine should be called only once the tx_queue is both empty and 1055 + * will no longer be used. 1056 + */ 1057 + static void efx_fini_tso(struct efx_tx_queue *tx_queue) 1058 + { 1059 + unsigned i; 1060 + 1061 + if (tx_queue->buffer) 1062 + for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i) 1063 + efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); 1064 + 1065 + while (tx_queue->tso_headers_free != NULL) 1066 + efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free, 1067 + tx_queue->efx->pci_dev); 1068 + }
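The TSO path added above rewrites a private copy of the Ethernet/IP/TCP headers for every segment: tso_start_new_packet() advances the TCP sequence number by gso_size, bumps the IPv4 ID, shrinks tot_len to the per-segment size, and carries FIN/PSH only on the final segment. A minimal user-space sketch of that per-segment header arithmetic, using a simplified hypothetical struct and helper rather than the driver's own types:

#include <stdio.h>
#include <stdint.h>

/* Simplified per-segment header fields produced by TSO, mirroring what
 * tso_start_new_packet() writes into the copied headers. Hypothetical
 * struct for illustration only, not the driver's types. */
struct seg_hdr {
	uint32_t tcp_seq;	/* sequence number for this segment */
	uint16_t ip_id;		/* IPv4 ID, incremented per segment */
	uint16_t ip_tot_len;	/* IP total length for this segment */
	int fin, psh;		/* only carried on the last segment */
};

/* Header fields for segment 'i' of a burst carrying 'payload_len' bytes
 * of TCP payload split into 'mss'-sized pieces. */
static struct seg_hdr tso_segment_hdr(unsigned i, unsigned mss,
				      unsigned payload_len,
				      uint32_t first_seq, uint16_t first_id,
				      unsigned ip_tcp_hdr_len,
				      int orig_fin, int orig_psh)
{
	unsigned nsegs = (payload_len + mss - 1) / mss;
	unsigned remaining = payload_len - i * mss;
	unsigned this_payload = remaining < mss ? remaining : mss;
	struct seg_hdr h;

	h.tcp_seq    = first_seq + i * mss;	/* seqnum += gso_size */
	h.ip_id      = (uint16_t)(first_id + i); /* Linux leaves ID gaps */
	h.ip_tot_len = (uint16_t)(ip_tcp_hdr_len + this_payload);
	h.fin        = (i == nsegs - 1) ? orig_fin : 0;
	h.psh        = (i == nsegs - 1) ? orig_psh : 0;
	return h;
}

int main(void)
{
	unsigned mss = 1448, payload = 3000, hdr = 40;
	unsigned nsegs = (payload + mss - 1) / mss;

	for (unsigned i = 0; i < nsegs; i++) {
		struct seg_hdr h = tso_segment_hdr(i, mss, payload,
						   1000, 42, hdr, 1, 1);
		printf("seg %u: seq=%u id=%u tot_len=%u fin=%d psh=%d\n",
		       i, h.tcp_seq, h.ip_id, h.ip_tot_len, h.fin, h.psh);
	}
	return 0;
}

For the 3000-byte example this yields payload sizes of 1448, 1448 and 104 bytes, with FIN/PSH set only on the last segment.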
+36
drivers/net/sfc/xfp_phy.c
··· 24 24 MDIO_MMDREG_DEVS0_PMAPMD | \ 25 25 MDIO_MMDREG_DEVS0_PHYXS) 26 26 27 + #define XFP_LOOPBACKS ((1 << LOOPBACK_PCS) | \ 28 + (1 << LOOPBACK_PMAPMD) | \ 29 + (1 << LOOPBACK_NETWORK)) 30 + 27 31 /****************************************************************************/ 28 32 /* Quake-specific MDIO registers */ 29 33 #define MDIO_QUAKE_LED0_REG (0xD006) ··· 38 34 mdio_clause45_write(p, p->mii.phy_id, MDIO_MMD_PMAPMD, addr, 39 35 mode); 40 36 } 37 + 38 + struct xfp_phy_data { 39 + int tx_disabled; 40 + }; 41 41 42 42 #define XFP_MAX_RESET_TIME 500 43 43 #define XFP_RESET_WAIT 10 ··· 80 72 81 73 static int xfp_phy_init(struct efx_nic *efx) 82 74 { 75 + struct xfp_phy_data *phy_data; 83 76 u32 devid = mdio_clause45_read_id(efx, MDIO_MMD_PHYXS); 84 77 int rc; 78 + 79 + phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL); 80 + efx->phy_data = (void *) phy_data; 85 81 86 82 EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision" 87 83 " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid), 88 84 MDIO_ID_REV(devid)); 89 85 86 + phy_data->tx_disabled = efx->tx_disabled; 87 + 90 88 rc = xfp_reset_phy(efx); 91 89 92 90 EFX_INFO(efx, "XFP: PHY init %s.\n", 93 91 rc ? "failed" : "successful"); 92 + if (rc < 0) 93 + goto fail; 94 94 95 + return 0; 96 + 97 + fail: 98 + kfree(efx->phy_data); 99 + efx->phy_data = NULL; 95 100 return rc; 96 101 } 97 102 ··· 131 110 132 111 static void xfp_phy_reconfigure(struct efx_nic *efx) 133 112 { 113 + struct xfp_phy_data *phy_data = efx->phy_data; 114 + 115 + /* Reset the PHY when moving from tx off to tx on */ 116 + if (phy_data->tx_disabled && !efx->tx_disabled) 117 + xfp_reset_phy(efx); 118 + 119 + mdio_clause45_transmit_disable(efx); 120 + mdio_clause45_phy_reconfigure(efx); 121 + 122 + phy_data->tx_disabled = efx->tx_disabled; 134 123 efx->link_up = xfp_link_ok(efx); 135 124 efx->link_options = GM_LPA_10000FULL; 136 125 } ··· 150 119 { 151 120 /* Clobber the LED if it was blinking */ 152 121 efx->board_info.blink(efx, 0); 122 + 123 + /* Free the context block */ 124 + kfree(efx->phy_data); 125 + efx->phy_data = NULL; 153 126 } 154 127 155 128 struct efx_phy_operations falcon_xfp_phy_ops = { ··· 164 129 .clear_interrupt = xfp_phy_clear_interrupt, 165 130 .reset_xaui = efx_port_dummy_op_void, 166 131 .mmds = XFP_REQUIRED_DEVS, 132 + .loopbacks = XFP_LOOPBACKS, 167 133 };
+2 -2
drivers/net/sky2.h
··· 1966 1966 struct tx_ring_info { 1967 1967 struct sk_buff *skb; 1968 1968 DECLARE_PCI_UNMAP_ADDR(mapaddr); 1969 - DECLARE_PCI_UNMAP_ADDR(maplen); 1969 + DECLARE_PCI_UNMAP_LEN(maplen); 1970 1970 }; 1971 1971 1972 1972 struct rx_ring_info { 1973 1973 struct sk_buff *skb; 1974 1974 dma_addr_t data_addr; 1975 - DECLARE_PCI_UNMAP_ADDR(data_size); 1975 + DECLARE_PCI_UNMAP_LEN(data_size); 1976 1976 dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT]; 1977 1977 }; 1978 1978
+1 -3
drivers/net/wan/Kconfig
··· 150 150 151 151 config HDLC_PPP 152 152 tristate "Synchronous Point-to-Point Protocol (PPP) support" 153 - depends on HDLC && BROKEN 153 + depends on HDLC 154 154 help 155 155 Generic HDLC driver supporting PPP over WAN connections. 156 - This module is currently broken and will cause a kernel panic 157 - when a device configured in PPP mode is activated. 158 156 159 157 It will be replaced by new PPP implementation in Linux 2.6.26. 160 158
+7 -7
drivers/net/wan/cosa.c
··· 629 629 d->base_addr = chan->cosa->datareg; 630 630 d->irq = chan->cosa->irq; 631 631 d->dma = chan->cosa->dma; 632 - d->priv = chan; 632 + d->ml_priv = chan; 633 633 sppp_attach(&chan->pppdev); 634 634 if (register_netdev(d)) { 635 635 printk(KERN_WARNING "%s: register_netdev failed.\n", d->name); ··· 650 650 651 651 static int cosa_sppp_open(struct net_device *d) 652 652 { 653 - struct channel_data *chan = d->priv; 653 + struct channel_data *chan = d->ml_priv; 654 654 int err; 655 655 unsigned long flags; 656 656 ··· 690 690 691 691 static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *dev) 692 692 { 693 - struct channel_data *chan = dev->priv; 693 + struct channel_data *chan = dev->ml_priv; 694 694 695 695 netif_stop_queue(dev); 696 696 ··· 701 701 702 702 static void cosa_sppp_timeout(struct net_device *dev) 703 703 { 704 - struct channel_data *chan = dev->priv; 704 + struct channel_data *chan = dev->ml_priv; 705 705 706 706 if (test_bit(RXBIT, &chan->cosa->rxtx)) { 707 707 chan->stats.rx_errors++; ··· 720 720 721 721 static int cosa_sppp_close(struct net_device *d) 722 722 { 723 - struct channel_data *chan = d->priv; 723 + struct channel_data *chan = d->ml_priv; 724 724 unsigned long flags; 725 725 726 726 netif_stop_queue(d); ··· 800 800 801 801 static struct net_device_stats *cosa_net_stats(struct net_device *dev) 802 802 { 803 - struct channel_data *chan = dev->priv; 803 + struct channel_data *chan = dev->ml_priv; 804 804 return &chan->stats; 805 805 } 806 806 ··· 1217 1217 int cmd) 1218 1218 { 1219 1219 int rv; 1220 - struct channel_data *chan = dev->priv; 1220 + struct channel_data *chan = dev->ml_priv; 1221 1221 rv = cosa_ioctl_common(chan->cosa, chan, cmd, (unsigned long)ifr->ifr_data); 1222 1222 if (rv == -ENOIOCTLCMD) { 1223 1223 return sppp_do_ioctl(dev, ifr, cmd);
+1 -1
drivers/net/wan/hdlc_ppp.c
··· 45 45 int (*old_ioctl)(struct net_device *, struct ifreq *, int); 46 46 int result; 47 47 48 - dev->priv = &state(hdlc)->syncppp_ptr; 48 + dev->ml_priv = &state(hdlc)->syncppp_ptr; 49 49 state(hdlc)->syncppp_ptr = &state(hdlc)->pppdev; 50 50 state(hdlc)->pppdev.dev = dev; 51 51
+6 -6
drivers/net/wan/hostess_sv11.c
··· 75 75 76 76 static int hostess_open(struct net_device *d) 77 77 { 78 - struct sv11_device *sv11=d->priv; 78 + struct sv11_device *sv11=d->ml_priv; 79 79 int err = -1; 80 80 81 81 /* ··· 128 128 129 129 static int hostess_close(struct net_device *d) 130 130 { 131 - struct sv11_device *sv11=d->priv; 131 + struct sv11_device *sv11=d->ml_priv; 132 132 /* 133 133 * Discard new frames 134 134 */ ··· 159 159 160 160 static int hostess_ioctl(struct net_device *d, struct ifreq *ifr, int cmd) 161 161 { 162 - /* struct sv11_device *sv11=d->priv; 162 + /* struct sv11_device *sv11=d->ml_priv; 163 163 z8530_ioctl(d,&sv11->sync.chanA,ifr,cmd) */ 164 164 return sppp_do_ioctl(d, ifr,cmd); 165 165 } 166 166 167 167 static struct net_device_stats *hostess_get_stats(struct net_device *d) 168 168 { 169 - struct sv11_device *sv11=d->priv; 169 + struct sv11_device *sv11=d->ml_priv; 170 170 if(sv11) 171 171 return z8530_get_stats(&sv11->sync.chanA); 172 172 else ··· 179 179 180 180 static int hostess_queue_xmit(struct sk_buff *skb, struct net_device *d) 181 181 { 182 - struct sv11_device *sv11=d->priv; 182 + struct sv11_device *sv11=d->ml_priv; 183 183 return z8530_queue_xmit(&sv11->sync.chanA, skb); 184 184 } 185 185 ··· 325 325 /* 326 326 * Initialise the PPP components 327 327 */ 328 + d->ml_priv = sv; 328 329 sppp_attach(&sv->netdev); 329 330 330 331 /* ··· 334 333 335 334 d->base_addr = iobase; 336 335 d->irq = irq; 337 - d->priv = sv; 338 336 339 337 if(register_netdev(d)) 340 338 {
+1
drivers/net/wan/lmc/lmc_main.c
··· 891 891 892 892 /* Initialize the sppp layer */ 893 893 /* An ioctl can cause a subsequent detach for raw frame interface */ 894 + dev->ml_priv = sc; 894 895 sc->if_type = LMC_PPP; 895 896 sc->check = 0xBEAFCAFE; 896 897 dev->base_addr = pci_resource_start(pdev, 0);
+1
drivers/net/wan/sealevel.c
··· 241 241 return NULL; 242 242 243 243 sv = d->priv; 244 + d->ml_priv = sv; 244 245 sv->if_ptr = &sv->pppdev; 245 246 sv->pppdev.dev = d; 246 247 d->base_addr = iobase;
+1 -1
drivers/net/wireless/iwlwifi/iwl-3945.c
··· 666 666 rx_status.flag = 0; 667 667 rx_status.mactime = le64_to_cpu(rx_end->timestamp); 668 668 rx_status.freq = 669 - ieee80211_frequency_to_channel(le16_to_cpu(rx_hdr->channel)); 669 + ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel)); 670 670 rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? 671 671 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; 672 672
+1 -1
drivers/net/wireless/iwlwifi/iwl-4965-rs.c
··· 163 163 struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file; 164 164 #endif 165 165 struct iwl4965_rate dbg_fixed; 166 - struct iwl_priv *drv; 167 166 #endif 167 + struct iwl_priv *drv; 168 168 }; 169 169 170 170 static void rs_rate_scale_perform(struct iwl_priv *priv,
+1 -1
drivers/net/wireless/iwlwifi/iwl-4965.c
··· 3978 3978 3979 3979 rx_status.mactime = le64_to_cpu(rx_start->timestamp); 3980 3980 rx_status.freq = 3981 - ieee80211_frequency_to_channel(le16_to_cpu(rx_start->channel)); 3981 + ieee80211_channel_to_frequency(le16_to_cpu(rx_start->channel)); 3982 3982 rx_status.band = (rx_start->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? 3983 3983 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; 3984 3984 rx_status.rate_idx =
+9 -2
drivers/net/wireless/prism54/islpci_dev.c
··· 388 388 389 389 netif_start_queue(ndev); 390 390 391 - /* Turn off carrier unless we know we have associated */ 392 - netif_carrier_off(ndev); 391 + /* Turn off carrier if in STA or Ad-hoc mode. It will be turned on 392 + * once the firmware receives a trap of being associated 393 + * (GEN_OID_LINKSTATE). In other modes (AP or WDS or monitor) we 394 + * should just leave the carrier on as its expected the firmware 395 + * won't send us a trigger. */ 396 + if (priv->iw_mode == IW_MODE_INFRA || priv->iw_mode == IW_MODE_ADHOC) 397 + netif_carrier_off(ndev); 398 + else 399 + netif_carrier_on(ndev); 393 400 394 401 return 0; 395 402 }
+4 -7
drivers/net/wireless/rt2x00/rt2x00dev.c
··· 1032 1032 * Initialize the device. 1033 1033 */ 1034 1034 status = rt2x00dev->ops->lib->initialize(rt2x00dev); 1035 - if (status) 1036 - goto exit; 1035 + if (status) { 1036 + rt2x00queue_uninitialize(rt2x00dev); 1037 + return status; 1038 + } 1037 1039 1038 1040 __set_bit(DEVICE_INITIALIZED, &rt2x00dev->flags); 1039 1041 ··· 1045 1043 rt2x00rfkill_register(rt2x00dev); 1046 1044 1047 1045 return 0; 1048 - 1049 - exit: 1050 - rt2x00lib_uninitialize(rt2x00dev); 1051 - 1052 - return status; 1053 1046 } 1054 1047 1055 1048 int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
+3 -2
drivers/net/wireless/rt2x00/rt2x00pci.c
··· 314 314 if (status) { 315 315 ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n", 316 316 pci_dev->irq, status); 317 - return status; 317 + goto exit; 318 318 } 319 319 320 320 return 0; 321 321 322 322 exit: 323 - rt2x00pci_uninitialize(rt2x00dev); 323 + queue_for_each(rt2x00dev, queue) 324 + rt2x00pci_free_queue_dma(rt2x00dev, queue); 324 325 325 326 return status; 326 327 }
+11 -20
drivers/net/wireless/rt2x00/rt61pci.c
··· 2366 2366 { 2367 2367 struct rt2x00_dev *rt2x00dev = hw->priv; 2368 2368 struct rt2x00_intf *intf = vif_to_intf(control->vif); 2369 + struct queue_entry_priv_pci_tx *priv_tx; 2369 2370 struct skb_frame_desc *skbdesc; 2370 2371 unsigned int beacon_base; 2371 2372 u32 reg; ··· 2374 2373 if (unlikely(!intf->beacon)) 2375 2374 return -ENOBUFS; 2376 2375 2377 - /* 2378 - * We need to append the descriptor in front of the 2379 - * beacon frame. 2380 - */ 2381 - if (skb_headroom(skb) < intf->beacon->queue->desc_size) { 2382 - if (pskb_expand_head(skb, intf->beacon->queue->desc_size, 2383 - 0, GFP_ATOMIC)) 2384 - return -ENOMEM; 2385 - } 2386 - 2387 - /* 2388 - * Add the descriptor in front of the skb. 2389 - */ 2390 - skb_push(skb, intf->beacon->queue->desc_size); 2391 - memset(skb->data, 0, intf->beacon->queue->desc_size); 2376 + priv_tx = intf->beacon->priv_data; 2377 + memset(priv_tx->desc, 0, intf->beacon->queue->desc_size); 2392 2378 2393 2379 /* 2394 2380 * Fill in skb descriptor ··· 2383 2395 skbdesc = get_skb_frame_desc(skb); 2384 2396 memset(skbdesc, 0, sizeof(*skbdesc)); 2385 2397 skbdesc->flags |= FRAME_DESC_DRIVER_GENERATED; 2386 - skbdesc->data = skb->data + intf->beacon->queue->desc_size; 2387 - skbdesc->data_len = skb->len - intf->beacon->queue->desc_size; 2388 - skbdesc->desc = skb->data; 2398 + skbdesc->data = skb->data; 2399 + skbdesc->data_len = skb->len; 2400 + skbdesc->desc = priv_tx->desc; 2389 2401 skbdesc->desc_len = intf->beacon->queue->desc_size; 2390 2402 skbdesc->entry = intf->beacon; 2391 2403 ··· 2413 2425 */ 2414 2426 beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx); 2415 2427 rt2x00pci_register_multiwrite(rt2x00dev, beacon_base, 2416 - skb->data, skb->len); 2428 + skbdesc->desc, skbdesc->desc_len); 2429 + rt2x00pci_register_multiwrite(rt2x00dev, 2430 + beacon_base + skbdesc->desc_len, 2431 + skbdesc->data, skbdesc->data_len); 2417 2432 rt61pci_kick_tx_queue(rt2x00dev, control->queue); 2418 2433 2419 2434 return 0; ··· 2481 2490 2482 2491 static const struct data_queue_desc rt61pci_queue_bcn = { 2483 2492 .entry_num = 4 * BEACON_ENTRIES, 2484 - .data_size = MGMT_FRAME_SIZE, 2493 + .data_size = 0, /* No DMA required for beacons */ 2485 2494 .desc_size = TXINFO_SIZE, 2486 2495 .priv_size = sizeof(struct queue_entry_priv_pci_tx), 2487 2496 };
+2 -2
drivers/net/wireless/wavelan.c
··· 908 908 p->psa_call_code[3], p->psa_call_code[4], p->psa_call_code[5], 909 909 p->psa_call_code[6], p->psa_call_code[7]); 910 910 #ifdef DEBUG_SHOW_UNUSED 911 - printk(KERN_DEBUG "psa_reserved[]: %02X:%02X:%02X:%02X\n", 911 + printk(KERN_DEBUG "psa_reserved[]: %02X:%02X\n", 912 912 p->psa_reserved[0], 913 - p->psa_reserved[1], p->psa_reserved[2], p->psa_reserved[3]); 913 + p->psa_reserved[1]); 914 914 #endif /* DEBUG_SHOW_UNUSED */ 915 915 printk(KERN_DEBUG "psa_conf_status: %d, ", p->psa_conf_status); 916 916 printk("psa_crc: 0x%02x%02x, ", p->psa_crc[0], p->psa_crc[1]);
+2 -4
drivers/net/wireless/wavelan_cs.c
··· 1074 1074 p->psa_call_code[6], 1075 1075 p->psa_call_code[7]); 1076 1076 #ifdef DEBUG_SHOW_UNUSED 1077 - printk(KERN_DEBUG "psa_reserved[]: %02X:%02X:%02X:%02X\n", 1077 + printk(KERN_DEBUG "psa_reserved[]: %02X:%02X\n", 1078 1078 p->psa_reserved[0], 1079 - p->psa_reserved[1], 1080 - p->psa_reserved[2], 1081 - p->psa_reserved[3]); 1079 + p->psa_reserved[1]); 1082 1080 #endif /* DEBUG_SHOW_UNUSED */ 1083 1081 printk(KERN_DEBUG "psa_conf_status: %d, ", p->psa_conf_status); 1084 1082 printk("psa_crc: 0x%02x%02x, ", p->psa_crc[0], p->psa_crc[1]);
+5 -1
drivers/net/wireless/zd1211rw/zd_usb.c
··· 889 889 } 890 890 free_urb: 891 891 skb = (struct sk_buff *)urb->context; 892 - zd_mac_tx_to_dev(skb, urb->status); 892 + /* 893 + * grab 'usb' pointer before handing off the skb (since 894 + * it might be freed by zd_mac_tx_to_dev or mac80211) 895 + */ 893 896 cb = (struct zd_tx_skb_control_block *)skb->cb; 894 897 usb = &zd_hw_mac(cb->hw)->chip.usb; 898 + zd_mac_tx_to_dev(skb, urb->status); 895 899 free_tx_urb(usb, urb); 896 900 tx_dec_submitted_urbs(usb); 897 901 return;
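The zd_usb change is an ordering fix for a use-after-free: zd_mac_tx_to_dev() may free the skb, so the usb pointer has to be read out of skb->cb before the skb is handed off. The same pattern in a stand-alone sketch, with hypothetical packet/ctx/handoff/release names:

#include <stdlib.h>

struct ctx { int id; };
struct packet { struct ctx *owner; };

/* Hands the packet to another layer, which may free it immediately. */
static void handoff(struct packet *p) { free(p); }

static void release(struct ctx *c) { (void)c; }

static void complete(struct packet *p)
{
	/* WRONG: handoff(p); then p->owner - p may already be freed. */

	/* RIGHT: grab everything still needed from the packet first. */
	struct ctx *c = p->owner;
	handoff(p);		/* p must not be touched after this point */
	release(c);
}

int main(void)
{
	struct packet *p = malloc(sizeof(*p));
	static struct ctx c = { 1 };

	if (!p)
		return 1;
	p->owner = &c;
	complete(p);
	return 0;
}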
+26 -9
include/linux/netdevice.h
··· 93 93 * used. 94 94 */ 95 95 96 - #if !defined(CONFIG_AX25) && !defined(CONFIG_AX25_MODULE) && !defined(CONFIG_TR) 97 - #define LL_MAX_HEADER 32 96 + #if defined(CONFIG_WLAN_80211) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) 97 + # if defined(CONFIG_MAC80211_MESH) 98 + # define LL_MAX_HEADER 128 99 + # else 100 + # define LL_MAX_HEADER 96 101 + # endif 102 + #elif defined(CONFIG_TR) 103 + # define LL_MAX_HEADER 48 98 104 #else 99 - #if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) 100 - #define LL_MAX_HEADER 96 101 - #else 102 - #define LL_MAX_HEADER 48 103 - #endif 105 + # define LL_MAX_HEADER 32 104 106 #endif 105 107 106 108 #if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \ ··· 246 244 * 247 245 * We could use other alignment values, but we must maintain the 248 246 * relationship HH alignment <= LL alignment. 247 + * 248 + * LL_ALLOCATED_SPACE also takes into account the tailroom the device 249 + * may need. 249 250 */ 250 251 #define LL_RESERVED_SPACE(dev) \ 251 - (((dev)->hard_header_len&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) 252 + ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) 252 253 #define LL_RESERVED_SPACE_EXTRA(dev,extra) \ 253 - ((((dev)->hard_header_len+extra)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) 254 + ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) 255 + #define LL_ALLOCATED_SPACE(dev) \ 256 + ((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) 254 257 255 258 struct header_ops { 256 259 int (*create) (struct sk_buff *skb, struct net_device *dev, ··· 574 567 unsigned short type; /* interface hardware type */ 575 568 unsigned short hard_header_len; /* hardware hdr length */ 576 569 570 + /* extra head- and tailroom the hardware may need, but not in all cases 571 + * can this be guaranteed, especially tailroom. Some cases also use 572 + * LL_MAX_HEADER instead to allocate the skb. 573 + */ 574 + unsigned short needed_headroom; 575 + unsigned short needed_tailroom; 576 + 577 577 struct net_device *master; /* Pointer to master device of a group, 578 578 * which this device is member of. 579 579 */ ··· 728 714 /* Network namespace this network device is inside */ 729 715 struct net *nd_net; 730 716 #endif 717 + 718 + /* mid-layer private */ 719 + void *ml_priv; 731 720 732 721 /* bridge stuff */ 733 722 struct net_bridge_port *br_port;
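Callers of the new macros allocate LL_ALLOCATED_SPACE(dev) of extra room but still reserve only LL_RESERVED_SPACE(dev) at the front, which leaves the device's needed_tailroom free behind the payload; the igmp, raw and ndisc hunks further down all follow this shape. A condensed sketch of the intended call pattern (build_frame is a hypothetical helper, error handling beyond the allocation check is elided):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch: build a frame of 'len' payload bytes for 'dev', leaving room
 * for the link-layer header in front and any device tailroom behind. */
static struct sk_buff *build_frame(struct net_device *dev, unsigned int len)
{
	struct sk_buff *skb;

	/* headroom plus tailroom the device may need, alignment included */
	skb = alloc_skb(len + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (!skb)
		return NULL;

	/* only the headroom is reserved up front; needed_tailroom stays
	 * available past the end of the data for the driver to use */
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_put(skb, len);		/* caller fills the payload here */
	return skb;
}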
-3
include/net/irda/discovery.h
··· 57 57 __u8 byte[2]; 58 58 } __u16_host_order; 59 59 60 - /* Same purpose, different application */ 61 - #define u16ho(array) (* ((__u16 *) array)) 62 - 63 60 /* Types of discovery */ 64 61 typedef enum { 65 62 DISCOVERY_LOG, /* What's in our discovery log */
+1 -1
include/net/syncppp.h
··· 59 59 60 60 static inline struct sppp *sppp_of(struct net_device *dev) 61 61 { 62 - struct ppp_device **ppp = dev->priv; 62 + struct ppp_device **ppp = dev->ml_priv; 63 63 BUG_ON((*ppp)->dev != dev); 64 64 return &(*ppp)->sppp; 65 65 }
+1 -1
net/core/netpoll.c
··· 419 419 return; 420 420 421 421 size = arp_hdr_len(skb->dev); 422 - send_skb = find_skb(np, size + LL_RESERVED_SPACE(np->dev), 422 + send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev), 423 423 LL_RESERVED_SPACE(np->dev)); 424 424 425 425 if (!send_skb)
+1 -1
net/core/sock.c
··· 270 270 int err = 0; 271 271 int skb_len; 272 272 273 - /* Cast skb->rcvbuf to unsigned... It's pointless, but reduces 273 + /* Cast sk->rcvbuf to unsigned... It's pointless, but reduces 274 274 number of warnings when compiling with -W --ANK 275 275 */ 276 276 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+1 -1
net/econet/af_econet.c
··· 340 340 341 341 dev_hold(dev); 342 342 343 - skb = sock_alloc_send_skb(sk, len+LL_RESERVED_SPACE(dev), 343 + skb = sock_alloc_send_skb(sk, len+LL_ALLOCATED_SPACE(dev), 344 344 msg->msg_flags & MSG_DONTWAIT, &err); 345 345 if (skb==NULL) 346 346 goto out_unlock;
+1 -1
net/ipv4/arp.c
··· 570 570 * Allocate a buffer 571 571 */ 572 572 573 - skb = alloc_skb(arp_hdr_len(dev) + LL_RESERVED_SPACE(dev), GFP_ATOMIC); 573 + skb = alloc_skb(arp_hdr_len(dev) + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC); 574 574 if (skb == NULL) 575 575 return NULL; 576 576
+2 -2
net/ipv4/cipso_ipv4.c
··· 338 338 return -ENOENT; 339 339 340 340 hash = cipso_v4_map_cache_hash(key, key_len); 341 - bkt = hash & (CIPSO_V4_CACHE_BUCKETBITS - 1); 341 + bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1); 342 342 spin_lock_bh(&cipso_v4_cache[bkt].lock); 343 343 list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) { 344 344 if (entry->hash == hash && ··· 417 417 atomic_inc(&secattr->cache->refcount); 418 418 entry->lsm_data = secattr->cache; 419 419 420 - bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETBITS - 1); 420 + bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1); 421 421 spin_lock_bh(&cipso_v4_cache[bkt].lock); 422 422 if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) { 423 423 list_add(&entry->list, &cipso_v4_cache[bkt].list);
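The cipso change masks the hash with the bucket count rather than the bit count; hash & (CIPSO_V4_CACHE_BUCKETS - 1) picks buckets uniformly only because the bucket count is a power of two, in which case it equals hash % buckets. A small stand-alone illustration with an arbitrary power-of-two bucket count:

#include <stdio.h>

#define BUCKETS 128			/* must be a power of two */

int main(void)
{
	unsigned int hashes[] = { 7, 129, 4097, 0xdeadbeef };

	for (unsigned int i = 0; i < 4; i++) {
		unsigned int h = hashes[i];
		/* masking with (BUCKETS - 1) and taking the remainder
		 * agree exactly when BUCKETS is a power of two */
		printf("hash %#x -> mask %u, mod %u\n",
		       h, h & (BUCKETS - 1), h % BUCKETS);
	}
	return 0;
}

The old mask was built from the bit count, a much smaller value, so only a handful of buckets could ever be selected and the cache collapsed into a few long chains.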
+2 -2
net/ipv4/igmp.c
··· 292 292 struct iphdr *pip; 293 293 struct igmpv3_report *pig; 294 294 295 - skb = alloc_skb(size + LL_RESERVED_SPACE(dev), GFP_ATOMIC); 295 + skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC); 296 296 if (skb == NULL) 297 297 return NULL; 298 298 ··· 653 653 return -1; 654 654 } 655 655 656 - skb=alloc_skb(IGMP_SIZE+LL_RESERVED_SPACE(dev), GFP_ATOMIC); 656 + skb=alloc_skb(IGMP_SIZE+LL_ALLOCATED_SPACE(dev), GFP_ATOMIC); 657 657 if (skb == NULL) { 658 658 ip_rt_put(rt); 659 659 return -1;
+3 -3
net/ipv4/ipconfig.c
··· 710 710 struct net_device *dev = d->dev; 711 711 struct sk_buff *skb; 712 712 struct bootp_pkt *b; 713 - int hh_len = LL_RESERVED_SPACE(dev); 714 713 struct iphdr *h; 715 714 716 715 /* Allocate packet */ 717 - skb = alloc_skb(sizeof(struct bootp_pkt) + hh_len + 15, GFP_KERNEL); 716 + skb = alloc_skb(sizeof(struct bootp_pkt) + LL_ALLOCATED_SPACE(dev) + 15, 717 + GFP_KERNEL); 718 718 if (!skb) 719 719 return; 720 - skb_reserve(skb, hh_len); 720 + skb_reserve(skb, LL_RESERVED_SPACE(dev)); 721 721 b = (struct bootp_pkt *) skb_put(skb, sizeof(struct bootp_pkt)); 722 722 memset(b, 0, sizeof(struct bootp_pkt)); 723 723
+4 -6
net/ipv4/raw.c
··· 322 322 unsigned int flags) 323 323 { 324 324 struct inet_sock *inet = inet_sk(sk); 325 - int hh_len; 326 325 struct iphdr *iph; 327 326 struct sk_buff *skb; 328 327 unsigned int iphlen; ··· 335 336 if (flags&MSG_PROBE) 336 337 goto out; 337 338 338 - hh_len = LL_RESERVED_SPACE(rt->u.dst.dev); 339 - 340 - skb = sock_alloc_send_skb(sk, length+hh_len+15, 341 - flags&MSG_DONTWAIT, &err); 339 + skb = sock_alloc_send_skb(sk, 340 + length + LL_ALLOCATED_SPACE(rt->u.dst.dev) + 15, 341 + flags & MSG_DONTWAIT, &err); 342 342 if (skb == NULL) 343 343 goto error; 344 - skb_reserve(skb, hh_len); 344 + skb_reserve(skb, LL_RESERVED_SPACE(rt->u.dst.dev)); 345 345 346 346 skb->priority = sk->sk_priority; 347 347 skb->mark = sk->sk_mark;
+12 -5
net/ipv4/tcp_input.c
··· 1842 1842 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 1843 1843 } 1844 1844 1845 - /* Don't lost mark skbs that were fwd transmitted after RTO */ 1846 - if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) && 1847 - !after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark)) { 1845 + /* Marking forward transmissions that were made after RTO lost 1846 + * can cause unnecessary retransmissions in some scenarios, 1847 + * SACK blocks will mitigate that in some but not in all cases. 1848 + * We used to not mark them but it was causing break-ups with 1849 + * receivers that do only in-order receival. 1850 + * 1851 + * TODO: we could detect presence of such receiver and select 1852 + * different behavior per flow. 1853 + */ 1854 + if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) { 1848 1855 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 1849 1856 tp->lost_out += tcp_skb_pcount(skb); 1850 1857 } ··· 1867 1860 tp->reordering = min_t(unsigned int, tp->reordering, 1868 1861 sysctl_tcp_reordering); 1869 1862 tcp_set_ca_state(sk, TCP_CA_Loss); 1870 - tp->high_seq = tp->frto_highmark; 1863 + tp->high_seq = tp->snd_nxt; 1871 1864 TCP_ECN_queue_cwr(tp); 1872 1865 1873 1866 tcp_clear_retrans_hints_partial(tp); ··· 2489 2482 2490 2483 tcp_verify_left_out(tp); 2491 2484 2492 - if (tp->retrans_out == 0) 2485 + if (!tp->frto_counter && tp->retrans_out == 0) 2493 2486 tp->retrans_stamp = 0; 2494 2487 2495 2488 if (flag & FLAG_ECE)
+1 -1
net/ipv6/ip6_output.c
··· 780 780 * Allocate buffer. 781 781 */ 782 782 783 - if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_RESERVED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) { 783 + if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) { 784 784 NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n"); 785 785 IP6_INC_STATS(ip6_dst_idev(skb->dst), 786 786 IPSTATS_MIB_FRAGFAILS);
+2 -2
net/ipv6/mcast.c
··· 1411 1411 IPV6_TLV_PADN, 0 }; 1412 1412 1413 1413 /* we assume size > sizeof(ra) here */ 1414 - skb = sock_alloc_send_skb(sk, size + LL_RESERVED_SPACE(dev), 1, &err); 1414 + skb = sock_alloc_send_skb(sk, size + LL_ALLOCATED_SPACE(dev), 1, &err); 1415 1415 1416 1416 if (!skb) 1417 1417 return NULL; ··· 1790 1790 payload_len = len + sizeof(ra); 1791 1791 full_len = sizeof(struct ipv6hdr) + payload_len; 1792 1792 1793 - skb = sock_alloc_send_skb(sk, LL_RESERVED_SPACE(dev) + full_len, 1, &err); 1793 + skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + full_len, 1, &err); 1794 1794 1795 1795 if (skb == NULL) { 1796 1796 rcu_read_lock();
+2 -2
net/ipv6/ndisc.c
··· 479 479 480 480 skb = sock_alloc_send_skb(sk, 481 481 (MAX_HEADER + sizeof(struct ipv6hdr) + 482 - len + LL_RESERVED_SPACE(dev)), 482 + len + LL_ALLOCATED_SPACE(dev)), 483 483 1, &err); 484 484 if (!skb) { 485 485 ND_PRINTK0(KERN_ERR ··· 1521 1521 1522 1522 buff = sock_alloc_send_skb(sk, 1523 1523 (MAX_HEADER + sizeof(struct ipv6hdr) + 1524 - len + LL_RESERVED_SPACE(dev)), 1524 + len + LL_ALLOCATED_SPACE(dev)), 1525 1525 1, &err); 1526 1526 if (buff == NULL) { 1527 1527 ND_PRINTK0(KERN_ERR
+4 -6
net/ipv6/raw.c
··· 609 609 struct ipv6_pinfo *np = inet6_sk(sk); 610 610 struct ipv6hdr *iph; 611 611 struct sk_buff *skb; 612 - unsigned int hh_len; 613 612 int err; 614 613 615 614 if (length > rt->u.dst.dev->mtu) { ··· 618 619 if (flags&MSG_PROBE) 619 620 goto out; 620 621 621 - hh_len = LL_RESERVED_SPACE(rt->u.dst.dev); 622 - 623 - skb = sock_alloc_send_skb(sk, length+hh_len+15, 624 - flags&MSG_DONTWAIT, &err); 622 + skb = sock_alloc_send_skb(sk, 623 + length + LL_ALLOCATED_SPACE(rt->u.dst.dev) + 15, 624 + flags & MSG_DONTWAIT, &err); 625 625 if (skb == NULL) 626 626 goto error; 627 - skb_reserve(skb, hh_len); 627 + skb_reserve(skb, LL_RESERVED_SPACE(rt->u.dst.dev)); 628 628 629 629 skb->priority = sk->sk_priority; 630 630 skb->mark = sk->sk_mark;
+5 -3
net/irda/discovery.c
··· 40 40 41 41 #include <net/irda/discovery.h> 42 42 43 + #include <asm/unaligned.h> 44 + 43 45 /* 44 46 * Function irlmp_add_discovery (cachelog, discovery) 45 47 * ··· 89 87 */ 90 88 hashbin_remove_this(cachelog, (irda_queue_t *) node); 91 89 /* Check if hints bits are unchanged */ 92 - if(u16ho(node->data.hints) == u16ho(new->data.hints)) 90 + if (get_unaligned((__u16 *)node->data.hints) == get_unaligned((__u16 *)new->data.hints)) 93 91 /* Set time of first discovery for this node */ 94 92 new->firststamp = node->firststamp; 95 93 kfree(node); ··· 283 281 /* Mask out the ones we don't want : 284 282 * We want to match the discovery mask, and to get only 285 283 * the most recent one (unless we want old ones) */ 286 - if ((u16ho(discovery->data.hints) & mask) && 284 + if ((get_unaligned((__u16 *)discovery->data.hints) & mask) && 287 285 ((old_entries) || 288 - ((jiffies - discovery->firststamp) < j_timeout)) ) { 286 + ((jiffies - discovery->firststamp) < j_timeout))) { 289 287 /* Create buffer as needed. 290 288 * As this function get called a lot and most time 291 289 * we don't have anything to put in the log (we are
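The discovery hints are a two-byte array with no alignment guarantee, so the old u16ho() cast could perform a misaligned 16-bit load; get_unaligned() reads the value safely on strict-alignment architectures. A user-space sketch of the difference, where memcpy() stands in for get_unaligned() and the misaligned buffer is contrived:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Read a 16-bit value from a possibly misaligned address. memcpy is the
 * portable equivalent of the kernel's get_unaligned() for this sketch. */
static uint16_t read_u16_unaligned(const void *p)
{
	uint16_t v;

	memcpy(&v, p, sizeof(v));
	return v;
}

int main(void)
{
	unsigned char buf[8] = { 0, 0x34, 0x12, 0 };
	const void *misaligned = buf + 1;	/* odd address */

	/* *(uint16_t *)misaligned is undefined behaviour and can trap on
	 * strict-alignment CPUs; the copy-based read is always safe. */
	printf("hints = 0x%04x\n", (unsigned)read_u16_unaligned(misaligned));
	return 0;
}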
+3 -2
net/irda/irlmp.c
··· 1062 1062 for(i = 0; i < number; i++) { 1063 1063 /* Check if we should notify client */ 1064 1064 if ((client->expir_callback) && 1065 - (client->hint_mask.word & u16ho(expiries[i].hints) 1065 + (client->hint_mask.word & 1066 + get_unaligned((__u16 *)expiries[i].hints) 1066 1067 & 0x7f7f) ) 1067 1068 client->expir_callback(&(expiries[i]), 1068 1069 EXPIRY_TIMEOUT, ··· 1087 1086 1088 1087 IRDA_ASSERT(irlmp != NULL, return NULL;); 1089 1088 1090 - u16ho(irlmp->discovery_rsp.data.hints) = irlmp->hints.word; 1089 + put_unaligned(irlmp->hints.word, (__u16 *)irlmp->discovery_rsp.data.hints); 1091 1090 1092 1091 /* 1093 1092 * Set character set for device name (we use ASCII), and
+2 -2
net/irda/irnet/irnet_irda.c
··· 1673 1673 /* Notify the control channel */ 1674 1674 irnet_post_event(NULL, IRNET_DISCOVER, 1675 1675 discovery->saddr, discovery->daddr, discovery->info, 1676 - u16ho(discovery->hints)); 1676 + get_unaligned((__u16 *)discovery->hints)); 1677 1677 1678 1678 DEXIT(IRDA_OCB_TRACE, "\n"); 1679 1679 } ··· 1704 1704 /* Notify the control channel */ 1705 1705 irnet_post_event(NULL, IRNET_EXPIRE, 1706 1706 expiry->saddr, expiry->daddr, expiry->info, 1707 - u16ho(expiry->hints)); 1707 + get_unaligned((__u16 *)expiry->hints)); 1708 1708 1709 1709 DEXIT(IRDA_OCB_TRACE, "\n"); 1710 1710 }
+12 -3
net/mac80211/debugfs_key.c
··· 255 255 void ieee80211_debugfs_key_add_default(struct ieee80211_sub_if_data *sdata) 256 256 { 257 257 char buf[50]; 258 + struct ieee80211_key *key; 258 259 259 260 if (!sdata->debugfsdir) 260 261 return; 261 262 262 - sprintf(buf, "../keys/%d", sdata->default_key->debugfs.cnt); 263 - sdata->debugfs.default_key = 264 - debugfs_create_symlink("default_key", sdata->debugfsdir, buf); 263 + /* this is running under the key lock */ 264 + 265 + key = sdata->default_key; 266 + if (key) { 267 + sprintf(buf, "../keys/%d", key->debugfs.cnt); 268 + sdata->debugfs.default_key = 269 + debugfs_create_symlink("default_key", 270 + sdata->debugfsdir, buf); 271 + } else 272 + ieee80211_debugfs_key_remove_default(sdata); 265 273 } 274 + 266 275 void ieee80211_debugfs_key_remove_default(struct ieee80211_sub_if_data *sdata) 267 276 { 268 277 if (!sdata)
+9
net/mac80211/iface.c
··· 54 54 if (!ndev) 55 55 return -ENOMEM; 56 56 57 + ndev->needed_headroom = local->tx_headroom + 58 + 4*6 /* four MAC addresses */ 59 + + 2 + 2 + 2 + 2 /* ctl, dur, seq, qos */ 60 + + 6 /* mesh */ 61 + + 8 /* rfc1042/bridge tunnel */ 62 + - ETH_HLEN /* ethernet hard_header_len */ 63 + + IEEE80211_ENCRYPT_HEADROOM; 64 + ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM; 65 + 57 66 ret = dev_alloc_name(ndev, ndev->name); 58 67 if (ret < 0) 59 68 goto fail;
+1 -1
net/mac80211/mesh.c
··· 397 397 put_unaligned(cpu_to_le32(sdata->u.sta.mesh_seqnum), &meshhdr->seqnum); 398 398 sdata->u.sta.mesh_seqnum++; 399 399 400 - return 5; 400 + return 6; 401 401 } 402 402 403 403 void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
+1 -1
net/mac80211/mesh_hwmp.c
··· 120 120 *pos++ = WLAN_EID_PREP; 121 121 break; 122 122 default: 123 - kfree(skb); 123 + kfree_skb(skb); 124 124 return -ENOTSUPP; 125 125 break; 126 126 }
+12 -5
net/mac80211/mesh_pathtbl.c
··· 158 158 if (atomic_add_unless(&sdata->u.sta.mpaths, 1, MESH_MAX_MPATHS) == 0) 159 159 return -ENOSPC; 160 160 161 - read_lock(&pathtbl_resize_lock); 162 - 163 161 new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL); 164 162 if (!new_mpath) { 165 163 atomic_dec(&sdata->u.sta.mpaths); 166 164 err = -ENOMEM; 167 165 goto endadd2; 168 166 } 167 + new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL); 168 + if (!new_node) { 169 + kfree(new_mpath); 170 + atomic_dec(&sdata->u.sta.mpaths); 171 + err = -ENOMEM; 172 + goto endadd2; 173 + } 174 + 175 + read_lock(&pathtbl_resize_lock); 169 176 memcpy(new_mpath->dst, dst, ETH_ALEN); 170 177 new_mpath->dev = dev; 171 178 new_mpath->flags = 0; 172 179 skb_queue_head_init(&new_mpath->frame_queue); 173 - new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL); 174 180 new_node->mpath = new_mpath; 175 181 new_mpath->timer.data = (unsigned long) new_mpath; 176 182 new_mpath->timer.function = mesh_path_timer; ··· 208 202 209 203 endadd: 210 204 spin_unlock(&mesh_paths->hashwlock[hash_idx]); 211 - endadd2: 212 205 read_unlock(&pathtbl_resize_lock); 213 206 if (!err && grow) { 214 207 struct mesh_table *oldtbl, *newtbl; ··· 220 215 return -ENOMEM; 221 216 } 222 217 rcu_assign_pointer(mesh_paths, newtbl); 218 + write_unlock(&pathtbl_resize_lock); 219 + 223 220 synchronize_rcu(); 224 221 mesh_table_free(oldtbl, false); 225 - write_unlock(&pathtbl_resize_lock); 226 222 } 223 + endadd2: 227 224 return err; 228 225 } 229 226
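The mesh_pathtbl fix moves both allocations ahead of read_lock(&pathtbl_resize_lock) and finally checks the kmalloc() of the node, so no failure path runs or returns with the resize lock held. The general allocate-before-lock shape in a stand-alone sketch, with hypothetical names and a pthread mutex standing in for the kernel lock:

#include <stdlib.h>
#include <pthread.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

struct entry { int key; struct entry *next; };
static struct entry *table_head;

/* Do every allocation that can fail *before* taking the lock, so no
 * error path needs to unwind with the lock still held. */
static int table_add(int key)
{
	struct entry *e = malloc(sizeof(*e));

	if (!e)
		return -1;	/* nothing locked, nothing to undo */

	e->key = key;

	pthread_mutex_lock(&table_lock);
	e->next = table_head;
	table_head = e;
	pthread_mutex_unlock(&table_lock);
	return 0;
}

int main(void)
{
	return table_add(42) == 0 ? 0 : 1;
}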
+52 -16
net/mac80211/mlme.c
··· 665 665 mod_timer(&ifsta->timer, jiffies + IEEE80211_AUTH_TIMEOUT); 666 666 } 667 667 668 + static int ieee80211_compatible_rates(struct ieee80211_sta_bss *bss, 669 + struct ieee80211_supported_band *sband, 670 + u64 *rates) 671 + { 672 + int i, j, count; 673 + *rates = 0; 674 + count = 0; 675 + for (i = 0; i < bss->supp_rates_len; i++) { 676 + int rate = (bss->supp_rates[i] & 0x7F) * 5; 677 + 678 + for (j = 0; j < sband->n_bitrates; j++) 679 + if (sband->bitrates[j].bitrate == rate) { 680 + *rates |= BIT(j); 681 + count++; 682 + break; 683 + } 684 + } 685 + 686 + return count; 687 + } 668 688 669 689 static void ieee80211_send_assoc(struct net_device *dev, 670 690 struct ieee80211_if_sta *ifsta) ··· 693 673 struct sk_buff *skb; 694 674 struct ieee80211_mgmt *mgmt; 695 675 u8 *pos, *ies; 696 - int i, len; 676 + int i, len, count, rates_len, supp_rates_len; 697 677 u16 capab; 698 678 struct ieee80211_sta_bss *bss; 699 679 int wmm = 0; 700 680 struct ieee80211_supported_band *sband; 681 + u64 rates = 0; 701 682 702 683 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 703 684 sizeof(*mgmt) + 200 + ifsta->extra_ie_len + ··· 761 740 *pos++ = ifsta->ssid_len; 762 741 memcpy(pos, ifsta->ssid, ifsta->ssid_len); 763 742 764 - len = sband->n_bitrates; 765 - if (len > 8) 766 - len = 8; 767 - pos = skb_put(skb, len + 2); 768 - *pos++ = WLAN_EID_SUPP_RATES; 769 - *pos++ = len; 770 - for (i = 0; i < len; i++) { 771 - int rate = sband->bitrates[i].bitrate; 772 - *pos++ = (u8) (rate / 5); 773 - } 743 + /* all supported rates should be added here but some APs 744 + * (e.g. D-Link DAP 1353 in b-only mode) don't like that 745 + * Therefore only add rates the AP supports */ 746 + rates_len = ieee80211_compatible_rates(bss, sband, &rates); 747 + supp_rates_len = rates_len; 748 + if (supp_rates_len > 8) 749 + supp_rates_len = 8; 774 750 775 - if (sband->n_bitrates > len) { 776 - pos = skb_put(skb, sband->n_bitrates - len + 2); 777 - *pos++ = WLAN_EID_EXT_SUPP_RATES; 778 - *pos++ = sband->n_bitrates - len; 779 - for (i = len; i < sband->n_bitrates; i++) { 751 + len = sband->n_bitrates; 752 + pos = skb_put(skb, supp_rates_len + 2); 753 + *pos++ = WLAN_EID_SUPP_RATES; 754 + *pos++ = supp_rates_len; 755 + 756 + count = 0; 757 + for (i = 0; i < sband->n_bitrates; i++) { 758 + if (BIT(i) & rates) { 780 759 int rate = sband->bitrates[i].bitrate; 781 760 *pos++ = (u8) (rate / 5); 761 + if (++count == 8) 762 + break; 763 + } 764 + } 765 + 766 + if (count == 8) { 767 + pos = skb_put(skb, rates_len - count + 2); 768 + *pos++ = WLAN_EID_EXT_SUPP_RATES; 769 + *pos++ = rates_len - count; 770 + 771 + for (i++; i < sband->n_bitrates; i++) { 772 + if (BIT(i) & rates) { 773 + int rate = sband->bitrates[i].bitrate; 774 + *pos++ = (u8) (rate / 5); 775 + } 782 776 } 783 777 } 784 778
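ieee80211_compatible_rates() builds a bitmap over the local bitrate table: bit j is set when the AP advertises a rate equal to sband->bitrates[j].bitrate, and the association request then transmits only those rates, the first eight in the Supported Rates IE and the remainder in Extended Supported Rates. A stripped-down sketch of the bitmap intersection, using made-up rate tables (mac80211 stores bitrates in units of 100 kb/s, the IE encodes them in units of 500 kb/s, hence the factor of 5):

#include <stdio.h>

int main(void)
{
	/* local bitrate table, in units of 100 kb/s */
	const int local[] = { 10, 20, 55, 110, 60, 90, 120, 240, 480, 540 };
	const int n_local = sizeof(local) / sizeof(local[0]);

	/* rates the AP advertised, 802.11 encoding: units of 500 kb/s,
	 * basic-rate flag masked off with & 0x7F */
	const unsigned char ap[] = { 0x82, 0x84, 0x0b, 0x16, 0x0c, 0x30 };
	const int n_ap = sizeof(ap) / sizeof(ap[0]);

	unsigned long long rates = 0;
	int count = 0;

	for (int i = 0; i < n_ap; i++) {
		int rate = (ap[i] & 0x7F) * 5;	/* convert to 100 kb/s */

		for (int j = 0; j < n_local; j++) {
			if (local[j] == rate) {
				rates |= 1ULL << j;
				count++;
				break;
			}
		}
	}

	printf("%d compatible rates, bitmap %#llx\n", count, rates);
	return 0;
}

Only the set bits are emitted in the IEs, which keeps APs such as the one named in the comment above from rejecting the request over rates they never advertised.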
+6 -6
net/mac80211/rx.c
··· 1305 1305 if (is_multicast_ether_addr(skb->data)) { 1306 1306 if (*mesh_ttl > 0) { 1307 1307 xmit_skb = skb_copy(skb, GFP_ATOMIC); 1308 - if (!xmit_skb && net_ratelimit()) 1308 + if (xmit_skb) 1309 + xmit_skb->pkt_type = PACKET_OTHERHOST; 1310 + else if (net_ratelimit()) 1309 1311 printk(KERN_DEBUG "%s: failed to clone " 1310 1312 "multicast frame\n", dev->name); 1311 - else 1312 - xmit_skb->pkt_type = PACKET_OTHERHOST; 1313 1313 } else 1314 1314 IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.sta, 1315 1315 dropped_frames_ttl); ··· 1395 1395 padding = ((4 - subframe_len) & 0x3); 1396 1396 /* the last MSDU has no padding */ 1397 1397 if (subframe_len > remaining) { 1398 - printk(KERN_DEBUG "%s: wrong buffer size", dev->name); 1398 + printk(KERN_DEBUG "%s: wrong buffer size\n", dev->name); 1399 1399 return RX_DROP_UNUSABLE; 1400 1400 } 1401 1401 ··· 1418 1418 eth = (struct ethhdr *) skb_pull(skb, ntohs(len) + 1419 1419 padding); 1420 1420 if (!eth) { 1421 - printk(KERN_DEBUG "%s: wrong buffer size ", 1421 + printk(KERN_DEBUG "%s: wrong buffer size\n", 1422 1422 dev->name); 1423 1423 dev_kfree_skb(frame); 1424 1424 return RX_DROP_UNUSABLE; ··· 1952 1952 if (!skb_new) { 1953 1953 if (net_ratelimit()) 1954 1954 printk(KERN_DEBUG "%s: failed to copy " 1955 - "multicast frame for %s", 1955 + "multicast frame for %s\n", 1956 1956 wiphy_name(local->hw.wiphy), 1957 1957 prev->dev->name); 1958 1958 continue;
+3 -2
net/mac80211/tx.c
··· 1562 1562 * be cloned. This could happen, e.g., with Linux bridge code passing 1563 1563 * us broadcast frames. */ 1564 1564 1565 - if (head_need > 0 || skb_cloned(skb)) { 1565 + if (head_need > 0 || skb_header_cloned(skb)) { 1566 1566 #if 0 1567 1567 printk(KERN_DEBUG "%s: need to reallocate buffer for %d bytes " 1568 1568 "of headroom\n", dev->name, head_need); 1569 1569 #endif 1570 1570 1571 - if (skb_cloned(skb)) 1571 + if (skb_header_cloned(skb)) 1572 1572 I802_DEBUG_INC(local->tx_expand_skb_head_cloned); 1573 1573 else 1574 1574 I802_DEBUG_INC(local->tx_expand_skb_head); ··· 1898 1898 control->flags |= IEEE80211_TXCTL_SHORT_PREAMBLE; 1899 1899 control->antenna_sel_tx = local->hw.conf.antenna_sel_tx; 1900 1900 control->flags |= IEEE80211_TXCTL_NO_ACK; 1901 + control->flags |= IEEE80211_TXCTL_DO_NOT_ENCRYPT; 1901 1902 control->retry_limit = 1; 1902 1903 control->flags |= IEEE80211_TXCTL_CLEAR_PS_FILT; 1903 1904 }
+5 -5
net/mac80211/util.c
··· 153 153 /* 7.1.3.5a.2 */ 154 154 switch (ae) { 155 155 case 0: 156 - return 5; 156 + return 6; 157 157 case 1: 158 - return 11; 158 + return 12; 159 159 case 2: 160 - return 17; 160 + return 18; 161 161 case 3: 162 - return 23; 162 + return 24; 163 163 default: 164 - return 5; 164 + return 6; 165 165 } 166 166 } 167 167
+2 -1
net/mac80211/wme.c
··· 394 394 qd->handle); 395 395 if (!q->queues[i]) { 396 396 q->queues[i] = &noop_qdisc; 397 - printk(KERN_ERR "%s child qdisc %i creation failed", dev->name, i); 397 + printk(KERN_ERR "%s child qdisc %i creation failed\n", 398 + dev->name, i); 398 399 } 399 400 } 400 401
+3
net/netfilter/nf_conntrack_netlink.c
··· 472 472 goto nla_put_failure; 473 473 nla_nest_end(skb, nest_parms); 474 474 475 + if (ctnetlink_dump_id(skb, ct) < 0) 476 + goto nla_put_failure; 477 + 475 478 if (events & IPCT_DESTROY) { 476 479 if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 || 477 480 ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0)
+2
net/netfilter/xt_iprange.c
··· 179 179 MODULE_LICENSE("GPL"); 180 180 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>, Jan Engelhardt <jengelh@computergmbh.de>"); 181 181 MODULE_DESCRIPTION("Xtables: arbitrary IPv4 range matching"); 182 + MODULE_ALIAS("ipt_iprange"); 183 + MODULE_ALIAS("ip6t_iprange");
+1 -1
net/packet/af_packet.c
··· 743 743 if (len > dev->mtu+reserve) 744 744 goto out_unlock; 745 745 746 - skb = sock_alloc_send_skb(sk, len + LL_RESERVED_SPACE(dev), 746 + skb = sock_alloc_send_skb(sk, len + LL_ALLOCATED_SPACE(dev), 747 747 msg->msg_flags & MSG_DONTWAIT, &err); 748 748 if (skb==NULL) 749 749 goto out_unlock;
+15 -1
net/sctp/sm_make_chunk.c
··· 2418 2418 break; 2419 2419 2420 2420 case SCTP_PARAM_IPV6_ADDRESS: 2421 - asoc->peer.ipv6_address = 1; 2421 + if (PF_INET6 == asoc->base.sk->sk_family) 2422 + asoc->peer.ipv6_address = 1; 2422 2423 break; 2423 2424 2424 2425 case SCTP_PARAM_HOST_NAME_ADDRESS: ··· 2829 2828 2830 2829 addr_param = (union sctp_addr_param *) 2831 2830 ((void *)asconf_param + sizeof(sctp_addip_param_t)); 2831 + 2832 + switch (addr_param->v4.param_hdr.type) { 2833 + case SCTP_PARAM_IPV6_ADDRESS: 2834 + if (!asoc->peer.ipv6_address) 2835 + return SCTP_ERROR_INV_PARAM; 2836 + break; 2837 + case SCTP_PARAM_IPV4_ADDRESS: 2838 + if (!asoc->peer.ipv4_address) 2839 + return SCTP_ERROR_INV_PARAM; 2840 + break; 2841 + default: 2842 + return SCTP_ERROR_INV_PARAM; 2843 + } 2832 2844 2833 2845 af = sctp_get_af_specific(param_type2af(addr_param->v4.param_hdr.type)); 2834 2846 if (unlikely(!af))
+3 -3
net/xfrm/xfrm_output.c
··· 25 25 struct dst_entry *dst = skb->dst; 26 26 int nhead = dst->header_len + LL_RESERVED_SPACE(dst->dev) 27 27 - skb_headroom(skb); 28 + int ntail = dst->dev->needed_tailroom - skb_tailroom(skb); 28 29 29 - if (nhead > 0) 30 - return pskb_expand_head(skb, nhead, 0, GFP_ATOMIC); 30 + if (nhead > 0 || ntail > 0) 31 + return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC); 31 32 32 - /* Check tail too... */ 33 33 return 0; 34 34 } 35 35