Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6:
myri10ge: update driver version to 1.3.2-1.269
myri10ge: use pcie_get/set_readrq
ehea: fix queue destructor
ehea: fix module parameter description
ehea: fix interface to DLPAR tools
sgiseeq: Fix return type of sgiseeq_remove
sky2 1.17
sky2: only bring up watchdog if link is active
sky2: clear PCI power control reg at startup
DM9000: fix interface hang under load
phy layer: fix genphy_setup_forced (don't reset)
Don't use GFP_DMA for zone allocation.
fix realtek phy id in forcedeth

 10 files changed, 64 insertions(+), 86 deletions(-)

drivers/net/dm9000.c (+7 -18)

···
 static int
 dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
+	unsigned long flags;
 	board_info_t *db = (board_info_t *) dev->priv;

 	PRINTK3("dm9000_start_xmit\n");
···
 	if (db->tx_pkt_cnt > 1)
 		return 1;

-	netif_stop_queue(dev);
-
-	/* Disable all interrupts */
-	iow(db, DM9000_IMR, IMR_PAR);
+	spin_lock_irqsave(&db->lock, flags);

 	/* Move data to DM9000 TX RAM */
 	writeb(DM9000_MWCMD, db->io_addr);
···
 	(db->outblk)(db->io_data, skb->data, skb->len);
 	db->stats.tx_bytes += skb->len;

+	db->tx_pkt_cnt++;
 	/* TX control: First packet immediately send, second packet queue */
-	if (db->tx_pkt_cnt == 0) {
-
-		/* First Packet */
-		db->tx_pkt_cnt++;
-
+	if (db->tx_pkt_cnt == 1) {
 		/* Set TX length to DM9000 */
 		iow(db, DM9000_TXPLL, skb->len & 0xff);
 		iow(db, DM9000_TXPLH, (skb->len >> 8) & 0xff);
···
 		iow(db, DM9000_TCR, TCR_TXREQ);	/* Cleared after TX complete */

 		dev->trans_start = jiffies;	/* save the time stamp */
-
 	} else {
 		/* Second packet */
-		db->tx_pkt_cnt++;
 		db->queue_pkt_len = skb->len;
+		netif_stop_queue(dev);
 	}
+
+	spin_unlock_irqrestore(&db->lock, flags);

 	/* free this SKB */
 	dev_kfree_skb(skb);
-
-	/* Re-enable resource check */
-	if (db->tx_pkt_cnt == 1)
-		netif_wake_queue(dev);
-
-	/* Re-enable interrupt */
-	iow(db, DM9000_IMR, IMR_PAR | IMR_PTM | IMR_PRM);

 	return 0;
 }
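
The dm9000 change above replaces the IMR-masking and stop/wake-queue sequence with db->lock held (IRQs disabled) around the whole TX programming path. A minimal sketch of that locking pattern follows; the interrupt-handler side is outside this hunk, so its use of the same lock, the example_* names and the stand-in private struct are illustrative assumptions, not dm9000 code.

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>

/* Hypothetical private data standing in for dm9000's board_info_t;
 * assume the lock is spin_lock_init()ed in probe. */
struct example_priv {
	spinlock_t lock;
	/* ... TX bookkeeping ... */
};

static int example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct example_priv *ep = netdev_priv(dev);
	unsigned long flags;

	/* Serialize against the ISR instead of masking chip interrupts. */
	spin_lock_irqsave(&ep->lock, flags);
	/* ... program the TX FIFO, update packet counters ... */
	spin_unlock_irqrestore(&ep->lock, flags);

	dev_kfree_skb(skb);
	return 0;
}

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct example_priv *ep = netdev_priv(dev);

	/* Already in hard-IRQ context, so plain spin_lock() suffices. */
	spin_lock(&ep->lock);
	/* ... ack TX-complete, push any queued packet, wake the queue ... */
	spin_unlock(&ep->lock);

	return IRQ_HANDLED;
}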

drivers/net/ehea/ehea_main.c (+4 -4)

···
 MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue  "
 		 "[2^x - 1], x = [6..14]. Default = "
 		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
-MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 1 ");
+MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 0 ");

 static int port_name_cnt = 0;
 static LIST_HEAD(adapter_list);
···
 				 struct device_attribute *attr, char *buf)
 {
 	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
-	return sprintf(buf, "0x%X", port->logical_port_id);
+	return sprintf(buf, "%d", port->logical_port_id);
 }

 static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
···

 	u32 logical_port_id;

-	sscanf(buf, "%X", &logical_port_id);
+	sscanf(buf, "%d", &logical_port_id);

 	port = ehea_get_port(adapter, logical_port_id);

···
 	int i;
 	u32 logical_port_id;

-	sscanf(buf, "%X", &logical_port_id);
+	sscanf(buf, "%d", &logical_port_id);

 	port = ehea_get_port(adapter, logical_port_id);


drivers/net/ehea/ehea_qmr.c (+6)

···
 	if (!cq)
 		return 0;

+	hcp_epas_dtor(&cq->epas);
+
 	if ((hret = ehea_destroy_cq_res(cq, NORMAL_FREE)) == H_R_STATE) {
 		ehea_error_data(cq->adapter, cq->fw_handle);
 		hret = ehea_destroy_cq_res(cq, FORCE_FREE);
···
 	u64 hret;
 	if (!eq)
 		return 0;
+
+	hcp_epas_dtor(&eq->epas);

 	if ((hret = ehea_destroy_eq_res(eq, NORMAL_FREE)) == H_R_STATE) {
 		ehea_error_data(eq->adapter, eq->fw_handle);
···
 	u64 hret;
 	if (!qp)
 		return 0;
+
+	hcp_epas_dtor(&qp->epas);

 	if ((hret = ehea_destroy_qp_res(qp, NORMAL_FREE)) == H_R_STATE) {
 		ehea_error_data(qp->adapter, qp->fw_handle);

drivers/net/forcedeth.c (+1 -1)

···
 #define PHY_OUI_MARVELL	0x5043
 #define PHY_OUI_CICADA	0x03f1
 #define PHY_OUI_VITESSE	0x01c1
-#define PHY_OUI_REALTEK	0x01c1
+#define PHY_OUI_REALTEK	0x0732
 #define PHYID1_OUI_MASK	0x03ff
 #define PHYID1_OUI_SHFT	6
 #define PHYID2_OUI_MASK	0xfc00
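
For context on the one-line fix above: the PHY_OUI_* constants are compared against an OUI assembled from the MII PHYSID1/PHYSID2 registers using the masks and shifts defined beside them. A minimal sketch of that assembly, assuming the register values were already read; the helper name is hypothetical and PHYID2_OUI_SHFT is outside the shown hunk.

#include <linux/types.h>

#define PHYID1_OUI_MASK	0x03ff	/* as in the context above */
#define PHYID1_OUI_SHFT	6
#define PHYID2_OUI_MASK	0xfc00
#define PHYID2_OUI_SHFT	10	/* assumed; not shown in this hunk */

/* Hypothetical helper: build the PHY OUI value from raw ID register reads. */
static u32 example_phy_oui(u32 physid1, u32 physid2)
{
	return ((physid1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT) |
	       ((physid2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT);
}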

drivers/net/meth.c (+1 -1)

···
 			priv->stats.rx_length_errors++;
 			skb = priv->rx_skbs[priv->rx_write];
 		} else {
-			skb = alloc_skb(METH_RX_BUFF_SIZE, GFP_ATOMIC | GFP_DMA);
+			skb = alloc_skb(METH_RX_BUFF_SIZE, GFP_ATOMIC);
 			if (!skb) {
 				/* Ouch! No memory! Drop packet on the floor */
 				DPRINTK("No mem: dropping packet\n");

drivers/net/myri10ge/myri10ge.c (+7 -27)

···
 #include "myri10ge_mcp.h"
 #include "myri10ge_mcp_gen_header.h"

-#define MYRI10GE_VERSION_STR "1.3.1-1.248"
+#define MYRI10GE_VERSION_STR "1.3.2-1.269"

 MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
 MODULE_AUTHOR("Maintainer: help@myri.com");
···
 {
 	struct pci_dev *pdev = mgp->pdev;
 	struct device *dev = &pdev->dev;
-	int cap, status;
-	u16 val;
+	int status;

 	mgp->tx.boundary = 4096;
 	/*
 	 * Verify the max read request size was set to 4KB
 	 * before trying the test with 4KB.
 	 */
-	cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
-	if (cap < 64) {
-		dev_err(dev, "Bad PCI_CAP_ID_EXP location %d\n", cap);
-		goto abort;
-	}
-	status = pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &val);
-	if (status != 0) {
+	status = pcie_get_readrq(pdev);
+	if (status < 0) {
 		dev_err(dev, "Couldn't read max read req size: %d\n", status);
 		goto abort;
 	}
-	if ((val & (5 << 12)) != (5 << 12)) {
-		dev_warn(dev, "Max Read Request size != 4096 (0x%x)\n", val);
+	if (status != 4096) {
+		dev_warn(dev, "Max Read Request size != 4096 (%d)\n", status);
 		mgp->tx.boundary = 2048;
 	}
 	/*
···
 	size_t bytes;
 	int i;
 	int status = -ENXIO;
-	int cap;
 	int dac_enabled;
-	u16 val;

 	netdev = alloc_etherdev(sizeof(*mgp));
 	if (netdev == NULL) {
···
 		= pci_find_capability(pdev, PCI_CAP_ID_VNDR);

 	/* Set our max read request to 4KB */
-	cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
-	if (cap < 64) {
-		dev_err(&pdev->dev, "Bad PCI_CAP_ID_EXP location %d\n", cap);
-		goto abort_with_netdev;
-	}
-	status = pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &val);
-	if (status != 0) {
-		dev_err(&pdev->dev, "Error %d reading PCI_EXP_DEVCTL\n",
-			status);
-		goto abort_with_netdev;
-	}
-	val = (val & ~PCI_EXP_DEVCTL_READRQ) | (5 << 12);
-	status = pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, val);
+	status = pcie_set_readrq(pdev, 4096);
 	if (status != 0) {
 		dev_err(&pdev->dev, "Error %d writing PCI_EXP_DEVCTL\n",
 			status);
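
The myri10ge change swaps the open-coded PCI_EXP_DEVCTL read-modify-write for the pcie_get_readrq()/pcie_set_readrq() helpers. A minimal sketch of using those helpers from a probe path; the function name is illustrative, not myri10ge's.

#include <linux/pci.h>

/* Hypothetical probe fragment: query the PCIe Max Read Request Size and
 * raise it to 4KB, letting the PCI core do the config-space accesses. */
static int example_setup_readrq(struct pci_dev *pdev)
{
	int readrq = pcie_get_readrq(pdev);	/* bytes, or negative errno */

	if (readrq < 0)
		return readrq;

	if (readrq != 4096)
		return pcie_set_readrq(pdev, 4096);	/* power of two, 128..4096 */

	return 0;
}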

drivers/net/phy/phy_device.c (+1 -1)

···
  */
 int genphy_setup_forced(struct phy_device *phydev)
 {
-	int ctl = BMCR_RESET;
+	int ctl = 0;

 	phydev->pause = phydev->asym_pause = 0;


drivers/net/sgiseeq.c (+3 -1)

···
 	return err;
 }

-static void __exit sgiseeq_remove(struct platform_device *pdev)
+static int __exit sgiseeq_remove(struct platform_device *pdev)
 {
 	struct net_device *dev = platform_get_drvdata(pdev);
 	struct sgiseeq_private *sp = netdev_priv(dev);
···
 	free_page((unsigned long) sp->srings);
 	free_netdev(dev);
 	platform_set_drvdata(pdev, NULL);
+
+	return 0;
 }

 static struct platform_driver sgiseeq_driver = {
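
The sgiseeq fix is needed because the platform bus declares the remove callback as returning int, so a void remove function does not match the expected prototype. A hypothetical skeleton of the expected shape (all names are illustrative):

#include <linux/init.h>
#include <linux/platform_device.h>

static int __exit example_remove(struct platform_device *pdev)
{
	/* ... unregister the netdev, free rings, clear drvdata ... */
	return 0;
}

static struct platform_driver example_driver = {
	.remove	= __exit_p(example_remove),
	.driver	= {
		.name = "example",
	},
};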

drivers/net/sky2.c (+32 -32)

···
 #include "sky2.h"

 #define DRV_NAME	"sky2"
-#define DRV_VERSION	"1.16"
+#define DRV_VERSION	"1.17"
 #define PFX		DRV_NAME " "

 /*
···
 static int disable_msi = 0;
 module_param(disable_msi, int, 0);
 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
-
-static int idle_timeout = 100;
-module_param(idle_timeout, int, 0);
-MODULE_PARM_DESC(idle_timeout, "Watchdog timer for lost interrupts (ms)");

 static const struct pci_device_id sky2_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */
···
 	else
 		sky2_write8(hw, B2_Y2_CLK_GATE, 0);

-	if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) {
+	if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
+	    hw->chip_id == CHIP_ID_YUKON_EX) {
 		u32 reg;
+
+		sky2_pci_write32(hw, PCI_DEV_REG3, 0);

 		reg = sky2_pci_read32(hw, PCI_DEV_REG4);
 		/* set all bits to 0 except bits 15..12 and 8 */
···
 		reg = sky2_read32(hw, B2_GP_IO);
 		reg |= GLB_GPIO_STAT_RACE_DIS;
 		sky2_write32(hw, B2_GP_IO, reg);
+
+		sky2_read32(hw, B2_GP_IO);
 	}
 }
···
 	if (netif_msg_ifdown(sky2))
 		printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);

+	if (netif_carrier_ok(dev) && --hw->active == 0)
+		del_timer(&hw->watchdog_timer);
+
 	/* Stop more packets from being queued */
 	netif_stop_queue(dev);
···

 	netif_carrier_on(sky2->netdev);

+	if (hw->active++ == 0)
+		mod_timer(&hw->watchdog_timer, jiffies + 1);
+
+
 	/* Turn on link LED */
 	sky2_write8(hw, SK_REG(port, LNK_LED_REG),
 		    LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);
···
 	gma_write16(hw, port, GM_GP_CTRL, reg);

 	netif_carrier_off(sky2->netdev);
+
+	/* Stop watchdog if both ports are not active */
+	if (--hw->active == 0)
+		del_timer(&hw->watchdog_timer);
+

 	/* Turn on link LED */
 	sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
···
 	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_IRQ_CHK);
 }

-/* If idle then force a fake soft NAPI poll once a second
- * to work around cases where sharing an edge triggered interrupt.
- */
-static inline void sky2_idle_start(struct sky2_hw *hw)
-{
-	if (idle_timeout > 0)
-		mod_timer(&hw->idle_timer,
-			  jiffies + msecs_to_jiffies(idle_timeout));
-}
-
-static void sky2_idle(unsigned long arg)
+/* Check for lost IRQ once a second */
+static void sky2_watchdog(unsigned long arg)
 {
 	struct sky2_hw *hw = (struct sky2_hw *) arg;
-	struct net_device *dev = hw->dev[0];

-	if (__netif_rx_schedule_prep(dev))
-		__netif_rx_schedule(dev);
+	if (sky2_read32(hw, B0_ISRC)) {
+		struct net_device *dev = hw->dev[0];

-	mod_timer(&hw->idle_timer, jiffies + msecs_to_jiffies(idle_timeout));
+		if (__netif_rx_schedule_prep(dev))
+			__netif_rx_schedule(dev);
+	}
+
+	if (hw->active > 0)
+		mod_timer(&hw->watchdog_timer, round_jiffies(jiffies + HZ));
 }

 /* Hardware/software error handling */
···
 	struct net_device *dev;
 	int i, err;

-	del_timer_sync(&hw->idle_timer);
-
 	rtnl_lock();
 	sky2_write32(hw, B0_IMSK, 0);
 	sky2_read32(hw, B0_IMSK);
···
 			}
 		}
 	}
-
-	sky2_idle_start(hw);

 	rtnl_unlock();
 }
···
 		sky2_show_addr(dev1);
 	}

-	setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) hw);
+	setup_timer(&hw->watchdog_timer, sky2_watchdog, (unsigned long) hw);
 	INIT_WORK(&hw->restart_work, sky2_restart);
-
-	sky2_idle_start(hw);

 	pci_set_drvdata(pdev, hw);
···
 	if (!hw)
 		return;

-	del_timer_sync(&hw->idle_timer);
+	del_timer_sync(&hw->watchdog_timer);

 	flush_scheduled_work();
···
 	if (!hw)
 		return 0;

-	del_timer_sync(&hw->idle_timer);
 	netif_poll_disable(hw->dev[0]);

 	for (i = 0; i < hw->ports; i++) {
···
 	}

 	netif_poll_enable(hw->dev[0]);
-	sky2_idle_start(hw);
+
 	return 0;
 out:
 	dev_err(&pdev->dev, "resume failed (%d)\n", err);
···
 	if (!hw)
 		return;

-	del_timer_sync(&hw->idle_timer);
 	netif_poll_disable(hw->dev[0]);

 	for (i = 0; i < hw->ports; i++) {
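
Taken together, the sky2.c changes replace the always-armed idle-poll timer with a watchdog that only runs while at least one port has link, reference-counted through the new hw->active field. A condensed sketch of that pattern, with hypothetical example_* names standing in for the sky2 code:

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/types.h>

struct example_hw {
	struct timer_list watchdog_timer;	/* set up with setup_timer() */
	u8 active;				/* number of ports with link up */
};

/* Timer callback: re-arms itself once a second, but only while active. */
static void example_watchdog(unsigned long arg)
{
	struct example_hw *hw = (struct example_hw *) arg;

	/* ... check for a lost interrupt and kick NAPI if needed ... */

	if (hw->active > 0)
		mod_timer(&hw->watchdog_timer, round_jiffies(jiffies + HZ));
}

static void example_link_up(struct example_hw *hw)
{
	if (hw->active++ == 0)		/* first active port: start watchdog */
		mod_timer(&hw->watchdog_timer, jiffies + 1);
}

static void example_link_down(struct example_hw *hw)
{
	if (--hw->active == 0)		/* last active port: stop watchdog */
		del_timer(&hw->watchdog_timer);
}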

drivers/net/sky2.h (+2 -1)

···
 	u8 chip_rev;
 	u8 pmd_type;
 	u8 ports;
+	u8 active;

 	struct sky2_status_le *st_le;
 	u32 st_idx;
 	dma_addr_t st_dma;

-	struct timer_list idle_timer;
+	struct timer_list watchdog_timer;
 	struct work_struct restart_work;
 	int msi;
 	wait_queue_head_t msi_wait;