Automatic merge of the /spare/repo/netdev-2.6 repository, branch 'e100'

Authored and committed by Jeff Garzik (commits d6d78f63, a83d5cf7)

+139 -26
+139 -26
drivers/net/e100.c
··· 155 155 156 156 #define DRV_NAME "e100" 157 157 #define DRV_EXT "-NAPI" 158 - #define DRV_VERSION "3.3.6-k2"DRV_EXT 158 + #define DRV_VERSION "3.4.8-k2"DRV_EXT 159 159 #define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver" 160 - #define DRV_COPYRIGHT "Copyright(c) 1999-2004 Intel Corporation" 160 + #define DRV_COPYRIGHT "Copyright(c) 1999-2005 Intel Corporation" 161 161 #define PFX DRV_NAME ": " 162 162 163 163 #define E100_WATCHDOG_PERIOD (2 * HZ) ··· 210 210 INTEL_8255X_ETHERNET_DEVICE(0x1069, 6), 211 211 INTEL_8255X_ETHERNET_DEVICE(0x106A, 6), 212 212 INTEL_8255X_ETHERNET_DEVICE(0x106B, 6), 213 + INTEL_8255X_ETHERNET_DEVICE(0x1091, 7), 214 + INTEL_8255X_ETHERNET_DEVICE(0x1092, 7), 215 + INTEL_8255X_ETHERNET_DEVICE(0x1093, 7), 216 + INTEL_8255X_ETHERNET_DEVICE(0x1094, 7), 217 + INTEL_8255X_ETHERNET_DEVICE(0x1095, 7), 213 218 INTEL_8255X_ETHERNET_DEVICE(0x1209, 0), 214 219 INTEL_8255X_ETHERNET_DEVICE(0x1229, 0), 215 220 INTEL_8255X_ETHERNET_DEVICE(0x2449, 2), 216 221 INTEL_8255X_ETHERNET_DEVICE(0x2459, 2), 217 222 INTEL_8255X_ETHERNET_DEVICE(0x245D, 2), 223 + INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7), 218 224 { 0, } 219 225 }; 220 226 MODULE_DEVICE_TABLE(pci, e100_id_table); ··· 273 267 enum scb_status { 274 268 rus_ready = 0x10, 275 269 rus_mask = 0x3C, 270 + }; 271 + 272 + enum ru_state { 273 + RU_SUSPENDED = 0, 274 + RU_RUNNING = 1, 275 + RU_UNINITIALIZED = -1, 276 276 }; 277 277 278 278 enum scb_stat_ack { ··· 522 510 struct rx *rx_to_use; 523 511 struct rx *rx_to_clean; 524 512 struct rfd blank_rfd; 525 - int ru_running; 513 + enum ru_state ru_running; 526 514 527 515 spinlock_t cb_lock ____cacheline_aligned; 528 516 spinlock_t cmd_lock; ··· 551 539 struct timer_list watchdog; 552 540 struct timer_list blink_timer; 553 541 struct mii_if_info mii; 542 + struct work_struct tx_timeout_task; 554 543 enum loopback loopback; 555 544 556 545 struct mem *mem; ··· 783 770 return 0; 784 771 } 785 772 786 - #define E100_WAIT_SCB_TIMEOUT 40 773 + #define 
E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */ 787 774 static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr) 788 775 { 789 776 unsigned long flags; ··· 853 840 * because the controller is too busy, so 854 841 * let's just queue the command and try again 855 842 * when another command is scheduled. */ 843 + if(err == -ENOSPC) { 844 + //request a reset 845 + schedule_work(&nic->tx_timeout_task); 846 + } 856 847 break; 857 848 } else { 858 849 nic->cuc_cmd = cuc_resume; ··· 901 884 902 885 static void e100_get_defaults(struct nic *nic) 903 886 { 904 - struct param_range rfds = { .min = 64, .max = 256, .count = 64 }; 887 + struct param_range rfds = { .min = 16, .max = 256, .count = 64 }; 905 888 struct param_range cbs = { .min = 64, .max = 256, .count = 64 }; 906 889 907 890 pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id); ··· 916 899 /* Quadwords to DMA into FIFO before starting frame transmit */ 917 900 nic->tx_threshold = 0xE0; 918 901 919 - nic->tx_command = cpu_to_le16(cb_tx | cb_i | cb_tx_sf | 920 - ((nic->mac >= mac_82558_D101_A4) ? cb_cid : 0)); 902 + /* no interrupt for every tx completion, delay = 256us if not 557*/ 903 + nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf | 904 + ((nic->mac >= mac_82558_D101_A4) ? 
cb_cid : cb_i)); 921 905 922 906 /* Template for a freshly allocated RFD */ 923 907 nic->blank_rfd.command = cpu_to_le16(cb_el); ··· 982 964 if(nic->flags & multicast_all) 983 965 config->multicast_all = 0x1; /* 1=accept, 0=no */ 984 966 985 - if(!(nic->flags & wol_magic)) 967 + /* disable WoL when up */ 968 + if(netif_running(nic->netdev) || !(nic->flags & wol_magic)) 986 969 config->magic_packet_disable = 0x1; /* 1=off, 0=on */ 987 970 988 971 if(nic->mac >= mac_82558_D101_A4) { ··· 1222 1203 } 1223 1204 } 1224 1205 1225 - e100_exec_cmd(nic, cuc_dump_reset, 0); 1206 + 1207 + if(e100_exec_cmd(nic, cuc_dump_reset, 0)) 1208 + DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n"); 1226 1209 } 1227 1210 1228 1211 static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex) ··· 1300 1279 struct sk_buff *skb) 1301 1280 { 1302 1281 cb->command = nic->tx_command; 1282 + /* interrupt every 16 packets regardless of delay */ 1283 + if((nic->cbs_avail & ~15) == nic->cbs_avail) cb->command |= cb_i; 1303 1284 cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd); 1304 1285 cb->u.tcb.tcb_byte_count = 0; 1305 1286 cb->u.tcb.threshold = nic->tx_threshold; 1306 1287 cb->u.tcb.tbd_count = 1; 1307 1288 cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev, 1308 1289 skb->data, skb->len, PCI_DMA_TODEVICE)); 1290 + // check for mapping failure? 1309 1291 cb->u.tcb.tbd.size = cpu_to_le16(skb->len); 1310 1292 } 1311 1293 ··· 1321 1297 /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang. 1322 1298 Issue a NOP command followed by a 1us delay before 1323 1299 issuing the Tx command. 
*/ 1324 - e100_exec_cmd(nic, cuc_nop, 0); 1300 + if(e100_exec_cmd(nic, cuc_nop, 0)) 1301 + DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n"); 1325 1302 udelay(1); 1326 1303 } 1327 1304 ··· 1440 1415 return 0; 1441 1416 } 1442 1417 1443 - static inline void e100_start_receiver(struct nic *nic) 1418 + static inline void e100_start_receiver(struct nic *nic, struct rx *rx) 1444 1419 { 1420 + if(!nic->rxs) return; 1421 + if(RU_SUSPENDED != nic->ru_running) return; 1422 + 1423 + /* handle init time starts */ 1424 + if(!rx) rx = nic->rxs; 1425 + 1445 1426 /* (Re)start RU if suspended or idle and RFA is non-NULL */ 1446 - if(!nic->ru_running && nic->rx_to_clean->skb) { 1447 - e100_exec_cmd(nic, ruc_start, nic->rx_to_clean->dma_addr); 1448 - nic->ru_running = 1; 1427 + if(rx->skb) { 1428 + e100_exec_cmd(nic, ruc_start, rx->dma_addr); 1429 + nic->ru_running = RU_RUNNING; 1449 1430 } 1450 1431 } 1451 1432 ··· 1467 1436 memcpy(rx->skb->data, &nic->blank_rfd, sizeof(struct rfd)); 1468 1437 rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data, 1469 1438 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); 1439 + 1440 + if(pci_dma_mapping_error(rx->dma_addr)) { 1441 + dev_kfree_skb_any(rx->skb); 1442 + rx->skb = 0; 1443 + rx->dma_addr = 0; 1444 + return -ENOMEM; 1445 + } 1470 1446 1471 1447 /* Link the RFD to end of RFA by linking previous RFD to 1472 1448 * this one, and clearing EL bit of previous. 
*/ ··· 1509 1471 1510 1472 /* If data isn't ready, nothing to indicate */ 1511 1473 if(unlikely(!(rfd_status & cb_complete))) 1512 - return -EAGAIN; 1474 + return -ENODATA; 1513 1475 1514 1476 /* Get actual data size */ 1515 1477 actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF; ··· 1519 1481 /* Get data */ 1520 1482 pci_unmap_single(nic->pdev, rx->dma_addr, 1521 1483 RFD_BUF_LEN, PCI_DMA_FROMDEVICE); 1484 + 1485 + /* this allows for a fast restart without re-enabling interrupts */ 1486 + if(le16_to_cpu(rfd->command) & cb_el) 1487 + nic->ru_running = RU_SUSPENDED; 1522 1488 1523 1489 /* Pull off the RFD and put the actual data (minus eth hdr) */ 1524 1490 skb_reserve(skb, sizeof(struct rfd)); ··· 1556 1514 unsigned int work_to_do) 1557 1515 { 1558 1516 struct rx *rx; 1517 + int restart_required = 0; 1518 + struct rx *rx_to_start = NULL; 1519 + 1520 + /* are we already rnr? then pay attention!!! this ensures that 1521 + * the state machine progression never allows a start with a 1522 + * partially cleaned list, avoiding a race between hardware 1523 + * and rx_to_clean when in NAPI mode */ 1524 + if(RU_SUSPENDED == nic->ru_running) 1525 + restart_required = 1; 1559 1526 1560 1527 /* Indicate newly arrived packets */ 1561 1528 for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) { 1562 - if(e100_rx_indicate(nic, rx, work_done, work_to_do)) 1529 + int err = e100_rx_indicate(nic, rx, work_done, work_to_do); 1530 + if(-EAGAIN == err) { 1531 + /* hit quota so have more work to do, restart once 1532 + * cleanup is complete */ 1533 + restart_required = 0; 1534 + break; 1535 + } else if(-ENODATA == err) 1563 1536 break; /* No more to clean */ 1564 1537 } 1538 + 1539 + /* save our starting point as the place we'll restart the receiver */ 1540 + if(restart_required) 1541 + rx_to_start = nic->rx_to_clean; 1565 1542 1566 1543 /* Alloc new skbs to refill list */ 1567 1544 for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) { ··· 1588 1527 
break; /* Better luck next time (see watchdog) */ 1589 1528 } 1590 1529 1591 - e100_start_receiver(nic); 1530 + if(restart_required) { 1531 + // ack the rnr? 1532 + writeb(stat_ack_rnr, &nic->csr->scb.stat_ack); 1533 + e100_start_receiver(nic, rx_to_start); 1534 + if(work_done) 1535 + (*work_done)++; 1536 + } 1592 1537 } 1593 1538 1594 1539 static void e100_rx_clean_list(struct nic *nic) 1595 1540 { 1596 1541 struct rx *rx; 1597 1542 unsigned int i, count = nic->params.rfds.count; 1543 + 1544 + nic->ru_running = RU_UNINITIALIZED; 1598 1545 1599 1546 if(nic->rxs) { 1600 1547 for(rx = nic->rxs, i = 0; i < count; rx++, i++) { ··· 1617 1548 } 1618 1549 1619 1550 nic->rx_to_use = nic->rx_to_clean = NULL; 1620 - nic->ru_running = 0; 1621 1551 } 1622 1552 1623 1553 static int e100_rx_alloc_list(struct nic *nic) ··· 1625 1557 unsigned int i, count = nic->params.rfds.count; 1626 1558 1627 1559 nic->rx_to_use = nic->rx_to_clean = NULL; 1560 + nic->ru_running = RU_UNINITIALIZED; 1628 1561 1629 1562 if(!(nic->rxs = kmalloc(sizeof(struct rx) * count, GFP_ATOMIC))) 1630 1563 return -ENOMEM; ··· 1641 1572 } 1642 1573 1643 1574 nic->rx_to_use = nic->rx_to_clean = nic->rxs; 1575 + nic->ru_running = RU_SUSPENDED; 1644 1576 1645 1577 return 0; 1646 1578 } ··· 1663 1593 1664 1594 /* We hit Receive No Resource (RNR); restart RU after cleaning */ 1665 1595 if(stat_ack & stat_ack_rnr) 1666 - nic->ru_running = 0; 1596 + nic->ru_running = RU_SUSPENDED; 1667 1597 1668 1598 e100_disable_irq(nic); 1669 1599 netif_rx_schedule(netdev); ··· 1733 1663 return 0; 1734 1664 } 1735 1665 1666 + #ifdef CONFIG_PM 1736 1667 static int e100_asf(struct nic *nic) 1737 1668 { 1738 1669 /* ASF can be enabled from eeprom */ ··· 1742 1671 !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) && 1743 1672 ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE)); 1744 1673 } 1674 + #endif 1745 1675 1746 1676 static int e100_up(struct nic *nic) 1747 1677 { ··· 1755 1683 if((err = e100_hw_init(nic))) 1756 1684 goto 
err_clean_cbs; 1757 1685 e100_set_multicast_list(nic->netdev); 1758 - e100_start_receiver(nic); 1686 + e100_start_receiver(nic, 0); 1759 1687 mod_timer(&nic->watchdog, jiffies); 1760 1688 if((err = request_irq(nic->pdev->irq, e100_intr, SA_SHIRQ, 1761 1689 nic->netdev->name, nic->netdev))) 1762 1690 goto err_no_irq; 1763 - e100_enable_irq(nic); 1764 1691 netif_wake_queue(nic->netdev); 1692 + netif_poll_enable(nic->netdev); 1693 + /* enable ints _after_ enabling poll, preventing a race between 1694 + * disable ints+schedule */ 1695 + e100_enable_irq(nic); 1765 1696 return 0; 1766 1697 1767 1698 err_no_irq: ··· 1778 1703 1779 1704 static void e100_down(struct nic *nic) 1780 1705 { 1706 + /* wait here for poll to complete */ 1707 + netif_poll_disable(nic->netdev); 1708 + netif_stop_queue(nic->netdev); 1781 1709 e100_hw_reset(nic); 1782 1710 free_irq(nic->pdev->irq, nic->netdev); 1783 1711 del_timer_sync(&nic->watchdog); 1784 1712 netif_carrier_off(nic->netdev); 1785 - netif_stop_queue(nic->netdev); 1786 1713 e100_clean_cbs(nic); 1787 1714 e100_rx_clean_list(nic); 1788 1715 } 1789 1716 1790 1717 static void e100_tx_timeout(struct net_device *netdev) 1718 + { 1719 + struct nic *nic = netdev_priv(netdev); 1720 + 1721 + /* Reset outside of interrupt context, to avoid request_irq 1722 + * in interrupt context */ 1723 + schedule_work(&nic->tx_timeout_task); 1724 + } 1725 + 1726 + static void e100_tx_timeout_task(struct net_device *netdev) 1791 1727 { 1792 1728 struct nic *nic = netdev_priv(netdev); 1793 1729 ··· 1835 1749 mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 1836 1750 BMCR_LOOPBACK); 1837 1751 1838 - e100_start_receiver(nic); 1752 + e100_start_receiver(nic, 0); 1839 1753 1840 1754 if(!(skb = dev_alloc_skb(ETH_DATA_LEN))) { 1841 1755 err = -ENOMEM; ··· 1955 1869 else 1956 1870 nic->flags &= ~wol_magic; 1957 1871 1958 - pci_enable_wake(nic->pdev, 0, nic->flags & (wol_magic | e100_asf(nic))); 1959 1872 e100_exec_cb(nic, NULL, e100_configure); 1960 1873 1961 1874 
return 0; ··· 2308 2223 2309 2224 e100_get_defaults(nic); 2310 2225 2226 + /* locks must be initialized before calling hw_reset */ 2311 2227 spin_lock_init(&nic->cb_lock); 2312 2228 spin_lock_init(&nic->cmd_lock); 2313 2229 ··· 2325 2239 init_timer(&nic->blink_timer); 2326 2240 nic->blink_timer.function = e100_blink_led; 2327 2241 nic->blink_timer.data = (unsigned long)nic; 2242 + 2243 + INIT_WORK(&nic->tx_timeout_task, 2244 + (void (*)(void *))e100_tx_timeout_task, netdev); 2328 2245 2329 2246 if((err = e100_alloc(nic))) { 2330 2247 DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n"); ··· 2352 2263 (nic->eeprom[eeprom_id] & eeprom_id_wol)) 2353 2264 nic->flags |= wol_magic; 2354 2265 2355 - pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic))); 2266 + /* ack any pending wake events, disable PME */ 2267 + pci_enable_wake(pdev, 0, 0); 2356 2268 2357 2269 strcpy(netdev->name, "eth%d"); 2358 2270 if((err = register_netdev(netdev))) { ··· 2425 2335 2426 2336 pci_set_power_state(pdev, PCI_D0); 2427 2337 pci_restore_state(pdev); 2428 - e100_hw_init(nic); 2338 + /* ack any pending wake events, disable PME */ 2339 + pci_enable_wake(pdev, 0, 0); 2340 + if(e100_hw_init(nic)) 2341 + DPRINTK(HW, ERR, "e100_hw_init failed\n"); 2429 2342 2430 2343 netif_device_attach(netdev); 2431 2344 if(netif_running(netdev)) ··· 2437 2344 return 0; 2438 2345 } 2439 2346 #endif 2347 + 2348 + 2349 + static void e100_shutdown(struct device *dev) 2350 + { 2351 + struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); 2352 + struct net_device *netdev = pci_get_drvdata(pdev); 2353 + struct nic *nic = netdev_priv(netdev); 2354 + 2355 + #ifdef CONFIG_PM 2356 + pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic))); 2357 + #else 2358 + pci_enable_wake(pdev, 0, nic->flags & (wol_magic)); 2359 + #endif 2360 + } 2361 + 2440 2362 2441 2363 static struct pci_driver e100_driver = { 2442 2364 .name = DRV_NAME, ··· 2462 2354 .suspend = e100_suspend, 2463 2355 .resume 
= e100_resume, 2464 2356 #endif 2357 + 2358 + .driver = { 2359 + .shutdown = e100_shutdown, 2360 + } 2361 + 2465 2362 }; 2466 2363 2467 2364 static int __init e100_init_module(void)