Merge of 'for-linus' branch from

rsync://rsync.kernel.org/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

+3407 -877
+14 -10
drivers/net/amd8111e.c
···
 	short vtag;
 #endif
 	int rx_pkt_limit = dev->quota;
+	unsigned long flags;
 
 	do{
 		/* process receive packets until we use the quota*/
···
 	/* Receive descriptor is empty now */
 	dev->quota -= num_rx_pkt;
 	*budget -= num_rx_pkt;
+
+	spin_lock_irqsave(&lp->lock, flags);
 	netif_rx_complete(dev);
-	/* enable receive interrupt */
 	writel(VAL0|RINTEN0, mmio + INTEN0);
 	writel(VAL2 | RDMD0, mmio + CMD0);
+	spin_unlock_irqrestore(&lp->lock, flags);
 	return 0;
+
 rx_not_empty:
 	/* Do not call a netif_rx_complete */
 	dev->quota -= num_rx_pkt;
 	*budget -= num_rx_pkt;
 	return 1;
-
-
 }
 
 #else
···
 	struct net_device * dev = (struct net_device *) dev_id;
 	struct amd8111e_priv *lp = netdev_priv(dev);
 	void __iomem *mmio = lp->mmio;
-	unsigned int intr0;
+	unsigned int intr0, intren0;
 	unsigned int handled = 1;
 
-	if(dev == NULL)
+	if(unlikely(dev == NULL))
 		return IRQ_NONE;
 
-	if (regs) spin_lock (&lp->lock);
+	spin_lock(&lp->lock);
+
 	/* disabling interrupt */
 	writel(INTREN, mmio + CMD0);
 
 	/* Read interrupt status */
 	intr0 = readl(mmio + INT0);
+	intren0 = readl(mmio + INTEN0);
 
 	/* Process all the INT event until INTR bit is clear. */
 
···
 			/* Schedule a polling routine */
 			__netif_rx_schedule(dev);
 		}
-		else {
+		else if (intren0 & RINTEN0) {
 			printk("************Driver bug! \
 				interrupt while in poll\n");
-			/* Fix by disabling interrupts */
-			writel(RINT0, mmio + INT0);
+			/* Fix by disable receive interrupts */
+			writel(RINTEN0, mmio + INTEN0);
 		}
 	}
 #else
···
 err_no_interrupt:
 	writel( VAL0 | INTREN,mmio + CMD0);
 
-	if (regs) spin_unlock(&lp->lock);
+	spin_unlock(&lp->lock);
 
 	return IRQ_RETVAL(handled);
 }
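The locking change above is the substance of the amd8111e fix: taking the device off the poll list (netif_rx_complete()) and re-enabling receive interrupts must be one atomic step with respect to the interrupt handler, which now takes lp->lock unconditionally. A minimal user-space sketch of the same pattern — not driver code; the two threads and the flags are illustrative stand-ins for the ISR, the poll list, and INTEN0:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int polling = 1;        /* stands in for "device is on the poll list" */
static int rx_irq_enabled = 0; /* stands in for RINTEN0 in INTEN0 */

/* mirrors the poll path: completion plus interrupt re-enable, under the lock */
static void *poll_complete(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	polling = 0;
	rx_irq_enabled = 1;
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* mirrors the ISR check that used to trip "interrupt while in poll" */
static void *rx_interrupt(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	if (rx_irq_enabled && polling)
		printf("Driver bug! interrupt while in poll\n"); /* can't happen now */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;
	pthread_create(&a, NULL, poll_complete, NULL);
	pthread_create(&b, NULL, rx_interrupt, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Without the lock, the "interrupt" could observe the window between the two stores; with it, the pair is indivisible, which is what spin_lock_irqsave() buys the driver here.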
+139 -26
drivers/net/e100.c
···
 
 #define DRV_NAME		"e100"
 #define DRV_EXT		"-NAPI"
-#define DRV_VERSION		"3.3.6-k2"DRV_EXT
+#define DRV_VERSION		"3.4.8-k2"DRV_EXT
 #define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
-#define DRV_COPYRIGHT		"Copyright(c) 1999-2004 Intel Corporation"
+#define DRV_COPYRIGHT		"Copyright(c) 1999-2005 Intel Corporation"
 #define PFX			DRV_NAME ": "
 
 #define E100_WATCHDOG_PERIOD	(2 * HZ)
···
 	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
 	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
 	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
+	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
+	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
+	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
+	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
+	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
 	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
 	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
 	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
 	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
 	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
+	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
 	{ 0, }
 };
 MODULE_DEVICE_TABLE(pci, e100_id_table);
···
 enum scb_status {
 	rus_ready  = 0x10,
 	rus_mask   = 0x3C,
+};
+
+enum ru_state {
+	RU_SUSPENDED = 0,
+	RU_RUNNING   = 1,
+	RU_UNINITIALIZED = -1,
 };
 
 enum scb_stat_ack {
···
 	struct rx *rx_to_use;
 	struct rx *rx_to_clean;
 	struct rfd blank_rfd;
-	int ru_running;
+	enum ru_state ru_running;
 
 	spinlock_t cb_lock			____cacheline_aligned;
 	spinlock_t cmd_lock;
···
 	struct timer_list watchdog;
 	struct timer_list blink_timer;
 	struct mii_if_info mii;
+	struct work_struct tx_timeout_task;
 	enum loopback loopback;
 
 	struct mem *mem;
···
 	return 0;
 }
 
-#define E100_WAIT_SCB_TIMEOUT 40
+#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
 static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
 {
 	unsigned long flags;
···
 			 * because the controller is too busy, so
 			 * let's just queue the command and try again
 			 * when another command is scheduled. */
+			if(err == -ENOSPC) {
+				//request a reset
+				schedule_work(&nic->tx_timeout_task);
+			}
 			break;
 		} else {
 			nic->cuc_cmd = cuc_resume;
···
 
 static void e100_get_defaults(struct nic *nic)
 {
-	struct param_range rfds = { .min = 64, .max = 256, .count = 64 };
+	struct param_range rfds = { .min = 16, .max = 256, .count = 64 };
 	struct param_range cbs  = { .min = 64, .max = 256, .count = 64 };
 
 	pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id);
···
 	/* Quadwords to DMA into FIFO before starting frame transmit */
 	nic->tx_threshold = 0xE0;
 
-	nic->tx_command = cpu_to_le16(cb_tx | cb_i | cb_tx_sf |
-		((nic->mac >= mac_82558_D101_A4) ? cb_cid : 0));
+	/* no interrupt for every tx completion, delay = 256us if not 557*/
+	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
+		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
 
 	/* Template for a freshly allocated RFD */
 	nic->blank_rfd.command = cpu_to_le16(cb_el);
···
 	if(nic->flags & multicast_all)
 		config->multicast_all = 0x1;		/* 1=accept, 0=no */
 
-	if(!(nic->flags & wol_magic))
+	/* disable WoL when up */
+	if(netif_running(nic->netdev) || !(nic->flags & wol_magic))
 		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */
 
 	if(nic->mac >= mac_82558_D101_A4) {
···
 		}
 	}
 
-	e100_exec_cmd(nic, cuc_dump_reset, 0);
+
+	if(e100_exec_cmd(nic, cuc_dump_reset, 0))
+		DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
 }
 
 static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
···
 	struct sk_buff *skb)
 {
 	cb->command = nic->tx_command;
+	/* interrupt every 16 packets regardless of delay */
+	if((nic->cbs_avail & ~15) == nic->cbs_avail) cb->command |= cb_i;
 	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
 	cb->u.tcb.tcb_byte_count = 0;
 	cb->u.tcb.threshold = nic->tx_threshold;
 	cb->u.tcb.tbd_count = 1;
 	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
 		skb->data, skb->len, PCI_DMA_TODEVICE));
+	// check for mapping failure?
 	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
 }
···
 		/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
 		   Issue a NOP command followed by a 1us delay before
 		   issuing the Tx command. */
-		e100_exec_cmd(nic, cuc_nop, 0);
+		if(e100_exec_cmd(nic, cuc_nop, 0))
+			DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
 		udelay(1);
 	}
···
 	return 0;
 }
 
-static inline void e100_start_receiver(struct nic *nic)
+static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
 {
+	if(!nic->rxs) return;
+	if(RU_SUSPENDED != nic->ru_running) return;
+
+	/* handle init time starts */
+	if(!rx) rx = nic->rxs;
+
 	/* (Re)start RU if suspended or idle and RFA is non-NULL */
-	if(!nic->ru_running && nic->rx_to_clean->skb) {
-		e100_exec_cmd(nic, ruc_start, nic->rx_to_clean->dma_addr);
-		nic->ru_running = 1;
+	if(rx->skb) {
+		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
+		nic->ru_running = RU_RUNNING;
 	}
 }
···
 	memcpy(rx->skb->data, &nic->blank_rfd, sizeof(struct rfd));
 	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
 		RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
+
+	if(pci_dma_mapping_error(rx->dma_addr)) {
+		dev_kfree_skb_any(rx->skb);
+		rx->skb = 0;
+		rx->dma_addr = 0;
+		return -ENOMEM;
+	}
 
 	/* Link the RFD to end of RFA by linking previous RFD to
 	 * this one, and clearing EL bit of previous. */
···
 
 	/* If data isn't ready, nothing to indicate */
 	if(unlikely(!(rfd_status & cb_complete)))
-		return -EAGAIN;
+		return -ENODATA;
 
 	/* Get actual data size */
 	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
···
 	/* Get data */
 	pci_unmap_single(nic->pdev, rx->dma_addr,
 		RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
+
+	/* this allows for a fast restart without re-enabling interrupts */
+	if(le16_to_cpu(rfd->command) & cb_el)
+		nic->ru_running = RU_SUSPENDED;
 
 	/* Pull off the RFD and put the actual data (minus eth hdr) */
 	skb_reserve(skb, sizeof(struct rfd));
···
 	unsigned int work_to_do)
 {
 	struct rx *rx;
+	int restart_required = 0;
+	struct rx *rx_to_start = NULL;
+
+	/* are we already rnr? then pay attention!!! this ensures that
+	 * the state machine progression never allows a start with a
+	 * partially cleaned list, avoiding a race between hardware
+	 * and rx_to_clean when in NAPI mode */
+	if(RU_SUSPENDED == nic->ru_running)
+		restart_required = 1;
 
 	/* Indicate newly arrived packets */
 	for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
-		if(e100_rx_indicate(nic, rx, work_done, work_to_do))
+		int err = e100_rx_indicate(nic, rx, work_done, work_to_do);
+		if(-EAGAIN == err) {
+			/* hit quota so have more work to do, restart once
+			 * cleanup is complete */
+			restart_required = 0;
+			break;
+		} else if(-ENODATA == err)
 			break; /* No more to clean */
 	}
+
+	/* save our starting point as the place we'll restart the receiver */
+	if(restart_required)
+		rx_to_start = nic->rx_to_clean;
 
 	/* Alloc new skbs to refill list */
 	for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
···
 			break; /* Better luck next time (see watchdog) */
 		}
 
-	e100_start_receiver(nic);
+	if(restart_required) {
+		// ack the rnr?
+		writeb(stat_ack_rnr, &nic->csr->scb.stat_ack);
+		e100_start_receiver(nic, rx_to_start);
+		if(work_done)
+			(*work_done)++;
+	}
 }
 
 static void e100_rx_clean_list(struct nic *nic)
 {
 	struct rx *rx;
 	unsigned int i, count = nic->params.rfds.count;
+
+	nic->ru_running = RU_UNINITIALIZED;
 
 	if(nic->rxs) {
 		for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
···
 	}
 
 	nic->rx_to_use = nic->rx_to_clean = NULL;
-	nic->ru_running = 0;
 }
 
 static int e100_rx_alloc_list(struct nic *nic)
···
 	unsigned int i, count = nic->params.rfds.count;
 
 	nic->rx_to_use = nic->rx_to_clean = NULL;
+	nic->ru_running = RU_UNINITIALIZED;
 
 	if(!(nic->rxs = kmalloc(sizeof(struct rx) * count, GFP_ATOMIC)))
 		return -ENOMEM;
···
 	}
 
 	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
+	nic->ru_running = RU_SUSPENDED;
 
 	return 0;
 }
···
 
 	/* We hit Receive No Resource (RNR); restart RU after cleaning */
 	if(stat_ack & stat_ack_rnr)
-		nic->ru_running = 0;
+		nic->ru_running = RU_SUSPENDED;
 
 	e100_disable_irq(nic);
 	netif_rx_schedule(netdev);
···
 	return 0;
 }
 
+#ifdef CONFIG_PM
 static int e100_asf(struct nic *nic)
 {
 	/* ASF can be enabled from eeprom */
···
 	   !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
 	   ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
 }
+#endif
 
 static int e100_up(struct nic *nic)
 {
···
 	if((err = e100_hw_init(nic)))
 		goto err_clean_cbs;
 	e100_set_multicast_list(nic->netdev);
-	e100_start_receiver(nic);
+	e100_start_receiver(nic, 0);
 	mod_timer(&nic->watchdog, jiffies);
 	if((err = request_irq(nic->pdev->irq, e100_intr, SA_SHIRQ,
 		nic->netdev->name, nic->netdev)))
 		goto err_no_irq;
-	e100_enable_irq(nic);
 	netif_wake_queue(nic->netdev);
+	netif_poll_enable(nic->netdev);
+	/* enable ints _after_ enabling poll, preventing a race between
+	 * disable ints+schedule */
+	e100_enable_irq(nic);
 	return 0;
 
 err_no_irq:
···
 
 static void e100_down(struct nic *nic)
 {
+	/* wait here for poll to complete */
+	netif_poll_disable(nic->netdev);
+	netif_stop_queue(nic->netdev);
 	e100_hw_reset(nic);
 	free_irq(nic->pdev->irq, nic->netdev);
 	del_timer_sync(&nic->watchdog);
 	netif_carrier_off(nic->netdev);
-	netif_stop_queue(nic->netdev);
 	e100_clean_cbs(nic);
 	e100_rx_clean_list(nic);
 }
 
 static void e100_tx_timeout(struct net_device *netdev)
+{
+	struct nic *nic = netdev_priv(netdev);
+
+	/* Reset outside of interrupt context, to avoid request_irq
+	 * in interrupt context */
+	schedule_work(&nic->tx_timeout_task);
+}
+
+static void e100_tx_timeout_task(struct net_device *netdev)
 {
 	struct nic *nic = netdev_priv(netdev);
 
···
 		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
 			BMCR_LOOPBACK);
 
-	e100_start_receiver(nic);
+	e100_start_receiver(nic, 0);
 
 	if(!(skb = dev_alloc_skb(ETH_DATA_LEN))) {
 		err = -ENOMEM;
···
 	else
 		nic->flags &= ~wol_magic;
 
-	pci_enable_wake(nic->pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
 	e100_exec_cb(nic, NULL, e100_configure);
 
 	return 0;
···
 
 	e100_get_defaults(nic);
 
+	/* locks must be initialized before calling hw_reset */
 	spin_lock_init(&nic->cb_lock);
 	spin_lock_init(&nic->cmd_lock);
 
···
 	init_timer(&nic->blink_timer);
 	nic->blink_timer.function = e100_blink_led;
 	nic->blink_timer.data = (unsigned long)nic;
+
+	INIT_WORK(&nic->tx_timeout_task,
+		(void (*)(void *))e100_tx_timeout_task, netdev);
 
 	if((err = e100_alloc(nic))) {
 		DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
···
 	   (nic->eeprom[eeprom_id] & eeprom_id_wol))
 		nic->flags |= wol_magic;
 
-	pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
+	/* ack any pending wake events, disable PME */
+	pci_enable_wake(pdev, 0, 0);
 
 	strcpy(netdev->name, "eth%d");
 	if((err = register_netdev(netdev))) {
···
 
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
-	e100_hw_init(nic);
+	/* ack any pending wake events, disable PME */
+	pci_enable_wake(pdev, 0, 0);
+	if(e100_hw_init(nic))
+		DPRINTK(HW, ERR, "e100_hw_init failed\n");
 
 	netif_device_attach(netdev);
 	if(netif_running(netdev))
···
 	return 0;
 }
 #endif
+
+
+static void e100_shutdown(struct device *dev)
+{
+	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct nic *nic = netdev_priv(netdev);
+
+#ifdef CONFIG_PM
+	pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
+#else
+	pci_enable_wake(pdev, 0, nic->flags & (wol_magic));
+#endif
+}
+
 
 static struct pci_driver e100_driver = {
 	.name =         DRV_NAME,
···
 	.suspend =      e100_suspend,
 	.resume =       e100_resume,
 #endif
+
+	.driver = {
+		.shutdown = e100_shutdown,
+	}
+
 };
 
 static int __init e100_init_module(void)
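One arithmetic detail in the e100 tx path above: cb_i is dropped from the default tx_command (an interrupt delay replaces an interrupt per completion), and e100_xmit_prepare() re-adds it only when (nic->cbs_avail & ~15) == nic->cbs_avail. That mask test holds exactly when the low four bits are clear, i.e. on every 16th value, which is what the "interrupt every 16 packets" comment promises. A standalone check of the arithmetic (plain C, loop bound illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int cbs_avail;

	for (cbs_avail = 0; cbs_avail < 48; cbs_avail++)
		if ((cbs_avail & ~15u) == cbs_avail) /* same test as the driver */
			printf("cbs_avail=%u -> set cb_i\n", cbs_avail);
	return 0; /* prints 0, 16, 32 */
}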
+33 -4
drivers/net/e1000/e1000.h
···
 /*******************************************************************************
 
 
-  Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the Free
···
 #define E1000_MAX_82544_RXD 4096
 
 /* Supported Rx Buffer Sizes */
+#define E1000_RXBUFFER_128   128    /* Used for packet split */
+#define E1000_RXBUFFER_256   256    /* Used for packet split */
 #define E1000_RXBUFFER_2048  2048
 #define E1000_RXBUFFER_4096  4096
 #define E1000_RXBUFFER_8192  8192
···
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
 #define E1000_RX_BUFFER_WRITE 16	/* Must be power of 2 */
 
-#define AUTO_ALL_MODES         0
-#define E1000_EEPROM_82544_APM 0x0004
-#define E1000_EEPROM_APME      0x0400
+#define AUTO_ALL_MODES         0
+#define E1000_EEPROM_82544_APM 0x0400
+#define E1000_EEPROM_APME      0x0400
 
 #ifndef E1000_MASTER_SLAVE
 /* Switch to override PHY master/slave setting */
 #define E1000_MASTER_SLAVE	e1000_ms_hw_default
 #endif
+
+#define E1000_MNG_VLAN_NONE -1
+/* Number of packet split data buffers (not including the header buffer) */
+#define PS_PAGE_BUFFERS MAX_PS_BUFFERS-1
 
 /* only works for sizes that are powers of 2 */
 #define E1000_ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1)))
···
 	uint16_t length;
 	uint16_t next_to_watch;
 };
+
+struct e1000_ps_page { struct page *ps_page[MAX_PS_BUFFERS]; };
+struct e1000_ps_page_dma { uint64_t ps_page_dma[MAX_PS_BUFFERS]; };
 
 struct e1000_desc_ring {
 	/* pointer to the descriptor ring memory */
···
 	unsigned int next_to_clean;
 	/* array of buffer information structs */
 	struct e1000_buffer *buffer_info;
+	/* arrays of page information for packet split */
+	struct e1000_ps_page *ps_page;
+	struct e1000_ps_page_dma *ps_page_dma;
 };
 
 #define E1000_DESC_UNUSED(R) \
 ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
 (R)->next_to_clean - (R)->next_to_use - 1)
 
+#define E1000_RX_DESC_PS(R, i)		\
+	(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
+#define E1000_RX_DESC_EXT(R, i)		\
+	(&(((union e1000_rx_desc_extended *)((R).desc))[i]))
 #define E1000_GET_DESC(R, i, type)	(&(((struct type *)((R).desc))[i]))
 #define E1000_RX_DESC(R, i)		E1000_GET_DESC(R, i, e1000_rx_desc)
 #define E1000_TX_DESC(R, i)		E1000_GET_DESC(R, i, e1000_tx_desc)
···
 	struct timer_list watchdog_timer;
 	struct timer_list phy_info_timer;
 	struct vlan_group *vlgrp;
+	uint16_t mng_vlan_id;
 	uint32_t bd_number;
 	uint32_t rx_buffer_len;
 	uint32_t part_num;
···
 	boolean_t detect_tx_hung;
 
 	/* RX */
+#ifdef CONFIG_E1000_NAPI
+	boolean_t (*clean_rx) (struct e1000_adapter *adapter, int *work_done,
+			       int work_to_do);
+#else
+	boolean_t (*clean_rx) (struct e1000_adapter *adapter);
+#endif
+	void (*alloc_rx_buf) (struct e1000_adapter *adapter);
 	struct e1000_desc_ring rx_ring;
 	uint64_t hw_csum_err;
 	uint64_t hw_csum_good;
 	uint32_t rx_int_delay;
 	uint32_t rx_abs_int_delay;
 	boolean_t rx_csum;
+	boolean_t rx_ps;
 	uint32_t gorcl;
 	uint64_t gorcl_old;
+	uint16_t rx_ps_bsize0;
 
 	/* Interrupt Throttle Rate */
 	uint32_t itr;
···
 
 
 	int msg_enable;
+#ifdef CONFIG_PCI_MSI
+	boolean_t have_msi;
+#endif
 };
 #endif /* _E1000_H_ */
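E1000_DESC_UNUSED(), visible unchanged in the hunk above, is the ring-accounting macro the driver uses to count free descriptors; it reserves one slot so a full ring never looks identical to an empty one. A self-contained sketch (the ring struct and index values are hypothetical):

#include <stdio.h>

struct ring { unsigned int count, next_to_use, next_to_clean; };

#define E1000_DESC_UNUSED(R) \
	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	(R)->next_to_clean - (R)->next_to_use - 1)

int main(void)
{
	struct ring r = { .count = 256, .next_to_use = 10, .next_to_clean = 4 };

	/* clean <= use: 256 + 4 - 10 - 1 = 249 free */
	printf("%u\n", E1000_DESC_UNUSED(&r));

	r.next_to_use = 2; /* producer has wrapped around the ring */
	/* clean > use: 0 + 4 - 2 - 1 = 1 free */
	printf("%u\n", E1000_DESC_UNUSED(&r));
	return 0;
}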
+68 -35
drivers/net/e1000/e1000_ethtool.c
···
 /*******************************************************************************
 
 
-  Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the Free
···
 	{ "rx_crc_errors", E1000_STAT(net_stats.rx_crc_errors) },
 	{ "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) },
 	{ "rx_fifo_errors", E1000_STAT(net_stats.rx_fifo_errors) },
+	{ "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
 	{ "rx_missed_errors", E1000_STAT(net_stats.rx_missed_errors) },
 	{ "tx_aborted_errors", E1000_STAT(net_stats.tx_aborted_errors) },
 	{ "tx_carrier_errors", E1000_STAT(net_stats.tx_carrier_errors) },
···
 	tx_old = adapter->tx_ring;
 	rx_old = adapter->rx_ring;
 
-	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+	if((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
 		return -EINVAL;
 
 	if(netif_running(adapter->netdev))
···
 	/* Hook up test interrupt handler just for this test */
 	if(!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) {
 		shared_int = FALSE;
-	} else if(request_irq(irq, &e1000_test_intr, SA_SHIRQ,
-		  netdev->name, netdev)){
+	} else if(request_irq(irq, &e1000_test_intr, SA_SHIRQ,
+		  netdev->name, netdev)){
 		*data = 1;
 		return -1;
 	}
···
 		 * test failed.
 		 */
 		adapter->test_icr = 0;
-		E1000_WRITE_REG(&adapter->hw, IMC,
-				(~mask & 0x00007FFF));
-		E1000_WRITE_REG(&adapter->hw, ICS,
-				(~mask & 0x00007FFF));
+		E1000_WRITE_REG(&adapter->hw, IMC, ~mask & 0x00007FFF);
+		E1000_WRITE_REG(&adapter->hw, ICS, ~mask & 0x00007FFF);
 		msec_delay(10);
 
 		if(adapter->test_icr) {
···
 
 	/* Setup Tx descriptor ring and Tx buffers */
 
-	txdr->count = 80;
+	if(!txdr->count)
+		txdr->count = E1000_DEFAULT_TXD;
 
 	size = txdr->count * sizeof(struct e1000_buffer);
 	if(!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
···
 
 	/* Setup Rx descriptor ring and Rx buffers */
 
-	rxdr->count = 80;
+	if(!rxdr->count)
+		rxdr->count = E1000_DEFAULT_RXD;
 
 	size = rxdr->count * sizeof(struct e1000_buffer);
 	if(!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
···
 		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i);
 		struct sk_buff *skb;
 
-		if(!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN,
+		if(!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN,
 				GFP_KERNEL))) {
 			ret_val = 6;
 			goto err_nomem;
···
 	struct e1000_desc_ring *txdr = &adapter->test_tx_ring;
 	struct e1000_desc_ring *rxdr = &adapter->test_rx_ring;
 	struct pci_dev *pdev = adapter->pdev;
-	int i, ret_val;
+	int i, j, k, l, lc, good_cnt, ret_val=0;
+	unsigned long time;
 
 	E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1);
 
-	for(i = 0; i < 64; i++) {
-		e1000_create_lbtest_frame(txdr->buffer_info[i].skb, 1024);
-		pci_dma_sync_single_for_device(pdev, txdr->buffer_info[i].dma,
-				txdr->buffer_info[i].length,
-				PCI_DMA_TODEVICE);
-	}
-	E1000_WRITE_REG(&adapter->hw, TDT, i);
+	/* Calculate the loop count based on the largest descriptor ring
+	 * The idea is to wrap the largest ring a number of times using 64
+	 * send/receive pairs during each loop
+	 */
 
-	msec_delay(200);
+	if(rxdr->count <= txdr->count)
+		lc = ((txdr->count / 64) * 2) + 1;
+	else
+		lc = ((rxdr->count / 64) * 2) + 1;
 
-	i = 0;
-	do {
-		pci_dma_sync_single_for_cpu(pdev, rxdr->buffer_info[i].dma,
-				rxdr->buffer_info[i].length,
-				PCI_DMA_FROMDEVICE);
-
-		ret_val = e1000_check_lbtest_frame(rxdr->buffer_info[i].skb,
-						   1024);
-		i++;
-	} while (ret_val != 0 && i < 64);
-
+	k = l = 0;
+	for(j = 0; j <= lc; j++) { /* loop count loop */
+		for(i = 0; i < 64; i++) { /* send the packets */
+			e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
+					1024);
+			pci_dma_sync_single_for_device(pdev,
+					txdr->buffer_info[k].dma,
+					txdr->buffer_info[k].length,
+					PCI_DMA_TODEVICE);
+			if(unlikely(++k == txdr->count)) k = 0;
+		}
+		E1000_WRITE_REG(&adapter->hw, TDT, k);
+		msec_delay(200);
+		time = jiffies; /* set the start time for the receive */
+		good_cnt = 0;
+		do { /* receive the sent packets */
+			pci_dma_sync_single_for_cpu(pdev,
+					rxdr->buffer_info[l].dma,
+					rxdr->buffer_info[l].length,
+					PCI_DMA_FROMDEVICE);
+
+			ret_val = e1000_check_lbtest_frame(
+					rxdr->buffer_info[l].skb,
+					1024);
+			if(!ret_val)
+				good_cnt++;
+			if(unlikely(++l == rxdr->count)) l = 0;
+			/* time + 20 msecs (200 msecs on 2.4) is more than
+			 * enough time to complete the receives, if it's
+			 * exceeded, break and error off
+			 */
+		} while (good_cnt < 64 && jiffies < (time + 20));
+		if(good_cnt != 64) {
+			ret_val = 13; /* ret_val is the same as mis-compare */
+			break;
+		}
+		if(jiffies >= (time + 2)) {
+			ret_val = 14; /* error code for time out error */
+			break;
+		}
+	} /* end loop count loop */
 	return ret_val;
 }
···
 e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
 {
 	*data = 0;
-
 	if (adapter->hw.media_type == e1000_media_type_internal_serdes) {
 		int i = 0;
 		adapter->hw.serdes_link_down = TRUE;
 
-		/* on some blade server designs link establishment */
-		/* could take as long as 2-3 minutes. */
+		/* On some blade server designs, link establishment
+		 * could take as long as 2-3 minutes */
 		do {
 			e1000_check_for_link(&adapter->hw);
 			if (adapter->hw.serdes_link_down == FALSE)
···
 			msec_delay(20);
 		} while (i++ < 3750);
 
-		*data = 1;
+		*data = 1;
 	} else {
 		e1000_check_for_link(&adapter->hw);
+		if(adapter->hw.autoneg)  /* if auto_neg is set wait for it */
+			msec_delay(4000);
 
 		if(!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
 			*data = 1;
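The rewritten loopback test above derives its outer loop count from the larger of the two test rings so that the 64-frame send/receive bursts wrap the whole ring at least twice. The formula checked standalone (ring size 80, the old fixed test size, used for illustration):

#include <stdio.h>

int main(void)
{
	unsigned int txd = 80, rxd = 80; /* illustrative test ring sizes */
	unsigned int larger = (rxd <= txd) ? txd : rxd;
	unsigned int lc = ((larger / 64) * 2) + 1; /* same formula as the driver */

	/* the j-loop runs lc + 1 times, 64 frames per iteration */
	printf("lc=%u, frames=%u >= 2*%u\n", lc, 64 * (lc + 1), larger);
	return 0; /* lc=3, frames=256 >= 160 */
}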
+1603 -396
drivers/net/e1000/e1000_hw.c
···
 /*******************************************************************************
 
 
-  Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the Free
···
 static int32_t e1000_acquire_eeprom(struct e1000_hw *hw);
 static void e1000_release_eeprom(struct e1000_hw *hw);
 static void e1000_standby_eeprom(struct e1000_hw *hw);
-static int32_t e1000_id_led_init(struct e1000_hw * hw);
 static int32_t e1000_set_vco_speed(struct e1000_hw *hw);
 static int32_t e1000_polarity_reversal_workaround(struct e1000_hw *hw);
 static int32_t e1000_set_phy_mode(struct e1000_hw *hw);
 
 /* IGP cable length table */
 static const
···
     100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110,
     110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120};
 
 
 /******************************************************************************
  * Set the phy type member in the hw struct.
···
 {
     DEBUGFUNC("e1000_set_phy_type");
 
     switch(hw->phy_id) {
     case M88E1000_E_PHY_ID:
     case M88E1000_I_PHY_ID:
     case M88E1011_I_PHY_ID:
         hw->phy_type = e1000_phy_m88;
         break;
     case IGP01E1000_I_PHY_ID:
···
     uint16_t phy_saved_data;
 
     DEBUGFUNC("e1000_phy_init_script");
-
 
     if(hw->phy_init_script) {
         msec_delay(20);
···
     case E1000_DEV_ID_82546GB_FIBER:
     case E1000_DEV_ID_82546GB_SERDES:
     case E1000_DEV_ID_82546GB_PCIE:
         hw->mac_type = e1000_82546_rev_3;
         break;
     case E1000_DEV_ID_82541EI:
···
     case E1000_DEV_ID_82547GI:
         hw->mac_type = e1000_82547_rev_2;
         break;
     default:
         /* Should never have loaded on this device */
         return -E1000_ERR_MAC_TYPE;
     }
 
     switch(hw->mac_type) {
     case e1000_82541:
     case e1000_82547:
     case e1000_82541_rev_2:
···
     uint32_t icr;
     uint32_t manc;
     uint32_t led_ctrl;
 
     DEBUGFUNC("e1000_reset_hw");
 
···
     if(hw->mac_type == e1000_82542_rev2_0) {
         DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
         e1000_pci_clear_mwi(hw);
     }
 
     /* Clear interrupt mask to stop board from generating interrupts */
···
 
     /* Must reset the PHY before resetting the MAC */
     if((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
-        E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_PHY_RST));
         msec_delay(5);
     }
 
     /* Issue a global reset to the MAC.  This will reset the chip's
···
         /* Wait for EEPROM reload */
         msec_delay(20);
         break;
     default:
         /* Wait for EEPROM reload (it happens automatically) */
         msec_delay(5);
···
     }
 
     /* Disable HW ARPs on ASF enabled adapters */
-    if(hw->mac_type >= e1000_82540) {
         manc = E1000_READ_REG(hw, MANC);
         manc &= ~(E1000_MANC_ARP_EN);
         E1000_WRITE_REG(hw, MANC, manc);
···
     uint16_t pcix_stat_hi_word;
     uint16_t cmd_mmrbc;
     uint16_t stat_mmrbc;
     DEBUGFUNC("e1000_init_hw");
 
     /* Initialize Identification LED */
···
 
     /* Disabling VLAN filtering. */
     DEBUGOUT("Initializing the IEEE VLAN\n");
-    E1000_WRITE_REG(hw, VET, 0);
-
     e1000_clear_vfta(hw);
 
     /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
···
 
     /* Zero out the Multicast HASH table */
     DEBUGOUT("Zeroing the MTA\n");
-    for(i = 0; i < E1000_MC_TBL_SIZE; i++)
         E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
 
     /* Set the PCI priority bit correctly in the CTRL register.  This
      * determines if the adapter gives priority to receives, or if it
-     * gives equal priority to transmits and receives.
      */
-    if(hw->dma_fairness) {
         ctrl = E1000_READ_REG(hw, CTRL);
         E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PRIOR);
     }
···
     if(hw->mac_type > e1000_82544) {
         ctrl = E1000_READ_REG(hw, TXDCTL);
         ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
         E1000_WRITE_REG(hw, TXDCTL, ctrl);
     }
 
     /* Clear all of the statistics registers (clear on read).  It is
      * important that we do this after we have tried to establish link
···
      * control setting, then the variable hw->fc will
      * be initialized based on a value in the EEPROM.
      */
-    if(e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data) < 0) {
         DEBUGOUT("EEPROM Read Error\n");
         return -E1000_ERR_EEPROM;
     }
···
     E1000_WRITE_REG(hw, FCAL, FLOW_CONTROL_ADDRESS_LOW);
     E1000_WRITE_REG(hw, FCAH, FLOW_CONTROL_ADDRESS_HIGH);
     E1000_WRITE_REG(hw, FCT, FLOW_CONTROL_TYPE);
     E1000_WRITE_REG(hw, FCTTV, hw->fc_pause_time);
 
     /* Set the flow control receive threshold registers.  Normally,
···
 }
 
 /******************************************************************************
-* Detects which PHY is present and the speed and duplex
 *
 * hw - Struct containing variables accessed by shared code
 ******************************************************************************/
 static int32_t
-e1000_setup_copper_link(struct e1000_hw *hw)
 {
     uint32_t ctrl;
-    uint32_t led_ctrl;
     int32_t ret_val;
-    uint16_t i;
     uint16_t phy_data;
 
-    DEBUGFUNC("e1000_setup_copper_link");
 
     ctrl = E1000_READ_REG(hw, CTRL);
     /* With 82543, we need to force speed and duplex on the MAC equal to what
···
     } else {
         ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU);
         E1000_WRITE_REG(hw, CTRL, ctrl);
-        e1000_phy_hw_reset(hw);
     }
 
     /* Make sure we have a valid PHY */
···
        hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2)
         hw->phy_reset_disable = FALSE;
 
-    if(!hw->phy_reset_disable) {
-        if (hw->phy_type == e1000_phy_igp) {
 
-            ret_val = e1000_phy_reset(hw);
-            if(ret_val) {
-                DEBUGOUT("Error Resetting the PHY\n");
-                return ret_val;
-            }
 
-            /* Wait 10ms for MAC to configure PHY from eeprom settings */
-            msec_delay(15);
 
-            /* Configure activity LED after PHY reset */
-            led_ctrl = E1000_READ_REG(hw, LEDCTL);
-            led_ctrl &= IGP_ACTIVITY_LED_MASK;
-            led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
-            E1000_WRITE_REG(hw, LEDCTL, led_ctrl);
 
-            /* disable lplu d3 during driver init */
-            ret_val = e1000_set_d3_lplu_state(hw, FALSE);
-            if(ret_val) {
-                DEBUGOUT("Error Disabling LPLU D3\n");
-                return ret_val;
-            }
 
-            /* Configure mdi-mdix settings */
-            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL,
-                                         &phy_data);
             if(ret_val)
                 return ret_val;
-
-            if((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
-                hw->dsp_config_state = e1000_dsp_config_disabled;
-                /* Force MDI for earlier revs of the IGP PHY */
-                phy_data &= ~(IGP01E1000_PSCR_AUTO_MDIX |
-                              IGP01E1000_PSCR_FORCE_MDI_MDIX);
-                hw->mdix = 1;
-
-            } else {
-                hw->dsp_config_state = e1000_dsp_config_enabled;
-                phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
-
-                switch (hw->mdix) {
-                case 1:
-                    phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
-                    break;
-                case 2:
-                    phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
-                    break;
-                case 0:
-                default:
-                    phy_data |= IGP01E1000_PSCR_AUTO_MDIX;
-                    break;
-                }
-            }
-            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL,
-                                          phy_data);
-            if(ret_val)
-                return ret_val;
-
-            /* set auto-master slave resolution settings */
-            if(hw->autoneg) {
-                e1000_ms_type phy_ms_setting = hw->master_slave;
-
-                if(hw->ffe_config_state == e1000_ffe_config_active)
-                    hw->ffe_config_state = e1000_ffe_config_enabled;
-
-                if(hw->dsp_config_state == e1000_dsp_config_activated)
-                    hw->dsp_config_state = e1000_dsp_config_enabled;
-
-                /* when autonegotiation advertisment is only 1000Mbps then we
-                 * should disable SmartSpeed and enable Auto MasterSlave
-                 * resolution as hardware default. */
-                if(hw->autoneg_advertised == ADVERTISE_1000_FULL) {
-                    /* Disable SmartSpeed */
-                    ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
-                                                 &phy_data);
-                    if(ret_val)
-                        return ret_val;
-                    phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
-                    ret_val = e1000_write_phy_reg(hw,
                                                   IGP01E1000_PHY_PORT_CONFIG,
                                                   phy_data);
-                    if(ret_val)
-                        return ret_val;
-                    /* Set auto Master/Slave resolution process */
-                    ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
-                    if(ret_val)
-                        return ret_val;
-                    phy_data &= ~CR_1000T_MS_ENABLE;
-                    ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
-                    if(ret_val)
-                        return ret_val;
-                }
-
-                ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
-                if(ret_val)
-                    return ret_val;
-
-                /* load defaults for future use */
-                hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ?
-                                            ((phy_data & CR_1000T_MS_VALUE) ?
-                                             e1000_ms_force_master :
-                                             e1000_ms_force_slave) :
-                                            e1000_ms_auto;
-
-                switch (phy_ms_setting) {
-                case e1000_ms_force_master:
-                    phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
-                    break;
-                case e1000_ms_force_slave:
-                    phy_data |= CR_1000T_MS_ENABLE;
-                    phy_data &= ~(CR_1000T_MS_VALUE);
-                    break;
-                case e1000_ms_auto:
-                    phy_data &= ~CR_1000T_MS_ENABLE;
-                default:
-                    break;
-                }
-                ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
-                if(ret_val)
-                    return ret_val;
-            }
-        } else {
-            /* Enable CRS on TX. This must be set for half-duplex operation. */
-            ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL,
-                                         &phy_data);
             if(ret_val)
                 return ret_val;
 
-            phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
 
-            /* Options:
-             *   MDI/MDI-X = 0 (default)
-             *   0 - Auto for all speeds
-             *   1 - MDI mode
-             *   2 - MDI-X mode
-             *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
-             */
-            phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
 
-            switch (hw->mdix) {
-            case 1:
-                phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
-                break;
-            case 2:
-                phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
-                break;
-            case 3:
-                phy_data |= M88E1000_PSCR_AUTO_X_1000T;
-                break;
-            case 0:
             default:
-                phy_data |= M88E1000_PSCR_AUTO_X_MODE;
-                break;
-            }
 
-            /* Options:
-             *   disable_polarity_correction = 0 (default)
-             *       Automatic Correction for Reversed Cable Polarity
-             *   0 - Disabled
-             *   1 - Enabled
-             */
-            phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
-            if(hw->disable_polarity_correction == 1)
-                phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
-            ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL,
-                                          phy_data);
-            if(ret_val)
-                return ret_val;
 
-            /* Force TX_CLK in the Extended PHY Specific Control Register
-             * to 25MHz clock.
-             */
-            ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
-                                         &phy_data);
-            if(ret_val)
-                return ret_val;
 
-            phy_data |= M88E1000_EPSCR_TX_CLK_25;
 
-            if (hw->phy_revision < M88E1011_I_REV_4) {
-                /* Configure Master and Slave downshift values */
-                phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
                               M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
-                phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
                              M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
-                ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
-                                              phy_data);
-                if(ret_val)
-                    return ret_val;
-            }
 
-            /* SW Reset the PHY so all changes take effect */
-            ret_val = e1000_phy_reset(hw);
-            if(ret_val) {
-                DEBUGOUT("Error Resetting the PHY\n");
-                return ret_val;
-            }
         }
 
-        /* Options:
-         *   autoneg = 1 (default)
-         *      PHY will advertise value(s) parsed from
-         *      autoneg_advertised and fc
-         *   autoneg = 0
-         *      PHY will be set to 10H, 10F, 100H, or 100F
-         *      depending on value parsed from forced_speed_duplex.
-         */
 
-        /* Is autoneg enabled?  This is enabled by default or by software
-         * override.  If so, call e1000_phy_setup_autoneg routine to parse the
-         * autoneg_advertised and fc options.  If autoneg is NOT enabled, then
-         * the user should have provided a speed/duplex override.  If so, then
-         * call e1000_phy_force_speed_duplex to parse and set this up.
-         */
-        if(hw->autoneg) {
-            /* Perform some bounds checking on the hw->autoneg_advertised
-             * parameter.  If this variable is zero, then set it to the default.
-             */
-            hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT;
 
-            /* If autoneg_advertised is zero, we assume it was not defaulted
-             * by the calling code so we set to advertise full capability.
-             */
-            if(hw->autoneg_advertised == 0)
-                hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
 
-            DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
-            ret_val = e1000_phy_setup_autoneg(hw);
-            if(ret_val) {
-                DEBUGOUT("Error Setting up Auto-Negotiation\n");
-                return ret_val;
-            }
-            DEBUGOUT("Restarting Auto-Neg\n");
-
-            /* Restart auto-negotiation by setting the Auto Neg Enable bit and
-             * the Auto Neg Restart bit in the PHY control register.
-             */
-            ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
-            if(ret_val)
-                return ret_val;
-
-            phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
-            ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
-            if(ret_val)
-                return ret_val;
-
-            /* Does the user want to wait for Auto-Neg to complete here, or
-             * check at a later time (for example, callback routine).
-             */
-            if(hw->wait_autoneg_complete) {
-                ret_val = e1000_wait_autoneg(hw);
-                if(ret_val) {
-                    DEBUGOUT("Error while waiting for autoneg to complete\n");
-                    return ret_val;
-                }
-            }
-            hw->get_link_status = TRUE;
-        } else {
-            DEBUGOUT("Forcing speed and duplex\n");
-            ret_val = e1000_phy_force_speed_duplex(hw);
-            if(ret_val) {
-                DEBUGOUT("Error Forcing Speed and Duplex\n");
-                return ret_val;
-            }
         }
-    } /* !hw->phy_reset_disable */
 
     /* Check link status. Wait up to 100 microseconds for link to become
      * valid.
···
         return ret_val;
 
     if(phy_data & MII_SR_LINK_STATUS) {
-        /* We have link, so we need to finish the config process:
-         *   1) Set up the MAC to the current PHY speed/duplex
-         *      if we are on 82543.  If we
-         *      are on newer silicon, we only need to configure
-         *      collision distance in the Transmit Control Register.
-         *   2) Set up flow control on the MAC to that established with
-         *      the link partner.
-         */
-        if(hw->mac_type >= e1000_82544) {
-            e1000_config_collision_dist(hw);
-        } else {
-            ret_val = e1000_config_mac_to_phy(hw);
-            if(ret_val) {
-                DEBUGOUT("Error configuring MAC to PHY settings\n");
-                return ret_val;
-            }
-        }
-        ret_val = e1000_config_fc_after_link_up(hw);
-        if(ret_val) {
-            DEBUGOUT("Error Configuring Flow Control\n");
             return ret_val;
-        }
-        DEBUGOUT("Valid link established!!!\n");
-
-        if(hw->phy_type == e1000_phy_igp) {
-            ret_val = e1000_config_dsp_after_link_change(hw, TRUE);
-            if(ret_val) {
-                DEBUGOUT("Error Configuring DSP after link up\n");
-                return ret_val;
-            }
-        }
         DEBUGOUT("Valid link established!!!\n");
         return E1000_SUCCESS;
     }
···
     if(ret_val)
         return ret_val;
 
-    /* Read the MII 1000Base-T Control Register (Address 9). */
-    ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
-    if(ret_val)
-        return ret_val;
 
     /* Need to parse both autoneg_advertised and fc and set up
      * the appropriate PHY registers.  First we will parse for
···
 
     DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
 
-    ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
     if(ret_val)
         return ret_val;
···
 
     DEBUGFUNC("e1000_config_mac_to_phy");
 
     /* Read the Device Control Register and set the bits to Force Speed
      * and Duplex.
      */
···
     /* Set up duplex in the Device Control and Transmit Control
      * registers depending on negotiated values.
      */
-    if (hw->phy_type == e1000_phy_igp) {
-        ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS,
-                                     &phy_data);
-        if(ret_val)
-            return ret_val;
 
-        if(phy_data & IGP01E1000_PSSR_FULL_DUPLEX) ctrl |= E1000_CTRL_FD;
-        else ctrl &= ~E1000_CTRL_FD;
 
-        e1000_config_collision_dist(hw);
 
-        /* Set up speed in the Device Control register depending on
-         * negotiated values.
-         */
-        if((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
-           IGP01E1000_PSSR_SPEED_1000MBPS)
-            ctrl |= E1000_CTRL_SPD_1000;
-        else if((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
-                IGP01E1000_PSSR_SPEED_100MBPS)
-            ctrl |= E1000_CTRL_SPD_100;
-    } else {
-        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
-                                     &phy_data);
-        if(ret_val)
-            return ret_val;
 
-        if(phy_data & M88E1000_PSSR_DPLX) ctrl |= E1000_CTRL_FD;
-        else ctrl &= ~E1000_CTRL_FD;
-
-        e1000_config_collision_dist(hw);
-
-        /* Set up speed in the Device Control register depending on
-         * negotiated values.
-         */
-        if((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
-            ctrl |= E1000_CTRL_SPD_1000;
-        else if((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
-            ctrl |= E1000_CTRL_SPD_100;
-    }
     /* Write the configured values back to the Device Control Reg. */
     E1000_WRITE_REG(hw, CTRL, ctrl);
     return E1000_SUCCESS;
···
 
     DEBUGFUNC("e1000_read_phy_reg");
 
-
-    if(hw->phy_type == e1000_phy_igp &&
        (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
         ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
                                          (uint16_t)reg_addr);
···
 
     DEBUGFUNC("e1000_write_phy_reg");
 
-
-    if(hw->phy_type == e1000_phy_igp &&
        (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
         ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
                                          (uint16_t)reg_addr);
···
     return E1000_SUCCESS;
 }
 
 /******************************************************************************
 * Returns the PHY to the power-on reset state
 *
 * hw - Struct containing variables accessed by shared code
 ******************************************************************************/
-void
 e1000_phy_hw_reset(struct e1000_hw *hw)
 {
     uint32_t ctrl, ctrl_ext;
     uint32_t led_ctrl;
 
     DEBUGFUNC("e1000_phy_hw_reset");
 
     DEBUGOUT("Resetting Phy...\n");
 
···
         led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
         E1000_WRITE_REG(hw, LEDCTL, led_ctrl);
     }
 }
 
 /******************************************************************************
···
 
     DEBUGFUNC("e1000_phy_reset");
 
-    if(hw->mac_type != e1000_82541_rev_2) {
         ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
         if(ret_val)
             return ret_val;
···
             return ret_val;
 
         udelay(1);
-    } else e1000_phy_hw_reset(hw);
 
-    if(hw->phy_type == e1000_phy_igp)
         e1000_phy_init_script(hw);
 
     return E1000_SUCCESS;
···
     case e1000_82547:
     case e1000_82547_rev_2:
         if(hw->phy_id == IGP01E1000_I_PHY_ID) match = TRUE;
         break;
     default:
         DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type);
···
 
     /* The downshift status is checked only once, after link is established,
      * and it stored in the hw->speed_downgraded parameter. */
-    phy_info->downshift = hw->speed_downgraded;
 
     /* IGP01E1000 does not need to support it. */
     phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal;
···
     if(ret_val)
         return ret_val;
 
-    /* transalte to old method */
     average = (max_length + min_length) / 2;
 
     if(average <= e1000_igp_cable_length_50)
···
 
     /* The downshift status is checked only once, after link is established,
      * and it stored in the hw->speed_downgraded parameter. */
-    phy_info->downshift = hw->speed_downgraded;
 
     ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
     if(ret_val)
···
         return -E1000_ERR_CONFIG;
     }
 
-    if(hw->phy_type == e1000_phy_igp)
         return e1000_phy_igp_get_info(hw, phy_info);
     else
         return e1000_phy_m88_get_info(hw, phy_info);
···
 *
 * hw - Struct containing variables accessed by shared code
 *****************************************************************************/
-void
 e1000_init_eeprom_params(struct e1000_hw *hw)
 {
     struct e1000_eeprom_info *eeprom = &hw->eeprom;
     uint32_t eecd = E1000_READ_REG(hw, EECD);
     uint16_t eeprom_size;
 
     DEBUGFUNC("e1000_init_eeprom_params");
···
         eeprom->opcode_bits = 3;
         eeprom->address_bits = 6;
         eeprom->delay_usec = 50;
         break;
     case e1000_82540:
     case e1000_82545:
···
             eeprom->word_size = 64;
             eeprom->address_bits = 6;
         }
         break;
     case e1000_82541:
     case e1000_82541_rev_2:
···
                 eeprom->address_bits = 6;
             }
         }
         break;
     default:
         break;
     }
 
     if (eeprom->type == e1000_eeprom_spi) {
-        eeprom->word_size = 64;
-        if (e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size) == 0) {
-            eeprom_size &= EEPROM_SIZE_MASK;
-
-            switch (eeprom_size) {
-            case EEPROM_SIZE_16KB:
-                eeprom->word_size = 8192;
-                break;
-            case EEPROM_SIZE_8KB:
-                eeprom->word_size = 4096;
-                break;
-            case EEPROM_SIZE_4KB:
-                eeprom->word_size = 2048;
-                break;
-            case EEPROM_SIZE_2KB:
-                eeprom->word_size = 1024;
-                break;
-            case EEPROM_SIZE_1KB:
-                eeprom->word_size = 512;
-                break;
-            case EEPROM_SIZE_512B:
-                eeprom->word_size = 256;
-                break;
-            case EEPROM_SIZE_128B:
-            default:
-                eeprom->word_size = 64;
-                break;
-            }
         }
     }
 }
 
 /******************************************************************************
···
 
     DEBUGFUNC("e1000_acquire_eeprom");
 
     eecd = E1000_READ_REG(hw, EECD);
 
     /* Request EEPROM Access */
     if(hw->mac_type > e1000_82544) {
         eecd |= E1000_EECD_REQ;
···
             DEBUGOUT("Could not acquire EEPROM grant\n");
             return -E1000_ERR_EEPROM;
         }
     }
 
     /* Setup EEPROM for Read/Write */
···
         eecd &= ~E1000_EECD_REQ;
         E1000_WRITE_REG(hw, EECD, eecd);
     }
 }
 
 /******************************************************************************
···
 {
     struct e1000_eeprom_info *eeprom = &hw->eeprom;
     uint32_t i = 0;
 
     DEBUGFUNC("e1000_read_eeprom");
     /* A check for invalid values:  offset too large, too many words, and not
      * enough words.
      */
···
         return -E1000_ERR_EEPROM;
     }
 
-    /* Prepare the EEPROM for reading  */
-    if(e1000_acquire_eeprom(hw) != E1000_SUCCESS)
-        return -E1000_ERR_EEPROM;
 
     if(eeprom->type == e1000_eeprom_spi) {
         uint16_t word_in;
···
 }
 
 /******************************************************************************
 * Verifies that the EEPROM has a valid checksum
 *
 * hw - Struct containing variables accessed by shared code
···
     uint16_t i, eeprom_data;
 
     DEBUGFUNC("e1000_validate_eeprom_checksum");
 
     for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
         if(e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
···
     if(e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
         DEBUGOUT("EEPROM Write Error\n");
         return -E1000_ERR_EEPROM;
     }
     return E1000_SUCCESS;
 }
···
         DEBUGOUT("\"words\" parameter out of bounds\n");
         return -E1000_ERR_EEPROM;
     }
 
     /* Prepare the EEPROM for writing  */
     if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
···
 }
 
 /******************************************************************************
 * Reads the adapter's part number from the EEPROM
 *
 * hw - Struct containing variables accessed by shared code
···
 e1000_init_rx_addrs(struct e1000_hw *hw)
 {
     uint32_t i;
 
     DEBUGFUNC("e1000_init_rx_addrs");
 
···
 
     e1000_rar_set(hw, hw->mac_addr, 0);
 
     /* Zero out the other 15 receive addresses. */
     DEBUGOUT("Clearing RAR[1-15]\n");
-    for(i = 1; i < E1000_RAR_ENTRIES; i++) {
         E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
         E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
     }
···
 {
     uint32_t hash_value;
     uint32_t i;
-
     DEBUGFUNC("e1000_mc_addr_list_update");
 
     /* Set the new number of MC addresses that we are being requested to use. */
···
 
     /* Clear RAR[1-15] */
     DEBUGOUT(" Clearing RAR[1-15]\n");
-    for(i = rar_used_count; i < E1000_RAR_ENTRIES; i++) {
         E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
         E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
     }
 
     /* Clear the MTA */
     DEBUGOUT(" Clearing MTA\n");
-    for(i = 0; i < E1000_NUM_MTA_REGISTERS; i++) {
         E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
     }
 
···
         /* Place this multicast address in the RAR if there is room, *
          * else put it in the MTA
          */
-        if(rar_used_count < E1000_RAR_ENTRIES) {
             e1000_rar_set(hw,
                           mc_addr_list + (i * (ETH_LENGTH_OF_ADDRESS + pad)),
                           rar_used_count);
···
     }
 
     hash_value &= 0xFFF;
     return hash_value;
 }
···
 e1000_clear_vfta(struct e1000_hw *hw)
 {
     uint32_t offset;
 
-    for(offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++)
-        E1000_WRITE_REG_ARRAY(hw, VFTA, offset, 0);
 }
 
-static int32_t
 e1000_id_led_init(struct e1000_hw * hw)
 {
     uint32_t ledctl;
···
     temp = E1000_READ_REG(hw, MGTPRC);
     temp = E1000_READ_REG(hw, MGTPDC);
     temp = E1000_READ_REG(hw, MGTPTC);
 }
 
 /******************************************************************************
···
     hw->bus_speed = e1000_bus_speed_unknown;
     hw->bus_width = e1000_bus_width_unknown;
     break;
     default:
         status = E1000_READ_REG(hw, STATUS);
         hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ?
··· 5250 5251 /* Use old method for Phy older than IGP */ 5252 if(hw->phy_type == e1000_phy_m88) { 5253 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, 5254 &phy_data); 5255 if(ret_val) ··· 5367 return ret_val; 5368 *polarity = (phy_data & M88E1000_PSSR_REV_POLARITY) >> 5369 M88E1000_PSSR_REV_POLARITY_SHIFT; 5370 - } else if(hw->phy_type == e1000_phy_igp) { 5371 /* Read the Status register to check the speed */ 5372 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, 5373 &phy_data); ··· 5420 5421 DEBUGFUNC("e1000_check_downshift"); 5422 5423 - if(hw->phy_type == e1000_phy_igp) { 5424 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, 5425 &phy_data); 5426 if(ret_val) ··· 5437 hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >> 5438 M88E1000_PSSR_DOWNSHIFT_SHIFT; 5439 } 5440 return E1000_SUCCESS; 5441 } 5442 ··· 5552 if(ret_val) 5553 return ret_val; 5554 5555 - msec_delay(20); 5556 5557 ret_val = e1000_write_phy_reg(hw, 0x0000, 5558 IGP01E1000_IEEE_FORCE_GIGA); ··· 5576 if(ret_val) 5577 return ret_val; 5578 5579 - msec_delay(20); 5580 5581 /* Now enable the transmitter */ 5582 ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); ··· 5601 if(ret_val) 5602 return ret_val; 5603 5604 - msec_delay(20); 5605 5606 ret_val = e1000_write_phy_reg(hw, 0x0000, 5607 IGP01E1000_IEEE_FORCE_GIGA); ··· 5617 if(ret_val) 5618 return ret_val; 5619 5620 - msec_delay(20); 5621 5622 /* Now enable the transmitter */ 5623 ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); ··· 5692 uint16_t phy_data; 5693 DEBUGFUNC("e1000_set_d3_lplu_state"); 5694 5695 - if(!((hw->mac_type == e1000_82541_rev_2) || 5696 - (hw->mac_type == e1000_82547_rev_2))) 5697 return E1000_SUCCESS; 5698 5699 /* During driver activity LPLU should not be used or it will attain link 5700 * from the lowest speeds starting from 10Mbps. The capability is used for 5701 * Dx transitions and states */ 5702 - ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data); 5703 - if(ret_val) 5704 - return ret_val; 5705 - 5706 - if(!active) { 5707 - phy_data &= ~IGP01E1000_GMII_FLEX_SPD; 5708 - ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data); 5709 if(ret_val) 5710 return ret_val; 5711 5712 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during 5713 * Dx states where the power conservation is most important. During ··· 5755 (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL ) || 5756 (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) { 5757 5758 - phy_data |= IGP01E1000_GMII_FLEX_SPD; 5759 - ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data); 5760 if(ret_val) 5761 return ret_val; 5762 5763 /* When LPLU is enabled we should disable SmartSpeed */ 5764 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data); ··· 5929 return ret_val; 5930 5931 return E1000_SUCCESS; 5932 } 5933 5934 static int32_t ··· 6347 } 6348 return E1000_SUCCESS; 6349 } 6350
··· 1 /******************************************************************************* 2 3 4 + Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms of the GNU General Public License as published by the Free ··· 63 static int32_t e1000_acquire_eeprom(struct e1000_hw *hw); 64 static void e1000_release_eeprom(struct e1000_hw *hw); 65 static void e1000_standby_eeprom(struct e1000_hw *hw); 66 static int32_t e1000_set_vco_speed(struct e1000_hw *hw); 67 static int32_t e1000_polarity_reversal_workaround(struct e1000_hw *hw); 68 static int32_t e1000_set_phy_mode(struct e1000_hw *hw); 69 + static int32_t e1000_host_if_read_cookie(struct e1000_hw *hw, uint8_t *buffer); 70 + static uint8_t e1000_calculate_mng_checksum(char *buffer, uint32_t length); 71 72 /* IGP cable length table */ 73 static const ··· 80 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 81 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120}; 82 83 + static const 84 + uint16_t e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] = 85 + { 8, 13, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 86 + 22, 24, 27, 30, 32, 35, 37, 40, 42, 44, 47, 49, 51, 54, 56, 58, 87 + 32, 35, 38, 41, 44, 47, 50, 53, 55, 58, 61, 63, 66, 69, 71, 74, 88 + 43, 47, 51, 54, 58, 61, 64, 67, 71, 74, 77, 80, 82, 85, 88, 90, 89 + 57, 62, 66, 70, 74, 77, 81, 85, 88, 91, 94, 97, 100, 103, 106, 108, 90 + 73, 78, 82, 87, 91, 95, 98, 102, 105, 109, 112, 114, 117, 119, 122, 124, 91 + 91, 96, 101, 105, 109, 113, 116, 119, 122, 125, 127, 128, 128, 128, 128, 128, 92 + 108, 113, 117, 121, 124, 127, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}; 93 + 94 95 /****************************************************************************** 96 * Set the phy type member in the hw struct. 
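[Editor's note: an illustrative sketch, not part of the patch. It shows the
clamp-and-lookup shape in which a per-reading length table such as
e1000_igp_2_cable_length_table above is normally consumed; the caller that
derives agc_index from the PHY's AGC registers is assumed, not shown, and
the helper name is hypothetical.]

	static uint16_t
	igp_2_lookup_cable_length(uint16_t agc_index)
	{
		/* Clamp the raw AGC reading to the table, then map it to an
		 * approximate cable length in meters. */
		if (agc_index >= IGP02E1000_AGC_LENGTH_TABLE_SIZE)
			agc_index = IGP02E1000_AGC_LENGTH_TABLE_SIZE - 1;
		return e1000_igp_2_cable_length_table[agc_index];
	}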
··· 91 { 92 DEBUGFUNC("e1000_set_phy_type"); 93 94 + if(hw->mac_type == e1000_undefined) 95 + return -E1000_ERR_PHY_TYPE; 96 + 97 switch(hw->phy_id) { 98 case M88E1000_E_PHY_ID: 99 case M88E1000_I_PHY_ID: 100 case M88E1011_I_PHY_ID: 101 + case M88E1111_I_PHY_ID: 102 hw->phy_type = e1000_phy_m88; 103 break; 104 case IGP01E1000_I_PHY_ID: ··· 127 uint16_t phy_saved_data; 128 129 DEBUGFUNC("e1000_phy_init_script"); 130 131 if(hw->phy_init_script) { 132 msec_delay(20); ··· 271 case E1000_DEV_ID_82546GB_FIBER: 272 case E1000_DEV_ID_82546GB_SERDES: 273 case E1000_DEV_ID_82546GB_PCIE: 274 + case E1000_DEV_ID_82546GB_QUAD_COPPER: 275 hw->mac_type = e1000_82546_rev_3; 276 break; 277 case E1000_DEV_ID_82541EI: ··· 289 case E1000_DEV_ID_82547GI: 290 hw->mac_type = e1000_82547_rev_2; 291 break; 292 + case E1000_DEV_ID_82573E: 293 + case E1000_DEV_ID_82573E_IAMT: 294 + hw->mac_type = e1000_82573; 295 + break; 296 default: 297 /* Should never have loaded on this device */ 298 return -E1000_ERR_MAC_TYPE; 299 } 300 301 switch(hw->mac_type) { 302 + case e1000_82573: 303 + hw->eeprom_semaphore_present = TRUE; 304 + /* fall through */ 305 case e1000_82541: 306 case e1000_82547: 307 case e1000_82541_rev_2: ··· 360 uint32_t icr; 361 uint32_t manc; 362 uint32_t led_ctrl; 363 + uint32_t timeout; 364 + uint32_t extcnf_ctrl; 365 + int32_t ret_val; 366 367 DEBUGFUNC("e1000_reset_hw"); 368 ··· 367 if(hw->mac_type == e1000_82542_rev2_0) { 368 DEBUGOUT("Disabling MWI on 82542 rev 2.0\n"); 369 e1000_pci_clear_mwi(hw); 370 + } 371 + 372 + if(hw->bus_type == e1000_bus_type_pci_express) { 373 + /* Prevent the PCI-E bus from sticking if there is no TLP connection 374 + * on the last TLP read/write transaction when MAC is reset. 375 + */ 376 + if(e1000_disable_pciex_master(hw) != E1000_SUCCESS) { 377 + DEBUGOUT("PCI-E Master disable polling has failed.\n"); 378 + } 379 } 380 381 /* Clear interrupt mask to stop board from generating interrupts */ ··· 393 394 /* Must reset the PHY before resetting the MAC */ 395 if((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { 396 + E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_PHY_RST)); 397 msec_delay(5); 398 + } 399 + 400 + /* Must acquire the MDIO ownership before MAC reset. 401 + * Ownership defaults to firmware after a reset. */ 402 + if(hw->mac_type == e1000_82573) { 403 + timeout = 10; 404 + 405 + extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL); 406 + extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; 407 + 408 + do { 409 + E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl); 410 + extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL); 411 + 412 + if(extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP) 413 + break; 414 + else 415 + extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; 416 + 417 + msec_delay(2); 418 + timeout--; 419 + } while(timeout); 420 } 421 422 /* Issue a global reset to the MAC. This will reset the chip's ··· 450 /* Wait for EEPROM reload */ 451 msec_delay(20); 452 break; 453 + case e1000_82573: 454 + udelay(10); 455 + ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); 456 + ctrl_ext |= E1000_CTRL_EXT_EE_RST; 457 + E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); 458 + E1000_WRITE_FLUSH(hw); 459 + /* fall through */ 460 + ret_val = e1000_get_auto_rd_done(hw); 461 + if(ret_val) 462 + /* We don't want to continue accessing MAC registers. 
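[Editor's note: the 82573 MDIO-ownership handshake above polls EXTCNF_CTRL
until the software-ownership bit sticks, because firmware owns the MDIO
interface after a reset. The same retry pattern, factored into a helper for
clarity; only names already used above appear, but the helper itself is a
hypothetical illustration, not driver code.]

	static boolean_t
	e1000_try_acquire_mdio(struct e1000_hw *hw)
	{
		uint32_t extcnf_ctrl;
		uint32_t timeout = 10;

		do {
			/* Request ownership, then read back: the bit only
			 * sticks once firmware has released the interface. */
			extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL);
			E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl |
			                E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP);
			extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL);
			if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
				return TRUE;
			msec_delay(2);
		} while (--timeout);

		return FALSE;
	}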
*/ 463 + return ret_val; 464 + break; 465 default: 466 /* Wait for EEPROM reload (it happens automatically) */ 467 msec_delay(5); ··· 457 } 458 459 /* Disable HW ARPs on ASF enabled adapters */ 460 + if(hw->mac_type >= e1000_82540 && hw->mac_type <= e1000_82547_rev_2) { 461 manc = E1000_READ_REG(hw, MANC); 462 manc &= ~(E1000_MANC_ARP_EN); 463 E1000_WRITE_REG(hw, MANC, manc); ··· 510 uint16_t pcix_stat_hi_word; 511 uint16_t cmd_mmrbc; 512 uint16_t stat_mmrbc; 513 + uint32_t mta_size; 514 + 515 DEBUGFUNC("e1000_init_hw"); 516 517 /* Initialize Identification LED */ ··· 524 525 /* Disabling VLAN filtering. */ 526 DEBUGOUT("Initializing the IEEE VLAN\n"); 527 + if (hw->mac_type < e1000_82545_rev_3) 528 + E1000_WRITE_REG(hw, VET, 0); 529 e1000_clear_vfta(hw); 530 531 /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */ ··· 553 554 /* Zero out the Multicast HASH table */ 555 DEBUGOUT("Zeroing the MTA\n"); 556 + mta_size = E1000_MC_TBL_SIZE; 557 + for(i = 0; i < mta_size; i++) 558 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); 559 560 /* Set the PCI priority bit correctly in the CTRL register. This 561 * determines if the adapter gives priority to receives, or if it 562 + * gives equal priority to transmits and receives. Valid only on 563 + * 82542 and 82543 silicon. 564 */ 565 + if(hw->dma_fairness && hw->mac_type <= e1000_82543) { 566 ctrl = E1000_READ_REG(hw, CTRL); 567 E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PRIOR); 568 } ··· 598 if(hw->mac_type > e1000_82544) { 599 ctrl = E1000_READ_REG(hw, TXDCTL); 600 ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB; 601 + switch (hw->mac_type) { 602 + default: 603 + break; 604 + case e1000_82573: 605 + ctrl |= E1000_TXDCTL_COUNT_DESC; 606 + break; 607 + } 608 E1000_WRITE_REG(hw, TXDCTL, ctrl); 609 } 610 + 611 + if (hw->mac_type == e1000_82573) { 612 + e1000_enable_tx_pkt_filtering(hw); 613 + } 614 + 615 616 /* Clear all of the statistics registers (clear on read). It is 617 * important that we do this after we have tried to establish link ··· 679 * control setting, then the variable hw->fc will 680 * be initialized based on a value in the EEPROM. 681 */ 682 + if(e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data)) { 683 DEBUGOUT("EEPROM Read Error\n"); 684 return -E1000_ERR_EEPROM; 685 } ··· 736 E1000_WRITE_REG(hw, FCAL, FLOW_CONTROL_ADDRESS_LOW); 737 E1000_WRITE_REG(hw, FCAH, FLOW_CONTROL_ADDRESS_HIGH); 738 E1000_WRITE_REG(hw, FCT, FLOW_CONTROL_TYPE); 739 + 740 E1000_WRITE_REG(hw, FCTTV, hw->fc_pause_time); 741 742 /* Set the flow control receive threshold registers. Normally, ··· 906 } 907 908 /****************************************************************************** 909 + * Make sure we have a valid PHY and change PHY mode before link setup. 
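 * [Editor's note: as the body below shows, the preconfig step forces the
 * 82543's MAC speed/duplex bits to match the PHY, hardware-resets the
 * PHY on newer MACs, then validates the PHY ID ("Make sure we have a
 * valid PHY") before any family-specific setup runs.]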
910 * 911 * hw - Struct containing variables accessed by shared code 912 ******************************************************************************/ 913 static int32_t 914 + e1000_copper_link_preconfig(struct e1000_hw *hw) 915 { 916 uint32_t ctrl; 917 int32_t ret_val; 918 uint16_t phy_data; 919 920 + DEBUGFUNC("e1000_copper_link_preconfig"); 921 922 ctrl = E1000_READ_REG(hw, CTRL); 923 /* With 82543, we need to force speed and duplex on the MAC equal to what ··· 933 } else { 934 ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU); 935 E1000_WRITE_REG(hw, CTRL, ctrl); 936 + ret_val = e1000_phy_hw_reset(hw); 937 + if(ret_val) 938 + return ret_val; 939 } 940 941 /* Make sure we have a valid PHY */ ··· 961 hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) 962 hw->phy_reset_disable = FALSE; 963 964 + return E1000_SUCCESS; 965 + } 966 967 968 + /******************************************************************** 969 + * Copper link setup for e1000_phy_igp series. 970 + * 971 + * hw - Struct containing variables accessed by shared code 972 + *********************************************************************/ 973 + static int32_t 974 + e1000_copper_link_igp_setup(struct e1000_hw *hw) 975 + { 976 + uint32_t led_ctrl; 977 + int32_t ret_val; 978 + uint16_t phy_data; 979 980 + DEBUGFUNC("e1000_copper_link_igp_setup"); 981 982 + if (hw->phy_reset_disable) 983 + return E1000_SUCCESS; 984 + 985 + ret_val = e1000_phy_reset(hw); 986 + if (ret_val) { 987 + DEBUGOUT("Error Resetting the PHY\n"); 988 + return ret_val; 989 + } 990 991 + /* Wait 10ms for MAC to configure PHY from eeprom settings */ 992 + msec_delay(15); 993 + 994 + /* Configure activity LED after PHY reset */ 995 + led_ctrl = E1000_READ_REG(hw, LEDCTL); 996 + led_ctrl &= IGP_ACTIVITY_LED_MASK; 997 + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); 998 + E1000_WRITE_REG(hw, LEDCTL, led_ctrl); 999 + 1000 + /* disable lplu d3 during driver init */ 1001 + ret_val = e1000_set_d3_lplu_state(hw, FALSE); 1002 + if (ret_val) { 1003 + DEBUGOUT("Error Disabling LPLU D3\n"); 1004 + return ret_val; 1005 + } 1006 + 1007 + /* disable lplu d0 during driver init */ 1008 + ret_val = e1000_set_d0_lplu_state(hw, FALSE); 1009 + if (ret_val) { 1010 + DEBUGOUT("Error Disabling LPLU D0\n"); 1011 + return ret_val; 1012 + } 1013 + /* Configure mdi-mdix settings */ 1014 + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); 1015 + if (ret_val) 1016 + return ret_val; 1017 + 1018 + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { 1019 + hw->dsp_config_state = e1000_dsp_config_disabled; 1020 + /* Force MDI for earlier revs of the IGP PHY */ 1021 + phy_data &= ~(IGP01E1000_PSCR_AUTO_MDIX | IGP01E1000_PSCR_FORCE_MDI_MDIX); 1022 + hw->mdix = 1; 1023 + 1024 + } else { 1025 + hw->dsp_config_state = e1000_dsp_config_enabled; 1026 + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; 1027 + 1028 + switch (hw->mdix) { 1029 + case 1: 1030 + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; 1031 + break; 1032 + case 2: 1033 + phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; 1034 + break; 1035 + case 0: 1036 + default: 1037 + phy_data |= IGP01E1000_PSCR_AUTO_MDIX; 1038 + break; 1039 + } 1040 + } 1041 + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); 1042 + if(ret_val) 1043 + return ret_val; 1044 + 1045 + /* set auto-master slave resolution settings */ 1046 + if(hw->autoneg) { 1047 + e1000_ms_type phy_ms_setting = hw->master_slave; 1048 + 1049 + if(hw->ffe_config_state == e1000_ffe_config_active) 
1050 + hw->ffe_config_state = e1000_ffe_config_enabled;
1051 +
1052 + if(hw->dsp_config_state == e1000_dsp_config_activated)
1053 + hw->dsp_config_state = e1000_dsp_config_enabled;
1054 +
1055 + /* when the autonegotiation advertisement is only 1000Mbps then we
1056 + * should disable SmartSpeed and enable Auto MasterSlave
1057 + * resolution as hardware default. */
1058 + if(hw->autoneg_advertised == ADVERTISE_1000_FULL) {
1059 + /* Disable SmartSpeed */
1060 + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
1061 if(ret_val)
1062 return ret_val;
1063 + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1064 + ret_val = e1000_write_phy_reg(hw,
1065 IGP01E1000_PHY_PORT_CONFIG,
1066 phy_data);
1067 if(ret_val)
1068 return ret_val;
1069 + /* Set auto Master/Slave resolution process */
1070 + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
1071 + if(ret_val)
1072 + return ret_val;
1073 + phy_data &= ~CR_1000T_MS_ENABLE;
1074 + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
1075 + if(ret_val)
1076 + return ret_val;
1077 + }
1078
1079 + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
1080 + if(ret_val)
1081 + return ret_val;
1082
1083 + /* load defaults for future use */
1084 + hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ?
1085 + ((phy_data & CR_1000T_MS_VALUE) ?
1086 + e1000_ms_force_master :
1087 + e1000_ms_force_slave) :
1088 + e1000_ms_auto;
1089 +
1090 + switch (phy_ms_setting) {
1091 + case e1000_ms_force_master:
1092 + phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
1093 + break;
1094 + case e1000_ms_force_slave:
1095 + phy_data |= CR_1000T_MS_ENABLE;
1096 + phy_data &= ~(CR_1000T_MS_VALUE);
1097 + break;
1098 + case e1000_ms_auto:
1099 + phy_data &= ~CR_1000T_MS_ENABLE;
1100 default:
1101 + break;
1102 + }
1103 + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
1104 + if(ret_val)
1105 + return ret_val;
1106 + }
1107
1108 + return E1000_SUCCESS;
1109 + }
1110
1111
1112 + /********************************************************************
1113 + * Copper link setup for e1000_phy_m88 series.
1114 + *
1115 + * hw - Struct containing variables accessed by shared code
1116 + *********************************************************************/
1117 + static int32_t
1118 + e1000_copper_link_mgp_setup(struct e1000_hw *hw)
1119 + {
1120 + int32_t ret_val;
1121 + uint16_t phy_data;
1122
1123 + DEBUGFUNC("e1000_copper_link_mgp_setup");
1124 +
1125 + if(hw->phy_reset_disable)
1126 + return E1000_SUCCESS;
1127 +
1128 + /* Enable CRS on TX. This must be set for half-duplex operation.
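[Editor's note: the nested conditional that seeds hw->original_master_slave
above decodes two bits of PHY_1000T_CTRL; written out as a table:

	CR_1000T_MS_ENABLE   CR_1000T_MS_VALUE   resolved setting
	        0                    x           e1000_ms_auto
	        1                    1           e1000_ms_force_master
	        1                    0           e1000_ms_force_slave

The switch statement that follows it performs the inverse encoding before
the value is written back.]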
*/ 1129 + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 1130 + if(ret_val) 1131 + return ret_val; 1132 + 1133 + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; 1134 + 1135 + /* Options: 1136 + * MDI/MDI-X = 0 (default) 1137 + * 0 - Auto for all speeds 1138 + * 1 - MDI mode 1139 + * 2 - MDI-X mode 1140 + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) 1141 + */ 1142 + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; 1143 + 1144 + switch (hw->mdix) { 1145 + case 1: 1146 + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; 1147 + break; 1148 + case 2: 1149 + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; 1150 + break; 1151 + case 3: 1152 + phy_data |= M88E1000_PSCR_AUTO_X_1000T; 1153 + break; 1154 + case 0: 1155 + default: 1156 + phy_data |= M88E1000_PSCR_AUTO_X_MODE; 1157 + break; 1158 + } 1159 + 1160 + /* Options: 1161 + * disable_polarity_correction = 0 (default) 1162 + * Automatic Correction for Reversed Cable Polarity 1163 + * 0 - Disabled 1164 + * 1 - Enabled 1165 + */ 1166 + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; 1167 + if(hw->disable_polarity_correction == 1) 1168 + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; 1169 + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); 1170 + if(ret_val) 1171 + return ret_val; 1172 + 1173 + /* Force TX_CLK in the Extended PHY Specific Control Register 1174 + * to 25MHz clock. 1175 + */ 1176 + ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); 1177 + if(ret_val) 1178 + return ret_val; 1179 + 1180 + phy_data |= M88E1000_EPSCR_TX_CLK_25; 1181 + 1182 + if (hw->phy_revision < M88E1011_I_REV_4) { 1183 + /* Configure Master and Slave downshift values */ 1184 + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | 1185 M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); 1186 + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | 1187 M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); 1188 + ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); 1189 + if(ret_val) 1190 + return ret_val; 1191 + } 1192 1193 + /* SW Reset the PHY so all changes take effect */ 1194 + ret_val = e1000_phy_reset(hw); 1195 + if(ret_val) { 1196 + DEBUGOUT("Error Resetting the PHY\n"); 1197 + return ret_val; 1198 + } 1199 + 1200 + return E1000_SUCCESS; 1201 + } 1202 + 1203 + /******************************************************************** 1204 + * Setup auto-negotiation and flow control advertisements, 1205 + * and then perform auto-negotiation. 1206 + * 1207 + * hw - Struct containing variables accessed by shared code 1208 + *********************************************************************/ 1209 + static int32_t 1210 + e1000_copper_link_autoneg(struct e1000_hw *hw) 1211 + { 1212 + int32_t ret_val; 1213 + uint16_t phy_data; 1214 + 1215 + DEBUGFUNC("e1000_copper_link_autoneg"); 1216 + 1217 + /* Perform some bounds checking on the hw->autoneg_advertised 1218 + * parameter. If this variable is zero, then set it to the default. 1219 + */ 1220 + hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT; 1221 + 1222 + /* If autoneg_advertised is zero, we assume it was not defaulted 1223 + * by the calling code so we set to advertise full capability. 
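 * [Editorial worked example, assuming the usual e1000 encoding in which
 * AUTONEG_ADVERTISE_SPEED_DEFAULT is 0x002F (10 and 100 half/full plus
 * 1000 full): a caller that asked for 100 full only (0x0008) passes the
 * mask unchanged, while an uninitialized value of 0 falls back to the
 * full 0x002F set in the assignment below.]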
1224 + */ 1225 + if(hw->autoneg_advertised == 0) 1226 + hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; 1227 + 1228 + DEBUGOUT("Reconfiguring auto-neg advertisement params\n"); 1229 + ret_val = e1000_phy_setup_autoneg(hw); 1230 + if(ret_val) { 1231 + DEBUGOUT("Error Setting up Auto-Negotiation\n"); 1232 + return ret_val; 1233 + } 1234 + DEBUGOUT("Restarting Auto-Neg\n"); 1235 + 1236 + /* Restart auto-negotiation by setting the Auto Neg Enable bit and 1237 + * the Auto Neg Restart bit in the PHY control register. 1238 + */ 1239 + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); 1240 + if(ret_val) 1241 + return ret_val; 1242 + 1243 + phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); 1244 + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); 1245 + if(ret_val) 1246 + return ret_val; 1247 + 1248 + /* Does the user want to wait for Auto-Neg to complete here, or 1249 + * check at a later time (for example, callback routine). 1250 + */ 1251 + if(hw->wait_autoneg_complete) { 1252 + ret_val = e1000_wait_autoneg(hw); 1253 + if(ret_val) { 1254 + DEBUGOUT("Error while waiting for autoneg to complete\n"); 1255 + return ret_val; 1256 } 1257 + } 1258 1259 + hw->get_link_status = TRUE; 1260 1261 + return E1000_SUCCESS; 1262 + } 1263 1264 1265 + /****************************************************************************** 1266 + * Config the MAC and the PHY after link is up. 1267 + * 1) Set up the MAC to the current PHY speed/duplex 1268 + * if we are on 82543. If we 1269 + * are on newer silicon, we only need to configure 1270 + * collision distance in the Transmit Control Register. 1271 + * 2) Set up flow control on the MAC to that established with 1272 + * the link partner. 1273 + * 3) Config DSP to improve Gigabit link quality for some PHY revisions. 1274 + * 1275 + * hw - Struct containing variables accessed by shared code 1276 + ******************************************************************************/ 1277 + static int32_t 1278 + e1000_copper_link_postconfig(struct e1000_hw *hw) 1279 + { 1280 + int32_t ret_val; 1281 + DEBUGFUNC("e1000_copper_link_postconfig"); 1282 + 1283 + if(hw->mac_type >= e1000_82544) { 1284 + e1000_config_collision_dist(hw); 1285 + } else { 1286 + ret_val = e1000_config_mac_to_phy(hw); 1287 + if(ret_val) { 1288 + DEBUGOUT("Error configuring MAC to PHY settings\n"); 1289 + return ret_val; 1290 } 1291 + } 1292 + ret_val = e1000_config_fc_after_link_up(hw); 1293 + if(ret_val) { 1294 + DEBUGOUT("Error Configuring Flow Control\n"); 1295 + return ret_val; 1296 + } 1297 + 1298 + /* Config DSP to improve Giga link quality */ 1299 + if(hw->phy_type == e1000_phy_igp) { 1300 + ret_val = e1000_config_dsp_after_link_change(hw, TRUE); 1301 + if(ret_val) { 1302 + DEBUGOUT("Error Configuring DSP after link up\n"); 1303 + return ret_val; 1304 + } 1305 + } 1306 + 1307 + return E1000_SUCCESS; 1308 + } 1309 + 1310 + /****************************************************************************** 1311 + * Detects which PHY is present and setup the speed and duplex 1312 + * 1313 + * hw - Struct containing variables accessed by shared code 1314 + ******************************************************************************/ 1315 + static int32_t 1316 + e1000_setup_copper_link(struct e1000_hw *hw) 1317 + { 1318 + int32_t ret_val; 1319 + uint16_t i; 1320 + uint16_t phy_data; 1321 + 1322 + DEBUGFUNC("e1000_setup_copper_link"); 1323 + 1324 + /* Check if it is a valid PHY and set PHY mode if necessary. 
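 * [Editor's note: the flow from here down is preconfig, then PHY-family
 * specific setup (IGP or M88), then either autonegotiation or forced
 * speed/duplex, then a bounded poll for link; the MAC speed/duplex and
 * flow-control fixups run in e1000_copper_link_postconfig() once link
 * is seen.]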
*/ 1325 + ret_val = e1000_copper_link_preconfig(hw); 1326 + if(ret_val) 1327 + return ret_val; 1328 + 1329 + if (hw->phy_type == e1000_phy_igp || 1330 + hw->phy_type == e1000_phy_igp_2) { 1331 + ret_val = e1000_copper_link_igp_setup(hw); 1332 + if(ret_val) 1333 + return ret_val; 1334 + } else if (hw->phy_type == e1000_phy_m88) { 1335 + ret_val = e1000_copper_link_mgp_setup(hw); 1336 + if(ret_val) 1337 + return ret_val; 1338 + } 1339 + 1340 + if(hw->autoneg) { 1341 + /* Setup autoneg and flow control advertisement 1342 + * and perform autonegotiation */ 1343 + ret_val = e1000_copper_link_autoneg(hw); 1344 + if(ret_val) 1345 + return ret_val; 1346 + } else { 1347 + /* PHY will be set to 10H, 10F, 100H,or 100F 1348 + * depending on value from forced_speed_duplex. */ 1349 + DEBUGOUT("Forcing speed and duplex\n"); 1350 + ret_val = e1000_phy_force_speed_duplex(hw); 1351 + if(ret_val) { 1352 + DEBUGOUT("Error Forcing Speed and Duplex\n"); 1353 + return ret_val; 1354 + } 1355 + } 1356 1357 /* Check link status. Wait up to 100 microseconds for link to become 1358 * valid. ··· 1242 return ret_val; 1243 1244 if(phy_data & MII_SR_LINK_STATUS) { 1245 + /* Config the MAC and PHY after link is up */ 1246 + ret_val = e1000_copper_link_postconfig(hw); 1247 + if(ret_val) 1248 return ret_val; 1249 + 1250 DEBUGOUT("Valid link established!!!\n"); 1251 return E1000_SUCCESS; 1252 } ··· 1302 if(ret_val) 1303 return ret_val; 1304 1305 + /* Read the MII 1000Base-T Control Register (Address 9). */ 1306 + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); 1307 + if(ret_val) 1308 + return ret_val; 1309 1310 /* Need to parse both autoneg_advertised and fc and set up 1311 * the appropriate PHY registers. First we will parse for ··· 1417 1418 DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); 1419 1420 + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); 1421 if(ret_val) 1422 return ret_val; 1423 ··· 1678 1679 DEBUGFUNC("e1000_config_mac_to_phy"); 1680 1681 + /* 82544 or newer MAC, Auto Speed Detection takes care of 1682 + * MAC speed/duplex configuration.*/ 1683 + if (hw->mac_type >= e1000_82544) 1684 + return E1000_SUCCESS; 1685 + 1686 /* Read the Device Control Register and set the bits to Force Speed 1687 * and Duplex. 1688 */ ··· 1688 /* Set up duplex in the Device Control and Transmit Control 1689 * registers depending on negotiated values. 1690 */ 1691 + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); 1692 + if(ret_val) 1693 + return ret_val; 1694 1695 + if(phy_data & M88E1000_PSSR_DPLX) 1696 + ctrl |= E1000_CTRL_FD; 1697 + else 1698 + ctrl &= ~E1000_CTRL_FD; 1699 1700 + e1000_config_collision_dist(hw); 1701 1702 + /* Set up speed in the Device Control register depending on 1703 + * negotiated values. 1704 + */ 1705 + if((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) 1706 + ctrl |= E1000_CTRL_SPD_1000; 1707 + else if((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS) 1708 + ctrl |= E1000_CTRL_SPD_100; 1709 1710 /* Write the configured values back to the Device Control Reg. 
*/ 1711 E1000_WRITE_REG(hw, CTRL, ctrl); 1712 return E1000_SUCCESS; ··· 2494 2495 DEBUGFUNC("e1000_read_phy_reg"); 2496 2497 + if((hw->phy_type == e1000_phy_igp || 2498 + hw->phy_type == e1000_phy_igp_2) && 2499 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { 2500 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, 2501 (uint16_t)reg_addr); ··· 2600 2601 DEBUGFUNC("e1000_write_phy_reg"); 2602 2603 + if((hw->phy_type == e1000_phy_igp || 2604 + hw->phy_type == e1000_phy_igp_2) && 2605 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { 2606 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, 2607 (uint16_t)reg_addr); ··· 2679 return E1000_SUCCESS; 2680 } 2681 2682 + 2683 /****************************************************************************** 2684 * Returns the PHY to the power-on reset state 2685 * 2686 * hw - Struct containing variables accessed by shared code 2687 ******************************************************************************/ 2688 + int32_t 2689 e1000_phy_hw_reset(struct e1000_hw *hw) 2690 { 2691 uint32_t ctrl, ctrl_ext; 2692 uint32_t led_ctrl; 2693 + int32_t ret_val; 2694 2695 DEBUGFUNC("e1000_phy_hw_reset"); 2696 + 2697 + /* In the case of the phy reset being blocked, it's not an error, we 2698 + * simply return success without performing the reset. */ 2699 + ret_val = e1000_check_phy_reset_block(hw); 2700 + if (ret_val) 2701 + return E1000_SUCCESS; 2702 2703 DEBUGOUT("Resetting Phy...\n"); 2704 ··· 2727 led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); 2728 E1000_WRITE_REG(hw, LEDCTL, led_ctrl); 2729 } 2730 + 2731 + /* Wait for FW to finish PHY configuration. */ 2732 + ret_val = e1000_get_phy_cfg_done(hw); 2733 + 2734 + return ret_val; 2735 } 2736 2737 /****************************************************************************** ··· 2744 2745 DEBUGFUNC("e1000_phy_reset"); 2746 2747 + /* In the case of the phy reset being blocked, it's not an error, we 2748 + * simply return success without performing the reset. */ 2749 + ret_val = e1000_check_phy_reset_block(hw); 2750 + if (ret_val) 2751 + return E1000_SUCCESS; 2752 + 2753 + switch (hw->mac_type) { 2754 + case e1000_82541_rev_2: 2755 + ret_val = e1000_phy_hw_reset(hw); 2756 + if(ret_val) 2757 + return ret_val; 2758 + break; 2759 + default: 2760 ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); 2761 if(ret_val) 2762 return ret_val; ··· 2755 return ret_val; 2756 2757 udelay(1); 2758 + break; 2759 + } 2760 2761 + if(hw->phy_type == e1000_phy_igp || hw->phy_type == e1000_phy_igp_2) 2762 e1000_phy_init_script(hw); 2763 2764 return E1000_SUCCESS; ··· 2810 case e1000_82547: 2811 case e1000_82547_rev_2: 2812 if(hw->phy_id == IGP01E1000_I_PHY_ID) match = TRUE; 2813 + break; 2814 + case e1000_82573: 2815 + if(hw->phy_id == M88E1111_I_PHY_ID) match = TRUE; 2816 break; 2817 default: 2818 DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type); ··· 2866 2867 /* The downshift status is checked only once, after link is established, 2868 * and it stored in the hw->speed_downgraded parameter. */ 2869 + phy_info->downshift = (e1000_downshift)hw->speed_downgraded; 2870 2871 /* IGP01E1000 does not need to support it. */ 2872 phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal; ··· 2905 if(ret_val) 2906 return ret_val; 2907 2908 + /* Translate to old method */ 2909 average = (max_length + min_length) / 2; 2910 2911 if(average <= e1000_igp_cable_length_50) ··· 2940 2941 /* The downshift status is checked only once, after link is established, 2942 * and it stored in the hw->speed_downgraded parameter. 
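[Editor's note: a minimal sketch of the IGP page-select access pattern used
by e1000_read_phy_reg()/e1000_write_phy_reg() above. Illustrative only; it
assumes the low-level e1000_read_phy_reg_ex() accessor those routines
delegate to, and the MAX_PHY_REG_ADDRESS in-page mask from the same header.]

	static int32_t
	igp_read_paged_reg(struct e1000_hw *hw, uint32_t reg_addr,
	                   uint16_t *data)
	{
		int32_t ret_val;

		if (reg_addr > MAX_PHY_MULTI_PAGE_REG) {
			/* Latch the full address into the page-select
			 * register; the following access decodes the page. */
			ret_val = e1000_write_phy_reg_ex(hw,
			                 IGP01E1000_PHY_PAGE_SELECT,
			                 (uint16_t)reg_addr);
			if (ret_val)
				return ret_val;
		}
		return e1000_read_phy_reg_ex(hw,
		                 MAX_PHY_REG_ADDRESS & reg_addr, data);
	}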
*/ 2943 + phy_info->downshift = (e1000_downshift)hw->speed_downgraded; 2944 2945 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 2946 if(ret_val) ··· 3029 return -E1000_ERR_CONFIG; 3030 } 3031 3032 + if(hw->phy_type == e1000_phy_igp || 3033 + hw->phy_type == e1000_phy_igp_2) 3034 return e1000_phy_igp_get_info(hw, phy_info); 3035 else 3036 return e1000_phy_m88_get_info(hw, phy_info); ··· 3055 * 3056 * hw - Struct containing variables accessed by shared code 3057 *****************************************************************************/ 3058 + int32_t 3059 e1000_init_eeprom_params(struct e1000_hw *hw) 3060 { 3061 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3062 uint32_t eecd = E1000_READ_REG(hw, EECD); 3063 + int32_t ret_val = E1000_SUCCESS; 3064 uint16_t eeprom_size; 3065 3066 DEBUGFUNC("e1000_init_eeprom_params"); ··· 3074 eeprom->opcode_bits = 3; 3075 eeprom->address_bits = 6; 3076 eeprom->delay_usec = 50; 3077 + eeprom->use_eerd = FALSE; 3078 + eeprom->use_eewr = FALSE; 3079 break; 3080 case e1000_82540: 3081 case e1000_82545: ··· 3090 eeprom->word_size = 64; 3091 eeprom->address_bits = 6; 3092 } 3093 + eeprom->use_eerd = FALSE; 3094 + eeprom->use_eewr = FALSE; 3095 break; 3096 case e1000_82541: 3097 case e1000_82541_rev_2: ··· 3118 eeprom->address_bits = 6; 3119 } 3120 } 3121 + eeprom->use_eerd = FALSE; 3122 + eeprom->use_eewr = FALSE; 3123 + break; 3124 + case e1000_82573: 3125 + eeprom->type = e1000_eeprom_spi; 3126 + eeprom->opcode_bits = 8; 3127 + eeprom->delay_usec = 1; 3128 + if (eecd & E1000_EECD_ADDR_BITS) { 3129 + eeprom->page_size = 32; 3130 + eeprom->address_bits = 16; 3131 + } else { 3132 + eeprom->page_size = 8; 3133 + eeprom->address_bits = 8; 3134 + } 3135 + eeprom->use_eerd = TRUE; 3136 + eeprom->use_eewr = TRUE; 3137 + if(e1000_is_onboard_nvm_eeprom(hw) == FALSE) { 3138 + eeprom->type = e1000_eeprom_flash; 3139 + eeprom->word_size = 2048; 3140 + 3141 + /* Ensure that the Autonomous FLASH update bit is cleared due to 3142 + * Flash update issue on parts which use a FLASH for NVM. */ 3143 + eecd &= ~E1000_EECD_AUPDEN; 3144 + E1000_WRITE_REG(hw, EECD, eecd); 3145 + } 3146 break; 3147 default: 3148 break; 3149 } 3150 3151 if (eeprom->type == e1000_eeprom_spi) { 3152 + /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to 3153 + * 32KB (incremented by powers of 2). 3154 + */ 3155 + if(hw->mac_type <= e1000_82547_rev_2) { 3156 + /* Set to default value for initial eeprom read. */ 3157 + eeprom->word_size = 64; 3158 + ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size); 3159 + if(ret_val) 3160 + return ret_val; 3161 + eeprom_size = (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT; 3162 + /* 256B eeprom size was not supported in earlier hardware, so we 3163 + * bump eeprom_size up one to ensure that "1" (which maps to 256B) 3164 + * is never the result used in the shifting logic below. 
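 * [Editorial note: assuming EEPROM_WORD_SIZE_SHIFT is 6, the shifting
 * logic works out to word_size = 1 << (eeprom_size + 6) sixteen-bit
 * words, i.e. enum 0 -> 64 words (128B), 1 -> 128 words (256B), up to
 * enum 8 -> 16K words (32KB); the increment just below keeps the
 * unsupported 256B encoding (enum 1) from ever reaching that shift.]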
*/ 3165 + if(eeprom_size) 3166 + eeprom_size++; 3167 + } else { 3168 + eeprom_size = (uint16_t)((eecd & E1000_EECD_SIZE_EX_MASK) >> 3169 + E1000_EECD_SIZE_EX_SHIFT); 3170 } 3171 + 3172 + eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT); 3173 } 3174 + return ret_val; 3175 } 3176 3177 /****************************************************************************** ··· 3306 3307 DEBUGFUNC("e1000_acquire_eeprom"); 3308 3309 + if(e1000_get_hw_eeprom_semaphore(hw)) 3310 + return -E1000_ERR_EEPROM; 3311 + 3312 eecd = E1000_READ_REG(hw, EECD); 3313 3314 + if (hw->mac_type != e1000_82573) { 3315 /* Request EEPROM Access */ 3316 if(hw->mac_type > e1000_82544) { 3317 eecd |= E1000_EECD_REQ; ··· 3325 DEBUGOUT("Could not acquire EEPROM grant\n"); 3326 return -E1000_ERR_EEPROM; 3327 } 3328 + } 3329 } 3330 3331 /* Setup EEPROM for Read/Write */ ··· 3443 eecd &= ~E1000_EECD_REQ; 3444 E1000_WRITE_REG(hw, EECD, eecd); 3445 } 3446 + 3447 + e1000_put_hw_eeprom_semaphore(hw); 3448 } 3449 3450 /****************************************************************************** ··· 3504 { 3505 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3506 uint32_t i = 0; 3507 + int32_t ret_val; 3508 3509 DEBUGFUNC("e1000_read_eeprom"); 3510 + 3511 /* A check for invalid values: offset too large, too many words, and not 3512 * enough words. 3513 */ ··· 3515 return -E1000_ERR_EEPROM; 3516 } 3517 3518 + /* FLASH reads without acquiring the semaphore are safe in 82573-based 3519 + * controllers. 3520 + */ 3521 + if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) || 3522 + (hw->mac_type != e1000_82573)) { 3523 + /* Prepare the EEPROM for reading */ 3524 + if(e1000_acquire_eeprom(hw) != E1000_SUCCESS) 3525 + return -E1000_ERR_EEPROM; 3526 + } 3527 + 3528 + if(eeprom->use_eerd == TRUE) { 3529 + ret_val = e1000_read_eeprom_eerd(hw, offset, words, data); 3530 + if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) || 3531 + (hw->mac_type != e1000_82573)) 3532 + e1000_release_eeprom(hw); 3533 + return ret_val; 3534 + } 3535 3536 if(eeprom->type == e1000_eeprom_spi) { 3537 uint16_t word_in; ··· 3569 } 3570 3571 /****************************************************************************** 3572 + * Reads a 16 bit word from the EEPROM using the EERD register. 3573 + * 3574 + * hw - Struct containing variables accessed by shared code 3575 + * offset - offset of word in the EEPROM to read 3576 + * data - word read from the EEPROM 3577 + * words - number of words to read 3578 + *****************************************************************************/ 3579 + int32_t 3580 + e1000_read_eeprom_eerd(struct e1000_hw *hw, 3581 + uint16_t offset, 3582 + uint16_t words, 3583 + uint16_t *data) 3584 + { 3585 + uint32_t i, eerd = 0; 3586 + int32_t error = 0; 3587 + 3588 + for (i = 0; i < words; i++) { 3589 + eerd = ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) + 3590 + E1000_EEPROM_RW_REG_START; 3591 + 3592 + E1000_WRITE_REG(hw, EERD, eerd); 3593 + error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_READ); 3594 + 3595 + if(error) { 3596 + break; 3597 + } 3598 + data[i] = (E1000_READ_REG(hw, EERD) >> E1000_EEPROM_RW_REG_DATA); 3599 + 3600 + } 3601 + 3602 + return error; 3603 + } 3604 + 3605 + /****************************************************************************** 3606 + * Writes a 16 bit word from the EEPROM using the EEWR register. 
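 * [Editorial note: despite the "from the EEPROM" wording above, this
 * routine writes words to the EEPROM through the EEWR register; "data"
 * supplies the words to be written and "words" the count to write, as
 * the loop body below shows.]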
3607 + * 3608 + * hw - Struct containing variables accessed by shared code 3609 + * offset - offset of word in the EEPROM to read 3610 + * data - word read from the EEPROM 3611 + * words - number of words to read 3612 + *****************************************************************************/ 3613 + int32_t 3614 + e1000_write_eeprom_eewr(struct e1000_hw *hw, 3615 + uint16_t offset, 3616 + uint16_t words, 3617 + uint16_t *data) 3618 + { 3619 + uint32_t register_value = 0; 3620 + uint32_t i = 0; 3621 + int32_t error = 0; 3622 + 3623 + for (i = 0; i < words; i++) { 3624 + register_value = (data[i] << E1000_EEPROM_RW_REG_DATA) | 3625 + ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) | 3626 + E1000_EEPROM_RW_REG_START; 3627 + 3628 + error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE); 3629 + if(error) { 3630 + break; 3631 + } 3632 + 3633 + E1000_WRITE_REG(hw, EEWR, register_value); 3634 + 3635 + error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE); 3636 + 3637 + if(error) { 3638 + break; 3639 + } 3640 + } 3641 + 3642 + return error; 3643 + } 3644 + 3645 + /****************************************************************************** 3646 + * Polls the status bit (bit 1) of the EERD to determine when the read is done. 3647 + * 3648 + * hw - Struct containing variables accessed by shared code 3649 + *****************************************************************************/ 3650 + int32_t 3651 + e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd) 3652 + { 3653 + uint32_t attempts = 100000; 3654 + uint32_t i, reg = 0; 3655 + int32_t done = E1000_ERR_EEPROM; 3656 + 3657 + for(i = 0; i < attempts; i++) { 3658 + if(eerd == E1000_EEPROM_POLL_READ) 3659 + reg = E1000_READ_REG(hw, EERD); 3660 + else 3661 + reg = E1000_READ_REG(hw, EEWR); 3662 + 3663 + if(reg & E1000_EEPROM_RW_REG_DONE) { 3664 + done = E1000_SUCCESS; 3665 + break; 3666 + } 3667 + udelay(5); 3668 + } 3669 + 3670 + return done; 3671 + } 3672 + 3673 + /*************************************************************************** 3674 + * Description: Determines if the onboard NVM is FLASH or EEPROM. 3675 + * 3676 + * hw - Struct containing variables accessed by shared code 3677 + ****************************************************************************/ 3678 + boolean_t 3679 + e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw) 3680 + { 3681 + uint32_t eecd = 0; 3682 + 3683 + if(hw->mac_type == e1000_82573) { 3684 + eecd = E1000_READ_REG(hw, EECD); 3685 + 3686 + /* Isolate bits 15 & 16 */ 3687 + eecd = ((eecd >> 15) & 0x03); 3688 + 3689 + /* If both bits are set, device is Flash type */ 3690 + if(eecd == 0x03) { 3691 + return FALSE; 3692 + } 3693 + } 3694 + return TRUE; 3695 + } 3696 + 3697 + /****************************************************************************** 3698 * Verifies that the EEPROM has a valid checksum 3699 * 3700 * hw - Struct containing variables accessed by shared code ··· 3584 uint16_t i, eeprom_data; 3585 3586 DEBUGFUNC("e1000_validate_eeprom_checksum"); 3587 + 3588 + if ((hw->mac_type == e1000_82573) && 3589 + (e1000_is_onboard_nvm_eeprom(hw) == FALSE)) { 3590 + /* Check bit 4 of word 10h. If it is 0, firmware is done updating 3591 + * 10h-12h. Checksum may need to be fixed. */ 3592 + e1000_read_eeprom(hw, 0x10, 1, &eeprom_data); 3593 + if ((eeprom_data & 0x10) == 0) { 3594 + /* Read 0x23 and check bit 15. This bit is a 1 when the checksum 3595 + * has already been fixed. If the checksum is still wrong and this 3596 + * bit is a 1, we need to return bad checksum. 
Otherwise, we need 3597 + * to set this bit to a 1 and update the checksum. */ 3598 + e1000_read_eeprom(hw, 0x23, 1, &eeprom_data); 3599 + if ((eeprom_data & 0x8000) == 0) { 3600 + eeprom_data |= 0x8000; 3601 + e1000_write_eeprom(hw, 0x23, 1, &eeprom_data); 3602 + e1000_update_eeprom_checksum(hw); 3603 + } 3604 + } 3605 + } 3606 3607 for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { 3608 if(e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { ··· 3628 if(e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) { 3629 DEBUGOUT("EEPROM Write Error\n"); 3630 return -E1000_ERR_EEPROM; 3631 + } else if (hw->eeprom.type == e1000_eeprom_flash) { 3632 + e1000_commit_shadow_ram(hw); 3633 } 3634 return E1000_SUCCESS; 3635 } ··· 3662 DEBUGOUT("\"words\" parameter out of bounds\n"); 3663 return -E1000_ERR_EEPROM; 3664 } 3665 + 3666 + /* 82573 reads only through eerd */ 3667 + if(eeprom->use_eewr == TRUE) 3668 + return e1000_write_eeprom_eewr(hw, offset, words, data); 3669 3670 /* Prepare the EEPROM for writing */ 3671 if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) ··· 3833 } 3834 3835 /****************************************************************************** 3836 + * Flushes the cached eeprom to NVM. This is done by saving the modified values 3837 + * in the eeprom cache and the non modified values in the currently active bank 3838 + * to the new bank. 3839 + * 3840 + * hw - Struct containing variables accessed by shared code 3841 + * offset - offset of word in the EEPROM to read 3842 + * data - word read from the EEPROM 3843 + * words - number of words to read 3844 + *****************************************************************************/ 3845 + int32_t 3846 + e1000_commit_shadow_ram(struct e1000_hw *hw) 3847 + { 3848 + uint32_t attempts = 100000; 3849 + uint32_t eecd = 0; 3850 + uint32_t flop = 0; 3851 + uint32_t i = 0; 3852 + int32_t error = E1000_SUCCESS; 3853 + 3854 + /* The flop register will be used to determine if flash type is STM */ 3855 + flop = E1000_READ_REG(hw, FLOP); 3856 + 3857 + if (hw->mac_type == e1000_82573) { 3858 + for (i=0; i < attempts; i++) { 3859 + eecd = E1000_READ_REG(hw, EECD); 3860 + if ((eecd & E1000_EECD_FLUPD) == 0) { 3861 + break; 3862 + } 3863 + udelay(5); 3864 + } 3865 + 3866 + if (i == attempts) { 3867 + return -E1000_ERR_EEPROM; 3868 + } 3869 + 3870 + /* If STM opcode located in bits 15:8 of flop, reset firmware */ 3871 + if ((flop & 0xFF00) == E1000_STM_OPCODE) { 3872 + E1000_WRITE_REG(hw, HICR, E1000_HICR_FW_RESET); 3873 + } 3874 + 3875 + /* Perform the flash update */ 3876 + E1000_WRITE_REG(hw, EECD, eecd | E1000_EECD_FLUPD); 3877 + 3878 + for (i=0; i < attempts; i++) { 3879 + eecd = E1000_READ_REG(hw, EECD); 3880 + if ((eecd & E1000_EECD_FLUPD) == 0) { 3881 + break; 3882 + } 3883 + udelay(5); 3884 + } 3885 + 3886 + if (i == attempts) { 3887 + return -E1000_ERR_EEPROM; 3888 + } 3889 + } 3890 + 3891 + return error; 3892 + } 3893 + 3894 + /****************************************************************************** 3895 * Reads the adapter's part number from the EEPROM 3896 * 3897 * hw - Struct containing variables accessed by shared code ··· 3911 e1000_init_rx_addrs(struct e1000_hw *hw) 3912 { 3913 uint32_t i; 3914 + uint32_t rar_num; 3915 3916 DEBUGFUNC("e1000_init_rx_addrs"); 3917 ··· 3919 3920 e1000_rar_set(hw, hw->mac_addr, 0); 3921 3922 + rar_num = E1000_RAR_ENTRIES; 3923 /* Zero out the other 15 receive addresses. 
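 * (RAR0 was just programmed with the station address by e1000_rar_set()
 * above; clearing entries 1 through rar_num - 1 keeps stale unicast
 * filters from earlier runs from matching.)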
*/ 3924 DEBUGOUT("Clearing RAR[1-15]\n"); 3925 + for(i = 1; i < rar_num; i++) { 3926 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); 3927 E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); 3928 } ··· 3950 { 3951 uint32_t hash_value; 3952 uint32_t i; 3953 + uint32_t num_rar_entry; 3954 + uint32_t num_mta_entry; 3955 + 3956 DEBUGFUNC("e1000_mc_addr_list_update"); 3957 3958 /* Set the new number of MC addresses that we are being requested to use. */ ··· 3958 3959 /* Clear RAR[1-15] */ 3960 DEBUGOUT(" Clearing RAR[1-15]\n"); 3961 + num_rar_entry = E1000_RAR_ENTRIES; 3962 + for(i = rar_used_count; i < num_rar_entry; i++) { 3963 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); 3964 E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); 3965 } 3966 3967 /* Clear the MTA */ 3968 DEBUGOUT(" Clearing MTA\n"); 3969 + num_mta_entry = E1000_NUM_MTA_REGISTERS; 3970 + for(i = 0; i < num_mta_entry; i++) { 3971 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); 3972 } 3973 ··· 3989 /* Place this multicast address in the RAR if there is room, * 3990 * else put it in the MTA 3991 */ 3992 + if (rar_used_count < num_rar_entry) { 3993 e1000_rar_set(hw, 3994 mc_addr_list + (i * (ETH_LENGTH_OF_ADDRESS + pad)), 3995 rar_used_count); ··· 4040 } 4041 4042 hash_value &= 0xFFF; 4043 + 4044 return hash_value; 4045 } 4046 ··· 4144 e1000_clear_vfta(struct e1000_hw *hw) 4145 { 4146 uint32_t offset; 4147 + uint32_t vfta_value = 0; 4148 + uint32_t vfta_offset = 0; 4149 + uint32_t vfta_bit_in_reg = 0; 4150 4151 + if (hw->mac_type == e1000_82573) { 4152 + if (hw->mng_cookie.vlan_id != 0) { 4153 + /* The VFTA is a 4096b bit-field, each identifying a single VLAN 4154 + * ID. The following operations determine which 32b entry 4155 + * (i.e. offset) into the array we want to set the VLAN ID 4156 + * (i.e. bit) of the manageability unit. */ 4157 + vfta_offset = (hw->mng_cookie.vlan_id >> 4158 + E1000_VFTA_ENTRY_SHIFT) & 4159 + E1000_VFTA_ENTRY_MASK; 4160 + vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id & 4161 + E1000_VFTA_ENTRY_BIT_SHIFT_MASK); 4162 + } 4163 + } 4164 + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { 4165 + /* If the offset we want to clear is the same offset of the 4166 + * manageability VLAN ID, then clear all bits except that of the 4167 + * manageability unit */ 4168 + vfta_value = (offset == vfta_offset) ? 
vfta_bit_in_reg : 0; 4169 + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value); 4170 + } 4171 } 4172 4173 + int32_t 4174 e1000_id_led_init(struct e1000_hw * hw) 4175 { 4176 uint32_t ledctl; ··· 4480 temp = E1000_READ_REG(hw, MGTPRC); 4481 temp = E1000_READ_REG(hw, MGTPDC); 4482 temp = E1000_READ_REG(hw, MGTPTC); 4483 + 4484 + if(hw->mac_type <= e1000_82547_rev_2) return; 4485 + 4486 + temp = E1000_READ_REG(hw, IAC); 4487 + temp = E1000_READ_REG(hw, ICRXOC); 4488 + temp = E1000_READ_REG(hw, ICRXPTC); 4489 + temp = E1000_READ_REG(hw, ICRXATC); 4490 + temp = E1000_READ_REG(hw, ICTXPTC); 4491 + temp = E1000_READ_REG(hw, ICTXATC); 4492 + temp = E1000_READ_REG(hw, ICTXQEC); 4493 + temp = E1000_READ_REG(hw, ICTXQMTC); 4494 + temp = E1000_READ_REG(hw, ICRXDMTC); 4495 + 4496 } 4497 4498 /****************************************************************************** ··· 4646 hw->bus_speed = e1000_bus_speed_unknown; 4647 hw->bus_width = e1000_bus_width_unknown; 4648 break; 4649 + case e1000_82573: 4650 + hw->bus_type = e1000_bus_type_pci_express; 4651 + hw->bus_speed = e1000_bus_speed_2500; 4652 + hw->bus_width = e1000_bus_width_pciex_4; 4653 + break; 4654 default: 4655 status = E1000_READ_REG(hw, STATUS); 4656 hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ? ··· 4749 4750 /* Use old method for Phy older than IGP */ 4751 if(hw->phy_type == e1000_phy_m88) { 4752 + 4753 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, 4754 &phy_data); 4755 if(ret_val) ··· 4865 return ret_val; 4866 *polarity = (phy_data & M88E1000_PSSR_REV_POLARITY) >> 4867 M88E1000_PSSR_REV_POLARITY_SHIFT; 4868 + } else if(hw->phy_type == e1000_phy_igp || 4869 + hw->phy_type == e1000_phy_igp_2) { 4870 /* Read the Status register to check the speed */ 4871 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, 4872 &phy_data); ··· 4917 4918 DEBUGFUNC("e1000_check_downshift"); 4919 4920 + if(hw->phy_type == e1000_phy_igp || 4921 + hw->phy_type == e1000_phy_igp_2) { 4922 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, 4923 &phy_data); 4924 if(ret_val) ··· 4933 hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >> 4934 M88E1000_PSSR_DOWNSHIFT_SHIFT; 4935 } 4936 + 4937 return E1000_SUCCESS; 4938 } 4939 ··· 5047 if(ret_val) 5048 return ret_val; 5049 5050 + msec_delay_irq(20); 5051 5052 ret_val = e1000_write_phy_reg(hw, 0x0000, 5053 IGP01E1000_IEEE_FORCE_GIGA); ··· 5071 if(ret_val) 5072 return ret_val; 5073 5074 + msec_delay_irq(20); 5075 5076 /* Now enable the transmitter */ 5077 ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); ··· 5096 if(ret_val) 5097 return ret_val; 5098 5099 + msec_delay_irq(20); 5100 5101 ret_val = e1000_write_phy_reg(hw, 0x0000, 5102 IGP01E1000_IEEE_FORCE_GIGA); ··· 5112 if(ret_val) 5113 return ret_val; 5114 5115 + msec_delay_irq(20); 5116 5117 /* Now enable the transmitter */ 5118 ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); ··· 5187 uint16_t phy_data; 5188 DEBUGFUNC("e1000_set_d3_lplu_state"); 5189 5190 + if(hw->phy_type != e1000_phy_igp && hw->phy_type != e1000_phy_igp_2) 5191 return E1000_SUCCESS; 5192 5193 /* During driver activity LPLU should not be used or it will attain link 5194 * from the lowest speeds starting from 10Mbps. 
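[Editor's note: two short illustrations for the filtering code above, not
part of the patch. The 12-bit multicast hash computed above
(hash_value &= 0xFFF) selects one of 4096 MTA bits, 128 registers of 32
bits each; setting one bit is the usual split, sketched here with the
array accessors used elsewhere in this file:

	uint32_t reg = (hash_value >> 5) & 0x7F;   /* upper 7 bits */
	uint32_t bit = hash_value & 0x1F;          /* lower 5 bits */
	uint32_t mta = E1000_READ_REG_ARRAY(hw, MTA, reg);
	E1000_WRITE_REG_ARRAY(hw, MTA, reg, mta | (1 << bit));

For the manageability VLAN math in e1000_clear_vfta(), assuming
E1000_VFTA_ENTRY_SHIFT is 5: a cookie vlan_id of 100 gives vfta_offset
100 >> 5 = 3 and vfta_bit_in_reg = 1 << (100 & 31) = 1 << 4, so dword 3
keeps bit 4 set while the rest of the 4096-bit table is cleared.]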
 * The capability is used for
5195 * Dx transitions and states */
5196 + if(hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) {
5197 + ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data);
5198 if(ret_val)
5199 return ret_val;
5200 + } else {
5201 + ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
5202 + if(ret_val)
5203 + return ret_val;
5204 + }
5205 +
5206 + if(!active) {
5207 + if(hw->mac_type == e1000_82541_rev_2 ||
5208 + hw->mac_type == e1000_82547_rev_2) {
5209 + phy_data &= ~IGP01E1000_GMII_FLEX_SPD;
5210 + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
5211 + if(ret_val)
5212 + return ret_val;
5213 + } else {
5214 + phy_data &= ~IGP02E1000_PM_D3_LPLU;
5215 + ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
5216 + phy_data);
5217 + if (ret_val)
5218 + return ret_val;
5219 + }
5220
5221 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during
5222 * Dx states where the power conservation is most important. During
··· 5236 (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL ) ||
5237 (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) {
5238
5239 + if(hw->mac_type == e1000_82541_rev_2 ||
5240 + hw->mac_type == e1000_82547_rev_2) {
5241 + phy_data |= IGP01E1000_GMII_FLEX_SPD;
5242 + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
5243 + if(ret_val)
5244 + return ret_val;
5245 + } else {
5246 + phy_data |= IGP02E1000_PM_D3_LPLU;
5247 + ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
5248 + phy_data);
5249 + if (ret_val)
5250 + return ret_val;
5251 + }
5252 +
5253 + /* When LPLU is enabled we should disable SmartSpeed */
5254 + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
5255 if(ret_val)
5256 return ret_val;
5257 +
5258 + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
5259 + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data);
5260 + if(ret_val)
5261 + return ret_val;
5262 +
5263 + }
5264 + return E1000_SUCCESS;
5265 + }
5266 +
5267 + /*****************************************************************************
5268 + *
5269 + * This function sets the LPLU D0 state according to the active flag. When
5270 + * activating LPLU this function also disables SmartSpeed and vice versa.
5271 + * LPLU will not be activated unless the device autonegotiation advertisement
5272 + * meets standards of either 10, 10/100, or 10/100/1000 at all duplexes.
5273 + * hw: Struct containing variables accessed by shared code
5274 + * active - TRUE to enable LPLU, FALSE to disable LPLU.
5275 + *
5276 + * returns: - E1000_ERR_PHY if reading/writing the PHY fails
5277 + * E1000_SUCCESS in any other case.
5278 + *
5279 + ****************************************************************************/
5280 +
5281 + int32_t
5282 + e1000_set_d0_lplu_state(struct e1000_hw *hw,
5283 + boolean_t active)
5284 + {
5285 + int32_t ret_val;
5286 + uint16_t phy_data;
5287 + DEBUGFUNC("e1000_set_d0_lplu_state");
5288 +
5289 + if(hw->mac_type <= e1000_82547_rev_2)
5290 + return E1000_SUCCESS;
5291 +
5292 + ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
5293 + if(ret_val)
5294 + return ret_val;
5295 +
5296 + if (!active) {
5297 + phy_data &= ~IGP02E1000_PM_D0_LPLU;
5298 + ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
5299 + if (ret_val)
5300 + return ret_val;
5301 +
5302 + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during
5303 + * Dx states where the power conservation is most important.
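 * [Editorial note, the interlock in table form: LPLU requested -> LPLU
 * bit set and SmartSpeed forced off; LPLU released -> LPLU bit cleared
 * and SmartSpeed restored per hw->smart_speed (on sets it, off clears
 * it, default leaves the PHY value untouched).]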
During
5304 + * driver activity we should enable SmartSpeed, so performance is
5305 + * maintained. */
5306 + if (hw->smart_speed == e1000_smart_speed_on) {
5307 + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
5308 + &phy_data);
5309 + if(ret_val)
5310 + return ret_val;
5311 +
5312 + phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
5313 + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
5314 + phy_data);
5315 + if(ret_val)
5316 + return ret_val;
5317 + } else if (hw->smart_speed == e1000_smart_speed_off) {
5318 + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
5319 + &phy_data);
5320 + if (ret_val)
5321 + return ret_val;
5322 +
5323 + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
5324 + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
5325 + phy_data);
5326 + if(ret_val)
5327 + return ret_val;
5328 + }
5329 +
5330 +
5331 + } else {
5332 +
5333 + phy_data |= IGP02E1000_PM_D0_LPLU;
5334 + ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
5335 + if (ret_val)
5336 + return ret_val;
5337
5338 /* When LPLU is enabled we should disable SmartSpeed */
5339 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
···
5316 return ret_val;
5317
5318 return E1000_SUCCESS;
5319 + }
5320 +
5321 +
5322 + /*****************************************************************************
5323 + * This function reads the cookie from ARC RAM.
5324 + *
5325 + * returns: - E1000_SUCCESS.
5326 + ****************************************************************************/
5327 + int32_t
5328 + e1000_host_if_read_cookie(struct e1000_hw * hw, uint8_t *buffer)
5329 + {
5330 + uint8_t i;
5331 + uint32_t offset = E1000_MNG_DHCP_COOKIE_OFFSET;
5332 + uint8_t length = E1000_MNG_DHCP_COOKIE_LENGTH;
5333 +
5334 + length = (length >> 2);
5335 + offset = (offset >> 2);
5336 +
5337 + for (i = 0; i < length; i++) {
5338 + *((uint32_t *) buffer + i) =
5339 + E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset + i);
5340 + }
5341 + return E1000_SUCCESS;
5342 + }
5343 +
5344 +
5345 + /*****************************************************************************
5346 + * This function checks whether the HOST IF is enabled for command operation
5347 + * and also checks whether the previous command is completed.
5348 + * It busy-waits if the previous command is not yet completed.
5349 + *
5350 + * returns: - E1000_ERR_HOST_INTERFACE_COMMAND if the interface is not ready
5351 + * or times out
5352 + * - E1000_SUCCESS for success.
5353 + ****************************************************************************/
5354 + int32_t
5355 + e1000_mng_enable_host_if(struct e1000_hw * hw)
5356 + {
5357 + uint32_t hicr;
5358 + uint8_t i;
5359 +
5360 + /* Check that the host interface is enabled.
*/
5361 + hicr = E1000_READ_REG(hw, HICR);
5362 + if ((hicr & E1000_HICR_EN) == 0) {
5363 + DEBUGOUT("E1000_HOST_EN bit disabled.\n");
5364 + return -E1000_ERR_HOST_INTERFACE_COMMAND;
5365 + }
5366 + /* check the previous command is completed */
5367 + for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
5368 + hicr = E1000_READ_REG(hw, HICR);
5369 + if (!(hicr & E1000_HICR_C))
5370 + break;
5371 + msec_delay_irq(1);
5372 + }
5373 +
5374 + if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
5375 + DEBUGOUT("Previous command timed out.\n");
5376 + return -E1000_ERR_HOST_INTERFACE_COMMAND;
5377 + }
5378 + return E1000_SUCCESS;
5379 + }
5380 +
5381 + /*****************************************************************************
5382 + * This function writes the buffer content at the given offset on the host interface.
5383 + * It also handles alignment so that the writes are done in the most efficient way.
5384 + * It also accumulates the byte sum of the buffer into the *sum parameter.
5385 + *
5386 + * returns - E1000_SUCCESS for success.
5387 + ****************************************************************************/
5388 + int32_t
5389 + e1000_mng_host_if_write(struct e1000_hw * hw, uint8_t *buffer,
5390 + uint16_t length, uint16_t offset, uint8_t *sum)
5391 + {
5392 + uint8_t *tmp;
5393 + uint8_t *bufptr = buffer;
5394 + uint32_t data;
5395 + uint16_t remaining, i, j, prev_bytes;
5396 +
5397 + /* *sum is a plain byte sum of the data, not a checksum */
5398 +
5399 + if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) {
5400 + return -E1000_ERR_PARAM;
5401 + }
5402 +
5403 + tmp = (uint8_t *)&data;
5404 + prev_bytes = offset & 0x3;
5405 + offset &= 0xFFFC;
5406 + offset >>= 2;
5407 +
5408 + if (prev_bytes) {
5409 + data = E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset);
5410 + for (j = prev_bytes; j < sizeof(uint32_t); j++) {
5411 + *(tmp + j) = *bufptr++;
5412 + *sum += *(tmp + j);
5413 + }
5414 + E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset, data);
5415 + length -= j - prev_bytes;
5416 + offset++;
5417 + }
5418 +
5419 + remaining = length & 0x3;
5420 + length -= remaining;
5421 +
5422 + /* Calculate length in DWORDs */
5423 + length >>= 2;
5424 +
5425 + /* The device driver writes the relevant command block into the
5426 + * ram area. */
5427 + for (i = 0; i < length; i++) {
5428 + for (j = 0; j < sizeof(uint32_t); j++) {
5429 + *(tmp + j) = *bufptr++;
5430 + *sum += *(tmp + j);
5431 + }
5432 +
5433 + E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data);
5434 + }
5435 + if (remaining) {
5436 + for (j = 0; j < sizeof(uint32_t); j++) {
5437 + if (j < remaining)
5438 + *(tmp + j) = *bufptr++;
5439 + else
5440 + *(tmp + j) = 0;
5441 +
5442 + *sum += *(tmp + j);
5443 + }
5444 + E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data);
5445 + }
5446 +
5447 + return E1000_SUCCESS;
5448 + }
5449 +
5450 +
5451 + /*****************************************************************************
5452 + * This function writes the command header after performing the checksum calculation.
5453 + *
5454 + * returns - E1000_SUCCESS for success.
5455 + ****************************************************************************/
5456 + int32_t
5457 + e1000_mng_write_cmd_header(struct e1000_hw * hw,
5458 + struct e1000_host_mng_command_header * hdr)
5459 + {
5460 + uint16_t i;
5461 + uint8_t sum;
5462 + uint8_t *buffer;
5463 +
5464 + /* Write the whole command header structure which includes sum of
5465 + * the buffer */
5466 +
5467 + uint16_t length = sizeof(struct e1000_host_mng_command_header);
5468 +
5469 + sum = hdr->checksum;
5470 + hdr->checksum = 0;
5471 +
5472 + buffer = (uint8_t *) hdr;
5473 + i = length;
5474 + while(i--)
5475 + sum += buffer[i];
5476 +
5477 + hdr->checksum = 0 - sum;
5478 +
5479 + length >>= 2;
5480 + /* The device driver writes the relevant command block into the ram area. */
5481 + for (i = 0; i < length; i++)
5482 + E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((uint32_t *) hdr + i));
5483 +
5484 + return E1000_SUCCESS;
5485 + }
5486 +
5487 +
5488 + /*****************************************************************************
5489 + * This function indicates to ARC that a new command is pending, which completes
5490 + * one write operation by the driver.
5491 + *
5492 + * returns - E1000_SUCCESS for success.
5493 + ****************************************************************************/
5494 + int32_t
5495 + e1000_mng_write_commit(
5496 + struct e1000_hw * hw)
5497 + {
5498 + uint32_t hicr;
5499 +
5500 + hicr = E1000_READ_REG(hw, HICR);
5501 + /* Setting this bit tells the ARC that a new command is pending. */
5502 + E1000_WRITE_REG(hw, HICR, hicr | E1000_HICR_C);
5503 +
5504 + return E1000_SUCCESS;
5505 + }
5506 +
5507 +
5508 + /*****************************************************************************
5509 + * This function checks the mode of the firmware.
5510 + *
5511 + * returns - TRUE when the mode is IAMT, FALSE otherwise.
5512 + ****************************************************************************/
5513 + boolean_t
5514 + e1000_check_mng_mode(
5515 + struct e1000_hw *hw)
5516 + {
5517 + uint32_t fwsm;
5518 +
5519 + fwsm = E1000_READ_REG(hw, FWSM);
5520 +
5521 + if((fwsm & E1000_FWSM_MODE_MASK) ==
5522 + (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
5523 + return TRUE;
5524 +
5525 + return FALSE;
5526 + }
5527 +
5528 +
5529 + /*****************************************************************************
5530 + * This function writes the DHCP information.
5531 + ****************************************************************************/
5532 + int32_t
5533 + e1000_mng_write_dhcp_info(struct e1000_hw * hw, uint8_t *buffer,
5534 + uint16_t length)
5535 + {
5536 + int32_t ret_val;
5537 + struct e1000_host_mng_command_header hdr;
5538 +
5539 + hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
5540 + hdr.command_length = length;
5541 + hdr.reserved1 = 0;
5542 + hdr.reserved2 = 0;
5543 + hdr.checksum = 0;
5544 +
5545 + ret_val = e1000_mng_enable_host_if(hw);
5546 + if (ret_val == E1000_SUCCESS) {
5547 + ret_val = e1000_mng_host_if_write(hw, buffer, length, sizeof(hdr),
5548 + &(hdr.checksum));
5549 + if (ret_val == E1000_SUCCESS) {
5550 + ret_val = e1000_mng_write_cmd_header(hw, &hdr);
5551 + if (ret_val == E1000_SUCCESS)
5552 + ret_val = e1000_mng_write_commit(hw);
5553 + }
5554 + }
5555 + return ret_val;
5556 + }
5557 +
5558 +
5559 + /*****************************************************************************
5560 + * This function calculates the checksum.
5561 + *
5562 + * returns - checksum of buffer contents.
5563 + ****************************************************************************/ 5564 + uint8_t 5565 + e1000_calculate_mng_checksum(char *buffer, uint32_t length) 5566 + { 5567 + uint8_t sum = 0; 5568 + uint32_t i; 5569 + 5570 + if (!buffer) 5571 + return 0; 5572 + 5573 + for (i=0; i < length; i++) 5574 + sum += buffer[i]; 5575 + 5576 + return (uint8_t) (0 - sum); 5577 + } 5578 + 5579 + /***************************************************************************** 5580 + * This function checks whether tx pkt filtering needs to be enabled or not. 5581 + * 5582 + * returns - TRUE for packet filtering or FALSE. 5583 + ****************************************************************************/ 5584 + boolean_t 5585 + e1000_enable_tx_pkt_filtering(struct e1000_hw *hw) 5586 + { 5587 + /* called in init as well as watchdog timer functions */ 5588 + 5589 + int32_t ret_val, checksum; 5590 + boolean_t tx_filter = FALSE; 5591 + struct e1000_host_mng_dhcp_cookie *hdr = &(hw->mng_cookie); 5592 + uint8_t *buffer = (uint8_t *) &(hw->mng_cookie); 5593 + 5594 + if (e1000_check_mng_mode(hw)) { 5595 + ret_val = e1000_mng_enable_host_if(hw); 5596 + if (ret_val == E1000_SUCCESS) { 5597 + ret_val = e1000_host_if_read_cookie(hw, buffer); 5598 + if (ret_val == E1000_SUCCESS) { 5599 + checksum = hdr->checksum; 5600 + hdr->checksum = 0; 5601 + if ((hdr->signature == E1000_IAMT_SIGNATURE) && 5602 + checksum == e1000_calculate_mng_checksum((char *)buffer, 5603 + E1000_MNG_DHCP_COOKIE_LENGTH)) { 5604 + if (hdr->status & 5605 + E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT) 5606 + tx_filter = TRUE; 5607 + } else 5608 + tx_filter = TRUE; 5609 + } else 5610 + tx_filter = TRUE; 5611 + } 5612 + } 5613 + 5614 + hw->tx_pkt_filtering = tx_filter; 5615 + return tx_filter; 5616 + } 5617 + 5618 + /****************************************************************************** 5619 + * Verifies the hardware needs to allow ARPs to be processed by the host 5620 + * 5621 + * hw - Struct containing variables accessed by shared code 5622 + * 5623 + * returns: - TRUE/FALSE 5624 + * 5625 + *****************************************************************************/ 5626 + uint32_t 5627 + e1000_enable_mng_pass_thru(struct e1000_hw *hw) 5628 + { 5629 + uint32_t manc; 5630 + uint32_t fwsm, factps; 5631 + 5632 + if (hw->asf_firmware_present) { 5633 + manc = E1000_READ_REG(hw, MANC); 5634 + 5635 + if (!(manc & E1000_MANC_RCV_TCO_EN) || 5636 + !(manc & E1000_MANC_EN_MAC_ADDR_FILTER)) 5637 + return FALSE; 5638 + if (e1000_arc_subsystem_valid(hw) == TRUE) { 5639 + fwsm = E1000_READ_REG(hw, FWSM); 5640 + factps = E1000_READ_REG(hw, FACTPS); 5641 + 5642 + if (((fwsm & E1000_FWSM_MODE_MASK) == 5643 + (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT)) && 5644 + (factps & E1000_FACTPS_MNGCG)) 5645 + return TRUE; 5646 + } else 5647 + if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN)) 5648 + return TRUE; 5649 + } 5650 + return FALSE; 5651 } 5652 5653 static int32_t ··· 5402 } 5403 return E1000_SUCCESS; 5404 } 5405 + 5406 + /*************************************************************************** 5407 + * 5408 + * Disables PCI-Express master access. 5409 + * 5410 + * hw: Struct containing variables accessed by shared code 5411 + * 5412 + * returns: - none. 
5413 + * 5414 + ***************************************************************************/ 5415 + void 5416 + e1000_set_pci_express_master_disable(struct e1000_hw *hw) 5417 + { 5418 + uint32_t ctrl; 5419 + 5420 + DEBUGFUNC("e1000_set_pci_express_master_disable"); 5421 + 5422 + if (hw->bus_type != e1000_bus_type_pci_express) 5423 + return; 5424 + 5425 + ctrl = E1000_READ_REG(hw, CTRL); 5426 + ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; 5427 + E1000_WRITE_REG(hw, CTRL, ctrl); 5428 + } 5429 + 5430 + /*************************************************************************** 5431 + * 5432 + * Enables PCI-Express master access. 5433 + * 5434 + * hw: Struct containing variables accessed by shared code 5435 + * 5436 + * returns: - none. 5437 + * 5438 + ***************************************************************************/ 5439 + void 5440 + e1000_enable_pciex_master(struct e1000_hw *hw) 5441 + { 5442 + uint32_t ctrl; 5443 + 5444 + DEBUGFUNC("e1000_enable_pciex_master"); 5445 + 5446 + if (hw->bus_type != e1000_bus_type_pci_express) 5447 + return; 5448 + 5449 + ctrl = E1000_READ_REG(hw, CTRL); 5450 + ctrl &= ~E1000_CTRL_GIO_MASTER_DISABLE; 5451 + E1000_WRITE_REG(hw, CTRL, ctrl); 5452 + } 5453 + 5454 + /******************************************************************************* 5455 + * 5456 + * Disables PCI-Express master access and verifies there are no pending requests 5457 + * 5458 + * hw: Struct containing variables accessed by shared code 5459 + * 5460 + * returns: - E1000_ERR_MASTER_REQUESTS_PENDING if master disable bit hasn't 5461 + * caused the master requests to be disabled. 5462 + * E1000_SUCCESS master requests disabled. 5463 + * 5464 + ******************************************************************************/ 5465 + int32_t 5466 + e1000_disable_pciex_master(struct e1000_hw *hw) 5467 + { 5468 + int32_t timeout = MASTER_DISABLE_TIMEOUT; /* 80ms */ 5469 + 5470 + DEBUGFUNC("e1000_disable_pciex_master"); 5471 + 5472 + if (hw->bus_type != e1000_bus_type_pci_express) 5473 + return E1000_SUCCESS; 5474 + 5475 + e1000_set_pci_express_master_disable(hw); 5476 + 5477 + while(timeout) { 5478 + if(!(E1000_READ_REG(hw, STATUS) & E1000_STATUS_GIO_MASTER_ENABLE)) 5479 + break; 5480 + else 5481 + udelay(100); 5482 + timeout--; 5483 + } 5484 + 5485 + if(!timeout) { 5486 + DEBUGOUT("Master requests are pending.\n"); 5487 + return -E1000_ERR_MASTER_REQUESTS_PENDING; 5488 + } 5489 + 5490 + return E1000_SUCCESS; 5491 + } 5492 + 5493 + /******************************************************************************* 5494 + * 5495 + * Check for EEPROM Auto Read bit done. 5496 + * 5497 + * hw: Struct containing variables accessed by shared code 5498 + * 5499 + * returns: - E1000_ERR_RESET if fail to reset MAC 5500 + * E1000_SUCCESS at any other case. 
5501 + * 5502 + ******************************************************************************/ 5503 + int32_t 5504 + e1000_get_auto_rd_done(struct e1000_hw *hw) 5505 + { 5506 + int32_t timeout = AUTO_READ_DONE_TIMEOUT; 5507 + 5508 + DEBUGFUNC("e1000_get_auto_rd_done"); 5509 + 5510 + switch (hw->mac_type) { 5511 + default: 5512 + msec_delay(5); 5513 + break; 5514 + case e1000_82573: 5515 + while(timeout) { 5516 + if (E1000_READ_REG(hw, EECD) & E1000_EECD_AUTO_RD) break; 5517 + else msec_delay(1); 5518 + timeout--; 5519 + } 5520 + 5521 + if(!timeout) { 5522 + DEBUGOUT("Auto read by HW from EEPROM has not completed.\n"); 5523 + return -E1000_ERR_RESET; 5524 + } 5525 + break; 5526 + } 5527 + 5528 + return E1000_SUCCESS; 5529 + } 5530 + 5531 + /*************************************************************************** 5532 + * Checks if the PHY configuration is done 5533 + * 5534 + * hw: Struct containing variables accessed by shared code 5535 + * 5536 + * returns: - E1000_ERR_RESET if fail to reset MAC 5537 + * E1000_SUCCESS at any other case. 5538 + * 5539 + ***************************************************************************/ 5540 + int32_t 5541 + e1000_get_phy_cfg_done(struct e1000_hw *hw) 5542 + { 5543 + DEBUGFUNC("e1000_get_phy_cfg_done"); 5544 + 5545 + /* Simply wait for 10ms */ 5546 + msec_delay(10); 5547 + 5548 + return E1000_SUCCESS; 5549 + } 5550 + 5551 + /*************************************************************************** 5552 + * 5553 + * Using the combination of SMBI and SWESMBI semaphore bits when resetting 5554 + * adapter or Eeprom access. 5555 + * 5556 + * hw: Struct containing variables accessed by shared code 5557 + * 5558 + * returns: - E1000_ERR_EEPROM if fail to access EEPROM. 5559 + * E1000_SUCCESS at any other case. 5560 + * 5561 + ***************************************************************************/ 5562 + int32_t 5563 + e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw) 5564 + { 5565 + int32_t timeout; 5566 + uint32_t swsm; 5567 + 5568 + DEBUGFUNC("e1000_get_hw_eeprom_semaphore"); 5569 + 5570 + if(!hw->eeprom_semaphore_present) 5571 + return E1000_SUCCESS; 5572 + 5573 + 5574 + /* Get the FW semaphore. */ 5575 + timeout = hw->eeprom.word_size + 1; 5576 + while(timeout) { 5577 + swsm = E1000_READ_REG(hw, SWSM); 5578 + swsm |= E1000_SWSM_SWESMBI; 5579 + E1000_WRITE_REG(hw, SWSM, swsm); 5580 + /* if we managed to set the bit we got the semaphore. */ 5581 + swsm = E1000_READ_REG(hw, SWSM); 5582 + if(swsm & E1000_SWSM_SWESMBI) 5583 + break; 5584 + 5585 + udelay(50); 5586 + timeout--; 5587 + } 5588 + 5589 + if(!timeout) { 5590 + /* Release semaphores */ 5591 + e1000_put_hw_eeprom_semaphore(hw); 5592 + DEBUGOUT("Driver can't access the Eeprom - SWESMBI bit is set.\n"); 5593 + return -E1000_ERR_EEPROM; 5594 + } 5595 + 5596 + return E1000_SUCCESS; 5597 + } 5598 + 5599 + /*************************************************************************** 5600 + * This function clears HW semaphore bits. 5601 + * 5602 + * hw: Struct containing variables accessed by shared code 5603 + * 5604 + * returns: - None. 5605 + * 5606 + ***************************************************************************/ 5607 + void 5608 + e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw) 5609 + { 5610 + uint32_t swsm; 5611 + 5612 + DEBUGFUNC("e1000_put_hw_eeprom_semaphore"); 5613 + 5614 + if(!hw->eeprom_semaphore_present) 5615 + return; 5616 + 5617 + swsm = E1000_READ_REG(hw, SWSM); 5618 + /* Release both semaphores. 
*/ 5619 + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); 5620 + E1000_WRITE_REG(hw, SWSM, swsm); 5621 + } 5622 + 5623 + /****************************************************************************** 5624 + * Checks if PHY reset is blocked due to SOL/IDER session, for example. 5625 + * Returning E1000_BLK_PHY_RESET isn't necessarily an error. But it's up to 5626 + * the caller to figure out how to deal with it. 5627 + * 5628 + * hw - Struct containing variables accessed by shared code 5629 + * 5630 + * returns: - E1000_BLK_PHY_RESET 5631 + * E1000_SUCCESS 5632 + * 5633 + *****************************************************************************/ 5634 + int32_t 5635 + e1000_check_phy_reset_block(struct e1000_hw *hw) 5636 + { 5637 + uint32_t manc = 0; 5638 + if(hw->mac_type > e1000_82547_rev_2) 5639 + manc = E1000_READ_REG(hw, MANC); 5640 + return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? 5641 + E1000_BLK_PHY_RESET : E1000_SUCCESS; 5642 + } 5643 + 5644 + uint8_t 5645 + e1000_arc_subsystem_valid(struct e1000_hw *hw) 5646 + { 5647 + uint32_t fwsm; 5648 + 5649 + /* On 8257x silicon, registers in the range of 0x8800 - 0x8FFC 5650 + * may not be provided a DMA clock when no manageability features are 5651 + * enabled. We do not want to perform any reads/writes to these registers 5652 + * if this is the case. We read FWSM to determine the manageability mode. 5653 + */ 5654 + switch (hw->mac_type) { 5655 + case e1000_82573: 5656 + fwsm = E1000_READ_REG(hw, FWSM); 5657 + if((fwsm & E1000_FWSM_MODE_MASK) != 0) 5658 + return TRUE; 5659 + break; 5660 + default: 5661 + break; 5662 + } 5663 + return FALSE; 5664 + } 5665 + 5666 + 5667
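The manageability helpers added above all share one checksum convention: e1000_mng_write_cmd_header() and e1000_calculate_mng_checksum() both store the two's complement of the byte sum, so the byte sum of the finished buffer is zero modulo 256, and the cookie validation in e1000_enable_tx_pkt_filtering() reduces to recomputing that sum. A minimal standalone sketch of the convention, not driver code; the 16-byte buffer and the checksum byte at offset 8 are illustrative assumptions only:

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Same arithmetic as e1000_calculate_mng_checksum(): the checksum byte
 * is the two's complement of the plain byte sum of the buffer. */
static uint8_t mng_checksum(const uint8_t *buffer, uint32_t length)
{
	uint8_t sum = 0;
	uint32_t i;

	for (i = 0; i < length; i++)
		sum += buffer[i];

	return (uint8_t)(0 - sum);
}

int main(void)
{
	uint8_t cookie[16];	/* illustrative buffer; checksum byte at offset 8 (assumed) */
	uint8_t sum = 0;
	unsigned int i;

	memset(cookie, 0, sizeof(cookie));
	memcpy(cookie, "IAMT", 4);	/* E1000_IAMT_SIGNATURE, little-endian */

	/* Writer side: the checksum field is zero while the sum is computed. */
	cookie[8] = mng_checksum(cookie, sizeof(cookie));

	/* Reader side: a valid buffer now sums to zero modulo 256. */
	for (i = 0; i < sizeof(cookie); i++)
		sum += cookie[i];
	assert(sum == 0);

	return 0;
}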
+547 -23
drivers/net/e1000/e1000_hw.h
··· 1 /******************************************************************************* 2 3 4 - Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms of the GNU General Public License as published by the Free ··· 57 e1000_82541_rev_2, 58 e1000_82547, 59 e1000_82547_rev_2, 60 e1000_num_macs 61 } e1000_mac_type; 62 ··· 65 e1000_eeprom_uninitialized = 0, 66 e1000_eeprom_spi, 67 e1000_eeprom_microwire, 68 e1000_num_eeprom_types 69 } e1000_eeprom_type; 70 ··· 98 e1000_bus_type_unknown = 0, 99 e1000_bus_type_pci, 100 e1000_bus_type_pcix, 101 e1000_bus_type_reserved 102 } e1000_bus_type; 103 ··· 110 e1000_bus_speed_100, 111 e1000_bus_speed_120, 112 e1000_bus_speed_133, 113 e1000_bus_speed_reserved 114 } e1000_bus_speed; 115 ··· 119 e1000_bus_width_unknown = 0, 120 e1000_bus_width_32, 121 e1000_bus_width_64, 122 e1000_bus_width_reserved 123 } e1000_bus_width; 124 ··· 202 typedef enum { 203 e1000_phy_m88 = 0, 204 e1000_phy_igp, 205 e1000_phy_undefined = 0xFF 206 } e1000_phy_type; 207 ··· 249 uint16_t address_bits; 250 uint16_t delay_usec; 251 uint16_t page_size; 252 }; 253 254 255 ··· 272 #define E1000_ERR_PARAM 4 273 #define E1000_ERR_MAC_TYPE 5 274 #define E1000_ERR_PHY_TYPE 6 275 276 /* Function prototypes */ 277 /* Initialization */ 278 int32_t e1000_reset_hw(struct e1000_hw *hw); 279 int32_t e1000_init_hw(struct e1000_hw *hw); 280 int32_t e1000_set_mac_type(struct e1000_hw *hw); 281 void e1000_set_media_type(struct e1000_hw *hw); 282 ··· 298 /* PHY */ 299 int32_t e1000_read_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *phy_data); 300 int32_t e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data); 301 - void e1000_phy_hw_reset(struct e1000_hw *hw); 302 int32_t e1000_phy_reset(struct e1000_hw *hw); 303 int32_t e1000_detect_gig_phy(struct e1000_hw *hw); 304 int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); ··· 310 int32_t e1000_validate_mdi_setting(struct e1000_hw *hw); 311 312 /* EEPROM Functions */ 313 - void e1000_init_eeprom_params(struct e1000_hw *hw); 314 int32_t e1000_read_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data); 315 int32_t e1000_validate_eeprom_checksum(struct e1000_hw *hw); 316 int32_t e1000_update_eeprom_checksum(struct e1000_hw *hw); 317 int32_t e1000_write_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data); 318 int32_t e1000_read_part_num(struct e1000_hw *hw, uint32_t * part_num); 319 int32_t e1000_read_mac_addr(struct e1000_hw * hw); 320 321 /* Filters (multicast, vlan, receive) */ 322 void e1000_init_rx_addrs(struct e1000_hw *hw); ··· 409 /* Adaptive IFS Functions */ 410 411 /* Everything else */ 412 - uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw); 413 void e1000_clear_hw_cntrs(struct e1000_hw *hw); 414 void e1000_reset_adaptive(struct e1000_hw *hw); 415 void e1000_update_adaptive(struct e1000_hw *hw); ··· 425 void e1000_write_reg_io(struct e1000_hw *hw, uint32_t offset, uint32_t value); 426 int32_t e1000_config_dsp_after_link_change(struct e1000_hw *hw, boolean_t link_up); 427 int32_t e1000_set_d3_lplu_state(struct e1000_hw *hw, boolean_t active); 428 429 #define E1000_READ_REG_IO(a, reg) \ 430 e1000_read_reg_io((a), E1000_##reg) ··· 477 #define E1000_DEV_ID_82546GB_SERDES 0x107B 478 #define E1000_DEV_ID_82546GB_PCIE 0x108A 479 #define E1000_DEV_ID_82547EI 0x1019 480 481 #define NODE_ADDRESS_SIZE 6 482 #define ETH_LENGTH_OF_ADDRESS 6 ··· 493 #define 
E1000_REVISION_0 0 494 #define E1000_REVISION_1 1 495 #define E1000_REVISION_2 2 496 497 #define SPEED_10 10 498 #define SPEED_100 100 ··· 550 E1000_IMS_RXSEQ | \ 551 E1000_IMS_LSC) 552 553 /* Number of high/low register pairs in the RAR. The RAR (Receive Address 554 * Registers) holds the directed and multicast addresses that we monitor. We 555 * reserve one of these spots for our directed address, allowing us room for ··· 571 uint16_t special; 572 }; 573 574 /* Receive Decriptor bit definitions */ 575 #define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ 576 #define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ 577 #define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ 578 #define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ 579 #define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ 580 #define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ 581 #define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ 582 #define E1000_RXD_ERR_CE 0x01 /* CRC Error */ 583 #define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ 584 #define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ ··· 648 #define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ 649 #define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ 650 #define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ 651 - #define E1000_RXD_SPC_PRI_SHIFT 0x000D /* Priority is in upper 3 of 16 */ 652 #define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */ 653 - #define E1000_RXD_SPC_CFI_SHIFT 0x000C /* CFI is bit 12 */ 654 655 /* mask to determine if packets should be dropped due to frame errors */ 656 #define E1000_RXD_ERR_FRAME_ERR_MASK ( \ ··· 670 E1000_RXD_ERR_SEQ | \ 671 E1000_RXD_ERR_CXE | \ 672 E1000_RXD_ERR_RXE) 673 674 /* Transmit Descriptor */ 675 struct e1000_tx_desc { ··· 861 #define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ 862 #define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ 863 #define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ 864 #define E1000_RCTL 0x00100 /* RX Control - RW */ 865 #define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ 866 #define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ ··· 871 #define E1000_TBT 0x00448 /* TX Burst Timer - RW */ 872 #define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ 873 #define E1000_LEDCTL 0x00E00 /* LED Control - RW */ 874 #define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ 875 #define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ 876 #define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ 877 #define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */ 878 #define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */ 879 #define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */ ··· 897 #define E1000_RXDCTL 0x02828 /* RX Descriptor Control - RW */ 898 #define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */ 899 #define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */ 900 #define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */ 901 #define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ 902 #define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ ··· 913 #define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */ 914 #define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */ 915 #define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ 916 #define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ 917 #define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ 918 #define 
E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ ··· 979 #define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ 980 #define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ 981 #define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ 982 #define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ 983 #define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ 984 #define E1000_RA 0x05400 /* Receive Address - RW Array */ 985 #define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ ··· 1007 #define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ 1008 #define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ 1009 1010 /* Register Set (82542) 1011 * 1012 * Some of the 82542 registers are located at different offsets than they are ··· 1067 #define E1000_82542_VFTA 0x00600 1068 #define E1000_82542_LEDCTL E1000_LEDCTL 1069 #define E1000_82542_PBA E1000_PBA 1070 #define E1000_82542_RXDCTL E1000_RXDCTL 1071 #define E1000_82542_RADV E1000_RADV 1072 #define E1000_82542_RSRPD E1000_RSRPD ··· 1163 #define E1000_82542_FFMT E1000_FFMT 1164 #define E1000_82542_FFVT E1000_FFVT 1165 #define E1000_82542_HOST_IF E1000_HOST_IF 1166 1167 /* Statistics counters collected by the MAC */ 1168 struct e1000_hw_stats { ··· 1256 uint64_t bptc; 1257 uint64_t tsctc; 1258 uint64_t tsctfc; 1259 }; 1260 1261 /* Structure containing variables used by the shared code (e1000_hw.c) */ 1262 struct e1000_hw { 1263 - uint8_t __iomem *hw_addr; 1264 e1000_mac_type mac_type; 1265 e1000_phy_type phy_type; 1266 uint32_t phy_init_script; ··· 1285 e1000_ms_type original_master_slave; 1286 e1000_ffe_config ffe_config_state; 1287 uint32_t asf_firmware_present; 1288 unsigned long io_base; 1289 uint32_t phy_id; 1290 uint32_t phy_revision; ··· 1302 uint32_t ledctl_default; 1303 uint32_t ledctl_mode1; 1304 uint32_t ledctl_mode2; 1305 uint16_t phy_spd_default; 1306 uint16_t autoneg_advertised; 1307 uint16_t pci_cmd_word; ··· 1342 boolean_t adaptive_ifs; 1343 boolean_t ifs_params_forced; 1344 boolean_t in_ifs_mode; 1345 }; 1346 1347 1348 #define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */ 1349 #define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */ 1350 /* Register Bit Masks */ 1351 /* Device Control */ 1352 #define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ 1353 #define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */ 1354 #define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ 1355 #define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ 1356 #define E1000_CTRL_TME 0x00000010 /* Test mode. 
0=normal,1=test */ 1357 #define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */ ··· 1373 #define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */ 1374 #define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ 1375 #define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ 1376 #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ 1377 #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ 1378 #define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ ··· 1393 #define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ 1394 #define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ 1395 #define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ 1396 #define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */ 1397 #define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ 1398 #define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ ··· 1403 #define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ 1404 #define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ 1405 #define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ 1406 #define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */ 1407 #define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */ 1408 #define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ ··· 1435 #ifndef E1000_EEPROM_GRANT_ATTEMPTS 1436 #define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ 1437 #endif 1438 1439 /* EEPROM Read */ 1440 #define E1000_EERD_START 0x00000001 /* Start Read */ ··· 1490 #define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 1491 #define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 1492 #define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 1493 1494 /* MDI Control */ 1495 #define E1000_MDIC_DATA_MASK 0x0000FFFF ··· 1508 /* LED Control */ 1509 #define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F 1510 #define E1000_LEDCTL_LED0_MODE_SHIFT 0 1511 #define E1000_LEDCTL_LED0_IVRT 0x00000040 1512 #define E1000_LEDCTL_LED0_BLINK 0x00000080 1513 #define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00 1514 #define E1000_LEDCTL_LED1_MODE_SHIFT 8 1515 #define E1000_LEDCTL_LED1_IVRT 0x00004000 1516 #define E1000_LEDCTL_LED1_BLINK 0x00008000 1517 #define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000 1518 #define E1000_LEDCTL_LED2_MODE_SHIFT 16 1519 #define E1000_LEDCTL_LED2_IVRT 0x00400000 1520 #define E1000_LEDCTL_LED2_BLINK 0x00800000 1521 #define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000 ··· 1562 #define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ 1563 #define E1000_ICR_TXD_LOW 0x00008000 1564 #define E1000_ICR_SRPD 0x00010000 1565 1566 /* Interrupt Cause Set */ 1567 #define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ ··· 1583 #define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ 1584 #define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW 1585 #define E1000_ICS_SRPD E1000_ICR_SRPD 1586 1587 /* Interrupt Mask Set */ 1588 #define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ ··· 1603 #define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ 1604 #define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW 1605 #define E1000_IMS_SRPD E1000_ICR_SRPD 1606 1607 /* Interrupt Mask Clear */ 1608 #define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ ··· 1623 #define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ 1624 #define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW 1625 #define E1000_IMC_SRPD E1000_ICR_SRPD 1626 1627 /* Receive Control */ 1628 #define E1000_RCTL_RST 0x00000001 /* Software reset */ ··· 1638 #define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ 1639 #define E1000_RCTL_LBM_SLP 
0x00000080 /* serial link loopback mode */ 1640 #define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ 1641 #define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ 1642 #define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */ 1643 #define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */ ··· 1666 #define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ 1667 #define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ 1668 #define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ 1669 1670 /* Receive Descriptor */ 1671 #define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */ ··· 1708 #define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ 1709 #define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ 1710 1711 /* Receive Descriptor Control */ 1712 #define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */ 1713 #define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */ ··· 1738 #define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ 1739 #define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */ 1740 #define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ 1741 1742 /* Transmit Configuration Word */ 1743 #define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ ··· 1773 #define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */ 1774 #define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ 1775 #define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ 1776 1777 /* Receive Checksum Control */ 1778 #define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */ 1779 #define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ 1780 #define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ 1781 #define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */ 1782 1783 /* Definitions for power management and wakeup registers */ 1784 /* Wake Up Control */ ··· 1801 #define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ 1802 #define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ 1803 #define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ 1804 #define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ 1805 #define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ 1806 #define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ ··· 1837 #define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ 1838 #define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery 1839 * Filtering */ 1840 #define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ 1841 #define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ 1842 #define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ 1843 #define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address 1844 * filtering */ 1845 #define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host 1846 * memory */ 1847 #define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */ 1848 #define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */ 1849 #define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */ ··· 1860 #define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */ 1861 #define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */ 1862 1863 /* Wake Up Packet Length */ 1864 #define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */ 1865 1866 #define E1000_MDALIGN 4096 1867 1868 /* EEPROM 
Commands - Microwire */ 1869 #define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */ ··· 1960 1961 /* EEPROM Commands - SPI */ 1962 #define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ 1963 - #define EEPROM_READ_OPCODE_SPI 0x3 /* EEPROM read opcode */ 1964 - #define EEPROM_WRITE_OPCODE_SPI 0x2 /* EEPROM write opcode */ 1965 - #define EEPROM_A8_OPCODE_SPI 0x8 /* opcode bit-3 = address bit-8 */ 1966 - #define EEPROM_WREN_OPCODE_SPI 0x6 /* EEPROM set Write Enable latch */ 1967 - #define EEPROM_WRDI_OPCODE_SPI 0x4 /* EEPROM reset Write Enable latch */ 1968 - #define EEPROM_RDSR_OPCODE_SPI 0x5 /* EEPROM read Status register */ 1969 - #define EEPROM_WRSR_OPCODE_SPI 0x1 /* EEPROM write Status register */ 1970 1971 /* EEPROM Size definitions */ 1972 - #define EEPROM_SIZE_16KB 0x1800 1973 - #define EEPROM_SIZE_8KB 0x1400 1974 - #define EEPROM_SIZE_4KB 0x1000 1975 - #define EEPROM_SIZE_2KB 0x0C00 1976 - #define EEPROM_SIZE_1KB 0x0800 1977 - #define EEPROM_SIZE_512B 0x0400 1978 - #define EEPROM_SIZE_128B 0x0000 1979 #define EEPROM_SIZE_MASK 0x1C00 1980 1981 /* EEPROM Word Offsets */ ··· 2087 #define IFS_MIN 40 2088 #define IFS_RATIO 4 2089 2090 /* PBA constants */ 2091 #define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ 2092 #define E1000_PBA_22K 0x0016 2093 #define E1000_PBA_24K 0x0018 ··· 2158 2159 /* Number of milliseconds we wait for auto-negotiation to complete */ 2160 #define LINK_UP_TIMEOUT 500 2161 2162 #define E1000_TX_BUFFER_SIZE ((uint32_t)1514) 2163 ··· 2266 #define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */ 2267 #define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO Register */ 2268 #define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */ 2269 #define IGP01E1000_PHY_PAGE_SELECT 0x1F /* PHY Page Select Core Register */ 2270 2271 /* IGP01E1000 AGC Registers - stores the cable length values*/ ··· 2275 #define IGP01E1000_PHY_AGC_C 0x1472 2276 #define IGP01E1000_PHY_AGC_D 0x1872 2277 2278 /* IGP01E1000 DSP Reset Register */ 2279 #define IGP01E1000_PHY_DSP_RESET 0x1F33 2280 #define IGP01E1000_PHY_DSP_SET 0x1F71 2281 #define IGP01E1000_PHY_DSP_FFE 0x1F35 2282 2283 #define IGP01E1000_PHY_CHANNEL_NUM 4 2284 #define IGP01E1000_PHY_AGC_PARAM_A 0x1171 2285 #define IGP01E1000_PHY_AGC_PARAM_B 0x1271 2286 #define IGP01E1000_PHY_AGC_PARAM_C 0x1471 ··· 2572 #define IGP01E1000_MSE_CHANNEL_B 0x0F00 2573 #define IGP01E1000_MSE_CHANNEL_A 0xF000 2574 2575 /* IGP01E1000 DSP reset macros */ 2576 #define DSP_RESET_ENABLE 0x0 2577 #define DSP_RESET_DISABLE 0x2 2578 #define E1000_MAX_DSP_RESETS 10 2579 2580 - /* IGP01E1000 AGC Registers */ 2581 2582 #define IGP01E1000_AGC_LENGTH_SHIFT 7 /* Coarse - 13:11, Fine - 10:7 */ 2583 2584 /* 7 bits (3 Coarse + 4 Fine) --> 128 optional values */ 2585 #define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128 2586 2587 - /* The precision of the length is +/- 10 meters */ 2588 #define IGP01E1000_AGC_RANGE 10 2589 2590 /* IGP01E1000 PCS Initialization register */ 2591 /* bits 3:6 in the PCS registers stores the channels polarity */ ··· 2635 #define M88E1000_12_PHY_ID M88E1000_E_PHY_ID 2636 #define M88E1000_14_PHY_ID M88E1000_E_PHY_ID 2637 #define M88E1011_I_REV_4 0x04 2638 2639 /* Miscellaneous PHY bit definitions. */ 2640 #define PHY_PREAMBLE 0xFFFFFFFF
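One small cleanup among the header changes is easy to miss: the shift constants E1000_RXD_SPC_PRI_SHIFT and E1000_RXD_SPC_CFI_SHIFT change from the misleading mask-style spellings 0x000D and 0x000C above to the plain bit positions 13 and 12 below, with identical values. A standalone sketch, not driver code, of how these masks and shifts decode the 16-bit special field of a legacy receive descriptor:

#include <stdint.h>
#include <stdio.h>

#define E1000_RXD_SPC_VLAN_MASK 0x0FFF	/* VLAN ID is in lower 12 bits */
#define E1000_RXD_SPC_PRI_MASK  0xE000	/* Priority is in upper 3 bits */
#define E1000_RXD_SPC_PRI_SHIFT 13
#define E1000_RXD_SPC_CFI_MASK  0x1000	/* CFI is bit 12 */
#define E1000_RXD_SPC_CFI_SHIFT 12

int main(void)
{
	/* Example value only: priority 5, CFI 0, VLAN ID 100. */
	uint16_t special = 0xA064;

	printf("vlan=%u cfi=%u pri=%u\n",
	       (unsigned)(special & E1000_RXD_SPC_VLAN_MASK),
	       (unsigned)((special & E1000_RXD_SPC_CFI_MASK) >> E1000_RXD_SPC_CFI_SHIFT),
	       (unsigned)((special & E1000_RXD_SPC_PRI_MASK) >> E1000_RXD_SPC_PRI_SHIFT));
	return 0;
}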
··· 1 /******************************************************************************* 2 3 4 + Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms of the GNU General Public License as published by the Free ··· 57 e1000_82541_rev_2, 58 e1000_82547, 59 e1000_82547_rev_2, 60 + e1000_82573, 61 e1000_num_macs 62 } e1000_mac_type; 63 ··· 64 e1000_eeprom_uninitialized = 0, 65 e1000_eeprom_spi, 66 e1000_eeprom_microwire, 67 + e1000_eeprom_flash, 68 e1000_num_eeprom_types 69 } e1000_eeprom_type; 70 ··· 96 e1000_bus_type_unknown = 0, 97 e1000_bus_type_pci, 98 e1000_bus_type_pcix, 99 + e1000_bus_type_pci_express, 100 e1000_bus_type_reserved 101 } e1000_bus_type; 102 ··· 107 e1000_bus_speed_100, 108 e1000_bus_speed_120, 109 e1000_bus_speed_133, 110 + e1000_bus_speed_2500, 111 e1000_bus_speed_reserved 112 } e1000_bus_speed; 113 ··· 115 e1000_bus_width_unknown = 0, 116 e1000_bus_width_32, 117 e1000_bus_width_64, 118 + e1000_bus_width_pciex_1, 119 + e1000_bus_width_pciex_4, 120 e1000_bus_width_reserved 121 } e1000_bus_width; 122 ··· 196 typedef enum { 197 e1000_phy_m88 = 0, 198 e1000_phy_igp, 199 + e1000_phy_igp_2, 200 e1000_phy_undefined = 0xFF 201 } e1000_phy_type; 202 ··· 242 uint16_t address_bits; 243 uint16_t delay_usec; 244 uint16_t page_size; 245 + boolean_t use_eerd; 246 + boolean_t use_eewr; 247 }; 248 + 249 + /* Flex ASF Information */ 250 + #define E1000_HOST_IF_MAX_SIZE 2048 251 + 252 + typedef enum { 253 + e1000_byte_align = 0, 254 + e1000_word_align = 1, 255 + e1000_dword_align = 2 256 + } e1000_align_type; 257 258 259 ··· 254 #define E1000_ERR_PARAM 4 255 #define E1000_ERR_MAC_TYPE 5 256 #define E1000_ERR_PHY_TYPE 6 257 + #define E1000_ERR_RESET 9 258 + #define E1000_ERR_MASTER_REQUESTS_PENDING 10 259 + #define E1000_ERR_HOST_INTERFACE_COMMAND 11 260 + #define E1000_BLK_PHY_RESET 12 261 262 /* Function prototypes */ 263 /* Initialization */ 264 int32_t e1000_reset_hw(struct e1000_hw *hw); 265 int32_t e1000_init_hw(struct e1000_hw *hw); 266 + int32_t e1000_id_led_init(struct e1000_hw * hw); 267 int32_t e1000_set_mac_type(struct e1000_hw *hw); 268 void e1000_set_media_type(struct e1000_hw *hw); 269 ··· 275 /* PHY */ 276 int32_t e1000_read_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *phy_data); 277 int32_t e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data); 278 + int32_t e1000_phy_hw_reset(struct e1000_hw *hw); 279 int32_t e1000_phy_reset(struct e1000_hw *hw); 280 int32_t e1000_detect_gig_phy(struct e1000_hw *hw); 281 int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); ··· 287 int32_t e1000_validate_mdi_setting(struct e1000_hw *hw); 288 289 /* EEPROM Functions */ 290 + int32_t e1000_init_eeprom_params(struct e1000_hw *hw); 291 + boolean_t e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw); 292 + int32_t e1000_read_eeprom_eerd(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data); 293 + int32_t e1000_write_eeprom_eewr(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data); 294 + int32_t e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd); 295 + 296 + /* MNG HOST IF functions */ 297 + uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw); 298 + 299 + #define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 300 + #define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */ 301 + 302 + #define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 /* Time in ms to process MNG command */ 303 + #define 
E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */ 304 + #define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */ 305 + #define E1000_MNG_IAMT_MODE 0x3 306 + #define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */ 307 + 308 + #define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */ 309 + #define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT 0x2 /* DHCP parsing enabled */ 310 + #define E1000_VFTA_ENTRY_SHIFT 0x5 311 + #define E1000_VFTA_ENTRY_MASK 0x7F 312 + #define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F 313 + 314 + struct e1000_host_mng_command_header { 315 + uint8_t command_id; 316 + uint8_t checksum; 317 + uint16_t reserved1; 318 + uint16_t reserved2; 319 + uint16_t command_length; 320 + }; 321 + 322 + struct e1000_host_mng_command_info { 323 + struct e1000_host_mng_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ 324 + uint8_t command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can length 0..0x658*/ 325 + }; 326 + #ifdef __BIG_ENDIAN 327 + struct e1000_host_mng_dhcp_cookie{ 328 + uint32_t signature; 329 + uint16_t vlan_id; 330 + uint8_t reserved0; 331 + uint8_t status; 332 + uint32_t reserved1; 333 + uint8_t checksum; 334 + uint8_t reserved3; 335 + uint16_t reserved2; 336 + }; 337 + #else 338 + struct e1000_host_mng_dhcp_cookie{ 339 + uint32_t signature; 340 + uint8_t status; 341 + uint8_t reserved0; 342 + uint16_t vlan_id; 343 + uint32_t reserved1; 344 + uint16_t reserved2; 345 + uint8_t reserved3; 346 + uint8_t checksum; 347 + }; 348 + #endif 349 + 350 + int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer, 351 + uint16_t length); 352 + boolean_t e1000_check_mng_mode(struct e1000_hw *hw); 353 + boolean_t e1000_enable_tx_pkt_filtering(struct e1000_hw *hw); 354 + int32_t e1000_mng_enable_host_if(struct e1000_hw *hw); 355 + int32_t e1000_mng_host_if_write(struct e1000_hw *hw, uint8_t *buffer, 356 + uint16_t length, uint16_t offset, uint8_t *sum); 357 + int32_t e1000_mng_write_cmd_header(struct e1000_hw* hw, 358 + struct e1000_host_mng_command_header* hdr); 359 + 360 + int32_t e1000_mng_write_commit(struct e1000_hw *hw); 361 + 362 int32_t e1000_read_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data); 363 int32_t e1000_validate_eeprom_checksum(struct e1000_hw *hw); 364 int32_t e1000_update_eeprom_checksum(struct e1000_hw *hw); 365 int32_t e1000_write_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data); 366 int32_t e1000_read_part_num(struct e1000_hw *hw, uint32_t * part_num); 367 int32_t e1000_read_mac_addr(struct e1000_hw * hw); 368 + int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask); 369 + void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask); 370 371 /* Filters (multicast, vlan, receive) */ 372 void e1000_init_rx_addrs(struct e1000_hw *hw); ··· 313 /* Adaptive IFS Functions */ 314 315 /* Everything else */ 316 void e1000_clear_hw_cntrs(struct e1000_hw *hw); 317 void e1000_reset_adaptive(struct e1000_hw *hw); 318 void e1000_update_adaptive(struct e1000_hw *hw); ··· 330 void e1000_write_reg_io(struct e1000_hw *hw, uint32_t offset, uint32_t value); 331 int32_t e1000_config_dsp_after_link_change(struct e1000_hw *hw, boolean_t link_up); 332 int32_t e1000_set_d3_lplu_state(struct e1000_hw *hw, boolean_t active); 333 + int32_t e1000_set_d0_lplu_state(struct e1000_hw *hw, boolean_t active); 334 + void e1000_set_pci_express_master_disable(struct e1000_hw *hw); 335 + void 
e1000_enable_pciex_master(struct e1000_hw *hw); 336 + int32_t e1000_disable_pciex_master(struct e1000_hw *hw); 337 + int32_t e1000_get_auto_rd_done(struct e1000_hw *hw); 338 + int32_t e1000_get_phy_cfg_done(struct e1000_hw *hw); 339 + int32_t e1000_get_software_semaphore(struct e1000_hw *hw); 340 + void e1000_release_software_semaphore(struct e1000_hw *hw); 341 + int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); 342 + int32_t e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw); 343 + void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw); 344 + int32_t e1000_commit_shadow_ram(struct e1000_hw *hw); 345 + uint8_t e1000_arc_subsystem_valid(struct e1000_hw *hw); 346 347 #define E1000_READ_REG_IO(a, reg) \ 348 e1000_read_reg_io((a), E1000_##reg) ··· 369 #define E1000_DEV_ID_82546GB_SERDES 0x107B 370 #define E1000_DEV_ID_82546GB_PCIE 0x108A 371 #define E1000_DEV_ID_82547EI 0x1019 372 + #define E1000_DEV_ID_82573E 0x108B 373 + #define E1000_DEV_ID_82573E_IAMT 0x108C 374 + 375 + #define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099 376 377 #define NODE_ADDRESS_SIZE 6 378 #define ETH_LENGTH_OF_ADDRESS 6 ··· 381 #define E1000_REVISION_0 0 382 #define E1000_REVISION_1 1 383 #define E1000_REVISION_2 2 384 + #define E1000_REVISION_3 3 385 386 #define SPEED_10 10 387 #define SPEED_100 100 ··· 437 E1000_IMS_RXSEQ | \ 438 E1000_IMS_LSC) 439 440 + 441 /* Number of high/low register pairs in the RAR. The RAR (Receive Address 442 * Registers) holds the directed and multicast addresses that we monitor. We 443 * reserve one of these spots for our directed address, allowing us room for ··· 457 uint16_t special; 458 }; 459 460 + /* Receive Descriptor - Extended */ 461 + union e1000_rx_desc_extended { 462 + struct { 463 + uint64_t buffer_addr; 464 + uint64_t reserved; 465 + } read; 466 + struct { 467 + struct { 468 + uint32_t mrq; /* Multiple Rx Queues */ 469 + union { 470 + uint32_t rss; /* RSS Hash */ 471 + struct { 472 + uint16_t ip_id; /* IP id */ 473 + uint16_t csum; /* Packet Checksum */ 474 + } csum_ip; 475 + } hi_dword; 476 + } lower; 477 + struct { 478 + uint32_t status_error; /* ext status/error */ 479 + uint16_t length; 480 + uint16_t vlan; /* VLAN tag */ 481 + } upper; 482 + } wb; /* writeback */ 483 + }; 484 + 485 + #define MAX_PS_BUFFERS 4 486 + /* Receive Descriptor - Packet Split */ 487 + union e1000_rx_desc_packet_split { 488 + struct { 489 + /* one buffer for protocol header(s), three data buffers */ 490 + uint64_t buffer_addr[MAX_PS_BUFFERS]; 491 + } read; 492 + struct { 493 + struct { 494 + uint32_t mrq; /* Multiple Rx Queues */ 495 + union { 496 + uint32_t rss; /* RSS Hash */ 497 + struct { 498 + uint16_t ip_id; /* IP id */ 499 + uint16_t csum; /* Packet Checksum */ 500 + } csum_ip; 501 + } hi_dword; 502 + } lower; 503 + struct { 504 + uint32_t status_error; /* ext status/error */ 505 + uint16_t length0; /* length of buffer 0 */ 506 + uint16_t vlan; /* VLAN tag */ 507 + } middle; 508 + struct { 509 + uint16_t header_status; 510 + uint16_t length[3]; /* length of buffers 1-3 */ 511 + } upper; 512 + uint64_t reserved; 513 + } wb; /* writeback */ 514 + }; 515 + 516 /* Receive Decriptor bit definitions */ 517 #define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ 518 #define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ 519 #define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ 520 #define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ 521 + #define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum caculated */ 522 #define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ 523 #define E1000_RXD_STAT_IPCS 0x40 
/* IP xsum calculated */ 524 #define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ 525 + #define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ 526 + #define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ 527 + #define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ 528 #define E1000_RXD_ERR_CE 0x01 /* CRC Error */ 529 #define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ 530 #define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ ··· 474 #define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ 475 #define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ 476 #define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ 477 + #define E1000_RXD_SPC_PRI_SHIFT 13 478 #define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */ 479 + #define E1000_RXD_SPC_CFI_SHIFT 12 480 + 481 + #define E1000_RXDEXT_STATERR_CE 0x01000000 482 + #define E1000_RXDEXT_STATERR_SE 0x02000000 483 + #define E1000_RXDEXT_STATERR_SEQ 0x04000000 484 + #define E1000_RXDEXT_STATERR_CXE 0x10000000 485 + #define E1000_RXDEXT_STATERR_TCPE 0x20000000 486 + #define E1000_RXDEXT_STATERR_IPE 0x40000000 487 + #define E1000_RXDEXT_STATERR_RXE 0x80000000 488 + 489 + #define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 490 + #define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF 491 492 /* mask to determine if packets should be dropped due to frame errors */ 493 #define E1000_RXD_ERR_FRAME_ERR_MASK ( \ ··· 485 E1000_RXD_ERR_SEQ | \ 486 E1000_RXD_ERR_CXE | \ 487 E1000_RXD_ERR_RXE) 488 + 489 + 490 + /* Same mask, but for extended and packet split descriptors */ 491 + #define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ 492 + E1000_RXDEXT_STATERR_CE | \ 493 + E1000_RXDEXT_STATERR_SE | \ 494 + E1000_RXDEXT_STATERR_SEQ | \ 495 + E1000_RXDEXT_STATERR_CXE | \ 496 + E1000_RXDEXT_STATERR_RXE) 497 498 /* Transmit Descriptor */ 499 struct e1000_tx_desc { ··· 667 #define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ 668 #define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ 669 #define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ 670 + #define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ 671 #define E1000_RCTL 0x00100 /* RX Control - RW */ 672 #define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ 673 #define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ ··· 676 #define E1000_TBT 0x00448 /* TX Burst Timer - RW */ 677 #define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ 678 #define E1000_LEDCTL 0x00E00 /* LED Control - RW */ 679 + #define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ 680 + #define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ 681 #define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ 682 + #define E1000_PBS 0x01008 /* Packet Buffer Size */ 683 + #define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ 684 + #define E1000_FLASH_UPDATES 1000 685 + #define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ 686 + #define E1000_FLASHT 0x01028 /* FLASH Timer Register */ 687 + #define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ 688 + #define E1000_FLSWCTL 0x01030 /* FLASH control register */ 689 + #define E1000_FLSWDATA 0x01034 /* FLASH data register */ 690 + #define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */ 691 + #define E1000_FLOP 0x0103C /* FLASH Opcode Register */ 692 + #define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ 693 #define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ 694 #define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ 695 + #define 
E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ 696 #define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */ 697 #define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */ 698 #define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */ ··· 688 #define E1000_RXDCTL 0x02828 /* RX Descriptor Control - RW */ 689 #define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */ 690 #define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */ 691 + #define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ 692 #define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */ 693 #define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ 694 #define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ ··· 703 #define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */ 704 #define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */ 705 #define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ 706 + #define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */ 707 + #define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */ 708 + #define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */ 709 + #define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */ 710 + #define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */ 711 + #define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */ 712 + #define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */ 713 + #define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */ 714 #define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ 715 #define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ 716 #define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ ··· 761 #define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ 762 #define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ 763 #define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ 764 + #define E1000_IAC 0x4100 /* Interrupt Assertion Count */ 765 + #define E1000_ICRXPTC 0x4104 /* Interrupt Cause Rx Packet Timer Expire Count */ 766 + #define E1000_ICRXATC 0x4108 /* Interrupt Cause Rx Absolute Timer Expire Count */ 767 + #define E1000_ICTXPTC 0x410C /* Interrupt Cause Tx Packet Timer Expire Count */ 768 + #define E1000_ICTXATC 0x4110 /* Interrupt Cause Tx Absolute Timer Expire Count */ 769 + #define E1000_ICTXQEC 0x4118 /* Interrupt Cause Tx Queue Empty Count */ 770 + #define E1000_ICTXQMTC 0x411C /* Interrupt Cause Tx Queue Minimum Threshold Count */ 771 + #define E1000_ICRXDMTC 0x4120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */ 772 + #define E1000_ICRXOC 0x4124 /* Interrupt Cause Receiver Overrun Count */ 773 #define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ 774 + #define E1000_RFCTL 0x05008 /* Receive Filter Control*/ 775 #define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ 776 #define E1000_RA 0x05400 /* Receive Address - RW Array */ 777 #define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ ··· 779 #define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ 780 #define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ 781 782 + #define E1000_GCR 0x05B00 /* PCI-Ex Control */ 783 + #define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ 784 + #define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ 785 + #define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ 786 + #define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ 787 + #define E1000_FACTPS 0x05B30 /* Function 
Active and Power State to MNG */ 788 + #define E1000_SWSM 0x05B50 /* SW Semaphore */ 789 + #define E1000_FWSM 0x05B54 /* FW Semaphore */ 790 + #define E1000_FFLT_DBG 0x05F04 /* Debug Register */ 791 + #define E1000_HICR 0x08F00 /* Host Interface Control */ 792 /* Register Set (82542) 793 * 794 * Some of the 82542 registers are located at different offsets than they are ··· 829 #define E1000_82542_VFTA 0x00600 830 #define E1000_82542_LEDCTL E1000_LEDCTL 831 #define E1000_82542_PBA E1000_PBA 832 + #define E1000_82542_PBS E1000_PBS 833 + #define E1000_82542_EEMNGCTL E1000_EEMNGCTL 834 + #define E1000_82542_EEARBC E1000_EEARBC 835 + #define E1000_82542_FLASHT E1000_FLASHT 836 + #define E1000_82542_EEWR E1000_EEWR 837 + #define E1000_82542_FLSWCTL E1000_FLSWCTL 838 + #define E1000_82542_FLSWDATA E1000_FLSWDATA 839 + #define E1000_82542_FLSWCNT E1000_FLSWCNT 840 + #define E1000_82542_FLOP E1000_FLOP 841 + #define E1000_82542_EXTCNF_CTRL E1000_EXTCNF_CTRL 842 + #define E1000_82542_EXTCNF_SIZE E1000_EXTCNF_SIZE 843 + #define E1000_82542_ERT E1000_ERT 844 #define E1000_82542_RXDCTL E1000_RXDCTL 845 #define E1000_82542_RADV E1000_RADV 846 #define E1000_82542_RSRPD E1000_RSRPD ··· 913 #define E1000_82542_FFMT E1000_FFMT 914 #define E1000_82542_FFVT E1000_FFVT 915 #define E1000_82542_HOST_IF E1000_HOST_IF 916 + #define E1000_82542_IAM E1000_IAM 917 + #define E1000_82542_EEMNGCTL E1000_EEMNGCTL 918 + #define E1000_82542_PSRCTL E1000_PSRCTL 919 + #define E1000_82542_RAID E1000_RAID 920 + #define E1000_82542_TARC0 E1000_TARC0 921 + #define E1000_82542_TDBAL1 E1000_TDBAL1 922 + #define E1000_82542_TDBAH1 E1000_TDBAH1 923 + #define E1000_82542_TDLEN1 E1000_TDLEN1 924 + #define E1000_82542_TDH1 E1000_TDH1 925 + #define E1000_82542_TDT1 E1000_TDT1 926 + #define E1000_82542_TXDCTL1 E1000_TXDCTL1 927 + #define E1000_82542_TARC1 E1000_TARC1 928 + #define E1000_82542_RFCTL E1000_RFCTL 929 + #define E1000_82542_GCR E1000_GCR 930 + #define E1000_82542_GSCL_1 E1000_GSCL_1 931 + #define E1000_82542_GSCL_2 E1000_GSCL_2 932 + #define E1000_82542_GSCL_3 E1000_GSCL_3 933 + #define E1000_82542_GSCL_4 E1000_GSCL_4 934 + #define E1000_82542_FACTPS E1000_FACTPS 935 + #define E1000_82542_SWSM E1000_SWSM 936 + #define E1000_82542_FWSM E1000_FWSM 937 + #define E1000_82542_FFLT_DBG E1000_FFLT_DBG 938 + #define E1000_82542_IAC E1000_IAC 939 + #define E1000_82542_ICRXPTC E1000_ICRXPTC 940 + #define E1000_82542_ICRXATC E1000_ICRXATC 941 + #define E1000_82542_ICTXPTC E1000_ICTXPTC 942 + #define E1000_82542_ICTXATC E1000_ICTXATC 943 + #define E1000_82542_ICTXQEC E1000_ICTXQEC 944 + #define E1000_82542_ICTXQMTC E1000_ICTXQMTC 945 + #define E1000_82542_ICRXDMTC E1000_ICRXDMTC 946 + #define E1000_82542_ICRXOC E1000_ICRXOC 947 + #define E1000_82542_HICR E1000_HICR 948 949 /* Statistics counters collected by the MAC */ 950 struct e1000_hw_stats { ··· 974 uint64_t bptc; 975 uint64_t tsctc; 976 uint64_t tsctfc; 977 + uint64_t iac; 978 + uint64_t icrxptc; 979 + uint64_t icrxatc; 980 + uint64_t ictxptc; 981 + uint64_t ictxatc; 982 + uint64_t ictxqec; 983 + uint64_t ictxqmtc; 984 + uint64_t icrxdmtc; 985 + uint64_t icrxoc; 986 }; 987 988 /* Structure containing variables used by the shared code (e1000_hw.c) */ 989 struct e1000_hw { 990 + uint8_t *hw_addr; 991 + uint8_t *flash_address; 992 e1000_mac_type mac_type; 993 e1000_phy_type phy_type; 994 uint32_t phy_init_script; ··· 993 e1000_ms_type original_master_slave; 994 e1000_ffe_config ffe_config_state; 995 uint32_t asf_firmware_present; 996 + uint32_t eeprom_semaphore_present; 997 unsigned 
long io_base; 998 uint32_t phy_id; 999 uint32_t phy_revision; ··· 1009 uint32_t ledctl_default; 1010 uint32_t ledctl_mode1; 1011 uint32_t ledctl_mode2; 1012 + boolean_t tx_pkt_filtering; 1013 + struct e1000_host_mng_dhcp_cookie mng_cookie; 1014 uint16_t phy_spd_default; 1015 uint16_t autoneg_advertised; 1016 uint16_t pci_cmd_word; ··· 1047 boolean_t adaptive_ifs; 1048 boolean_t ifs_params_forced; 1049 boolean_t in_ifs_mode; 1050 + boolean_t mng_reg_access_disabled; 1051 }; 1052 1053 1054 #define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */ 1055 #define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */ 1056 + #define E1000_EEPROM_RW_REG_DATA 16 /* Offset to data in EEPROM read/write registers */ 1057 + #define E1000_EEPROM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ 1058 + #define E1000_EEPROM_RW_REG_START 1 /* First bit for telling part to start operation */ 1059 + #define E1000_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ 1060 + #define E1000_EEPROM_POLL_WRITE 1 /* Flag for polling for write complete */ 1061 + #define E1000_EEPROM_POLL_READ 0 /* Flag for polling for read complete */ 1062 /* Register Bit Masks */ 1063 /* Device Control */ 1064 #define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ 1065 #define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */ 1066 #define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ 1067 + #define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ 1068 #define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ 1069 #define E1000_CTRL_TME 0x00000010 /* Test mode. 0=normal,1=test */ 1070 #define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */ ··· 1070 #define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */ 1071 #define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ 1072 #define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ 1073 + #define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */ 1074 #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ 1075 #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ 1076 #define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ ··· 1089 #define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ 1090 #define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ 1091 #define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ 1092 + #define E1000_STATUS_FUNC_SHIFT 2 1093 #define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */ 1094 #define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ 1095 #define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ ··· 1098 #define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ 1099 #define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ 1100 #define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ 1101 + #define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */ 1102 + #define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. 
*/ 1103 #define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */ 1104 #define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */ 1105 #define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ ··· 1128 #ifndef E1000_EEPROM_GRANT_ATTEMPTS 1129 #define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ 1130 #endif 1131 + #define E1000_EECD_AUTO_RD 0x00000200 /* EEPROM Auto Read done */ 1132 + #define E1000_EECD_SIZE_EX_MASK 0x00007800 /* EEprom Size */ 1133 + #define E1000_EECD_SIZE_EX_SHIFT 11 1134 + #define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */ 1135 + #define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */ 1136 + #define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */ 1137 + #define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ 1138 + #define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ 1139 + #define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */ 1140 + #define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ 1141 + #define E1000_STM_OPCODE 0xDB00 1142 + #define E1000_HICR_FW_RESET 0xC0 1143 1144 /* EEPROM Read */ 1145 #define E1000_EERD_START 0x00000001 /* Start Read */ ··· 1171 #define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 1172 #define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 1173 #define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 1174 + #define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ 1175 + #define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ 1176 1177 /* MDI Control */ 1178 #define E1000_MDIC_DATA_MASK 0x0000FFFF ··· 1187 /* LED Control */ 1188 #define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F 1189 #define E1000_LEDCTL_LED0_MODE_SHIFT 0 1190 + #define E1000_LEDCTL_LED0_BLINK_RATE 0x0000020 1191 #define E1000_LEDCTL_LED0_IVRT 0x00000040 1192 #define E1000_LEDCTL_LED0_BLINK 0x00000080 1193 #define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00 1194 #define E1000_LEDCTL_LED1_MODE_SHIFT 8 1195 + #define E1000_LEDCTL_LED1_BLINK_RATE 0x0002000 1196 #define E1000_LEDCTL_LED1_IVRT 0x00004000 1197 #define E1000_LEDCTL_LED1_BLINK 0x00008000 1198 #define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000 1199 #define E1000_LEDCTL_LED2_MODE_SHIFT 16 1200 + #define E1000_LEDCTL_LED2_BLINK_RATE 0x00200000 1201 #define E1000_LEDCTL_LED2_IVRT 0x00400000 1202 #define E1000_LEDCTL_LED2_BLINK 0x00800000 1203 #define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000 ··· 1238 #define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ 1239 #define E1000_ICR_TXD_LOW 0x00008000 1240 #define E1000_ICR_SRPD 0x00010000 1241 + #define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */ 1242 + #define E1000_ICR_MNG 0x00040000 /* Manageability event */ 1243 + #define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */ 1244 + #define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ 1245 1246 /* Interrupt Cause Set */ 1247 #define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ ··· 1255 #define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ 1256 #define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW 1257 #define E1000_ICS_SRPD E1000_ICR_SRPD 1258 + #define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */ 1259 + #define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */ 1260 + #define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */ 1261 1262 /* Interrupt Mask Set */ 1263 #define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ ··· 1272 #define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ 1273 #define E1000_IMS_TXD_LOW 
E1000_ICR_TXD_LOW 1274 #define E1000_IMS_SRPD E1000_ICR_SRPD 1275 + #define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */ 1276 + #define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */ 1277 + #define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */ 1278 1279 /* Interrupt Mask Clear */ 1280 #define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ ··· 1289 #define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ 1290 #define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW 1291 #define E1000_IMC_SRPD E1000_ICR_SRPD 1292 + #define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */ 1293 + #define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */ 1294 + #define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */ 1295 1296 /* Receive Control */ 1297 #define E1000_RCTL_RST 0x00000001 /* Software reset */ ··· 1301 #define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ 1302 #define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */ 1303 #define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ 1304 + #define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */ 1305 + #define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ 1306 #define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ 1307 #define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */ 1308 #define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */ ··· 1327 #define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ 1328 #define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ 1329 #define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ 1330 + #define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */ 1331 + #define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */ 1332 + 1333 + /* Use byte values for the following shift parameters 1334 + * Usage: 1335 + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & 1336 + * E1000_PSRCTL_BSIZE0_MASK) | 1337 + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & 1338 + * E1000_PSRCTL_BSIZE1_MASK) | 1339 + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & 1340 + * E1000_PSRCTL_BSIZE2_MASK) | 1341 + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) & 1342 + * E1000_PSRCTL_BSIZE3_MASK)) 1343 + * where value0 = [128..16256], default=256 1344 + * value1 = [1024..64512], default=4096 1345 + * value2 = [0..64512], default=4096 1346 + * value3 = [0..64512], default=0 1347 + * (a worked sketch of this packing follows at the end of this header diff) */ 1348 + 1349 + #define E1000_PSRCTL_BSIZE0_MASK 0x0000007F 1350 + #define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 1351 + #define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 1352 + #define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 1353 + 1354 + #define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ 1355 + #define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ 1356 + #define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ 1357 + #define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ 1358 1359 /* Receive Descriptor */ 1360 #define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */ ··· 1341 #define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ 1342 #define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ 1343 1344 + /* Header split receive */ 1345 + #define E1000_RFCTL_ISCSI_DIS 0x00000001 1346 + #define E1000_RFCTL_ISCSI_DWC_MASK 0x0000003E 1347 + #define E1000_RFCTL_ISCSI_DWC_SHIFT 1 1348 + #define E1000_RFCTL_NFSW_DIS 0x00000040 1349 + #define E1000_RFCTL_NFSR_DIS 0x00000080 1350 + #define E1000_RFCTL_NFS_VER_MASK 0x00000300 1351 + #define 
E1000_RFCTL_NFS_VER_SHIFT 8 1352 + #define E1000_RFCTL_IPV6_DIS 0x00000400 1353 + #define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800 1354 + #define E1000_RFCTL_ACK_DIS 0x00001000 1355 + #define E1000_RFCTL_ACKD_DIS 0x00002000 1356 + #define E1000_RFCTL_IPFRSP_DIS 0x00004000 1357 + #define E1000_RFCTL_EXTEN 0x00008000 1358 + #define E1000_RFCTL_IPV6_EX_DIS 0x00010000 1359 + #define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 1360 + 1361 /* Receive Descriptor Control */ 1362 #define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */ 1363 #define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */ ··· 1354 #define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ 1355 #define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */ 1356 #define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ 1357 + #define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc. 1358 + still to be processed. */ 1359 1360 /* Transmit Configuration Word */ 1361 #define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ ··· 1387 #define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */ 1388 #define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ 1389 #define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ 1390 + #define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ 1391 1392 /* Receive Checksum Control */ 1393 #define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */ 1394 #define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ 1395 #define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ 1396 #define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */ 1397 + #define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ 1398 + #define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ 1399 + 1400 1401 /* Definitions for power management and wakeup registers */ 1402 /* Wake Up Control */ ··· 1411 #define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ 1412 #define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ 1413 #define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ 1414 + #define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ 1415 #define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ 1416 #define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ 1417 #define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ ··· 1446 #define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ 1447 #define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery 1448 * Filtering */ 1449 + #define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */ 1450 #define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ 1451 #define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ 1452 #define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ 1453 + #define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ 1454 #define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address 1455 * filtering */ 1456 #define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host 1457 * memory */ 1458 + #define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address 1459 + * filtering */ 1460 + #define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */ 1461 + #define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */ 1462 #define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request 
*/ 1463 #define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */ 1464 #define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */ ··· 1463 #define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */ 1464 #define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */ 1465 1466 + /* SW Semaphore Register */ 1467 + #define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ 1468 + #define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ 1469 + #define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ 1470 + #define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ 1471 + 1472 + /* FW Semaphore Register */ 1473 + #define E1000_FWSM_MODE_MASK 0x0000000E /* FW mode */ 1474 + #define E1000_FWSM_MODE_SHIFT 1 1475 + #define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */ 1476 + 1477 + /* FFLT Debug Register */ 1478 + #define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */ 1479 + 1480 + typedef enum { 1481 + e1000_mng_mode_none = 0, 1482 + e1000_mng_mode_asf, 1483 + e1000_mng_mode_pt, 1484 + e1000_mng_mode_ipmi, 1485 + e1000_mng_mode_host_interface_only 1486 + } e1000_mng_mode; 1487 + 1488 + /* Host Interface Control Register */ 1489 + #define E1000_HICR_EN 0x00000001 /* Enable Bit - RO */ 1490 + #define E1000_HICR_C 0x00000002 /* Driver sets this bit when done 1491 + * to put command in RAM */ 1492 + #define E1000_HICR_SV 0x00000004 /* Status Validity */ 1493 + #define E1000_HICR_FWR 0x00000080 /* FW reset. Set by the Host */ 1494 + 1495 + /* Host Interface Command Interface - Address range 0x8800-0x8EFF */ 1496 + #define E1000_HI_MAX_DATA_LENGTH 252 /* Host Interface data length */ 1497 + #define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Number of bytes in range */ 1498 + #define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Number of dwords in range */ 1499 + #define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */ 1500 + 1501 + struct e1000_host_command_header { 1502 + uint8_t command_id; 1503 + uint8_t command_length; 1504 + uint8_t command_options; /* I/F bits for command, status for return */ 1505 + uint8_t checksum; 1506 + }; 1507 + struct e1000_host_command_info { 1508 + struct e1000_host_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ 1509 + uint8_t command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data length can be 0..252 */ 1510 + }; 1511 + 1512 + /* Host SMB register #0 */ 1513 + #define E1000_HSMC0R_CLKIN 0x00000001 /* SMB Clock in */ 1514 + #define E1000_HSMC0R_DATAIN 0x00000002 /* SMB Data in */ 1515 + #define E1000_HSMC0R_DATAOUT 0x00000004 /* SMB Data out */ 1516 + #define E1000_HSMC0R_CLKOUT 0x00000008 /* SMB Clock out */ 1517 + 1518 + /* Host SMB register #1 */ 1519 + #define E1000_HSMC1R_CLKIN E1000_HSMC0R_CLKIN 1520 + #define E1000_HSMC1R_DATAIN E1000_HSMC0R_DATAIN 1521 + #define E1000_HSMC1R_DATAOUT E1000_HSMC0R_DATAOUT 1522 + #define E1000_HSMC1R_CLKOUT E1000_HSMC0R_CLKOUT 1523 + 1524 + /* FW Status Register */ 1525 + #define E1000_FWSTS_FWS_MASK 0x000000FF /* FW Status */ 1526 + 1527 /* Wake Up Packet Length */ 1528 #define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */ 1529 1530 #define E1000_MDALIGN 4096 1531 + 1532 + #define E1000_GCR_BEM32 0x00400000 1533 + /* Function Active and Power State to MNG */ 1534 + #define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003 1535 + #define E1000_FACTPS_LAN0_VALID 0x00000004 1536 + #define E1000_FACTPS_FUNC0_AUX_EN 0x00000008 1537 + #define E1000_FACTPS_FUNC1_POWER_STATE_MASK 0x000000C0 1538 + #define 
E1000_FACTPS_FUNC1_POWER_STATE_SHIFT 6 1539 + #define E1000_FACTPS_LAN1_VALID 0x00000100 1540 + #define E1000_FACTPS_FUNC1_AUX_EN 0x00000200 1541 + #define E1000_FACTPS_FUNC2_POWER_STATE_MASK 0x00003000 1542 + #define E1000_FACTPS_FUNC2_POWER_STATE_SHIFT 12 1543 + #define E1000_FACTPS_IDE_ENABLE 0x00004000 1544 + #define E1000_FACTPS_FUNC2_AUX_EN 0x00008000 1545 + #define E1000_FACTPS_FUNC3_POWER_STATE_MASK 0x000C0000 1546 + #define E1000_FACTPS_FUNC3_POWER_STATE_SHIFT 18 1547 + #define E1000_FACTPS_SP_ENABLE 0x00100000 1548 + #define E1000_FACTPS_FUNC3_AUX_EN 0x00200000 1549 + #define E1000_FACTPS_FUNC4_POWER_STATE_MASK 0x03000000 1550 + #define E1000_FACTPS_FUNC4_POWER_STATE_SHIFT 24 1551 + #define E1000_FACTPS_IPMI_ENABLE 0x04000000 1552 + #define E1000_FACTPS_FUNC4_AUX_EN 0x08000000 1553 + #define E1000_FACTPS_MNGCG 0x20000000 1554 + #define E1000_FACTPS_LAN_FUNC_SEL 0x40000000 1555 + #define E1000_FACTPS_PM_STATE_CHANGED 0x80000000 1556 1557 /* EEPROM Commands - Microwire */ 1558 #define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */ ··· 1477 1478 /* EEPROM Commands - SPI */ 1479 #define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ 1480 + #define EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ 1481 + #define EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ 1482 + #define EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ 1483 + #define EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Enable latch */ 1484 + #define EEPROM_WRDI_OPCODE_SPI 0x04 /* EEPROM reset Write Enable latch */ 1485 + #define EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status register */ 1486 + #define EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status register */ 1487 + #define EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ 1488 + #define EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ 1489 + #define EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ 1490 1491 /* EEPROM Size definitions */ 1492 + #define EEPROM_WORD_SIZE_SHIFT 6 1493 + #define EEPROM_SIZE_SHIFT 10 1494 #define EEPROM_SIZE_MASK 0x1C00 1495 1496 /* EEPROM Word Offsets */ ··· 1606 #define IFS_MIN 40 1607 #define IFS_RATIO 4 1608 1609 + /* Extended Configuration Control and Size */ 1610 + #define E1000_EXTCNF_CTRL_PCIE_WRITE_ENABLE 0x00000001 1611 + #define E1000_EXTCNF_CTRL_PHY_WRITE_ENABLE 0x00000002 1612 + #define E1000_EXTCNF_CTRL_D_UD_ENABLE 0x00000004 1613 + #define E1000_EXTCNF_CTRL_D_UD_LATENCY 0x00000008 1614 + #define E1000_EXTCNF_CTRL_D_UD_OWNER 0x00000010 1615 + #define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 1616 + #define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040 1617 + #define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x1FFF0000 1618 + 1619 + #define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH 0x000000FF 1620 + #define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH 0x0000FF00 1621 + #define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000 1622 + 1623 /* PBA constants */ 1624 + #define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */ 1625 #define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ 1626 #define E1000_PBA_22K 0x0016 1627 #define E1000_PBA_24K 0x0018 ··· 1662 1663 /* Number of milliseconds we wait for auto-negotiation to complete */ 1664 #define LINK_UP_TIMEOUT 500 1665 + 1666 + /* Number of 100 microseconds we wait for PCI Express master disable */ 1667 + #define MASTER_DISABLE_TIMEOUT 800 1668 + /* Number of milliseconds we wait for Eeprom auto read bit done after MAC reset */ 1669 + #define AUTO_READ_DONE_TIMEOUT 10 1670 + /* Number of milliseconds we wait for PHY 
configuration done after MAC reset */ 1671 + #define PHY_CFG_TIMEOUT 40 1672 1673 #define E1000_TX_BUFFER_SIZE ((uint32_t)1514) 1674 ··· 1763 #define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */ 1764 #define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO Register */ 1765 #define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */ 1766 + #define IGP02E1000_PHY_POWER_MGMT 0x19 1767 #define IGP01E1000_PHY_PAGE_SELECT 0x1F /* PHY Page Select Core Register */ 1768 1769 /* IGP01E1000 AGC Registers - stores the cable length values*/ ··· 1771 #define IGP01E1000_PHY_AGC_C 0x1472 1772 #define IGP01E1000_PHY_AGC_D 0x1872 1773 1774 + /* IGP02E1000 AGC Registers for cable length values */ 1775 + #define IGP02E1000_PHY_AGC_A 0x11B1 1776 + #define IGP02E1000_PHY_AGC_B 0x12B1 1777 + #define IGP02E1000_PHY_AGC_C 0x14B1 1778 + #define IGP02E1000_PHY_AGC_D 0x18B1 1779 + 1780 /* IGP01E1000 DSP Reset Register */ 1781 #define IGP01E1000_PHY_DSP_RESET 0x1F33 1782 #define IGP01E1000_PHY_DSP_SET 0x1F71 1783 #define IGP01E1000_PHY_DSP_FFE 0x1F35 1784 1785 #define IGP01E1000_PHY_CHANNEL_NUM 4 1786 + #define IGP02E1000_PHY_CHANNEL_NUM 4 1787 + 1788 #define IGP01E1000_PHY_AGC_PARAM_A 0x1171 1789 #define IGP01E1000_PHY_AGC_PARAM_B 0x1271 1790 #define IGP01E1000_PHY_AGC_PARAM_C 0x1471 ··· 2060 #define IGP01E1000_MSE_CHANNEL_B 0x0F00 2061 #define IGP01E1000_MSE_CHANNEL_A 0xF000 2062 2063 + #define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ 2064 + #define IGP02E1000_PM_D3_LPLU 0x0004 /* Enable LPLU in non-D0a modes */ 2065 + #define IGP02E1000_PM_D0_LPLU 0x0002 /* Enable LPLU in D0a mode */ 2066 + 2067 /* IGP01E1000 DSP reset macros */ 2068 #define DSP_RESET_ENABLE 0x0 2069 #define DSP_RESET_DISABLE 0x2 2070 #define E1000_MAX_DSP_RESETS 10 2071 2072 + /* IGP01E1000 & IGP02E1000 AGC Registers */ 2073 2074 #define IGP01E1000_AGC_LENGTH_SHIFT 7 /* Coarse - 13:11, Fine - 10:7 */ 2075 + #define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */ 2076 + 2077 + /* IGP02E1000 AGC Register Length 9-bit mask */ 2078 + #define IGP02E1000_AGC_LENGTH_MASK 0x7F 2079 2080 /* 7 bits (3 Coarse + 4 Fine) --> 128 optional values */ 2081 #define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128 2082 + #define IGP02E1000_AGC_LENGTH_TABLE_SIZE 128 2083 2084 + /* The precision error of the cable length is +/- 10 meters */ 2085 #define IGP01E1000_AGC_RANGE 10 2086 + #define IGP02E1000_AGC_RANGE 10 2087 2088 /* IGP01E1000 PCS Initialization register */ 2089 /* bits 3:6 in the PCS registers stores the channels polarity */ ··· 2113 #define M88E1000_12_PHY_ID M88E1000_E_PHY_ID 2114 #define M88E1000_14_PHY_ID M88E1000_E_PHY_ID 2115 #define M88E1011_I_REV_4 0x04 2116 + #define M88E1111_I_PHY_ID 0x01410CC0 2117 + #define L1LXT971A_PHY_ID 0x001378E0 2118 2119 /* Miscellaneous PHY bit definitions. */ 2120 #define PHY_PREAMBLE 0xFFFFFFFF
+896 -261
drivers/net/e1000/e1000_main.c
··· 1 /******************************************************************************* 2 3 4 - Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms of the GNU General Public License as published by the Free ··· 29 #include "e1000.h" 30 31 /* Change Log 32 - * 5.3.12 6/7/04 33 - * - kcompat NETIF_MSG for older kernels (2.4.9) <sean.p.mcdermott@intel.com> 34 - * - if_mii support and associated kcompat for older kernels 35 - * - More errlogging support from Jon Mason <jonmason@us.ibm.com> 36 - * - Fix TSO issues on PPC64 machines -- Jon Mason <jonmason@us.ibm.com> 37 - * 38 - * 5.7.1 12/16/04 39 - * - Resurrect 82547EI/GI related fix in e1000_intr to avoid deadlocks. This 40 - * fix was removed as it caused system instability. The suspected cause of 41 - * this is the call to e1000_irq_disable in e1000_intr. Inlined the 42 - * required piece of e1000_irq_disable into e1000_intr - Anton Blanchard 43 - * 5.7.0 12/10/04 44 - * - include fix to the condition that determines when to quit NAPI - Robert Olsson 45 - * - use netif_poll_{disable/enable} to synchronize between NAPI and i/f up/down 46 - * 5.6.5 11/01/04 47 - * - Enabling NETIF_F_SG without checksum offload is illegal - 48 - John Mason <jdmason@us.ibm.com> 49 - * 5.6.3 10/26/04 50 - * - Remove redundant initialization - Jamal Hadi 51 - * - Reset buffer_info->dma in tx resource cleanup logic 52 - * 5.6.2 10/12/04 53 - * - Avoid filling tx_ring completely - shemminger@osdl.org 54 - * - Replace schedule_timeout() with msleep()/msleep_interruptible() - 55 - * nacc@us.ibm.com 56 - * - Sparse cleanup - shemminger@osdl.org 57 - * - Fix tx resource cleanup logic 58 - * - LLTX support - ak@suse.de and hadi@cyberus.ca 59 */ 60 61 char e1000_driver_name[] = "e1000"; ··· 41 #else 42 #define DRIVERNAPI "-NAPI" 43 #endif 44 - #define DRV_VERSION "5.7.6-k2"DRIVERNAPI 45 char e1000_driver_version[] = DRV_VERSION; 46 char e1000_copyright[] = "Copyright (c) 1999-2004 Intel Corporation."; 47 ··· 72 INTEL_E1000_ETHERNET_DEVICE(0x1017), 73 INTEL_E1000_ETHERNET_DEVICE(0x1018), 74 INTEL_E1000_ETHERNET_DEVICE(0x1019), 75 INTEL_E1000_ETHERNET_DEVICE(0x101D), 76 INTEL_E1000_ETHERNET_DEVICE(0x101E), 77 INTEL_E1000_ETHERNET_DEVICE(0x1026), ··· 87 INTEL_E1000_ETHERNET_DEVICE(0x107B), 88 INTEL_E1000_ETHERNET_DEVICE(0x107C), 89 INTEL_E1000_ETHERNET_DEVICE(0x108A), 90 /* required last entry */ 91 {0,} 92 }; ··· 135 static int e1000_clean(struct net_device *netdev, int *budget); 136 static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter, 137 int *work_done, int work_to_do); 138 #else 139 static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter); 140 #endif 141 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter); 142 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); 143 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, 144 int cmd); ··· 270 E1000_WRITE_FLUSH(&adapter->hw); 271 } 272 } 273 - 274 int 275 e1000_up(struct e1000_adapter *adapter) 276 { ··· 316 e1000_configure_tx(adapter); 317 e1000_setup_rctl(adapter); 318 e1000_configure_rx(adapter); 319 - e1000_alloc_rx_buffers(adapter); 320 321 if((err = request_irq(adapter->pdev->irq, &e1000_intr, 322 SA_SHIRQ | SA_SAMPLE_RANDOM, 323 - netdev->name, netdev))) 324 return err; 325 326 mod_timer(&adapter->watchdog_timer, jiffies); 327 - e1000_irq_enable(adapter); 328 329 #ifdef CONFIG_E1000_NAPI 330 netif_poll_enable(netdev); 331 
#endif 332 return 0; 333 } 334 ··· 353 354 e1000_irq_disable(adapter); 355 free_irq(adapter->pdev->irq, netdev); 356 del_timer_sync(&adapter->tx_fifo_stall_timer); 357 del_timer_sync(&adapter->watchdog_timer); 358 del_timer_sync(&adapter->phy_info_timer); ··· 375 e1000_clean_rx_ring(adapter); 376 377 /* If WoL is not enabled 378 * Power down the PHY so no link is implied when interface is down */ 379 - if(!adapter->wol && adapter->hw.media_type == e1000_media_type_copper) { 380 uint16_t mii_reg; 381 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); 382 mii_reg |= MII_CR_POWER_DOWN; 383 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg); 384 } 385 } 386 387 void 388 e1000_reset(struct e1000_adapter *adapter) 389 { 390 - uint32_t pba; 391 392 /* Repartition Pba for greater than 9k mtu 393 * To take effect CTRL.RST is required. 394 */ 395 396 - if(adapter->hw.mac_type < e1000_82547) { 397 - if(adapter->rx_buffer_len > E1000_RXBUFFER_8192) 398 - pba = E1000_PBA_40K; 399 - else 400 - pba = E1000_PBA_48K; 401 - } else { 402 - if(adapter->rx_buffer_len > E1000_RXBUFFER_8192) 403 - pba = E1000_PBA_22K; 404 - else 405 - pba = E1000_PBA_30K; 406 adapter->tx_fifo_head = 0; 407 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; 408 adapter->tx_fifo_size = 409 (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT; 410 atomic_set(&adapter->tx_fifo_stall, 0); 411 } 412 E1000_WRITE_REG(&adapter->hw, PBA, pba); 413 414 /* flow control settings */ 415 adapter->hw.fc_high_water = (pba << E1000_PBA_BYTES_SHIFT) - 416 - E1000_FC_HIGH_DIFF; 417 adapter->hw.fc_low_water = (pba << E1000_PBA_BYTES_SHIFT) - 418 - E1000_FC_LOW_DIFF; 419 adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME; 420 adapter->hw.fc_send_xon = 1; 421 adapter->hw.fc = adapter->hw.original_fc; 422 423 e1000_reset_hw(&adapter->hw); 424 if(adapter->hw.mac_type >= e1000_82544) 425 E1000_WRITE_REG(&adapter->hw, WUC, 0); 426 if(e1000_init_hw(&adapter->hw)) 427 DPRINTK(PROBE, ERR, "Hardware Error\n"); 428 - 429 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ 430 E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE); 431 432 e1000_reset_adaptive(&adapter->hw); 433 e1000_phy_get_info(&adapter->hw, &adapter->phy_info); 434 } 435 436 /** ··· 482 { 483 struct net_device *netdev; 484 struct e1000_adapter *adapter; 485 static int cards_found = 0; 486 - unsigned long mmio_start; 487 - int mmio_len; 488 - int pci_using_dac; 489 - int i; 490 - int err; 491 uint16_t eeprom_data; 492 uint16_t eeprom_apme_mask = E1000_EEPROM_APME; 493 - 494 if((err = pci_enable_device(pdev))) 495 return err; 496 ··· 575 if((err = e1000_sw_init(adapter))) 576 goto err_sw_init; 577 578 if(adapter->hw.mac_type >= e1000_82543) { 579 netdev->features = NETIF_F_SG | 580 NETIF_F_HW_CSUM | ··· 590 if((adapter->hw.mac_type >= e1000_82544) && 591 (adapter->hw.mac_type != e1000_82547)) 592 netdev->features |= NETIF_F_TSO; 593 #endif 594 if(pci_using_dac) 595 netdev->features |= NETIF_F_HIGHDMA; ··· 602 /* hard_start_xmit is safe against parallel locking */ 603 netdev->features |= NETIF_F_LLTX; 604 605 /* before reading the EEPROM, reset the controller to 606 * put the device in a known good starting state */ 607 ··· 619 620 /* copy the MAC address out of the EEPROM */ 621 622 - if (e1000_read_mac_addr(&adapter->hw)) 623 DPRINTK(PROBE, ERR, "EEPROM Read Error\n"); 624 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); 625 ··· 693 /* reset the hardware with the new settings */ 694 e1000_reset(adapter); 695 696 strcpy(netdev->name, "eth%d"); 697 if((err = 
register_netdev(netdev))) 698 goto err_register; ··· 739 { 740 struct net_device *netdev = pci_get_drvdata(pdev); 741 struct e1000_adapter *adapter = netdev->priv; 742 - uint32_t manc; 743 744 flush_scheduled_work(); 745 ··· 752 } 753 } 754 755 unregister_netdev(netdev); 756 757 - e1000_phy_hw_reset(&adapter->hw); 758 759 iounmap(adapter->hw.hw_addr); 760 pci_release_regions(pdev); ··· 804 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); 805 806 adapter->rx_buffer_len = E1000_RXBUFFER_2048; 807 hw->max_frame_size = netdev->mtu + 808 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 809 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE; ··· 818 819 /* initialize eeprom parameters */ 820 821 - e1000_init_eeprom_params(hw); 822 823 switch(hw->mac_type) { 824 default: ··· 886 887 if((err = e1000_up(adapter))) 888 goto err_up; 889 890 return E1000_SUCCESS; 891 ··· 926 e1000_free_tx_resources(adapter); 927 e1000_free_rx_resources(adapter); 928 929 return 0; 930 } 931 932 /** 933 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary 934 * @adapter: address of board private structure 935 - * @begin: address of beginning of memory 936 - * @end: address of end of memory 937 **/ 938 static inline boolean_t 939 e1000_check_64k_bound(struct e1000_adapter *adapter, ··· 946 unsigned long begin = (unsigned long) start; 947 unsigned long end = begin + len; 948 949 - /* first rev 82545 and 82546 must not allow any memory 950 - * write location to cross a 64k boundary due to errata 23 */ 951 if (adapter->hw.mac_type == e1000_82545 || 952 - adapter->hw.mac_type == e1000_82546 ) { 953 - 954 - /* check buffer doesn't cross 64kB; a standalone sketch of this test appears further below */ 955 return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE; 956 } 957 ··· 973 size = sizeof(struct e1000_buffer) * txdr->count; 974 txdr->buffer_info = vmalloc(size); 975 if(!txdr->buffer_info) { 976 - DPRINTK(PROBE, ERR, 977 - "Unable to Allocate Memory for the Transmit descriptor ring\n"); 978 return -ENOMEM; 979 } 980 memset(txdr->buffer_info, 0, size); ··· 987 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); 988 if(!txdr->desc) { 989 setup_tx_desc_die: 990 - DPRINTK(PROBE, ERR, 991 - "Unable to Allocate Memory for the Transmit descriptor ring\n"); 992 vfree(txdr->buffer_info); 993 return -ENOMEM; 994 } 995 996 - /* fix for errata 23, can't cross 64kB boundary */ 997 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { 998 void *olddesc = txdr->desc; 999 dma_addr_t olddma = txdr->dma; 1000 - DPRINTK(TX_ERR,ERR,"txdr align check failed: %u bytes at %p\n", 1001 - txdr->size, txdr->desc); 1002 - /* try again, without freeing the previous */ 1003 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); 1004 - /* failed allocation, critical failure */ 1005 if(!txdr->desc) { 1006 pci_free_consistent(pdev, txdr->size, olddesc, olddma); 1007 goto setup_tx_desc_die; 1008 } 1009 1010 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { 1011 /* give up */ 1012 - pci_free_consistent(pdev, txdr->size, 1013 - txdr->desc, txdr->dma); 1014 pci_free_consistent(pdev, txdr->size, olddesc, olddma); 1015 DPRINTK(PROBE, ERR, 1016 - "Unable to Allocate aligned Memory for the Transmit" 1017 - " descriptor ring\n"); 1018 vfree(txdr->buffer_info); 1019 return -ENOMEM; 1020 } else { 1021 - /* free old, move on with the new one since it's okay */ 1022 pci_free_consistent(pdev, txdr->size, olddesc, olddma); 1023 } 1024 } ··· 1120 { 1121 struct e1000_desc_ring *rxdr = &adapter->rx_ring; 1122 struct pci_dev *pdev = adapter->pdev; 1123 - int size; 1124 
1125 size = sizeof(struct e1000_buffer) * rxdr->count; 1126 rxdr->buffer_info = vmalloc(size); 1127 if(!rxdr->buffer_info) { 1128 - DPRINTK(PROBE, ERR, 1129 - "Unable to Allocate Memory for the Receive descriptor ring\n"); 1130 return -ENOMEM; 1131 } 1132 memset(rxdr->buffer_info, 0, size); 1133 1134 /* Round up to nearest 4K */ 1135 1136 - rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); 1137 E1000_ROUNDUP(rxdr->size, 4096); 1138 1139 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); 1140 1141 if(!rxdr->desc) { 1142 setup_rx_desc_die: 1143 - DPRINTK(PROBE, ERR, 1144 - "Unable to Allocate Memory for the Receive descriptor ring\n"); 1145 vfree(rxdr->buffer_info); 1146 return -ENOMEM; 1147 } 1148 1149 - /* fix for errata 23, can't cross 64kB boundary */ 1150 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { 1151 void *olddesc = rxdr->desc; 1152 dma_addr_t olddma = rxdr->dma; 1153 - DPRINTK(RX_ERR,ERR, 1154 - "rxdr align check failed: %u bytes at %p\n", 1155 - rxdr->size, rxdr->desc); 1156 - /* try again, without freeing the previous */ 1157 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); 1158 - /* failed allocation, critical failure */ 1159 if(!rxdr->desc) { 1160 pci_free_consistent(pdev, rxdr->size, olddesc, olddma); 1161 goto setup_rx_desc_die; 1162 } 1163 1164 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { 1165 /* give up */ 1166 - pci_free_consistent(pdev, rxdr->size, 1167 - rxdr->desc, rxdr->dma); 1168 pci_free_consistent(pdev, rxdr->size, olddesc, olddma); 1169 - DPRINTK(PROBE, ERR, 1170 - "Unable to Allocate aligned Memory for the" 1171 - " Receive descriptor ring\n"); 1172 vfree(rxdr->buffer_info); 1173 return -ENOMEM; 1174 } else { 1175 - /* free old, move on with the new one since it's okay */ 1176 pci_free_consistent(pdev, rxdr->size, olddesc, olddma); 1177 } 1178 } ··· 1214 } 1215 1216 /** 1217 - * e1000_setup_rctl - configure the receive control register 1218 * @adapter: Board private structure 1219 **/ 1220 1221 static void 1222 e1000_setup_rctl(struct e1000_adapter *adapter) 1223 { 1224 - uint32_t rctl; 1225 1226 rctl = E1000_READ_REG(&adapter->hw, RCTL); 1227 ··· 1237 else 1238 rctl &= ~E1000_RCTL_SBP; 1239 1240 /* Setup buffer sizes */ 1241 - rctl &= ~(E1000_RCTL_SZ_4096); 1242 - rctl |= (E1000_RCTL_BSEX | E1000_RCTL_LPE); 1243 - switch (adapter->rx_buffer_len) { 1244 - case E1000_RXBUFFER_2048: 1245 - default: 1246 - rctl |= E1000_RCTL_SZ_2048; 1247 - rctl &= ~(E1000_RCTL_BSEX | E1000_RCTL_LPE); 1248 - break; 1249 - case E1000_RXBUFFER_4096: 1250 - rctl |= E1000_RCTL_SZ_4096; 1251 - break; 1252 - case E1000_RXBUFFER_8192: 1253 - rctl |= E1000_RCTL_SZ_8192; 1254 - break; 1255 - case E1000_RXBUFFER_16384: 1256 - rctl |= E1000_RCTL_SZ_16384; 1257 - break; 1258 } 1259 1260 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); ··· 1316 e1000_configure_rx(struct e1000_adapter *adapter) 1317 { 1318 uint64_t rdba = adapter->rx_ring.dma; 1319 - uint32_t rdlen = adapter->rx_ring.count * sizeof(struct e1000_rx_desc); 1320 - uint32_t rctl; 1321 - uint32_t rxcsum; 1322 1323 /* disable receives while setting up the descriptors */ 1324 rctl = E1000_READ_REG(&adapter->hw, RCTL); ··· 1354 E1000_WRITE_REG(&adapter->hw, RDT, 0); 1355 1356 /* Enable 82543 Receive Checksum Offload for TCP and UDP */ 1357 - if((adapter->hw.mac_type >= e1000_82543) && 1358 - (adapter->rx_csum == TRUE)) { 1359 rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM); 1360 - rxcsum |= E1000_RXCSUM_TUOFL; 1361 E1000_WRITE_REG(&adapter->hw, RXCSUM, rxcsum); 1362 } 1363 1364 
/* Enable Receives */ 1365 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); ··· 1406 e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, 1407 struct e1000_buffer *buffer_info) 1408 { 1409 - struct pci_dev *pdev = adapter->pdev; 1410 - 1411 if(buffer_info->dma) { 1412 - pci_unmap_page(pdev, 1413 - buffer_info->dma, 1414 - buffer_info->length, 1415 - PCI_DMA_TODEVICE); 1416 buffer_info->dma = 0; 1417 } 1418 if(buffer_info->skb) { ··· 1435 /* Free all the Tx ring sk_buffs */ 1436 1437 if (likely(adapter->previous_buffer_info.skb != NULL)) { 1438 - e1000_unmap_and_free_tx_resource(adapter, 1439 &adapter->previous_buffer_info); 1440 } 1441 ··· 1475 1476 vfree(rx_ring->buffer_info); 1477 rx_ring->buffer_info = NULL; 1478 1479 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); 1480 ··· 1495 { 1496 struct e1000_desc_ring *rx_ring = &adapter->rx_ring; 1497 struct e1000_buffer *buffer_info; 1498 struct pci_dev *pdev = adapter->pdev; 1499 unsigned long size; 1500 - unsigned int i; 1501 1502 /* Free all the Rx ring sk_buffs */ 1503 1504 for(i = 0; i < rx_ring->count; i++) { 1505 buffer_info = &rx_ring->buffer_info[i]; 1506 if(buffer_info->skb) { 1507 - 1508 pci_unmap_single(pdev, 1509 buffer_info->dma, 1510 buffer_info->length, ··· 1515 1516 dev_kfree_skb(buffer_info->skb); 1517 buffer_info->skb = NULL; 1518 } 1519 } 1520 1521 size = sizeof(struct e1000_buffer) * rx_ring->count; 1522 memset(rx_ring->buffer_info, 0, size); 1523 1524 /* Zero out the descriptor ring */ 1525 ··· 1637 struct e1000_adapter *adapter = netdev->priv; 1638 struct e1000_hw *hw = &adapter->hw; 1639 struct dev_mc_list *mc_ptr; 1640 uint32_t rctl; 1641 uint32_t hash_value; 1642 int i; 1643 - unsigned long flags; 1644 - 1645 - /* Check for Promiscuous and All Multicast modes */ 1646 1647 spin_lock_irqsave(&adapter->tx_lock, flags); 1648 1649 rctl = E1000_READ_REG(hw, RCTL); 1650 ··· 1771 uint32_t link; 1772 1773 e1000_check_for_link(&adapter->hw); 1774 1775 if((adapter->hw.media_type == e1000_media_type_internal_serdes) && 1776 !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE)) ··· 1852 /* Cause software interrupt to ensure rx ring is cleaned */ 1853 E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0); 1854 1855 - /* Force detection of hung controller every watchdog period*/ 1856 adapter->detect_tx_hung = TRUE; 1857 1858 /* Reset the timer */ ··· 1862 #define E1000_TX_FLAGS_CSUM 0x00000001 1863 #define E1000_TX_FLAGS_VLAN 0x00000002 1864 #define E1000_TX_FLAGS_TSO 0x00000004 1865 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 1866 #define E1000_TX_FLAGS_VLAN_SHIFT 16 1867 ··· 1873 struct e1000_context_desc *context_desc; 1874 unsigned int i; 1875 uint32_t cmd_length = 0; 1876 - uint16_t ipcse, tucse, mss; 1877 uint8_t ipcss, ipcso, tucss, tucso, hdr_len; 1878 int err; 1879 ··· 1886 1887 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); 1888 mss = skb_shinfo(skb)->tso_size; 1889 - skb->nh.iph->tot_len = 0; 1890 - skb->nh.iph->check = 0; 1891 - skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr, 1892 - skb->nh.iph->daddr, 1893 - 0, 1894 - IPPROTO_TCP, 1895 - 0); 1896 ipcss = skb->nh.raw - skb->data; 1897 ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data; 1898 - ipcse = skb->h.raw - skb->data - 1; 1899 tucss = skb->h.raw - skb->data; 1900 tucso = (void *)&(skb->h.th->check) - (void *)skb->data; 1901 tucse = 0; 1902 1903 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | 1904 - E1000_TXD_CMD_IP | E1000_TXD_CMD_TCP | 1905 - (skb->len - (hdr_len))); 1906 1907 i = 
adapter->tx_ring.next_to_use; 1908 context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i); ··· 1995 if(unlikely(mss && !nr_frags && size == len && size > 8)) 1996 size -= 4; 1997 #endif 1998 /* Workaround for potential 82544 hang in PCI-X. Avoid 1999 * terminating buffers within evenly-aligned dwords. */ 2000 if(unlikely(adapter->pcix_82544 && ··· 2084 if(likely(tx_flags & E1000_TX_FLAGS_TSO)) { 2085 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | 2086 E1000_TXD_CMD_TSE; 2087 - txd_upper |= (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8; 2088 } 2089 2090 if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) { ··· 2162 return 0; 2163 } 2164 2165 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) 2166 static int 2167 e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) ··· 2233 2234 #ifdef NETIF_F_TSO 2235 mss = skb_shinfo(skb)->tso_size; 2236 - /* The controller does a simple calculation to 2237 * make sure there is enough room in the FIFO before 2238 * initiating the DMA for each buffer. The calc is: 2239 * 4 = ceil(buffer len/mss). To make sure we don't ··· 2246 2247 if((mss) || (skb->ip_summed == CHECKSUM_HW)) 2248 count++; 2249 - count++; /* for sentinel desc */ 2250 #else 2251 if(skb->ip_summed == CHECKSUM_HW) 2252 count++; ··· 2254 count += TXD_USE_COUNT(len, max_txd_pwr); 2255 2256 if(adapter->pcix_82544) 2257 count++; 2258 2259 nr_frags = skb_shinfo(skb)->nr_frags; ··· 2276 local_irq_restore(flags); 2277 return NETDEV_TX_LOCKED; 2278 } 2279 2280 /* need: count + 2 desc gap to keep tail from touching 2281 * head, otherwise try next time */ ··· 2314 tx_flags |= E1000_TX_FLAGS_TSO; 2315 else if(likely(e1000_tx_csum(adapter, skb))) 2316 tx_flags |= E1000_TX_FLAGS_CSUM; 2317 2318 e1000_tx_queue(adapter, 2319 e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss), ··· 2387 e1000_change_mtu(struct net_device *netdev, int new_mtu) 2388 { 2389 struct e1000_adapter *adapter = netdev->priv; 2390 - int old_mtu = adapter->rx_buffer_len; 2391 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 2392 2393 if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || ··· 2395 return -EINVAL; 2396 } 2397 2398 - if(max_frame <= MAXIMUM_ETHERNET_FRAME_SIZE) { 2399 - adapter->rx_buffer_len = E1000_RXBUFFER_2048; 2400 - 2401 - } else if(adapter->hw.mac_type < e1000_82543) { 2402 - DPRINTK(PROBE, ERR, "Jumbo Frames not supported on 82542\n"); 2403 return -EINVAL; 2404 - 2405 - } else if(max_frame <= E1000_RXBUFFER_4096) { 2406 - adapter->rx_buffer_len = E1000_RXBUFFER_4096; 2407 - 2408 - } else if(max_frame <= E1000_RXBUFFER_8192) { 2409 - adapter->rx_buffer_len = E1000_RXBUFFER_8192; 2410 - 2411 - } else { 2412 - adapter->rx_buffer_len = E1000_RXBUFFER_16384; 2413 } 2414 2415 - if(old_mtu != adapter->rx_buffer_len && netif_running(netdev)) { 2416 e1000_down(adapter); 2417 e1000_up(adapter); 2418 } 2419 2420 - netdev->mtu = new_mtu; 2421 adapter->hw.max_frame_size = max_frame; 2422 2423 return 0; ··· 2524 adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC); 2525 adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC); 2526 } 2527 2528 /* Fill out the OS statistics structure */ 2529 ··· 2549 2550 adapter->net_stats.rx_errors = adapter->stats.rxerrc + 2551 adapter->stats.crcerrs + adapter->stats.algnerrc + 2552 - adapter->stats.rlec + adapter->stats.rnbc + 2553 - adapter->stats.mpc + adapter->stats.cexterr; 2554 - adapter->net_stats.rx_dropped = adapter->stats.rnbc; 2555 adapter->net_stats.rx_length_errors = adapter->stats.rlec; 2556 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; 2557 
adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; ··· 2636 */ 2637 if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2){ 2638 atomic_inc(&adapter->irq_sem); 2639 - E1000_WRITE_REG(&adapter->hw, IMC, ~0); 2640 } 2641 2642 for(i = 0; i < E1000_MAX_INTR; i++) 2643 - if(unlikely(!e1000_clean_rx_irq(adapter) & 2644 !e1000_clean_tx_irq(adapter))) 2645 break; 2646 ··· 2664 int work_to_do = min(*budget, netdev->quota); 2665 int tx_cleaned; 2666 int work_done = 0; 2667 - 2668 tx_cleaned = e1000_clean_tx_irq(adapter); 2669 - e1000_clean_rx_irq(adapter, &work_done, work_to_do); 2670 2671 *budget -= work_done; 2672 netdev->quota -= work_done; 2673 2674 - /* if no Tx and not enough Rx work done, exit the polling mode */ 2675 - if((!tx_cleaned && (work_done < work_to_do)) || 2676 - !netif_running(netdev)) { 2677 netif_rx_complete(netdev); 2678 e1000_irq_enable(adapter); 2679 return 0; ··· 2702 eop_desc = E1000_TX_DESC(*tx_ring, eop); 2703 2704 while(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) { 2705 - /* premature writeback of Tx descriptors */ 2706 - /* clear (free buffers and unmap pci_mapping) */ 2707 - /* previous_buffer_info */ 2708 if (likely(adapter->previous_buffer_info.skb != NULL)) { 2709 - e1000_unmap_and_free_tx_resource(adapter, 2710 &adapter->previous_buffer_info); 2711 } 2712 ··· 2714 buffer_info = &tx_ring->buffer_info[i]; 2715 cleaned = (i == eop); 2716 2717 - /* premature writeback of Tx descriptors */ 2718 - /* save the cleaning of this one for the */ 2719 - /* next iteration */ 2720 - if (cleaned) { 2721 - memcpy(&adapter->previous_buffer_info, 2722 - buffer_info, 2723 - sizeof(struct e1000_buffer)); 2724 - memset(buffer_info, 2725 - 0, 2726 - sizeof(struct e1000_buffer)); 2727 } else { 2728 - e1000_unmap_and_free_tx_resource(adapter, 2729 - buffer_info); 2730 } 2731 2732 tx_desc->buffer_addr = 0; 2733 tx_desc->lower.data = 0; 2734 tx_desc->upper.data = 0; 2735 2736 - cleaned = (i == eop); 2737 if(unlikely(++i == tx_ring->count)) i = 0; 2738 } 2739 ··· 2754 netif_wake_queue(netdev); 2755 2756 spin_unlock(&adapter->tx_lock); 2757 - 2758 if(adapter->detect_tx_hung) { 2759 - /* detect a transmit hang in hardware; this serializes the 2760 * check with the clearing of time_stamp and movement of i */ 2761 adapter->detect_tx_hung = FALSE; 2762 - if(tx_ring->buffer_info[i].dma && 2763 - time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ) && 2764 - !(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF)) 2765 - netif_stop_queue(netdev); 2766 - } 2767 2768 return cleaned; 2769 } 2770 2771 /** 2772 * e1000_rx_checksum - Receive Checksum Offload for 82543 2773 - * @adapter: board private structure 2774 - * @rx_desc: receive descriptor 2775 - * @sk_buff: socket buffer with received data 2776 **/ 2777 2778 static inline void 2779 e1000_rx_checksum(struct e1000_adapter *adapter, 2780 - struct e1000_rx_desc *rx_desc, 2781 - struct sk_buff *skb) 2782 { 2783 /* 82543 or newer only */ 2784 - if(unlikely((adapter->hw.mac_type < e1000_82543) || 2785 /* Ignore Checksum bit is set */ 2786 - (rx_desc->status & E1000_RXD_STAT_IXSM) || 2787 - /* TCP Checksum has not been calculated */ 2788 - (!(rx_desc->status & E1000_RXD_STAT_TCPCS)))) { 2789 - skb->ip_summed = CHECKSUM_NONE; 2790 return; 2791 } 2792 - 2793 - /* At this point we know the hardware did the TCP checksum */ 2794 - /* now look at the TCP checksum error bit */ 2795 - if(rx_desc->errors & E1000_RXD_ERR_TCPE) { 2796 - /* let the stack verify checksum errors */ 2797 - skb->ip_summed = 
CHECKSUM_NONE; 2798 - adapter->hw_csum_err++; 2799 } else { 2800 /* TCP checksum is good */ 2801 skb->ip_summed = CHECKSUM_UNNECESSARY; 2802 - adapter->hw_csum_good++; 2803 } 2804 } 2805 2806 /** 2807 - * e1000_clean_rx_irq - Send received data up the network stack 2808 * @adapter: board private structure 2809 **/ 2810 ··· 2901 if(unlikely(!(rx_desc->status & E1000_RXD_STAT_EOP))) { 2902 /* All receives must fit into a single buffer */ 2903 E1000_DBG("%s: Receive packet consumed multiple" 2904 - " buffers\n", netdev->name); 2905 dev_kfree_skb_irq(skb); 2906 goto next_desc; 2907 } ··· 2927 skb_put(skb, length - ETHERNET_FCS_SIZE); 2928 2929 /* Receive Checksum Offload */ 2930 - e1000_rx_checksum(adapter, rx_desc, skb); 2931 - 2932 skb->protocol = eth_type_trans(skb, netdev); 2933 #ifdef CONFIG_E1000_NAPI 2934 if(unlikely(adapter->vlgrp && 2935 (rx_desc->status & E1000_RXD_STAT_VP))) { 2936 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 2937 - le16_to_cpu(rx_desc->special) & 2938 - E1000_RXD_SPC_VLAN_MASK); 2939 } else { 2940 netif_receive_skb(skb); 2941 } ··· 2960 2961 rx_desc = E1000_RX_DESC(*rx_ring, i); 2962 } 2963 - 2964 rx_ring->next_to_clean = i; 2965 - 2966 - e1000_alloc_rx_buffers(adapter); 2967 2968 return cleaned; 2969 } 2970 2971 /** 2972 * e1000_alloc_rx_buffers - Replace used receive buffers 2973 * @adapter: address of board private structure 2974 **/ 2975 ··· 3108 struct e1000_rx_desc *rx_desc; 3109 struct e1000_buffer *buffer_info; 3110 struct sk_buff *skb; 3111 - unsigned int i, bufsz; 3112 3113 i = rx_ring->next_to_use; 3114 buffer_info = &rx_ring->buffer_info[i]; 3115 3116 while(!buffer_info->skb) { 3117 - bufsz = adapter->rx_buffer_len + NET_IP_ALIGN; 3118 - 3119 skb = dev_alloc_skb(bufsz); 3120 if(unlikely(!skb)) { 3121 /* Better luck next round */ 3122 break; 3123 } 3124 3125 - /* fix for errata 23, can't cross 64kB boundary */ 3126 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { 3127 struct sk_buff *oldskb = skb; 3128 - DPRINTK(RX_ERR,ERR, 3129 - "skb align check failed: %u bytes at %p\n", 3130 - bufsz, skb->data); 3131 - /* try again, without freeing the previous */ 3132 skb = dev_alloc_skb(bufsz); 3133 if (!skb) { 3134 dev_kfree_skb(oldskb); 3135 break; 3136 } 3137 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { 3138 /* give up */ 3139 dev_kfree_skb(skb); 3140 dev_kfree_skb(oldskb); 3141 break; /* while !buffer_info->skb */ 3142 } else { 3143 - /* move on with the new one */ 3144 dev_kfree_skb(oldskb); 3145 } 3146 } 3147 - 3148 /* Make buffer alignment 2 beyond a 16 byte boundary 3149 * this will result in a 16 byte aligned IP header after 3150 * the 14 byte MAC header is removed ··· 3160 adapter->rx_buffer_len, 3161 PCI_DMA_FROMDEVICE); 3162 3163 - /* fix for errata 23, can't cross 64kB boundary */ 3164 - if(!e1000_check_64k_bound(adapter, 3165 - (void *)(unsigned long)buffer_info->dma, 3166 - adapter->rx_buffer_len)) { 3167 - DPRINTK(RX_ERR,ERR, 3168 - "dma align check failed: %u bytes at %ld\n", 3169 - adapter->rx_buffer_len, (unsigned long)buffer_info->dma); 3170 - 3171 dev_kfree_skb(skb); 3172 buffer_info->skb = NULL; 3173 3174 - pci_unmap_single(pdev, 3175 - buffer_info->dma, 3176 adapter->rx_buffer_len, 3177 PCI_DMA_FROMDEVICE); 3178 3179 break; /* while !buffer_info->skb */ 3180 } 3181 - 3182 rx_desc = E1000_RX_DESC(*rx_ring, i); 3183 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 3184 ··· 3186 * applicable for weak-ordered memory model archs, 3187 * such as IA-64). 
*/ 3188 wmb(); 3189 - 3190 E1000_WRITE_REG(&adapter->hw, RDT, i); 3191 } 3192 ··· 3193 buffer_info = &rx_ring->buffer_info[i]; 3194 } 3195 3196 rx_ring->next_to_use = i; 3197 } 3198 ··· 3458 e1000_pci_set_mwi(struct e1000_hw *hw) 3459 { 3460 struct e1000_adapter *adapter = hw->back; 3461 3462 - int ret; 3463 - ret = pci_set_mwi(adapter->pdev); 3464 } 3465 3466 void ··· 3520 rctl |= E1000_RCTL_VFE; 3521 rctl &= ~E1000_RCTL_CFIEN; 3522 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 3523 } else { 3524 /* disable VLAN tag insert/strip */ 3525 ctrl = E1000_READ_REG(&adapter->hw, CTRL); ··· 3531 rctl = E1000_READ_REG(&adapter->hw, RCTL); 3532 rctl &= ~E1000_RCTL_VFE; 3533 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 3534 } 3535 3536 e1000_irq_enable(adapter); ··· 3545 { 3546 struct e1000_adapter *adapter = netdev->priv; 3547 uint32_t vfta, index; 3548 - 3549 /* add VID to filter table */ 3550 index = (vid >> 5) & 0x7F; 3551 vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index); ··· 3569 3570 e1000_irq_enable(adapter); 3571 3572 /* remove VID from filter table */ 3573 index = (vid >> 5) & 0x7F; 3574 vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index); ··· 3619 break; 3620 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 3621 default: 3622 - DPRINTK(PROBE, ERR, 3623 - "Unsupported Speed/Duplex configuration\n"); 3624 return -EINVAL; 3625 } 3626 return 0; ··· 3647 { 3648 struct net_device *netdev = pci_get_drvdata(pdev); 3649 struct e1000_adapter *adapter = netdev->priv; 3650 - uint32_t ctrl, ctrl_ext, rctl, manc, status; 3651 uint32_t wufc = adapter->wol; 3652 3653 netif_device_detach(netdev); ··· 3689 E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext); 3690 } 3691 3692 E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN); 3693 E1000_WRITE_REG(&adapter->hw, WUFC, wufc); 3694 pci_enable_wake(pdev, 3, 1); ··· 3716 } 3717 } 3718 3719 pci_disable_device(pdev); 3720 3721 state = (state > 0) ? 3 : 0; ··· 3740 { 3741 struct net_device *netdev = pci_get_drvdata(pdev); 3742 struct e1000_adapter *adapter = netdev->priv; 3743 - uint32_t manc, ret; 3744 3745 pci_set_power_state(pdev, 0); 3746 pci_restore_state(pdev); 3747 ret = pci_enable_device(pdev); 3748 - if (pdev->is_busmaster) 3749 - pci_set_master(pdev); 3750 3751 pci_enable_wake(pdev, 3, 0); 3752 pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */ ··· 3765 E1000_WRITE_REG(&adapter->hw, MANC, manc); 3766 } 3767 3768 return 0; 3769 } 3770 #endif 3771 - 3772 #ifdef CONFIG_NET_POLL_CONTROLLER 3773 /* 3774 * Polling 'interrupt' - used by things like netconsole to send skbs ··· 3785 * the interrupt routine is executing. 3786 */ 3787 static void 3788 - e1000_netpoll (struct net_device *netdev) 3789 { 3790 struct e1000_adapter *adapter = netdev->priv; 3791 disable_irq(adapter->pdev->irq);
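
The e1000_check_64k_bound() helper shown above implements the errata-23 test with a single XOR: a buffer sits inside one 64 kB region exactly when its first and last bytes agree in every address bit above bit 15. A standalone sketch of the same test, with illustrative names that are not from the driver:

#include <stddef.h>
#include <stdint.h>

/* Nonzero if [start, start + len) spans a 64 kB boundary -- the
 * condition the 82545/82546 errata-23 workaround must avoid. */
static int crosses_64k(uintptr_t start, size_t len)
{
	uintptr_t last = start + len - 1;	/* address of the final byte */
	return ((start ^ last) >> 16) != 0;
}

For example, crosses_64k(0xFFF0, 0x20) is nonzero because bytes 0xFFF0..0x1000F straddle two 64 kB regions, while crosses_64k(0x10000, 0x4000) is zero.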
··· 1 /******************************************************************************* 2 3 4 + Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms of the GNU General Public License as published by the Free ··· 29 #include "e1000.h" 30 31 /* Change Log 32 + * 6.0.44+ 2/15/05 33 + * o applied Anton's patch to resolve tx hang in hardware 34 + * o Applied Andrew Mortons patch - e1000 stops working after resume 35 */ 36 37 char e1000_driver_name[] = "e1000"; ··· 65 #else 66 #define DRIVERNAPI "-NAPI" 67 #endif 68 + #define DRV_VERSION "6.0.54-k2"DRIVERNAPI 69 char e1000_driver_version[] = DRV_VERSION; 70 char e1000_copyright[] = "Copyright (c) 1999-2004 Intel Corporation."; 71 ··· 96 INTEL_E1000_ETHERNET_DEVICE(0x1017), 97 INTEL_E1000_ETHERNET_DEVICE(0x1018), 98 INTEL_E1000_ETHERNET_DEVICE(0x1019), 99 + INTEL_E1000_ETHERNET_DEVICE(0x101A), 100 INTEL_E1000_ETHERNET_DEVICE(0x101D), 101 INTEL_E1000_ETHERNET_DEVICE(0x101E), 102 INTEL_E1000_ETHERNET_DEVICE(0x1026), ··· 110 INTEL_E1000_ETHERNET_DEVICE(0x107B), 111 INTEL_E1000_ETHERNET_DEVICE(0x107C), 112 INTEL_E1000_ETHERNET_DEVICE(0x108A), 113 + INTEL_E1000_ETHERNET_DEVICE(0x108B), 114 + INTEL_E1000_ETHERNET_DEVICE(0x108C), 115 + INTEL_E1000_ETHERNET_DEVICE(0x1099), 116 /* required last entry */ 117 {0,} 118 }; ··· 155 static int e1000_clean(struct net_device *netdev, int *budget); 156 static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter, 157 int *work_done, int work_to_do); 158 + static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, 159 + int *work_done, int work_to_do); 160 #else 161 static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter); 162 + static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter); 163 #endif 164 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter); 165 + static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter); 166 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); 167 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, 168 int cmd); ··· 286 E1000_WRITE_FLUSH(&adapter->hw); 287 } 288 } 289 + void 290 + e1000_update_mng_vlan(struct e1000_adapter *adapter) 291 + { 292 + struct net_device *netdev = adapter->netdev; 293 + uint16_t vid = adapter->hw.mng_cookie.vlan_id; 294 + uint16_t old_vid = adapter->mng_vlan_id; 295 + if(adapter->vlgrp) { 296 + if(!adapter->vlgrp->vlan_devices[vid]) { 297 + if(adapter->hw.mng_cookie.status & 298 + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { 299 + e1000_vlan_rx_add_vid(netdev, vid); 300 + adapter->mng_vlan_id = vid; 301 + } else 302 + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 303 + 304 + if((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) && 305 + (vid != old_vid) && 306 + !adapter->vlgrp->vlan_devices[old_vid]) 307 + e1000_vlan_rx_kill_vid(netdev, old_vid); 308 + } 309 + } 310 + } 311 + 312 int 313 e1000_up(struct e1000_adapter *adapter) 314 { ··· 310 e1000_configure_tx(adapter); 311 e1000_setup_rctl(adapter); 312 e1000_configure_rx(adapter); 313 + adapter->alloc_rx_buf(adapter); 314 315 + #ifdef CONFIG_PCI_MSI 316 + if(adapter->hw.mac_type > e1000_82547_rev_2) { 317 + adapter->have_msi = TRUE; 318 + if((err = pci_enable_msi(adapter->pdev))) { 319 + DPRINTK(PROBE, ERR, 320 + "Unable to allocate MSI interrupt Error: %d\n", err); 321 + adapter->have_msi = FALSE; 322 + } 323 + } 324 + #endif 325 if((err = request_irq(adapter->pdev->irq, &e1000_intr, 326 SA_SHIRQ | 
SA_SAMPLE_RANDOM, 327 + netdev->name, netdev))) { 328 + DPRINTK(PROBE, ERR, 329 + "Unable to allocate interrupt Error: %d\n", err); 330 return err; 331 + } 332 333 mod_timer(&adapter->watchdog_timer, jiffies); 334 335 #ifdef CONFIG_E1000_NAPI 336 netif_poll_enable(netdev); 337 #endif 338 + e1000_irq_enable(adapter); 339 + 340 return 0; 341 } 342 ··· 333 334 e1000_irq_disable(adapter); 335 free_irq(adapter->pdev->irq, netdev); 336 + #ifdef CONFIG_PCI_MSI 337 + if(adapter->hw.mac_type > e1000_82547_rev_2 && 338 + adapter->have_msi == TRUE) 339 + pci_disable_msi(adapter->pdev); 340 + #endif 341 del_timer_sync(&adapter->tx_fifo_stall_timer); 342 del_timer_sync(&adapter->watchdog_timer); 343 del_timer_sync(&adapter->phy_info_timer); ··· 350 e1000_clean_rx_ring(adapter); 351 352 /* If WoL is not enabled 353 + * and management mode is not IAMT 354 * Power down the PHY so no link is implied when interface is down */ 355 + if(!adapter->wol && adapter->hw.mac_type >= e1000_82540 && 356 + adapter->hw.media_type == e1000_media_type_copper && 357 + !e1000_check_mng_mode(&adapter->hw) && 358 + !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN)) { 359 uint16_t mii_reg; 360 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); 361 mii_reg |= MII_CR_POWER_DOWN; 362 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg); 363 + mdelay(1); 364 } 365 } 366 367 void 368 e1000_reset(struct e1000_adapter *adapter) 369 { 370 + struct net_device *netdev = adapter->netdev; 371 + uint32_t pba, manc; 372 + uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF; 373 + uint16_t fc_low_water_mark = E1000_FC_LOW_DIFF; 374 375 /* Repartition Pba for greater than 9k mtu 376 * To take effect CTRL.RST is required. 377 */ 378 379 + switch (adapter->hw.mac_type) { 380 + case e1000_82547: 381 + case e1000_82547_rev_2: 382 + pba = E1000_PBA_30K; 383 + break; 384 + case e1000_82573: 385 + pba = E1000_PBA_12K; 386 + break; 387 + default: 388 + pba = E1000_PBA_48K; 389 + break; 390 + } 391 + 392 + if((adapter->hw.mac_type != e1000_82573) && 393 + (adapter->rx_buffer_len > E1000_RXBUFFER_8192)) { 394 + pba -= 8; /* allocate more FIFO for Tx */ 395 + /* send an XOFF when there is enough space in the 396 + * Rx FIFO to hold one extra full size Rx packet 397 + */ 398 + fc_high_water_mark = netdev->mtu + ENET_HEADER_SIZE + 399 + ETHERNET_FCS_SIZE + 1; 400 + fc_low_water_mark = fc_high_water_mark + 8; 401 + } 402 + 403 + 404 + if(adapter->hw.mac_type == e1000_82547) { 405 adapter->tx_fifo_head = 0; 406 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; 407 adapter->tx_fifo_size = 408 (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT; 409 atomic_set(&adapter->tx_fifo_stall, 0); 410 } 411 + 412 E1000_WRITE_REG(&adapter->hw, PBA, pba); 413 414 /* flow control settings */ 415 adapter->hw.fc_high_water = (pba << E1000_PBA_BYTES_SHIFT) - 416 + fc_high_water_mark; 417 adapter->hw.fc_low_water = (pba << E1000_PBA_BYTES_SHIFT) - 418 + fc_low_water_mark; 419 adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME; 420 adapter->hw.fc_send_xon = 1; 421 adapter->hw.fc = adapter->hw.original_fc; 422 423 + /* Allow time for pending master requests to run */ 424 e1000_reset_hw(&adapter->hw); 425 if(adapter->hw.mac_type >= e1000_82544) 426 E1000_WRITE_REG(&adapter->hw, WUC, 0); 427 if(e1000_init_hw(&adapter->hw)) 428 DPRINTK(PROBE, ERR, "Hardware Error\n"); 429 + e1000_update_mng_vlan(adapter); 430 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ 431 E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE); 432 433 
e1000_reset_adaptive(&adapter->hw); 434 e1000_phy_get_info(&adapter->hw, &adapter->phy_info); 435 + if (adapter->en_mng_pt) { 436 + manc = E1000_READ_REG(&adapter->hw, MANC); 437 + manc |= (E1000_MANC_ARP_EN | E1000_MANC_EN_MNG2HOST); 438 + E1000_WRITE_REG(&adapter->hw, MANC, manc); 439 + } 440 } 441 442 /** ··· 426 { 427 struct net_device *netdev; 428 struct e1000_adapter *adapter; 429 + unsigned long mmio_start, mmio_len; 430 + uint32_t swsm; 431 + 432 static int cards_found = 0; 433 + int i, err, pci_using_dac; 434 uint16_t eeprom_data; 435 uint16_t eeprom_apme_mask = E1000_EEPROM_APME; 436 if((err = pci_enable_device(pdev))) 437 return err; 438 ··· 521 if((err = e1000_sw_init(adapter))) 522 goto err_sw_init; 523 524 + if((err = e1000_check_phy_reset_block(&adapter->hw))) 525 + DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n"); 526 + 527 if(adapter->hw.mac_type >= e1000_82543) { 528 netdev->features = NETIF_F_SG | 529 NETIF_F_HW_CSUM | ··· 533 if((adapter->hw.mac_type >= e1000_82544) && 534 (adapter->hw.mac_type != e1000_82547)) 535 netdev->features |= NETIF_F_TSO; 536 + 537 + #ifdef NETIF_F_TSO_IPV6 538 + if(adapter->hw.mac_type > e1000_82547_rev_2) 539 + netdev->features |= NETIF_F_TSO_IPV6; 540 + #endif 541 #endif 542 if(pci_using_dac) 543 netdev->features |= NETIF_F_HIGHDMA; ··· 540 /* hard_start_xmit is safe against parallel locking */ 541 netdev->features |= NETIF_F_LLTX; 542 543 + adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw); 544 + 545 /* before reading the EEPROM, reset the controller to 546 * put the device in a known good starting state */ 547 ··· 555 556 /* copy the MAC address out of the EEPROM */ 557 558 + if(e1000_read_mac_addr(&adapter->hw)) 559 DPRINTK(PROBE, ERR, "EEPROM Read Error\n"); 560 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); 561 ··· 629 /* reset the hardware with the new settings */ 630 e1000_reset(adapter); 631 632 + /* Let firmware know the driver has taken over */ 633 + switch(adapter->hw.mac_type) { 634 + case e1000_82573: 635 + swsm = E1000_READ_REG(&adapter->hw, SWSM); 636 + E1000_WRITE_REG(&adapter->hw, SWSM, 637 + swsm | E1000_SWSM_DRV_LOAD); 638 + break; 639 + default: 640 + break; 641 + } 642 + 643 strcpy(netdev->name, "eth%d"); 644 if((err = register_netdev(netdev))) 645 goto err_register; ··· 664 { 665 struct net_device *netdev = pci_get_drvdata(pdev); 666 struct e1000_adapter *adapter = netdev->priv; 667 + uint32_t manc, swsm; 668 669 flush_scheduled_work(); 670 ··· 677 } 678 } 679 680 + switch(adapter->hw.mac_type) { 681 + case e1000_82573: 682 + swsm = E1000_READ_REG(&adapter->hw, SWSM); 683 + E1000_WRITE_REG(&adapter->hw, SWSM, 684 + swsm & ~E1000_SWSM_DRV_LOAD); 685 + break; 686 + 687 + default: 688 + break; 689 + } 690 + 691 unregister_netdev(netdev); 692 693 + if(!e1000_check_phy_reset_block(&adapter->hw)) 694 + e1000_phy_hw_reset(&adapter->hw); 695 696 iounmap(adapter->hw.hw_addr); 697 pci_release_regions(pdev); ··· 717 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); 718 719 adapter->rx_buffer_len = E1000_RXBUFFER_2048; 720 + adapter->rx_ps_bsize0 = E1000_RXBUFFER_256; 721 hw->max_frame_size = netdev->mtu + 722 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 723 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE; ··· 730 731 /* initialize eeprom parameters */ 732 733 + if(e1000_init_eeprom_params(hw)) { 734 + E1000_ERR("EEPROM initialization failed\n"); 735 + return -EIO; 736 + } 737 738 switch(hw->mac_type) { 739 default: ··· 795 796 if((err = e1000_up(adapter))) 797 goto err_up; 
798 + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 799 + if((adapter->hw.mng_cookie.status & 800 + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { 801 + e1000_update_mng_vlan(adapter); 802 + } 803 804 return E1000_SUCCESS; 805 ··· 830 e1000_free_tx_resources(adapter); 831 e1000_free_rx_resources(adapter); 832 833 + if((adapter->hw.mng_cookie.status & 834 + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { 835 + e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 836 + } 837 return 0; 838 } 839 840 /** 841 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary 842 * @adapter: address of board private structure 843 + * @start: address of beginning of memory 844 + * @len: length of memory 845 **/ 846 static inline boolean_t 847 e1000_check_64k_bound(struct e1000_adapter *adapter, ··· 846 unsigned long begin = (unsigned long) start; 847 unsigned long end = begin + len; 848 849 + /* First rev 82545 and 82546 need to not allow any memory 850 + * write location to cross 64k boundary due to errata 23 */ 851 if (adapter->hw.mac_type == e1000_82545 || 852 + adapter->hw.mac_type == e1000_82546) { 853 return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE; 854 } 855 ··· 875 size = sizeof(struct e1000_buffer) * txdr->count; 876 txdr->buffer_info = vmalloc(size); 877 if(!txdr->buffer_info) { 878 + DPRINTK(PROBE, ERR, 879 + "Unable to allocate memory for the transmit descriptor ring\n"); 880 return -ENOMEM; 881 } 882 memset(txdr->buffer_info, 0, size); ··· 889 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); 890 if(!txdr->desc) { 891 setup_tx_desc_die: 892 vfree(txdr->buffer_info); 893 + DPRINTK(PROBE, ERR, 894 + "Unable to allocate memory for the transmit descriptor ring\n"); 895 return -ENOMEM; 896 } 897 898 + /* Fix for errata 23, can't cross 64kB boundary */ 899 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { 900 void *olddesc = txdr->desc; 901 dma_addr_t olddma = txdr->dma; 902 + DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes " 903 + "at %p\n", txdr->size, txdr->desc); 904 + /* Try again, without freeing the previous */ 905 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); 906 if(!txdr->desc) { 907 + /* Failed allocation, critical failure */ 908 pci_free_consistent(pdev, txdr->size, olddesc, olddma); 909 goto setup_tx_desc_die; 910 } 911 912 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { 913 /* give up */ 914 + pci_free_consistent(pdev, txdr->size, txdr->desc, 915 + txdr->dma); 916 pci_free_consistent(pdev, txdr->size, olddesc, olddma); 917 DPRINTK(PROBE, ERR, 918 + "Unable to allocate aligned memory " 919 + "for the transmit descriptor ring\n"); 920 vfree(txdr->buffer_info); 921 return -ENOMEM; 922 } else { 923 + /* Free old allocation, new allocation was successful */ 924 pci_free_consistent(pdev, txdr->size, olddesc, olddma); 925 } 926 } ··· 1022 { 1023 struct e1000_desc_ring *rxdr = &adapter->rx_ring; 1024 struct pci_dev *pdev = adapter->pdev; 1025 + int size, desc_len; 1026 1027 size = sizeof(struct e1000_buffer) * rxdr->count; 1028 rxdr->buffer_info = vmalloc(size); 1029 if(!rxdr->buffer_info) { 1030 + DPRINTK(PROBE, ERR, 1031 + "Unable to allocate memory for the receive descriptor ring\n"); 1032 return -ENOMEM; 1033 } 1034 memset(rxdr->buffer_info, 0, size); 1035 1036 + size = sizeof(struct e1000_ps_page) * rxdr->count; 1037 + rxdr->ps_page = kmalloc(size, GFP_KERNEL); 1038 + if(!rxdr->ps_page) { 1039 + vfree(rxdr->buffer_info); 1040 + DPRINTK(PROBE, ERR, 1041 + "Unable to allocate memory for the receive 
descriptor ring\n"); 1042 + return -ENOMEM; 1043 + } 1044 + memset(rxdr->ps_page, 0, size); 1045 + 1046 + size = sizeof(struct e1000_ps_page_dma) * rxdr->count; 1047 + rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL); 1048 + if(!rxdr->ps_page_dma) { 1049 + vfree(rxdr->buffer_info); 1050 + kfree(rxdr->ps_page); 1051 + DPRINTK(PROBE, ERR, 1052 + "Unable to allocate memory for the receive descriptor ring\n"); 1053 + return -ENOMEM; 1054 + } 1055 + memset(rxdr->ps_page_dma, 0, size); 1056 + 1057 + if(adapter->hw.mac_type <= e1000_82547_rev_2) 1058 + desc_len = sizeof(struct e1000_rx_desc); 1059 + else 1060 + desc_len = sizeof(union e1000_rx_desc_packet_split); 1061 + 1062 /* Round up to nearest 4K */ 1063 1064 + rxdr->size = rxdr->count * desc_len; 1065 E1000_ROUNDUP(rxdr->size, 4096); 1066 1067 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); 1068 1069 if(!rxdr->desc) { 1070 setup_rx_desc_die: 1071 vfree(rxdr->buffer_info); 1072 + kfree(rxdr->ps_page); 1073 + kfree(rxdr->ps_page_dma); 1074 + DPRINTK(PROBE, ERR, 1075 + "Unable to allocate memory for the receive descriptor ring\n"); 1076 return -ENOMEM; 1077 } 1078 1079 + /* Fix for errata 23, can't cross 64kB boundary */ 1080 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { 1081 void *olddesc = rxdr->desc; 1082 dma_addr_t olddma = rxdr->dma; 1083 + DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes " 1084 + "at %p\n", rxdr->size, rxdr->desc); 1085 + /* Try again, without freeing the previous */ 1086 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); 1087 if(!rxdr->desc) { 1088 + /* Failed allocation, critical failure */ 1089 pci_free_consistent(pdev, rxdr->size, olddesc, olddma); 1090 goto setup_rx_desc_die; 1091 } 1092 1093 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { 1094 /* give up */ 1095 + pci_free_consistent(pdev, rxdr->size, rxdr->desc, 1096 + rxdr->dma); 1097 pci_free_consistent(pdev, rxdr->size, olddesc, olddma); 1098 + DPRINTK(PROBE, ERR, 1099 + "Unable to allocate aligned memory " 1100 + "for the receive descriptor ring\n"); 1101 vfree(rxdr->buffer_info); 1102 + kfree(rxdr->ps_page); 1103 + kfree(rxdr->ps_page_dma); 1104 return -ENOMEM; 1105 } else { 1106 + /* Free old allocation, new allocation was successful */ 1107 pci_free_consistent(pdev, rxdr->size, olddesc, olddma); 1108 } 1109 } ··· 1087 } 1088 1089 /** 1090 + * e1000_setup_rctl - configure the receive control registers 1091 * @adapter: Board private structure 1092 **/ 1093 1094 static void 1095 e1000_setup_rctl(struct e1000_adapter *adapter) 1096 { 1097 + uint32_t rctl, rfctl; 1098 + uint32_t psrctl = 0; 1099 1100 rctl = E1000_READ_REG(&adapter->hw, RCTL); 1101 ··· 1109 else 1110 rctl &= ~E1000_RCTL_SBP; 1111 1112 + if (adapter->netdev->mtu <= ETH_DATA_LEN) 1113 + rctl &= ~E1000_RCTL_LPE; 1114 + else 1115 + rctl |= E1000_RCTL_LPE; 1116 + 1117 /* Setup buffer sizes */ 1118 + if(adapter->hw.mac_type == e1000_82573) { 1119 + /* We can now specify buffers in 1K increments. 1120 + * BSIZE and BSEX are ignored in this case. 
*/ 1121 + rctl |= adapter->rx_buffer_len << 0x11; 1122 + } else { 1123 + rctl &= ~E1000_RCTL_SZ_4096; 1124 + rctl |= E1000_RCTL_BSEX; 1125 + switch (adapter->rx_buffer_len) { 1126 + case E1000_RXBUFFER_2048: 1127 + default: 1128 + rctl |= E1000_RCTL_SZ_2048; 1129 + rctl &= ~E1000_RCTL_BSEX; 1130 + break; 1131 + case E1000_RXBUFFER_4096: 1132 + rctl |= E1000_RCTL_SZ_4096; 1133 + break; 1134 + case E1000_RXBUFFER_8192: 1135 + rctl |= E1000_RCTL_SZ_8192; 1136 + break; 1137 + case E1000_RXBUFFER_16384: 1138 + rctl |= E1000_RCTL_SZ_16384; 1139 + break; 1140 + } 1141 + } 1142 + 1143 + #ifdef CONFIG_E1000_PACKET_SPLIT 1144 + /* 82571 and greater support packet-split where the protocol 1145 + * header is placed in skb->data and the packet data is 1146 + * placed in pages hanging off of skb_shinfo(skb)->nr_frags. 1147 + * In the case of a non-split, skb->data is linearly filled, 1148 + * followed by the page buffers. Therefore, skb->data is 1149 + * sized to hold the largest protocol header. 1150 + */ 1151 + adapter->rx_ps = (adapter->hw.mac_type > e1000_82547_rev_2) 1152 + && (adapter->netdev->mtu 1153 + < ((3 * PAGE_SIZE) + adapter->rx_ps_bsize0)); 1154 + #endif 1155 + if(adapter->rx_ps) { 1156 + /* Configure extra packet-split registers */ 1157 + rfctl = E1000_READ_REG(&adapter->hw, RFCTL); 1158 + rfctl |= E1000_RFCTL_EXTEN; 1159 + /* disable IPv6 packet split support */ 1160 + rfctl |= E1000_RFCTL_IPV6_DIS; 1161 + E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl); 1162 + 1163 + rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC; 1164 + 1165 + psrctl |= adapter->rx_ps_bsize0 >> 1166 + E1000_PSRCTL_BSIZE0_SHIFT; 1167 + psrctl |= PAGE_SIZE >> 1168 + E1000_PSRCTL_BSIZE1_SHIFT; 1169 + psrctl |= PAGE_SIZE << 1170 + E1000_PSRCTL_BSIZE2_SHIFT; 1171 + psrctl |= PAGE_SIZE << 1172 + E1000_PSRCTL_BSIZE3_SHIFT; 1173 + 1174 + E1000_WRITE_REG(&adapter->hw, PSRCTL, psrctl); 1175 } 1176 1177 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); ··· 1143 e1000_configure_rx(struct e1000_adapter *adapter) 1144 { 1145 uint64_t rdba = adapter->rx_ring.dma; 1146 + uint32_t rdlen, rctl, rxcsum; 1147 + 1148 + if(adapter->rx_ps) { 1149 + rdlen = adapter->rx_ring.count * 1150 + sizeof(union e1000_rx_desc_packet_split); 1151 + adapter->clean_rx = e1000_clean_rx_irq_ps; 1152 + adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; 1153 + } else { 1154 + rdlen = adapter->rx_ring.count * sizeof(struct e1000_rx_desc); 1155 + adapter->clean_rx = e1000_clean_rx_irq; 1156 + adapter->alloc_rx_buf = e1000_alloc_rx_buffers; 1157 + } 1158 1159 /* disable receives while setting up the descriptors */ 1160 rctl = E1000_READ_REG(&adapter->hw, RCTL); ··· 1172 E1000_WRITE_REG(&adapter->hw, RDT, 0); 1173 1174 /* Enable 82543 Receive Checksum Offload for TCP and UDP */ 1175 + if(adapter->hw.mac_type >= e1000_82543) { 1176 rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM); 1177 + if(adapter->rx_csum == TRUE) { 1178 + rxcsum |= E1000_RXCSUM_TUOFL; 1179 + 1180 + /* Enable 82573 IPv4 payload checksum for UDP fragments 1181 + * Must be used in conjunction with packet-split. 
*/ 1182 + if((adapter->hw.mac_type > e1000_82547_rev_2) && 1183 + (adapter->rx_ps)) { 1184 + rxcsum |= E1000_RXCSUM_IPPCSE; 1185 + } 1186 + } else { 1187 + rxcsum &= ~E1000_RXCSUM_TUOFL; 1188 + /* don't need to clear IPPCSE as it defaults to 0 */ 1189 + } 1190 E1000_WRITE_REG(&adapter->hw, RXCSUM, rxcsum); 1191 } 1192 + 1193 + if (adapter->hw.mac_type == e1000_82573) 1194 + E1000_WRITE_REG(&adapter->hw, ERT, 0x0100); 1195 1196 /* Enable Receives */ 1197 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); ··· 1210 e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, 1211 struct e1000_buffer *buffer_info) 1212 { 1213 if(buffer_info->dma) { 1214 + pci_unmap_page(adapter->pdev, 1215 + buffer_info->dma, 1216 + buffer_info->length, 1217 + PCI_DMA_TODEVICE); 1218 buffer_info->dma = 0; 1219 } 1220 if(buffer_info->skb) { ··· 1241 /* Free all the Tx ring sk_buffs */ 1242 1243 if (likely(adapter->previous_buffer_info.skb != NULL)) { 1244 + e1000_unmap_and_free_tx_resource(adapter, 1245 &adapter->previous_buffer_info); 1246 } 1247 ··· 1281 1282 vfree(rx_ring->buffer_info); 1283 rx_ring->buffer_info = NULL; 1284 + kfree(rx_ring->ps_page); 1285 + rx_ring->ps_page = NULL; 1286 + kfree(rx_ring->ps_page_dma); 1287 + rx_ring->ps_page_dma = NULL; 1288 1289 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); 1290 ··· 1297 { 1298 struct e1000_desc_ring *rx_ring = &adapter->rx_ring; 1299 struct e1000_buffer *buffer_info; 1300 + struct e1000_ps_page *ps_page; 1301 + struct e1000_ps_page_dma *ps_page_dma; 1302 struct pci_dev *pdev = adapter->pdev; 1303 unsigned long size; 1304 + unsigned int i, j; 1305 1306 /* Free all the Rx ring sk_buffs */ 1307 1308 for(i = 0; i < rx_ring->count; i++) { 1309 buffer_info = &rx_ring->buffer_info[i]; 1310 if(buffer_info->skb) { 1311 + ps_page = &rx_ring->ps_page[i]; 1312 + ps_page_dma = &rx_ring->ps_page_dma[i]; 1313 pci_unmap_single(pdev, 1314 buffer_info->dma, 1315 buffer_info->length, ··· 1314 1315 dev_kfree_skb(buffer_info->skb); 1316 buffer_info->skb = NULL; 1317 + 1318 + for(j = 0; j < PS_PAGE_BUFFERS; j++) { 1319 + if(!ps_page->ps_page[j]) break; 1320 + pci_unmap_single(pdev, 1321 + ps_page_dma->ps_page_dma[j], 1322 + PAGE_SIZE, PCI_DMA_FROMDEVICE); 1323 + ps_page_dma->ps_page_dma[j] = 0; 1324 + put_page(ps_page->ps_page[j]); 1325 + ps_page->ps_page[j] = NULL; 1326 + } 1327 } 1328 } 1329 1330 size = sizeof(struct e1000_buffer) * rx_ring->count; 1331 memset(rx_ring->buffer_info, 0, size); 1332 + size = sizeof(struct e1000_ps_page) * rx_ring->count; 1333 + memset(rx_ring->ps_page, 0, size); 1334 + size = sizeof(struct e1000_ps_page_dma) * rx_ring->count; 1335 + memset(rx_ring->ps_page_dma, 0, size); 1336 1337 /* Zero out the descriptor ring */ 1338 ··· 1422 struct e1000_adapter *adapter = netdev->priv; 1423 struct e1000_hw *hw = &adapter->hw; 1424 struct dev_mc_list *mc_ptr; 1425 + unsigned long flags; 1426 uint32_t rctl; 1427 uint32_t hash_value; 1428 int i; 1429 1430 spin_lock_irqsave(&adapter->tx_lock, flags); 1431 + 1432 + /* Check for Promiscuous and All Multicast modes */ 1433 1434 rctl = E1000_READ_REG(hw, RCTL); 1435 ··· 1556 uint32_t link; 1557 1558 e1000_check_for_link(&adapter->hw); 1559 + if (adapter->hw.mac_type == e1000_82573) { 1560 + e1000_enable_tx_pkt_filtering(&adapter->hw); 1561 + if(adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id) 1562 + e1000_update_mng_vlan(adapter); 1563 + } 1564 1565 if((adapter->hw.media_type == e1000_media_type_internal_serdes) && 1566 !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE)) ··· 1632 /* 
Cause software interrupt to ensure rx ring is cleaned */ 1633 E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0); 1634 1635 + /* Force detection of hung controller every watchdog period */ 1636 adapter->detect_tx_hung = TRUE; 1637 1638 /* Reset the timer */ ··· 1642 #define E1000_TX_FLAGS_CSUM 0x00000001 1643 #define E1000_TX_FLAGS_VLAN 0x00000002 1644 #define E1000_TX_FLAGS_TSO 0x00000004 1645 + #define E1000_TX_FLAGS_IPV4 0x00000008 1646 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 1647 #define E1000_TX_FLAGS_VLAN_SHIFT 16 1648 ··· 1652 struct e1000_context_desc *context_desc; 1653 unsigned int i; 1654 uint32_t cmd_length = 0; 1655 + uint16_t ipcse = 0, tucse, mss; 1656 uint8_t ipcss, ipcso, tucss, tucso, hdr_len; 1657 int err; 1658 ··· 1665 1666 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); 1667 mss = skb_shinfo(skb)->tso_size; 1668 + if(skb->protocol == ntohs(ETH_P_IP)) { 1669 + skb->nh.iph->tot_len = 0; 1670 + skb->nh.iph->check = 0; 1671 + skb->h.th->check = 1672 + ~csum_tcpudp_magic(skb->nh.iph->saddr, 1673 + skb->nh.iph->daddr, 1674 + 0, 1675 + IPPROTO_TCP, 1676 + 0); 1677 + cmd_length = E1000_TXD_CMD_IP; 1678 + ipcse = skb->h.raw - skb->data - 1; 1679 + #ifdef NETIF_F_TSO_IPV6 1680 + } else if(skb->protocol == ntohs(ETH_P_IPV6)) { 1681 + skb->nh.ipv6h->payload_len = 0; 1682 + skb->h.th->check = 1683 + ~csum_ipv6_magic(&skb->nh.ipv6h->saddr, 1684 + &skb->nh.ipv6h->daddr, 1685 + 0, 1686 + IPPROTO_TCP, 1687 + 0); 1688 + ipcse = 0; 1689 + #endif 1690 + } 1691 ipcss = skb->nh.raw - skb->data; 1692 ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data; 1693 tucss = skb->h.raw - skb->data; 1694 tucso = (void *)&(skb->h.th->check) - (void *)skb->data; 1695 tucse = 0; 1696 1697 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | 1698 + E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); 1699 1700 i = adapter->tx_ring.next_to_use; 1701 context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i); ··· 1760 if(unlikely(mss && !nr_frags && size == len && size > 8)) 1761 size -= 4; 1762 #endif 1763 + /* work-around for errata 10 and it applies 1764 + * to all controllers in PCI-X mode 1765 + * The fix is to make sure that the first descriptor of a 1766 + * packet is smaller than 2048 - 16 - 16 (or 2016) bytes 1767 + */ 1768 + if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) && 1769 + (size > 2015) && count == 0)) 1770 + size = 2015; 1771 + 1772 /* Workaround for potential 82544 hang in PCI-X. Avoid 1773 * terminating buffers within evenly-aligned dwords. 
*/ 1774 if(unlikely(adapter->pcix_82544 && ··· 1840 if(likely(tx_flags & E1000_TX_FLAGS_TSO)) { 1841 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | 1842 E1000_TXD_CMD_TSE; 1843 + txd_upper |= E1000_TXD_POPTS_TXSM << 8; 1844 + 1845 + if(likely(tx_flags & E1000_TX_FLAGS_IPV4)) 1846 + txd_upper |= E1000_TXD_POPTS_IXSM << 8; 1847 } 1848 1849 if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) { ··· 1915 return 0; 1916 } 1917 1918 + #define MINIMUM_DHCP_PACKET_SIZE 282 1919 + static inline int 1920 + e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb) 1921 + { 1922 + struct e1000_hw *hw = &adapter->hw; 1923 + uint16_t length, offset; 1924 + if(vlan_tx_tag_present(skb)) { 1925 + if(!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && 1926 + ( adapter->hw.mng_cookie.status & 1927 + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) ) 1928 + return 0; 1929 + } 1930 + if(htons(ETH_P_IP) == skb->protocol) { 1931 + const struct iphdr *ip = skb->nh.iph; 1932 + if(IPPROTO_UDP == ip->protocol) { 1933 + struct udphdr *udp = (struct udphdr *)(skb->h.uh); 1934 + if(ntohs(udp->dest) == 67) { 1935 + offset = (uint8_t *)udp + 8 - skb->data; 1936 + length = skb->len - offset; 1937 + 1938 + return e1000_mng_write_dhcp_info(hw, 1939 + (uint8_t *)udp + 8, length); 1940 + } 1941 + } 1942 + } else if((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) { 1943 + struct ethhdr *eth = (struct ethhdr *) skb->data; 1944 + if((htons(ETH_P_IP) == eth->h_proto)) { 1945 + const struct iphdr *ip = 1946 + (struct iphdr *)((uint8_t *)skb->data+14); 1947 + if(IPPROTO_UDP == ip->protocol) { 1948 + struct udphdr *udp = 1949 + (struct udphdr *)((uint8_t *)ip + 1950 + (ip->ihl << 2)); 1951 + if(ntohs(udp->dest) == 67) { 1952 + offset = (uint8_t *)udp + 8 - skb->data; 1953 + length = skb->len - offset; 1954 + 1955 + return e1000_mng_write_dhcp_info(hw, 1956 + (uint8_t *)udp + 8, 1957 + length); 1958 + } 1959 + } 1960 + } 1961 + } 1962 + return 0; 1963 + } 1964 + 1965 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) 1966 static int 1967 e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) ··· 1939 1940 #ifdef NETIF_F_TSO 1941 mss = skb_shinfo(skb)->tso_size; 1942 + /* The controller does a simple calculation to 1943 * make sure there is enough room in the FIFO before 1944 * initiating the DMA for each buffer. The calc is: 1945 * 4 = ceil(buffer len/mss). To make sure we don't ··· 1952 1953 if((mss) || (skb->ip_summed == CHECKSUM_HW)) 1954 count++; 1955 + count++; 1956 #else 1957 if(skb->ip_summed == CHECKSUM_HW) 1958 count++; ··· 1960 count += TXD_USE_COUNT(len, max_txd_pwr); 1961 1962 if(adapter->pcix_82544) 1963 + count++; 1964 + 1965 + /* work-around for errata 10 and it applies to all controllers 1966 + * in PCI-X mode, so add one more descriptor to the count 1967 + */ 1968 + if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) && 1969 + (len > 2015))) 1970 count++; 1971 1972 nr_frags = skb_shinfo(skb)->nr_frags; ··· 1975 local_irq_restore(flags); 1976 return NETDEV_TX_LOCKED; 1977 } 1978 + if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) ) 1979 + e1000_transfer_dhcp_info(adapter, skb); 1980 + 1981 1982 /* need: count + 2 desc gap to keep tail from touching 1983 * head, otherwise try next time */ ··· 2010 tx_flags |= E1000_TX_FLAGS_TSO; 2011 else if(likely(e1000_tx_csum(adapter, skb))) 2012 tx_flags |= E1000_TX_FLAGS_CSUM; 2013 + 2014 + /* Old method was to assume IPv4 packet by default if TSO was enabled. 
2015 + * 82573 hardware supports TSO capabilities for IPv6 as well... 2016 + * no longer assume, we must. */ 2017 + if(likely(skb->protocol == ntohs(ETH_P_IP))) 2018 + tx_flags |= E1000_TX_FLAGS_IPV4; 2019 2020 e1000_tx_queue(adapter, 2021 e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss), ··· 2077 e1000_change_mtu(struct net_device *netdev, int new_mtu) 2078 { 2079 struct e1000_adapter *adapter = netdev->priv; 2080 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 2081 2082 if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || ··· 2086 return -EINVAL; 2087 } 2088 2089 + #define MAX_STD_JUMBO_FRAME_SIZE 9216 2090 + /* might want this to be bigger enum check... */ 2091 + if (adapter->hw.mac_type == e1000_82573 && 2092 + max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { 2093 + DPRINTK(PROBE, ERR, "Jumbo Frames not supported " 2094 + "on 82573\n"); 2095 return -EINVAL; 2096 } 2097 2098 + if(adapter->hw.mac_type > e1000_82547_rev_2) { 2099 + adapter->rx_buffer_len = max_frame; 2100 + E1000_ROUNDUP(adapter->rx_buffer_len, 1024); 2101 + } else { 2102 + if(unlikely((adapter->hw.mac_type < e1000_82543) && 2103 + (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) { 2104 + DPRINTK(PROBE, ERR, "Jumbo Frames not supported " 2105 + "on 82542\n"); 2106 + return -EINVAL; 2107 + 2108 + } else { 2109 + if(max_frame <= E1000_RXBUFFER_2048) { 2110 + adapter->rx_buffer_len = E1000_RXBUFFER_2048; 2111 + } else if(max_frame <= E1000_RXBUFFER_4096) { 2112 + adapter->rx_buffer_len = E1000_RXBUFFER_4096; 2113 + } else if(max_frame <= E1000_RXBUFFER_8192) { 2114 + adapter->rx_buffer_len = E1000_RXBUFFER_8192; 2115 + } else if(max_frame <= E1000_RXBUFFER_16384) { 2116 + adapter->rx_buffer_len = E1000_RXBUFFER_16384; 2117 + } 2118 + } 2119 + } 2120 + 2121 + netdev->mtu = new_mtu; 2122 + 2123 + if(netif_running(netdev)) { 2124 e1000_down(adapter); 2125 e1000_up(adapter); 2126 } 2127 2128 adapter->hw.max_frame_size = max_frame; 2129 2130 return 0; ··· 2199 adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC); 2200 adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC); 2201 } 2202 + if(hw->mac_type > e1000_82547_rev_2) { 2203 + adapter->stats.iac += E1000_READ_REG(hw, IAC); 2204 + adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC); 2205 + adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC); 2206 + adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC); 2207 + adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC); 2208 + adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC); 2209 + adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC); 2210 + adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC); 2211 + adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC); 2212 + } 2213 2214 /* Fill out the OS statistics structure */ 2215 ··· 2213 2214 adapter->net_stats.rx_errors = adapter->stats.rxerrc + 2215 adapter->stats.crcerrs + adapter->stats.algnerrc + 2216 + adapter->stats.rlec + adapter->stats.mpc + 2217 + adapter->stats.cexterr; 2218 + adapter->net_stats.rx_dropped = adapter->stats.mpc; 2219 adapter->net_stats.rx_length_errors = adapter->stats.rlec; 2220 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; 2221 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; ··· 2300 */ 2301 if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2){ 2302 atomic_inc(&adapter->irq_sem); 2303 + E1000_WRITE_REG(hw, IMC, ~0); 2304 } 2305 2306 for(i = 0; i < E1000_MAX_INTR; i++) 2307 + if(unlikely(!adapter->clean_rx(adapter) & 2308 !e1000_clean_tx_irq(adapter))) 2309 break; 2310 ··· 2328 int work_to_do = 
min(*budget, netdev->quota); 2329 int tx_cleaned; 2330 int work_done = 0; 2331 + 2332 tx_cleaned = e1000_clean_tx_irq(adapter); 2333 + adapter->clean_rx(adapter, &work_done, work_to_do); 2334 2335 *budget -= work_done; 2336 netdev->quota -= work_done; 2337 2338 + /* If no Tx and no Rx work done, exit the polling mode */ 2339 + if ((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) { 2340 netif_rx_complete(netdev); 2341 e1000_irq_enable(adapter); 2342 return 0; ··· 2367 eop_desc = E1000_TX_DESC(*tx_ring, eop); 2368 2369 while(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) { 2370 + /* Premature writeback of Tx descriptors clear (free buffers 2371 + * and unmap pci_mapping) previous_buffer_info */ 2372 if (likely(adapter->previous_buffer_info.skb != NULL)) { 2373 + e1000_unmap_and_free_tx_resource(adapter, 2374 &adapter->previous_buffer_info); 2375 } 2376 ··· 2380 buffer_info = &tx_ring->buffer_info[i]; 2381 cleaned = (i == eop); 2382 2383 + #ifdef NETIF_F_TSO 2384 + if (!(netdev->features & NETIF_F_TSO)) { 2385 + #endif 2386 + e1000_unmap_and_free_tx_resource(adapter, 2387 + buffer_info); 2388 + #ifdef NETIF_F_TSO 2389 } else { 2390 + if (cleaned) { 2391 + memcpy(&adapter->previous_buffer_info, 2392 + buffer_info, 2393 + sizeof(struct e1000_buffer)); 2394 + memset(buffer_info, 0, 2395 + sizeof(struct e1000_buffer)); 2396 + } else { 2397 + e1000_unmap_and_free_tx_resource( 2398 + adapter, buffer_info); 2399 + } 2400 } 2401 + #endif 2402 2403 tx_desc->buffer_addr = 0; 2404 tx_desc->lower.data = 0; 2405 tx_desc->upper.data = 0; 2406 2407 if(unlikely(++i == tx_ring->count)) i = 0; 2408 } 2409 ··· 2416 netif_wake_queue(netdev); 2417 2418 spin_unlock(&adapter->tx_lock); 2419 if(adapter->detect_tx_hung) { 2420 + 2421 + /* Detect a transmit hang in hardware, this serializes the 2422 * check with the clearing of time_stamp and movement of i */ 2423 adapter->detect_tx_hung = FALSE; 2424 + if (tx_ring->buffer_info[i].dma && 2425 + time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ) 2426 + && !(E1000_READ_REG(&adapter->hw, STATUS) & 2427 + E1000_STATUS_TXOFF)) { 2428 2429 + /* detected Tx unit hang */ 2430 + i = tx_ring->next_to_clean; 2431 + eop = tx_ring->buffer_info[i].next_to_watch; 2432 + eop_desc = E1000_TX_DESC(*tx_ring, eop); 2433 + DPRINTK(TX_ERR, ERR, "Detected Tx Unit Hang\n" 2434 + " TDH <%x>\n" 2435 + " TDT <%x>\n" 2436 + " next_to_use <%x>\n" 2437 + " next_to_clean <%x>\n" 2438 + "buffer_info[next_to_clean]\n" 2439 + " dma <%llx>\n" 2440 + " time_stamp <%lx>\n" 2441 + " next_to_watch <%x>\n" 2442 + " jiffies <%lx>\n" 2443 + " next_to_watch.status <%x>\n", 2444 + E1000_READ_REG(&adapter->hw, TDH), 2445 + E1000_READ_REG(&adapter->hw, TDT), 2446 + tx_ring->next_to_use, 2447 + i, 2448 + tx_ring->buffer_info[i].dma, 2449 + tx_ring->buffer_info[i].time_stamp, 2450 + eop, 2451 + jiffies, 2452 + eop_desc->upper.fields.status); 2453 + netif_stop_queue(netdev); 2454 + } 2455 + } 2456 + #ifdef NETIF_F_TSO 2457 + 2458 + if( unlikely(!(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && 2459 + time_after(jiffies, adapter->previous_buffer_info.time_stamp + HZ))) 2460 + e1000_unmap_and_free_tx_resource( 2461 + adapter, &adapter->previous_buffer_info); 2462 + 2463 + #endif 2464 return cleaned; 2465 } 2466 2467 /** 2468 * e1000_rx_checksum - Receive Checksum Offload for 82543 2469 + * @adapter: board private structure 2470 + * @status_err: receive descriptor status and error fields 2471 + * @csum: receive descriptor csum field 2472 + * @sk_buff: socket buffer with received 
data 2473 **/ 2474 2475 static inline void 2476 e1000_rx_checksum(struct e1000_adapter *adapter, 2477 + uint32_t status_err, uint32_t csum, 2478 + struct sk_buff *skb) 2479 { 2480 + uint16_t status = (uint16_t)status_err; 2481 + uint8_t errors = (uint8_t)(status_err >> 24); 2482 + skb->ip_summed = CHECKSUM_NONE; 2483 + 2484 /* 82543 or newer only */ 2485 + if(unlikely(adapter->hw.mac_type < e1000_82543)) return; 2486 /* Ignore Checksum bit is set */ 2487 + if(unlikely(status & E1000_RXD_STAT_IXSM)) return; 2488 + /* TCP/UDP checksum error bit is set */ 2489 + if(unlikely(errors & E1000_RXD_ERR_TCPE)) { 2490 + /* let the stack verify checksum errors */ 2491 + adapter->hw_csum_err++; 2492 return; 2493 } 2494 + /* TCP/UDP Checksum has not been calculated */ 2495 + if(adapter->hw.mac_type <= e1000_82547_rev_2) { 2496 + if(!(status & E1000_RXD_STAT_TCPCS)) 2497 + return; 2498 } else { 2499 + if(!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) 2500 + return; 2501 + } 2502 + /* It must be a TCP or UDP packet with a valid checksum */ 2503 + if (likely(status & E1000_RXD_STAT_TCPCS)) { 2504 /* TCP checksum is good */ 2505 skb->ip_summed = CHECKSUM_UNNECESSARY; 2506 + } else if (adapter->hw.mac_type > e1000_82547_rev_2) { 2507 + /* IP fragment with UDP payload */ 2508 + /* Hardware complements the payload checksum, so we undo it 2509 + * and then put the value in host order for further stack use. 2510 + */ 2511 + csum = ntohl(csum ^ 0xFFFF); 2512 + skb->csum = csum; 2513 + skb->ip_summed = CHECKSUM_HW; 2514 } 2515 + adapter->hw_csum_good++; 2516 } 2517 2518 /** 2519 + * e1000_clean_rx_irq - Send received data up the network stack; legacy 2520 * @adapter: board private structure 2521 **/ 2522 ··· 2513 if(unlikely(!(rx_desc->status & E1000_RXD_STAT_EOP))) { 2514 /* All receives must fit into a single buffer */ 2515 E1000_DBG("%s: Receive packet consumed multiple" 2516 + " buffers\n", netdev->name); 2517 dev_kfree_skb_irq(skb); 2518 goto next_desc; 2519 } ··· 2539 skb_put(skb, length - ETHERNET_FCS_SIZE); 2540 2541 /* Receive Checksum Offload */ 2542 + e1000_rx_checksum(adapter, 2543 + (uint32_t)(rx_desc->status) | 2544 + ((uint32_t)(rx_desc->errors) << 24), 2545 + rx_desc->csum, skb); 2546 skb->protocol = eth_type_trans(skb, netdev); 2547 #ifdef CONFIG_E1000_NAPI 2548 if(unlikely(adapter->vlgrp && 2549 (rx_desc->status & E1000_RXD_STAT_VP))) { 2550 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 2551 + le16_to_cpu(rx_desc->special) & 2552 + E1000_RXD_SPC_VLAN_MASK); 2553 } else { 2554 netif_receive_skb(skb); 2555 } ··· 2570 2571 rx_desc = E1000_RX_DESC(*rx_ring, i); 2572 } 2573 rx_ring->next_to_clean = i; 2574 + adapter->alloc_rx_buf(adapter); 2575 2576 return cleaned; 2577 } 2578 2579 /** 2580 + * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split 2581 + * @adapter: board private structure 2582 + **/ 2583 + 2584 + static boolean_t 2585 + #ifdef CONFIG_E1000_NAPI 2586 + e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, int *work_done, 2587 + int work_to_do) 2588 + #else 2589 + e1000_clean_rx_irq_ps(struct e1000_adapter *adapter) 2590 + #endif 2591 + { 2592 + struct e1000_desc_ring *rx_ring = &adapter->rx_ring; 2593 + union e1000_rx_desc_packet_split *rx_desc; 2594 + struct net_device *netdev = adapter->netdev; 2595 + struct pci_dev *pdev = adapter->pdev; 2596 + struct e1000_buffer *buffer_info; 2597 + struct e1000_ps_page *ps_page; 2598 + struct e1000_ps_page_dma *ps_page_dma; 2599 + struct sk_buff *skb; 2600 + unsigned int i, j; 2601 + uint32_t length, staterr; 
2602 + boolean_t cleaned = FALSE; 2603 + 2604 + i = rx_ring->next_to_clean; 2605 + rx_desc = E1000_RX_DESC_PS(*rx_ring, i); 2606 + staterr = rx_desc->wb.middle.status_error; 2607 + 2608 + while(staterr & E1000_RXD_STAT_DD) { 2609 + buffer_info = &rx_ring->buffer_info[i]; 2610 + ps_page = &rx_ring->ps_page[i]; 2611 + ps_page_dma = &rx_ring->ps_page_dma[i]; 2612 + #ifdef CONFIG_E1000_NAPI 2613 + if(unlikely(*work_done >= work_to_do)) 2614 + break; 2615 + (*work_done)++; 2616 + #endif 2617 + cleaned = TRUE; 2618 + pci_unmap_single(pdev, buffer_info->dma, 2619 + buffer_info->length, 2620 + PCI_DMA_FROMDEVICE); 2621 + 2622 + skb = buffer_info->skb; 2623 + 2624 + if(unlikely(!(staterr & E1000_RXD_STAT_EOP))) { 2625 + E1000_DBG("%s: Packet Split buffers didn't pick up" 2626 + " the full packet\n", netdev->name); 2627 + dev_kfree_skb_irq(skb); 2628 + goto next_desc; 2629 + } 2630 + 2631 + if(unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) { 2632 + dev_kfree_skb_irq(skb); 2633 + goto next_desc; 2634 + } 2635 + 2636 + length = le16_to_cpu(rx_desc->wb.middle.length0); 2637 + 2638 + if(unlikely(!length)) { 2639 + E1000_DBG("%s: Last part of the packet spanning" 2640 + " multiple descriptors\n", netdev->name); 2641 + dev_kfree_skb_irq(skb); 2642 + goto next_desc; 2643 + } 2644 + 2645 + /* Good Receive */ 2646 + skb_put(skb, length); 2647 + 2648 + for(j = 0; j < PS_PAGE_BUFFERS; j++) { 2649 + if(!(length = le16_to_cpu(rx_desc->wb.upper.length[j]))) 2650 + break; 2651 + 2652 + pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j], 2653 + PAGE_SIZE, PCI_DMA_FROMDEVICE); 2654 + ps_page_dma->ps_page_dma[j] = 0; 2655 + skb_shinfo(skb)->frags[j].page = 2656 + ps_page->ps_page[j]; 2657 + ps_page->ps_page[j] = NULL; 2658 + skb_shinfo(skb)->frags[j].page_offset = 0; 2659 + skb_shinfo(skb)->frags[j].size = length; 2660 + skb_shinfo(skb)->nr_frags++; 2661 + skb->len += length; 2662 + skb->data_len += length; 2663 + } 2664 + 2665 + e1000_rx_checksum(adapter, staterr, 2666 + rx_desc->wb.lower.hi_dword.csum_ip.csum, skb); 2667 + skb->protocol = eth_type_trans(skb, netdev); 2668 + 2669 + #ifdef HAVE_RX_ZERO_COPY 2670 + if(likely(rx_desc->wb.upper.header_status & 2671 + E1000_RXDPS_HDRSTAT_HDRSP)) 2672 + skb_shinfo(skb)->zero_copy = TRUE; 2673 + #endif 2674 + #ifdef CONFIG_E1000_NAPI 2675 + if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) { 2676 + vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 2677 + le16_to_cpu(rx_desc->wb.middle.vlan & 2678 + E1000_RXD_SPC_VLAN_MASK)); 2679 + } else { 2680 + netif_receive_skb(skb); 2681 + } 2682 + #else /* CONFIG_E1000_NAPI */ 2683 + if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) { 2684 + vlan_hwaccel_rx(skb, adapter->vlgrp, 2685 + le16_to_cpu(rx_desc->wb.middle.vlan & 2686 + E1000_RXD_SPC_VLAN_MASK)); 2687 + } else { 2688 + netif_rx(skb); 2689 + } 2690 + #endif /* CONFIG_E1000_NAPI */ 2691 + netdev->last_rx = jiffies; 2692 + 2693 + next_desc: 2694 + rx_desc->wb.middle.status_error &= ~0xFF; 2695 + buffer_info->skb = NULL; 2696 + if(unlikely(++i == rx_ring->count)) i = 0; 2697 + 2698 + rx_desc = E1000_RX_DESC_PS(*rx_ring, i); 2699 + staterr = rx_desc->wb.middle.status_error; 2700 + } 2701 + rx_ring->next_to_clean = i; 2702 + adapter->alloc_rx_buf(adapter); 2703 + 2704 + return cleaned; 2705 + } 2706 + 2707 + /** 2708 + * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended 2709 * @adapter: address of board private structure 2710 **/ 2711 ··· 2592 struct e1000_rx_desc *rx_desc; 2593 struct e1000_buffer *buffer_info; 2594 struct sk_buff 
*skb; 2595 + unsigned int i; 2596 + unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN; 2597 2598 i = rx_ring->next_to_use; 2599 buffer_info = &rx_ring->buffer_info[i]; 2600 2601 while(!buffer_info->skb) { 2602 skb = dev_alloc_skb(bufsz); 2603 + 2604 if(unlikely(!skb)) { 2605 /* Better luck next round */ 2606 break; 2607 } 2608 2609 + /* Fix for errata 23, can't cross 64kB boundary */ 2610 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { 2611 struct sk_buff *oldskb = skb; 2612 + DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes " 2613 + "at %p\n", bufsz, skb->data); 2614 + /* Try again, without freeing the previous */ 2615 skb = dev_alloc_skb(bufsz); 2616 + /* Failed allocation, critical failure */ 2617 if (!skb) { 2618 dev_kfree_skb(oldskb); 2619 break; 2620 } 2621 + 2622 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { 2623 /* give up */ 2624 dev_kfree_skb(skb); 2625 dev_kfree_skb(oldskb); 2626 break; /* while !buffer_info->skb */ 2627 } else { 2628 + /* Use new allocation */ 2629 dev_kfree_skb(oldskb); 2630 } 2631 } 2632 /* Make buffer alignment 2 beyond a 16 byte boundary 2633 * this will result in a 16 byte aligned IP header after 2634 * the 14 byte MAC header is removed ··· 2644 adapter->rx_buffer_len, 2645 PCI_DMA_FROMDEVICE); 2646 2647 + /* Fix for errata 23, can't cross 64kB boundary */ 2648 + if (!e1000_check_64k_bound(adapter, 2649 + (void *)(unsigned long)buffer_info->dma, 2650 + adapter->rx_buffer_len)) { 2651 + DPRINTK(RX_ERR, ERR, 2652 + "dma align check failed: %u bytes at %p\n", 2653 + adapter->rx_buffer_len, 2654 + (void *)(unsigned long)buffer_info->dma); 2655 dev_kfree_skb(skb); 2656 buffer_info->skb = NULL; 2657 2658 + pci_unmap_single(pdev, buffer_info->dma, 2659 adapter->rx_buffer_len, 2660 PCI_DMA_FROMDEVICE); 2661 2662 break; /* while !buffer_info->skb */ 2663 } 2664 rx_desc = E1000_RX_DESC(*rx_ring, i); 2665 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 2666 ··· 2672 * applicable for weak-ordered memory model archs, 2673 * such as IA-64). 
*/ 2674 wmb(); 2675 E1000_WRITE_REG(&adapter->hw, RDT, i); 2676 } 2677 ··· 2680 buffer_info = &rx_ring->buffer_info[i]; 2681 } 2682 2683 + rx_ring->next_to_use = i; 2684 + } 2685 + 2686 + /** 2687 + * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split 2688 + * @adapter: address of board private structure 2689 + **/ 2690 + 2691 + static void 2692 + e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter) 2693 + { 2694 + struct e1000_desc_ring *rx_ring = &adapter->rx_ring; 2695 + struct net_device *netdev = adapter->netdev; 2696 + struct pci_dev *pdev = adapter->pdev; 2697 + union e1000_rx_desc_packet_split *rx_desc; 2698 + struct e1000_buffer *buffer_info; 2699 + struct e1000_ps_page *ps_page; 2700 + struct e1000_ps_page_dma *ps_page_dma; 2701 + struct sk_buff *skb; 2702 + unsigned int i, j; 2703 + 2704 + i = rx_ring->next_to_use; 2705 + buffer_info = &rx_ring->buffer_info[i]; 2706 + ps_page = &rx_ring->ps_page[i]; 2707 + ps_page_dma = &rx_ring->ps_page_dma[i]; 2708 + 2709 + while(!buffer_info->skb) { 2710 + rx_desc = E1000_RX_DESC_PS(*rx_ring, i); 2711 + 2712 + for(j = 0; j < PS_PAGE_BUFFERS; j++) { 2713 + if(unlikely(!ps_page->ps_page[j])) { 2714 + ps_page->ps_page[j] = 2715 + alloc_page(GFP_ATOMIC); 2716 + if(unlikely(!ps_page->ps_page[j])) 2717 + goto no_buffers; 2718 + ps_page_dma->ps_page_dma[j] = 2719 + pci_map_page(pdev, 2720 + ps_page->ps_page[j], 2721 + 0, PAGE_SIZE, 2722 + PCI_DMA_FROMDEVICE); 2723 + } 2724 + /* Refresh the desc even if buffer_addrs didn't 2725 + * change because each write-back erases this info. 2726 + */ 2727 + rx_desc->read.buffer_addr[j+1] = 2728 + cpu_to_le64(ps_page_dma->ps_page_dma[j]); 2729 + } 2730 + 2731 + skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN); 2732 + 2733 + if(unlikely(!skb)) 2734 + break; 2735 + 2736 + /* Make buffer alignment 2 beyond a 16 byte boundary 2737 + * this will result in a 16 byte aligned IP header after 2738 + * the 14 byte MAC header is removed 2739 + */ 2740 + skb_reserve(skb, NET_IP_ALIGN); 2741 + 2742 + skb->dev = netdev; 2743 + 2744 + buffer_info->skb = skb; 2745 + buffer_info->length = adapter->rx_ps_bsize0; 2746 + buffer_info->dma = pci_map_single(pdev, skb->data, 2747 + adapter->rx_ps_bsize0, 2748 + PCI_DMA_FROMDEVICE); 2749 + 2750 + rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); 2751 + 2752 + if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) { 2753 + /* Force memory writes to complete before letting h/w 2754 + * know there are new descriptors to fetch. (Only 2755 + * applicable for weak-ordered memory model archs, 2756 + * such as IA-64). */ 2757 + wmb(); 2758 + /* Hardware increments by 16 bytes, but packet split 2759 + * descriptors are 32 bytes...so we increment tail 2760 + * twice as much. 
2761 + */ 2762 + E1000_WRITE_REG(&adapter->hw, RDT, i<<1); 2763 + } 2764 + 2765 + if(unlikely(++i == rx_ring->count)) i = 0; 2766 + buffer_info = &rx_ring->buffer_info[i]; 2767 + ps_page = &rx_ring->ps_page[i]; 2768 + ps_page_dma = &rx_ring->ps_page_dma[i]; 2769 + } 2770 + 2771 + no_buffers: 2772 rx_ring->next_to_use = i; 2773 } 2774 ··· 2856 e1000_pci_set_mwi(struct e1000_hw *hw) 2857 { 2858 struct e1000_adapter *adapter = hw->back; 2859 + int ret_val = pci_set_mwi(adapter->pdev); 2860 2861 + if(ret_val) 2862 + DPRINTK(PROBE, ERR, "Error in setting MWI\n"); 2863 } 2864 2865 void ··· 2917 rctl |= E1000_RCTL_VFE; 2918 rctl &= ~E1000_RCTL_CFIEN; 2919 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 2920 + e1000_update_mng_vlan(adapter); 2921 } else { 2922 /* disable VLAN tag insert/strip */ 2923 ctrl = E1000_READ_REG(&adapter->hw, CTRL); ··· 2927 rctl = E1000_READ_REG(&adapter->hw, RCTL); 2928 rctl &= ~E1000_RCTL_VFE; 2929 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 2930 + if(adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) { 2931 + e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 2932 + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 2933 + } 2934 } 2935 2936 e1000_irq_enable(adapter); ··· 2937 { 2938 struct e1000_adapter *adapter = netdev->priv; 2939 uint32_t vfta, index; 2940 + if((adapter->hw.mng_cookie.status & 2941 + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 2942 + (vid == adapter->mng_vlan_id)) 2943 + return; 2944 /* add VID to filter table */ 2945 index = (vid >> 5) & 0x7F; 2946 vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index); ··· 2958 2959 e1000_irq_enable(adapter); 2960 2961 + if((adapter->hw.mng_cookie.status & 2962 + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 2963 + (vid == adapter->mng_vlan_id)) 2964 + return; 2965 /* remove VID from filter table */ 2966 index = (vid >> 5) & 0x7F; 2967 vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index); ··· 3004 break; 3005 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 3006 default: 3007 + DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n"); 3008 return -EINVAL; 3009 } 3010 return 0; ··· 3033 { 3034 struct net_device *netdev = pci_get_drvdata(pdev); 3035 struct e1000_adapter *adapter = netdev->priv; 3036 + uint32_t ctrl, ctrl_ext, rctl, manc, status, swsm; 3037 uint32_t wufc = adapter->wol; 3038 3039 netif_device_detach(netdev); ··· 3075 E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext); 3076 } 3077 3078 + /* Allow time for pending master requests to run */ 3079 + e1000_disable_pciex_master(&adapter->hw); 3080 + 3081 E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN); 3082 E1000_WRITE_REG(&adapter->hw, WUFC, wufc); 3083 pci_enable_wake(pdev, 3, 1); ··· 3099 } 3100 } 3101 3102 + switch(adapter->hw.mac_type) { 3103 + case e1000_82573: 3104 + swsm = E1000_READ_REG(&adapter->hw, SWSM); 3105 + E1000_WRITE_REG(&adapter->hw, SWSM, 3106 + swsm & ~E1000_SWSM_DRV_LOAD); 3107 + break; 3108 + default: 3109 + break; 3110 + } 3111 + 3112 pci_disable_device(pdev); 3113 3114 state = (state > 0) ? 
3 : 0; ··· 3113 { 3114 struct net_device *netdev = pci_get_drvdata(pdev); 3115 struct e1000_adapter *adapter = netdev->priv; 3116 + uint32_t manc, ret, swsm; 3117 3118 pci_set_power_state(pdev, 0); 3119 pci_restore_state(pdev); 3120 ret = pci_enable_device(pdev); 3121 + pci_set_master(pdev); 3122 3123 pci_enable_wake(pdev, 3, 0); 3124 pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */ ··· 3139 E1000_WRITE_REG(&adapter->hw, MANC, manc); 3140 } 3141 3142 + switch(adapter->hw.mac_type) { 3143 + case e1000_82573: 3144 + swsm = E1000_READ_REG(&adapter->hw, SWSM); 3145 + E1000_WRITE_REG(&adapter->hw, SWSM, 3146 + swsm | E1000_SWSM_DRV_LOAD); 3147 + break; 3148 + default: 3149 + break; 3150 + } 3151 + 3152 return 0; 3153 } 3154 #endif 3155 #ifdef CONFIG_NET_POLL_CONTROLLER 3156 /* 3157 * Polling 'interrupt' - used by things like netconsole to send skbs ··· 3150 * the interrupt routine is executing. 3151 */ 3152 static void 3153 + e1000_netpoll(struct net_device *netdev) 3154 { 3155 struct e1000_adapter *adapter = netdev->priv; 3156 disable_irq(adapter->pdev->irq);
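
A note on the errata-23 guard that recurs throughout the e1000 descriptor-ring and buffer allocation paths above: e1000_check_64k_bound() decides whether a buffer crosses a 64 kB boundary with a single XOR. Two byte addresses fall in the same 64 kB region exactly when they agree in every bit from bit 16 upward, so ((begin ^ (end - 1)) >> 16) is zero precisely when the buffer fits. A minimal stand-alone sketch of the same test (crosses_64k() is a hypothetical name, not driver code):

    #include <stdbool.h>
    #include <stdio.h>

    /* Same test as the driver's e1000_check_64k_bound(), inverted:
     * true means [begin, begin+len) straddles a 64 kB line. */
    static bool crosses_64k(unsigned long begin, unsigned long len)
    {
            unsigned long end = begin + len;

            /* begin and end-1 lie in the same 64 kB region iff they
             * share all bits from 16 up; XOR exposes any difference. */
            return ((begin ^ (end - 1)) >> 16) != 0;
    }

    int main(void)
    {
            printf("%d\n", crosses_64k(0xFF00, 0x100)); /* 0: ends at 0xFFFF */
            printf("%d\n", crosses_64k(0xFF00, 0x101)); /* 1: reaches 0x10000 */
            return 0;
    }

The retry-then-give-up dance around dev_alloc_skb() and pci_alloc_consistent() exists because neither allocator offers an alignment promise strong enough to rule the crossing out up front.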
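
The TSO path above seeds skb->h.th->check with ~csum_tcpudp_magic(saddr, daddr, 0, IPPROTO_TCP, 0): the raw folded pseudo-header sum with the length left at zero, because the controller adds each segment's own length and payload and writes the final complement itself. A user-space re-creation of that seed, under the usual one's-complement rules (pseudo_hdr_seed() is a hypothetical name; the addresses are assumed to be plain 32-bit values in a consistent byte order):

    #include <stdint.h>
    #include <stdio.h>

    /* One's-complement sum of the TCP pseudo-header, length omitted;
     * the folded result matches what ~csum_tcpudp_magic(..., 0, ...)
     * yields for the same inputs. */
    static uint16_t pseudo_hdr_seed(uint32_t saddr, uint32_t daddr,
                                    uint8_t proto)
    {
            uint32_t sum = 0;

            sum += (saddr >> 16) + (saddr & 0xFFFF);
            sum += (daddr >> 16) + (daddr & 0xFFFF);
            sum += proto;              /* segment length deliberately omitted */

            while (sum >> 16)          /* fold carries back into 16 bits */
                    sum = (sum & 0xFFFF) + (sum >> 16);

            return (uint16_t)sum;      /* raw folded sum, not complemented */
    }

    int main(void)
    {
            /* 192.0.2.1 -> 192.0.2.2, TCP (protocol 6) */
            printf("0x%04x\n", pseudo_hdr_seed(0xC0000201, 0xC0000202, 6));
            return 0;
    }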
+30 -2
drivers/net/e1000/e1000_osdep.h
··· 1 /******************************************************************************* 2 3 4 - Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms of the GNU General Public License as published by the Free ··· 42 #include <linux/sched.h> 43 44 #ifndef msec_delay 45 - #define msec_delay(x) msleep(x) 46 47 /* Some workarounds require millisecond delays and are run during interrupt 48 * context. Most notably, when establishing link, the phy may need tweaking ··· 100 readl((a)->hw_addr + \ 101 (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ 102 ((offset) << 2))) 103 104 #define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, STATUS) 105
··· 1 /******************************************************************************* 2 3 4 + Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms of the GNU General Public License as published by the Free ··· 42 #include <linux/sched.h> 43 44 #ifndef msec_delay 45 + #define msec_delay(x) do { if(in_interrupt()) { \ 46 + /* Don't mdelay in interrupt context! */ \ 47 + BUG(); \ 48 + } else { \ 49 + msleep(x); \ 50 + } } while(0) 51 52 /* Some workarounds require millisecond delays and are run during interrupt 53 * context. Most notably, when establishing link, the phy may need tweaking ··· 95 readl((a)->hw_addr + \ 96 (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ 97 ((offset) << 2))) 98 + 99 + #define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY 100 + #define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY 101 + 102 + #define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \ 103 + writew((value), ((a)->hw_addr + \ 104 + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ 105 + ((offset) << 1)))) 106 + 107 + #define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \ 108 + readw((a)->hw_addr + \ 109 + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ 110 + ((offset) << 1))) 111 + 112 + #define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \ 113 + writeb((value), ((a)->hw_addr + \ 114 + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ 115 + (offset)))) 116 + 117 + #define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \ 118 + readb((a)->hw_addr + \ 119 + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ 120 + (offset))) 121 122 #define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, STATUS) 123
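
The block of new *_REG_ARRAY_{WORD,BYTE} accessors differs from the existing dword form only in how the array index is scaled to a byte offset: 4-byte elements shift the index left by 2, 2-byte by 1, single bytes not at all. A condensed sketch of the pattern as inline functions (rd32/rd16/rd8 are hypothetical stand-ins over a mapped BAR, assuming the standard readl/readw/readb accessors):

    #include <linux/io.h>
    #include <linux/types.h>

    /* Index scaling per element width; base is the mapped register BAR. */
    static inline u32 rd32(void __iomem *base, u32 reg, u32 idx)
    {
            return readl(base + reg + (idx << 2));  /* 4-byte stride */
    }

    static inline u16 rd16(void __iomem *base, u32 reg, u32 idx)
    {
            return readw(base + reg + (idx << 1));  /* 2-byte stride */
    }

    static inline u8 rd8(void __iomem *base, u32 reg, u32 idx)
    {
            return readb(base + reg + idx);         /* 1-byte stride */
    }

The reworked msec_delay() is the other notable change in this header: msleep() schedules, which is illegal in interrupt context, so the macro now BUG()s on misuse instead of silently sleeping from atomic context.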
+1 -2
drivers/net/e1000/e1000_param.c
··· 1 /******************************************************************************* 2 3 4 - Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms of the GNU General Public License as published by the Free ··· 478 DPRINTK(PROBE, INFO, "%s set to dynamic mode\n", 479 opt.name); 480 break; 481 - case -1: 482 default: 483 e1000_validate_option(&adapter->itr, &opt, 484 adapter);
··· 1 /******************************************************************************* 2 3 4 + Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms of the GNU General Public License as published by the Free ··· 478 DPRINTK(PROBE, INFO, "%s set to dynamic mode\n", 479 opt.name); 480 break; 481 default: 482 e1000_validate_option(&adapter->itr, &opt, 483 adapter);
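
The only functional edit here is dropping the bare case -1: label. A case label with no statements of its own, sitting immediately above default:, falls through to it, so the two spellings handle an ITR value of -1 identically. A minimal stand-alone illustration of the fallthrough (not the driver's real option handling):

    #include <stdio.h>

    int main(void)
    {
            int itr = -1;

            switch (itr) {
            case 1:
                    puts("dynamic");
                    break;
            default:        /* -1 lands here with or without its own label */
                    puts("validate");
            }
            return 0;
    }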
+1 -1
drivers/net/ixgb/ixgb.h
··· 110 #define IXGB_TX_QUEUE_WAKE 16 111 112 /* How many Rx Buffers do we bundle into one write to the hardware ? */ 113 - #define IXGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */ 114 115 /* only works for sizes that are powers of 2 */ 116 #define IXGB_ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1)))
··· 110 #define IXGB_TX_QUEUE_WAKE 16 111 112 /* How many Rx Buffers do we bundle into one write to the hardware ? */ 113 + #define IXGB_RX_BUFFER_WRITE 4 /* Must be power of 2 */ 114 115 /* only works for sizes that are powers of 2 */ 116 #define IXGB_ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1)))
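Dropping IXGB_RX_BUFFER_WRITE from 16 to 4 means the RX tail register is bumped after every 4 replenished buffers instead of every 16, handing descriptors back to the hardware sooner at the cost of more MMIO writes. The IXGB_ROUNDUP macro just below it assigns in place, so its first argument must be an lvalue; a worked illustration:

	/* Illustrative fragment: with size a power of 2,
	 * (i + size - 1) & ~(size - 1) rounds i up to the next multiple. */
	int len = 1530;
	IXGB_ROUNDUP(len, 128);   /* (1530 + 127) & ~127 == 1536 */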
+12 -12
drivers/net/ixgb/ixgb_ee.c
··· 411 ixgb_cleanup_eeprom(hw); 412 413 /* clear the init_ctrl_reg_1 to signify that the cache is invalidated */ 414 - ee_map->init_ctrl_reg_1 = EEPROM_ICW1_SIGNATURE_CLEAR; 415 416 return; 417 } ··· 483 DEBUGOUT("ixgb_ee: Checksum invalid.\n"); 484 /* clear the init_ctrl_reg_1 to signify that the cache is 485 * invalidated */ 486 - ee_map->init_ctrl_reg_1 = EEPROM_ICW1_SIGNATURE_CLEAR; 487 return (FALSE); 488 } 489 ··· 579 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 580 581 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 582 - return(ee_map->compatibility); 583 584 return(0); 585 } ··· 616 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 617 618 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 619 - return(ee_map->init_ctrl_reg_1); 620 621 return(0); 622 } ··· 635 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 636 637 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 638 - return(ee_map->init_ctrl_reg_2); 639 640 return(0); 641 } ··· 654 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 655 656 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 657 - return(ee_map->subsystem_id); 658 659 return(0); 660 } ··· 673 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 674 675 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 676 - return(ee_map->subvendor_id); 677 678 return(0); 679 } ··· 692 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 693 694 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 695 - return(ee_map->device_id); 696 697 return(0); 698 } ··· 711 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 712 713 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 714 - return(ee_map->vendor_id); 715 716 return(0); 717 } ··· 730 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 731 732 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 733 - return(ee_map->swdpins_reg); 734 735 return(0); 736 } ··· 749 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 750 751 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 752 - return(ee_map->d3_power); 753 754 return(0); 755 } ··· 768 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 769 770 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 771 - return(ee_map->d0_power); 772 773 return(0); 774 }
··· 411 ixgb_cleanup_eeprom(hw); 412 413 /* clear the init_ctrl_reg_1 to signify that the cache is invalidated */ 414 + ee_map->init_ctrl_reg_1 = le16_to_cpu(EEPROM_ICW1_SIGNATURE_CLEAR); 415 416 return; 417 } ··· 483 DEBUGOUT("ixgb_ee: Checksum invalid.\n"); 484 /* clear the init_ctrl_reg_1 to signify that the cache is 485 * invalidated */ 486 + ee_map->init_ctrl_reg_1 = le16_to_cpu(EEPROM_ICW1_SIGNATURE_CLEAR); 487 return (FALSE); 488 } 489 ··· 579 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 580 581 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 582 + return (le16_to_cpu(ee_map->compatibility)); 583 584 return(0); 585 } ··· 616 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 617 618 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 619 + return (le16_to_cpu(ee_map->init_ctrl_reg_1)); 620 621 return(0); 622 } ··· 635 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 636 637 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 638 + return (le16_to_cpu(ee_map->init_ctrl_reg_2)); 639 640 return(0); 641 } ··· 654 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 655 656 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 657 + return (le16_to_cpu(ee_map->subsystem_id)); 658 659 return(0); 660 } ··· 673 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 674 675 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 676 + return (le16_to_cpu(ee_map->subvendor_id)); 677 678 return(0); 679 } ··· 692 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 693 694 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 695 + return (le16_to_cpu(ee_map->device_id)); 696 697 return(0); 698 } ··· 711 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 712 713 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 714 + return (le16_to_cpu(ee_map->vendor_id)); 715 716 return(0); 717 } ··· 730 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 731 732 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 733 + return (le16_to_cpu(ee_map->swdpins_reg)); 734 735 return(0); 736 } ··· 749 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 750 751 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 752 + return (le16_to_cpu(ee_map->d3_power)); 753 754 return(0); 755 } ··· 768 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 769 770 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 771 + return (le16_to_cpu(ee_map->d0_power)); 772 773 return(0); 774 }
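The EEPROM map is cached in the little-endian layout the part stores, so every accessor now converts at the boundary: le16_to_cpu() compiles away on little-endian hosts and byte-swaps on big-endian ones. (Strictly, the two stores of EEPROM_ICW1_SIGNATURE_CLEAR want cpu_to_le16(), though the two helpers perform the identical swap.) A hedged sketch of the effect; the field typing here is illustrative, not the driver's declaration:

	/* Sketch: raw cached field versus the host-order value returned
	 * to callers. On big-endian CPUs the bytes are swapped; on
	 * little-endian CPUs le16_to_cpu() is a no-op. */
	__le16 raw = ee_map->device_id;   /* e.g. stored as bytes 48 10 */
	u16 id = le16_to_cpu(raw);        /* 0x1048 on every host */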
+3 -1
drivers/net/ixgb/ixgb_ethtool.c
··· 252 uint32_t *reg_start = reg; 253 uint8_t i; 254 255 - regs->version = (adapter->hw.device_id << 16) | adapter->hw.subsystem_id; 256 257 /* General Registers */ 258 *reg++ = IXGB_READ_REG(hw, CTRL0); /* 0 */
··· 252 uint32_t *reg_start = reg; 253 uint8_t i; 254 255 + /* the 1 (one) below indicates an attempt at versioning, if the 256 + * interface in ethtool or the driver this 1 should be incremented */ 257 + regs->version = (1<<24) | hw->revision_id << 16 | hw->device_id; 258 259 /* General Registers */ 260 *reg++ = IXGB_READ_REG(hw, CTRL0); /* 0 */
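With a format byte in bits 31:24, a userspace consumer can distinguish this layout from the old device_id << 16 | subsystem_id packing. A decoding sketch, assuming a hypothetical userspace helper that is not part of ethtool or the driver:

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical userspace-side decode of the new packing. */
	static void decode_regs_version(uint32_t version)
	{
	        uint8_t  format = version >> 24;           /* 1 for this layout */
	        uint8_t  rev    = (version >> 16) & 0xff;  /* PCI revision id */
	        uint16_t device = version & 0xffff;        /* PCI device id */

	        printf("format %u, rev 0x%02x, device 0x%04x\n",
	               format, rev, device);
	}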
+53 -100
drivers/net/ixgb/ixgb_main.c
··· 47 #else 48 #define DRIVERNAPI "-NAPI" 49 #endif 50 - char ixgb_driver_version[] = "1.0.90-k2"DRIVERNAPI; 51 char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation."; 52 53 /* ixgb_pci_tbl - PCI Device ID Table ··· 103 static int ixgb_set_mac(struct net_device *netdev, void *p); 104 static irqreturn_t ixgb_intr(int irq, void *data, struct pt_regs *regs); 105 static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter); 106 #ifdef CONFIG_IXGB_NAPI 107 static int ixgb_clean(struct net_device *netdev, int *budget); 108 static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter, ··· 121 static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid); 122 static void ixgb_restore_vlan(struct ixgb_adapter *adapter); 123 124 - static int ixgb_notify_reboot(struct notifier_block *, unsigned long event, 125 - void *ptr); 126 - static int ixgb_suspend(struct pci_dev *pdev, uint32_t state); 127 - 128 #ifdef CONFIG_NET_POLL_CONTROLLER 129 /* for netdump / net console */ 130 static void ixgb_netpoll(struct net_device *dev); 131 #endif 132 - 133 - struct notifier_block ixgb_notifier_reboot = { 134 - .notifier_call = ixgb_notify_reboot, 135 - .next = NULL, 136 - .priority = 0 137 - }; 138 139 /* Exported from other modules */ 140 141 extern void ixgb_check_options(struct ixgb_adapter *adapter); 142 143 static struct pci_driver ixgb_driver = { 144 - .name = ixgb_driver_name, 145 .id_table = ixgb_pci_tbl, 146 - .probe = ixgb_probe, 147 - .remove = __devexit_p(ixgb_remove), 148 - /* Power Managment Hooks */ 149 - .suspend = NULL, 150 - .resume = NULL 151 }; 152 153 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); ··· 157 static int __init 158 ixgb_init_module(void) 159 { 160 - int ret; 161 printk(KERN_INFO "%s - version %s\n", 162 ixgb_driver_string, ixgb_driver_version); 163 164 printk(KERN_INFO "%s\n", ixgb_copyright); 165 166 - ret = pci_module_init(&ixgb_driver); 167 - if(ret >= 0) { 168 - register_reboot_notifier(&ixgb_notifier_reboot); 169 - } 170 - return ret; 171 } 172 173 module_init(ixgb_init_module); ··· 177 static void __exit 178 ixgb_exit_module(void) 179 { 180 - unregister_reboot_notifier(&ixgb_notifier_reboot); 181 pci_unregister_driver(&ixgb_driver); 182 } 183 ··· 206 { 207 if(atomic_dec_and_test(&adapter->irq_sem)) { 208 IXGB_WRITE_REG(&adapter->hw, IMS, 209 - IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | IXGB_INT_TXDW | 210 - IXGB_INT_RXO | IXGB_INT_LSC); 211 IXGB_WRITE_FLUSH(&adapter->hw); 212 } 213 } ··· 1191 | IXGB_CONTEXT_DESC_CMD_TSE 1192 | IXGB_CONTEXT_DESC_CMD_IP 1193 | IXGB_CONTEXT_DESC_CMD_TCP 1194 - | IXGB_CONTEXT_DESC_CMD_RS 1195 | IXGB_CONTEXT_DESC_CMD_IDE 1196 | (skb->len - (hdr_len))); 1197 1198 if(++i == adapter->tx_ring.count) i = 0; 1199 adapter->tx_ring.next_to_use = i; ··· 1229 context_desc->mss = 0; 1230 context_desc->cmd_type_len = 1231 cpu_to_le32(IXGB_CONTEXT_DESC_TYPE 1232 - | IXGB_TX_DESC_CMD_RS 1233 - | IXGB_TX_DESC_CMD_IDE); 1234 1235 if(++i == adapter->tx_ring.count) i = 0; 1236 adapter->tx_ring.next_to_use = i; ··· 1254 1255 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 1256 unsigned int f; 1257 len -= skb->data_len; 1258 1259 i = tx_ring->next_to_use; ··· 1508 void 1509 ixgb_update_stats(struct ixgb_adapter *adapter) 1510 { 1511 adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL); 1512 adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH); 1513 adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL); 1514 adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH); 1515 - adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL); 1516 - adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH); 1517 - adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL); 1518 - adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH); 1519 adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL); 1520 adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH); 1521 adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL); ··· 1824 struct pci_dev *pdev = adapter->pdev; 1825 struct ixgb_rx_desc *rx_desc, *next_rxd; 1826 struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer; 1827 - struct sk_buff *skb, *next_skb; 1828 uint32_t length; 1829 unsigned int i, j; 1830 boolean_t cleaned = FALSE; ··· 1833 buffer_info = &rx_ring->buffer_info[i]; 1834 1835 while(rx_desc->status & IXGB_RX_DESC_STATUS_DD) { 1836 1837 #ifdef CONFIG_IXGB_NAPI 1838 if(*work_done >= work_to_do) ··· 1842 1843 (*work_done)++; 1844 #endif 1845 skb = buffer_info->skb; 1846 prefetch(skb->data); 1847 1848 if(++i == rx_ring->count) i = 0; ··· 1859 next_skb = next_buffer->skb; 1860 prefetch(next_skb); 1861 1862 - 1863 cleaned = TRUE; 1864 1865 pci_unmap_single(pdev, ··· 1868 1869 length = le16_to_cpu(rx_desc->length); 1870 1871 - if(unlikely(!(rx_desc->status & IXGB_RX_DESC_STATUS_EOP))) { 1872 1873 /* All receives must fit into a single buffer */ 1874 ··· 1876 "length<%x>\n", length); 1877 1878 dev_kfree_skb_irq(skb); 1879 - rx_desc->status = 0; 1880 - buffer_info->skb = NULL; 1881 - 1882 - rx_desc = next_rxd; 1883 - buffer_info = next_buffer; 1884 - continue; 1885 } 1886 1887 if (unlikely(rx_desc->errors ··· 1885 IXGB_RX_DESC_ERRORS_RXE))) { 1886 1887 dev_kfree_skb_irq(skb); 1888 - rx_desc->status = 0; 1889 - buffer_info->skb = NULL; 1890 - 1891 - rx_desc = next_rxd; 1892 - buffer_info = next_buffer; 1893 - continue; 1894 } 1895 1896 /* Good Receive */ ··· 1896 1897 skb->protocol = eth_type_trans(skb, netdev); 1898 #ifdef CONFIG_IXGB_NAPI 1899 - if(adapter->vlgrp && (rx_desc->status & IXGB_RX_DESC_STATUS_VP)) { 1900 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 1901 le16_to_cpu(rx_desc->special) & 1902 IXGB_RX_DESC_SPECIAL_VLAN_MASK); ··· 1904 netif_receive_skb(skb); 1905 } 1906 #else /* CONFIG_IXGB_NAPI */ 1907 - if(adapter->vlgrp && (rx_desc->status & IXGB_RX_DESC_STATUS_VP)) { 1908 vlan_hwaccel_rx(skb, adapter->vlgrp, 1909 le16_to_cpu(rx_desc->special) & 1910 IXGB_RX_DESC_SPECIAL_VLAN_MASK); ··· 1914 #endif /* CONFIG_IXGB_NAPI */ 1915 netdev->last_rx = jiffies; 1916 1917 rx_desc->status = 0; 1918 buffer_info->skb = NULL; 1919 1920 rx_desc = next_rxd; 1921 buffer_info = next_buffer; 1922 } ··· 1955 1956 num_group_tail_writes = IXGB_RX_BUFFER_WRITE; 1957 1958 - /* leave one descriptor unused */ 1959 - while(--cleancount > 0) { 1960 rx_desc = IXGB_RX_DESC(*rx_ring, i); 1961 1962 skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN); ··· 1983 PCI_DMA_FROMDEVICE); 1984 1985 rx_desc->buff_addr = cpu_to_le64(buffer_info->dma); 1986 1987 if((i & ~(num_group_tail_writes- 1)) == i) { 1988 /* Force memory writes to complete before letting h/w ··· 2099 } 2100 } 2101 2102 - /** 2103 - * ixgb_notify_reboot - handles OS notification of reboot event. 2104 - * @param nb notifier block, unused 2105 - * @param event Event being passed to driver to act upon 2106 - * @param p A pointer to our net device 2107 - **/ 2108 - static int 2109 - ixgb_notify_reboot(struct notifier_block *nb, unsigned long event, void *p) 2110 - { 2111 - struct pci_dev *pdev = NULL; 2112 - 2113 - switch(event) { 2114 - case SYS_DOWN: 2115 - case SYS_HALT: 2116 - case SYS_POWER_OFF: 2117 - while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) { 2118 - if (pci_dev_driver(pdev) == &ixgb_driver) 2119 - ixgb_suspend(pdev, 3); 2120 - } 2121 - } 2122 - return NOTIFY_DONE; 2123 - } 2124 - 2125 - /** 2126 - * ixgb_suspend - driver suspend function called from notify. 2127 - * @param pdev pci driver structure used for passing to 2128 - * @param state power state to enter 2129 - **/ 2130 - static int 2131 - ixgb_suspend(struct pci_dev *pdev, uint32_t state) 2132 - { 2133 - struct net_device *netdev = pci_get_drvdata(pdev); 2134 - struct ixgb_adapter *adapter = netdev->priv; 2135 - 2136 - netif_device_detach(netdev); 2137 - 2138 - if(netif_running(netdev)) 2139 - ixgb_down(adapter, TRUE); 2140 - 2141 - pci_save_state(pdev); 2142 - 2143 - state = (state > 0) ? 3 : 0; 2144 - pci_set_power_state(pdev, state); 2145 - msec_delay(200); 2146 - 2147 - return 0; 2148 - } 2149 - 2150 #ifdef CONFIG_NET_POLL_CONTROLLER 2151 /* 2152 * Polling 'interrupt' - used by things like netconsole to send skbs ··· 2109 static void ixgb_netpoll(struct net_device *dev) 2110 { 2111 struct ixgb_adapter *adapter = dev->priv; 2112 disable_irq(adapter->pdev->irq); 2113 ixgb_intr(adapter->pdev->irq, dev, NULL); 2114 enable_irq(adapter->pdev->irq);
··· 47 #else 48 #define DRIVERNAPI "-NAPI" 49 #endif 50 + char ixgb_driver_version[] = "1.0.95-k2"DRIVERNAPI; 51 char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation."; 52 53 /* ixgb_pci_tbl - PCI Device ID Table ··· 103 static int ixgb_set_mac(struct net_device *netdev, void *p); 104 static irqreturn_t ixgb_intr(int irq, void *data, struct pt_regs *regs); 105 static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter); 106 + 107 #ifdef CONFIG_IXGB_NAPI 108 static int ixgb_clean(struct net_device *netdev, int *budget); 109 static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter, ··· 120 static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid); 121 static void ixgb_restore_vlan(struct ixgb_adapter *adapter); 122 123 #ifdef CONFIG_NET_POLL_CONTROLLER 124 /* for netdump / net console */ 125 static void ixgb_netpoll(struct net_device *dev); 126 #endif 127 128 /* Exported from other modules */ 129 130 extern void ixgb_check_options(struct ixgb_adapter *adapter); 131 132 static struct pci_driver ixgb_driver = { 133 + .name = ixgb_driver_name, 134 .id_table = ixgb_pci_tbl, 135 + .probe = ixgb_probe, 136 + .remove = __devexit_p(ixgb_remove), 137 }; 138 139 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); ··· 169 static int __init 170 ixgb_init_module(void) 171 { 172 printk(KERN_INFO "%s - version %s\n", 173 ixgb_driver_string, ixgb_driver_version); 174 175 printk(KERN_INFO "%s\n", ixgb_copyright); 176 177 + return pci_module_init(&ixgb_driver); 178 } 179 180 module_init(ixgb_init_module); ··· 194 static void __exit 195 ixgb_exit_module(void) 196 { 197 pci_unregister_driver(&ixgb_driver); 198 } 199 ··· 224 { 225 if(atomic_dec_and_test(&adapter->irq_sem)) { 226 IXGB_WRITE_REG(&adapter->hw, IMS, 227 + IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | IXGB_INT_TXDW | 228 + IXGB_INT_LSC); 229 IXGB_WRITE_FLUSH(&adapter->hw); 230 } 231 } ··· 1209 | IXGB_CONTEXT_DESC_CMD_TSE 1210 | IXGB_CONTEXT_DESC_CMD_IP 1211 | IXGB_CONTEXT_DESC_CMD_TCP 1212 | IXGB_CONTEXT_DESC_CMD_IDE 1213 | (skb->len - (hdr_len))); 1214 + 1215 1216 if(++i == adapter->tx_ring.count) i = 0; 1217 adapter->tx_ring.next_to_use = i; ··· 1247 context_desc->mss = 0; 1248 context_desc->cmd_type_len = 1249 cpu_to_le32(IXGB_CONTEXT_DESC_TYPE 1250 + | IXGB_TX_DESC_CMD_IDE); 1251 1252 if(++i == adapter->tx_ring.count) i = 0; 1253 adapter->tx_ring.next_to_use = i; ··· 1273 1274 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 1275 unsigned int f; 1276 + 1277 len -= skb->data_len; 1278 1279 i = tx_ring->next_to_use; ··· 1526 void 1527 ixgb_update_stats(struct ixgb_adapter *adapter) 1528 { 1529 + struct net_device *netdev = adapter->netdev; 1530 + 1531 + if((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) || 1532 + (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) { 1533 + u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL); 1534 + u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL); 1535 + u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH); 1536 + u64 bcast = ((u64)bcast_h << 32) | bcast_l; 1537 + 1538 + multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32); 1539 + /* fix up multicast stats by removing broadcasts */ 1540 + multi -= bcast; 1541 + 1542 + adapter->stats.mprcl += (multi & 0xFFFFFFFF); 1543 + adapter->stats.mprch += (multi >> 32); 1544 + adapter->stats.bprcl += bcast_l; 1545 + adapter->stats.bprch += bcast_h; 1546 + } else { 1547 + adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL); 1548 + adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH); 1549 + adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL); 1550 + adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH); 1551 + } 1552 adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL); 1553 adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH); 1554 adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL); 1555 adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH); 1556 adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL); 1557 adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH); 1558 adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL); ··· 1823 struct pci_dev *pdev = adapter->pdev; 1824 struct ixgb_rx_desc *rx_desc, *next_rxd; 1825 struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer; 1826 uint32_t length; 1827 unsigned int i, j; 1828 boolean_t cleaned = FALSE; ··· 1833 buffer_info = &rx_ring->buffer_info[i]; 1834 1835 while(rx_desc->status & IXGB_RX_DESC_STATUS_DD) { 1836 + struct sk_buff *skb, *next_skb; 1837 + u8 status; 1838 1839 #ifdef CONFIG_IXGB_NAPI 1840 if(*work_done >= work_to_do) ··· 1840 1841 (*work_done)++; 1842 #endif 1843 + status = rx_desc->status; 1844 skb = buffer_info->skb; 1845 + 1846 prefetch(skb->data); 1847 1848 if(++i == rx_ring->count) i = 0; ··· 1855 next_skb = next_buffer->skb; 1856 prefetch(next_skb); 1857 1858 cleaned = TRUE; 1859 1860 pci_unmap_single(pdev, ··· 1865 1866 length = le16_to_cpu(rx_desc->length); 1867 1868 + if(unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) { 1869 1870 /* All receives must fit into a single buffer */ 1871 ··· 1873 "length<%x>\n", length); 1874 1875 dev_kfree_skb_irq(skb); 1876 + goto rxdesc_done; 1877 } 1878 1879 if (unlikely(rx_desc->errors ··· 1887 IXGB_RX_DESC_ERRORS_RXE))) { 1888 1889 dev_kfree_skb_irq(skb); 1890 + goto rxdesc_done; 1891 } 1892 1893 /* Good Receive */ ··· 1903 1904 skb->protocol = eth_type_trans(skb, netdev); 1905 #ifdef CONFIG_IXGB_NAPI 1906 + if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) { 1907 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 1908 le16_to_cpu(rx_desc->special) & 1909 IXGB_RX_DESC_SPECIAL_VLAN_MASK); ··· 1911 netif_receive_skb(skb); 1912 } 1913 #else /* CONFIG_IXGB_NAPI */ 1914 + if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) { 1915 vlan_hwaccel_rx(skb, adapter->vlgrp, 1916 le16_to_cpu(rx_desc->special) & 1917 IXGB_RX_DESC_SPECIAL_VLAN_MASK); ··· 1921 #endif /* CONFIG_IXGB_NAPI */ 1922 netdev->last_rx = jiffies; 1923 1924 + rxdesc_done: 1925 + /* clean up descriptor, might be written over by hw */ 1926 rx_desc->status = 0; 1927 buffer_info->skb = NULL; 1928 1929 + /* use prefetched values */ 1930 rx_desc = next_rxd; 1931 buffer_info = next_buffer; 1932 } ··· 1959 1960 num_group_tail_writes = IXGB_RX_BUFFER_WRITE; 1961 1962 + /* leave three descriptors unused */ 1963 + while(--cleancount > 2) { 1964 rx_desc = IXGB_RX_DESC(*rx_ring, i); 1965 1966 skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN); ··· 1987 PCI_DMA_FROMDEVICE); 1988 1989 rx_desc->buff_addr = cpu_to_le64(buffer_info->dma); 1990 + /* guarantee DD bit not set now before h/w gets descriptor 1991 + * this is the rest of the workaround for h/w double 1992 + * writeback. */ 1993 + rx_desc->status = 0; 1994 1995 if((i & ~(num_group_tail_writes- 1)) == i) { 1996 /* Force memory writes to complete before letting h/w ··· 2099 } 2100 } 2101 2102 #ifdef CONFIG_NET_POLL_CONTROLLER 2103 /* 2104 * Polling 'interrupt' - used by things like netconsole to send skbs ··· 2157 static void ixgb_netpoll(struct net_device *dev) 2158 { 2159 struct ixgb_adapter *adapter = dev->priv; 2160 + 2161 disable_irq(adapter->pdev->irq); 2162 ixgb_intr(adapter->pdev->irq, dev, NULL); 2163 enable_irq(adapter->pdev->irq);
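The statistics rework above handles a hardware quirk: in promiscuous or all-multi modes the chip counts broadcasts inside the multicast counters, so the driver joins each split 64-bit counter from its low/high register pair, subtracts the broadcasts, and splits the result back into the two stats fields. The join/adjust/split idiom, as a standalone illustration with the counter values standing in for the MPRCL/MPRCH and BPRCL/BPRCH reads:

	/* Illustration of the 64-bit counter fix-up used above. */
	static void fixup_multicast(u32 mprcl, u32 mprch,
	                            u32 bprcl, u32 bprch,
	                            u32 *out_lo, u32 *out_hi)
	{
	        u64 multi = ((u64)mprch << 32) | mprcl;
	        u64 bcast = ((u64)bprch << 32) | bprcl;

	        multi -= bcast;              /* broadcasts were counted twice */
	        *out_lo = multi & 0xFFFFFFFF;
	        *out_hi = multi >> 32;
	}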
+1 -2
drivers/net/ixgb/ixgb_osdep.h
··· 45 /* Don't mdelay in interrupt context! */ \ 46 BUG(); \ 47 } else { \ 48 - set_current_state(TASK_UNINTERRUPTIBLE); \ 49 - schedule_timeout((x * HZ)/1000 + 2); \ 50 } } while(0) 51 #endif 52
··· 45 /* Don't mdelay in interrupt context! */ \ 46 BUG(); \ 47 } else { \ 48 + msleep(x); \ 49 } } while(0) 50 #endif 51
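Both osdep headers now converge on the same definition: millisecond waits sleep via msleep(), and any interrupt-context caller fails loudly with BUG() instead of silently busy-waiting. A caller that genuinely may run with interrupts off has to choose the busy-wait explicitly; a hedged sketch with a hypothetical caller name:

	/* example_settle is a hypothetical caller, shown only to
	 * illustrate the intended split between contexts. */
	static void example_settle(void)
	{
	        if (in_interrupt())
	                mdelay(10);      /* busy-wait; sleeping is illegal here */
	        else
	                msec_delay(10);  /* sleeps via msleep() */
	}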
+5 -2
drivers/net/pcnet32.c
··· 22 *************************************************************************/ 23 24 #define DRV_NAME "pcnet32" 25 - #define DRV_VERSION "1.30i" 26 - #define DRV_RELDATE "06.28.2004" 27 #define PFX DRV_NAME ": " 28 29 static const char *version = ··· 256 * homepna for selecting HomePNA mode for PCNet/Home 79C978. 257 * v1.30h 24 Jun 2004 Don Fry correctly select auto, speed, duplex in bcr32. 258 * v1.30i 28 Jun 2004 Don Fry change to use module_param. 259 */ 260 261 ··· 396 static int pcnet32_get_regs_len(struct net_device *dev); 397 static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, 398 void *ptr); 399 400 enum pci_flags_bit { 401 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4, ··· 787 } 788 789 clean_up: 790 x = a->read_csr(ioaddr, 15) & 0xFFFF; 791 a->write_csr(ioaddr, 15, (x & ~0x0044)); /* reset bits 6 and 2 */ 792
··· 22 *************************************************************************/ 23 24 #define DRV_NAME "pcnet32" 25 + #define DRV_VERSION "1.30j" 26 + #define DRV_RELDATE "29.04.2005" 27 #define PFX DRV_NAME ": " 28 29 static const char *version = ··· 256 * homepna for selecting HomePNA mode for PCNet/Home 79C978. 257 * v1.30h 24 Jun 2004 Don Fry correctly select auto, speed, duplex in bcr32. 258 * v1.30i 28 Jun 2004 Don Fry change to use module_param. 259 + * v1.30j 29 Apr 2005 Don Fry fix skb/map leak with loopback test. 260 */ 261 262 ··· 395 static int pcnet32_get_regs_len(struct net_device *dev); 396 static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, 397 void *ptr); 398 + static void pcnet32_purge_tx_ring(struct net_device *dev); 399 400 enum pci_flags_bit { 401 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4, ··· 785 } 786 787 clean_up: 788 + pcnet32_purge_tx_ring(dev); 789 x = a->read_csr(ioaddr, 15) & 0xFFFF; 790 a->write_csr(ioaddr, 15, (x & ~0x0044)); /* reset bits 6 and 2 */ 791
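The v1.30j loopback fix matters because the self-test queues skbs with DMA mappings into the transmit ring; before this change the clean_up path reset the chip without releasing them, leaking both the skbs and the mappings. The new pcnet32_purge_tx_ring() call drains the ring first. A sketch of what such a purge typically does, assuming the driver's lp->tx_skbuff / lp->tx_dma_addr bookkeeping (the real function is defined elsewhere in the file and may differ):

	/* Sketch under the stated assumptions, not the driver's exact body. */
	static void example_purge_tx_ring(struct net_device *dev)
	{
	        struct pcnet32_private *lp = dev->priv;
	        int i;

	        for (i = 0; i < TX_RING_SIZE; i++) {
	                if (lp->tx_skbuff[i]) {
	                        pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
	                                         lp->tx_skbuff[i]->len,
	                                         PCI_DMA_TODEVICE);
	                        dev_kfree_skb_any(lp->tx_skbuff[i]);
	                        lp->tx_skbuff[i] = NULL;
	                        lp->tx_dma_addr[i] = 0;
	                }
	        }
	}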
+1
drivers/net/tulip/media.c
··· 174 break; 175 } 176 spin_unlock_irqrestore(&tp->mii_lock, flags); 177 } 178 179 /* Establish sync by sending 32 logic ones. */
··· 174 break; 175 } 176 spin_unlock_irqrestore(&tp->mii_lock, flags); 177 + return; 178 } 179 180 /* Establish sync by sending 32 logic ones. */
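The one-line tulip fix closes a fall-through: the MII path completed its register access and released the lock, then fell into the bit-banged sync sequence meant only for the non-MII transceivers. The added return makes the early exit explicit. Schematically, with hypothetical helper names standing in for the real tulip code:

	/* Schematic of the bug class, not tulip code; mii_write,
	 * bitbang_sync and bitbang_write are hypothetical helpers. */
	extern void mii_write(int val);
	extern void bitbang_sync(void);
	extern void bitbang_write(int val);

	void mdio_write_schematic(int has_mii, int val)
	{
	        if (has_mii) {
	                mii_write(val);
	                return;          /* the fix: don't fall through */
	        }
	        bitbang_sync();          /* only meant for bit-bang PHYs */
	        bitbang_write(val);
	}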