Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

via-rhine: move work from irq handler to softirq and beyond.

- Tx processing is moved from the irq handler to NAPI poll
- link events and obscure event processing is moved to its proper work queue

Locking rules undergo some changes through the driver.

- the driver offers the usual lock-free Tx path
- besides the IRQ handler, the link event task schedules the napi handler.
The driver thus adds some internal locking to prevent a loop when both
must be disabled.
- the reset task keeps being scheduled from the Tx watchdog handler, thus
with implicit Tx queue disabling. It does not need to care about irq,
only napi softirq and competing task.
- it is not worth adding a dedicated lock between {g, s}et_wol and
rhine_shutdown. It should not hurt to narrow it down a bit though.
- rhine_reset_task must keep its huge spin_lock_bh protected section due to:
- races for the CAM registers (see rhine_vlan_rx_{add, kill}_vid)
- implicit use of napi_enable (see init_registers)
- use of the same lock for stats read / update exclusion between
napi rx processing and rhine_get_stats
- rhine_resume requires a softirq disabled section for the same reason
as rhine_reset_task
- {free, request}_irq have been replaced with IntrEnable actions in
rhine_{suspend, resume}. It is hidden behind init_registers for the
latter.

Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>

+227 -179
+227 -179
drivers/net/ethernet/via/via-rhine.c
··· 42 42 43 43 #define DEBUG 44 44 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */ 45 - static int max_interrupt_work = 20; 46 45 47 46 /* Set the copy breakpoint for the copy-only-tiny-frames scheme. 48 47 Setting to > 1518 effectively disables this feature. */ ··· 127 128 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver"); 128 129 MODULE_LICENSE("GPL"); 129 130 130 - module_param(max_interrupt_work, int, 0); 131 131 module_param(debug, int, 0); 132 132 module_param(rx_copybreak, int, 0); 133 133 module_param(avoid_D3, bool, 0); 134 - MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt"); 135 134 MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)"); 136 135 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames"); 137 136 MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)"); ··· 348 351 349 352 /* Bits in the interrupt status/mask registers. */ 350 353 enum intr_status_bits { 351 - IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020, 352 - IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210, 353 - IntrPCIErr=0x0040, 354 - IntrStatsMax=0x0080, IntrRxEarly=0x0100, 355 - IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000, 356 - IntrTxAborted=0x2000, IntrLinkChange=0x4000, 357 - IntrRxWakeUp=0x8000, 358 - IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260, 359 - IntrTxDescRace=0x080000, /* mapped from IntrStatus2 */ 360 - IntrTxErrSummary=0x082218, 354 + IntrRxDone = 0x0001, 355 + IntrTxDone = 0x0002, 356 + IntrRxErr = 0x0004, 357 + IntrTxError = 0x0008, 358 + IntrRxEmpty = 0x0020, 359 + IntrPCIErr = 0x0040, 360 + IntrStatsMax = 0x0080, 361 + IntrRxEarly = 0x0100, 362 + IntrTxUnderrun = 0x0210, 363 + IntrRxOverflow = 0x0400, 364 + IntrRxDropped = 0x0800, 365 + IntrRxNoBuf = 0x1000, 366 + IntrTxAborted = 0x2000, 367 + IntrLinkChange = 0x4000, 368 + IntrRxWakeUp = 0x8000, 369 + IntrTxDescRace = 0x080000, /* mapped from 
IntrStatus2 */ 370 + IntrNormalSummary = IntrRxDone | IntrTxDone, 371 + IntrTxErrSummary = IntrTxDescRace | IntrTxAborted | IntrTxError | 372 + IntrTxUnderrun, 361 373 }; 362 374 363 375 /* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */ ··· 445 439 struct net_device *dev; 446 440 struct napi_struct napi; 447 441 spinlock_t lock; 442 + struct mutex task_lock; 443 + bool task_enable; 444 + struct work_struct slow_event_task; 448 445 struct work_struct reset_task; 449 446 450 447 /* Frequently used values: keep some adjacent for cache effect. */ ··· 485 476 static void mdio_write(struct net_device *dev, int phy_id, int location, int value); 486 477 static int rhine_open(struct net_device *dev); 487 478 static void rhine_reset_task(struct work_struct *work); 479 + static void rhine_slow_event_task(struct work_struct *work); 488 480 static void rhine_tx_timeout(struct net_device *dev); 489 481 static netdev_tx_t rhine_start_tx(struct sk_buff *skb, 490 482 struct net_device *dev); 491 483 static irqreturn_t rhine_interrupt(int irq, void *dev_instance); 492 484 static void rhine_tx(struct net_device *dev); 493 485 static int rhine_rx(struct net_device *dev, int limit); 494 - static void rhine_error(struct net_device *dev, int intr_status); 495 486 static void rhine_set_rx_mode(struct net_device *dev); 496 487 static struct net_device_stats *rhine_get_stats(struct net_device *dev); 497 488 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); ··· 499 490 static int rhine_close(struct net_device *dev); 500 491 static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid); 501 492 static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid); 493 + static void rhine_restart_tx(struct net_device *dev); 502 494 503 495 #define RHINE_WAIT_FOR(condition) \ 504 496 do { \ ··· 530 520 if (rp->quirks & rqStatusWBRace) 531 521 iowrite8(mask >> 16, ioaddr + IntrStatus2); 532 522 iowrite16(mask, ioaddr + IntrStatus); 533 - 
IOSYNC; 523 + mmiowb(); 534 524 } 535 525 536 526 /* ··· 679 669 } 680 670 } 681 671 672 + static void rhine_tx_err(struct rhine_private *rp, u32 status) 673 + { 674 + struct net_device *dev = rp->dev; 675 + 676 + if (status & IntrTxAborted) { 677 + if (debug > 1) 678 + netdev_info(dev, "Abort %08x, frame dropped\n", status); 679 + } 680 + 681 + if (status & IntrTxUnderrun) { 682 + rhine_kick_tx_threshold(rp); 683 + if (debug > 1) 684 + netdev_info(dev, "Transmitter underrun, Tx threshold now %02x\n", 685 + rp->tx_thresh); 686 + } 687 + 688 + if (status & IntrTxDescRace) { 689 + if (debug > 2) 690 + netdev_info(dev, "Tx descriptor write-back race\n"); 691 + } 692 + 693 + if ((status & IntrTxError) && 694 + (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) { 695 + rhine_kick_tx_threshold(rp); 696 + if (debug > 1) 697 + netdev_info(dev, "Unspecified error. Tx threshold now %02x\n", 698 + rp->tx_thresh); 699 + } 700 + 701 + rhine_restart_tx(dev); 702 + } 703 + 704 + static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp) 705 + { 706 + void __iomem *ioaddr = rp->base; 707 + struct net_device_stats *stats = &rp->dev->stats; 708 + 709 + stats->rx_crc_errors += ioread16(ioaddr + RxCRCErrs); 710 + stats->rx_missed_errors += ioread16(ioaddr + RxMissed); 711 + 712 + /* 713 + * Clears the "tally counters" for CRC errors and missed frames(?). 714 + * It has been reported that some chips need a write of 0 to clear 715 + * these, for others the counters are set to 1 when written to and 716 + * instead cleared when read. So we clear them both ways ... 
717 + */ 718 + iowrite32(0, ioaddr + RxMissed); 719 + ioread16(ioaddr + RxCRCErrs); 720 + ioread16(ioaddr + RxMissed); 721 + } 722 + 723 + #define RHINE_EVENT_NAPI_RX (IntrRxDone | \ 724 + IntrRxErr | \ 725 + IntrRxEmpty | \ 726 + IntrRxOverflow | \ 727 + IntrRxDropped | \ 728 + IntrRxNoBuf | \ 729 + IntrRxWakeUp) 730 + 731 + #define RHINE_EVENT_NAPI_TX_ERR (IntrTxError | \ 732 + IntrTxAborted | \ 733 + IntrTxUnderrun | \ 734 + IntrTxDescRace) 735 + #define RHINE_EVENT_NAPI_TX (IntrTxDone | RHINE_EVENT_NAPI_TX_ERR) 736 + 737 + #define RHINE_EVENT_NAPI (RHINE_EVENT_NAPI_RX | \ 738 + RHINE_EVENT_NAPI_TX | \ 739 + IntrStatsMax) 740 + #define RHINE_EVENT_SLOW (IntrPCIErr | IntrLinkChange) 741 + #define RHINE_EVENT (RHINE_EVENT_NAPI | RHINE_EVENT_SLOW) 742 + 682 743 static int rhine_napipoll(struct napi_struct *napi, int budget) 683 744 { 684 745 struct rhine_private *rp = container_of(napi, struct rhine_private, napi); 685 746 struct net_device *dev = rp->dev; 686 747 void __iomem *ioaddr = rp->base; 687 - int work_done; 748 + u16 enable_mask = RHINE_EVENT & 0xffff; 749 + int work_done = 0; 750 + u32 status; 688 751 689 - work_done = rhine_rx(dev, budget); 752 + status = rhine_get_events(rp); 753 + rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW); 754 + 755 + if (status & RHINE_EVENT_NAPI_RX) 756 + work_done += rhine_rx(dev, budget); 757 + 758 + if (status & RHINE_EVENT_NAPI_TX) { 759 + if (status & RHINE_EVENT_NAPI_TX_ERR) { 760 + u8 cmd; 761 + 762 + /* Avoid scavenging before Tx engine turned off */ 763 + RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd) & CmdTxOn)); 764 + cmd = ioread8(ioaddr + ChipCmd); 765 + if ((cmd & CmdTxOn) && (debug > 2)) { 766 + netdev_warn(dev, "%s: Tx engine still on\n", 767 + __func__); 768 + } 769 + } 770 + rhine_tx(dev); 771 + 772 + if (status & RHINE_EVENT_NAPI_TX_ERR) 773 + rhine_tx_err(rp, status); 774 + } 775 + 776 + if (status & IntrStatsMax) { 777 + spin_lock(&rp->lock); 778 + rhine_update_rx_crc_and_missed_errord(rp); 779 + 
spin_unlock(&rp->lock); 780 + } 781 + 782 + if (status & RHINE_EVENT_SLOW) { 783 + enable_mask &= ~RHINE_EVENT_SLOW; 784 + schedule_work(&rp->slow_event_task); 785 + } 690 786 691 787 if (work_done < budget) { 692 788 napi_complete(napi); 693 - 694 - iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | 695 - IntrRxDropped | IntrRxNoBuf | IntrTxAborted | 696 - IntrTxDone | IntrTxError | IntrTxUnderrun | 697 - IntrPCIErr | IntrStatsMax | IntrLinkChange, 698 - ioaddr + IntrEnable); 789 + iowrite16(enable_mask, ioaddr + IntrEnable); 790 + mmiowb(); 699 791 } 700 792 return work_done; 701 793 } ··· 980 868 dev->irq = pdev->irq; 981 869 982 870 spin_lock_init(&rp->lock); 871 + mutex_init(&rp->task_lock); 983 872 INIT_WORK(&rp->reset_task, rhine_reset_task); 873 + INIT_WORK(&rp->slow_event_task, rhine_slow_event_task); 984 874 985 875 rp->mii_if.dev = dev; 986 876 rp->mii_if.mdio_read = mdio_read; ··· 1392 1278 { 1393 1279 struct rhine_private *rp = netdev_priv(dev); 1394 1280 1395 - spin_lock_irq(&rp->lock); 1281 + spin_lock_bh(&rp->lock); 1396 1282 set_bit(vid, rp->active_vlans); 1397 1283 rhine_update_vcam(dev); 1398 - spin_unlock_irq(&rp->lock); 1284 + spin_unlock_bh(&rp->lock); 1399 1285 return 0; 1400 1286 } 1401 1287 ··· 1403 1289 { 1404 1290 struct rhine_private *rp = netdev_priv(dev); 1405 1291 1406 - spin_lock_irq(&rp->lock); 1292 + spin_lock_bh(&rp->lock); 1407 1293 clear_bit(vid, rp->active_vlans); 1408 1294 rhine_update_vcam(dev); 1409 - spin_unlock_irq(&rp->lock); 1295 + spin_unlock_bh(&rp->lock); 1410 1296 return 0; 1411 1297 } 1412 1298 ··· 1436 1322 1437 1323 napi_enable(&rp->napi); 1438 1324 1439 - /* Enable interrupts by setting the interrupt mask. 
*/ 1440 - iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | 1441 - IntrRxDropped | IntrRxNoBuf | IntrTxAborted | 1442 - IntrTxDone | IntrTxError | IntrTxUnderrun | 1443 - IntrPCIErr | IntrStatsMax | IntrLinkChange, 1444 - ioaddr + IntrEnable); 1325 + iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable); 1445 1326 1446 1327 iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8), 1447 1328 ioaddr + ChipCmd); ··· 1516 1407 rhine_enable_linkmon(ioaddr); 1517 1408 } 1518 1409 1410 + static void rhine_task_disable(struct rhine_private *rp) 1411 + { 1412 + mutex_lock(&rp->task_lock); 1413 + rp->task_enable = false; 1414 + mutex_unlock(&rp->task_lock); 1415 + 1416 + cancel_work_sync(&rp->slow_event_task); 1417 + cancel_work_sync(&rp->reset_task); 1418 + } 1419 + 1420 + static void rhine_task_enable(struct rhine_private *rp) 1421 + { 1422 + mutex_lock(&rp->task_lock); 1423 + rp->task_enable = true; 1424 + mutex_unlock(&rp->task_lock); 1425 + } 1426 + 1519 1427 static int rhine_open(struct net_device *dev) 1520 1428 { 1521 1429 struct rhine_private *rp = netdev_priv(dev); ··· 1555 1429 alloc_rbufs(dev); 1556 1430 alloc_tbufs(dev); 1557 1431 rhine_chip_reset(dev); 1432 + rhine_task_enable(rp); 1558 1433 init_registers(dev); 1559 1434 if (debug > 2) 1560 1435 netdev_dbg(dev, "%s() Done - status %04x MII status: %04x\n", ··· 1573 1446 reset_task); 1574 1447 struct net_device *dev = rp->dev; 1575 1448 1576 - /* protect against concurrent rx interrupts */ 1577 - disable_irq(rp->pdev->irq); 1449 + mutex_lock(&rp->task_lock); 1450 + 1451 + if (!rp->task_enable) 1452 + goto out_unlock; 1578 1453 1579 1454 napi_disable(&rp->napi); 1580 - 1581 1455 spin_lock_bh(&rp->lock); 1582 1456 1583 1457 /* clear all descriptors */ ··· 1592 1464 init_registers(dev); 1593 1465 1594 1466 spin_unlock_bh(&rp->lock); 1595 - enable_irq(rp->pdev->irq); 1596 1467 1597 1468 dev->trans_start = jiffies; /* prevent tx timeout */ 1598 1469 dev->stats.tx_errors++; 1599 1470 
netif_wake_queue(dev); 1471 + 1472 + out_unlock: 1473 + mutex_unlock(&rp->task_lock); 1600 1474 } 1601 1475 1602 1476 static void rhine_tx_timeout(struct net_device *dev) ··· 1619 1489 struct rhine_private *rp = netdev_priv(dev); 1620 1490 void __iomem *ioaddr = rp->base; 1621 1491 unsigned entry; 1622 - unsigned long flags; 1623 1492 1624 1493 /* Caution: the write order is important here, set the field 1625 1494 with the "ownership" bits last. */ ··· 1670 1541 rp->tx_ring[entry].tx_status = 0; 1671 1542 1672 1543 /* lock eth irq */ 1673 - spin_lock_irqsave(&rp->lock, flags); 1674 1544 wmb(); 1675 1545 rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn); 1676 1546 wmb(); ··· 1690 1562 if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN) 1691 1563 netif_stop_queue(dev); 1692 1564 1693 - spin_unlock_irqrestore(&rp->lock, flags); 1694 - 1695 1565 if (debug > 4) { 1696 1566 netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n", 1697 1567 rp->cur_tx-1, entry); 1698 1568 } 1699 1569 return NETDEV_TX_OK; 1570 + } 1571 + 1572 + static void rhine_irq_disable(struct rhine_private *rp) 1573 + { 1574 + iowrite16(0x0000, rp->base + IntrEnable); 1575 + mmiowb(); 1700 1576 } 1701 1577 1702 1578 /* The interrupt handler does all of the Rx thread work and cleans up ··· 1709 1577 { 1710 1578 struct net_device *dev = dev_instance; 1711 1579 struct rhine_private *rp = netdev_priv(dev); 1712 - void __iomem *ioaddr = rp->base; 1713 - u32 intr_status; 1714 - int boguscnt = max_interrupt_work; 1580 + u32 status; 1715 1581 int handled = 0; 1716 1582 1717 - while ((intr_status = rhine_get_events(rp))) { 1583 + status = rhine_get_events(rp); 1584 + 1585 + if (debug > 4) 1586 + netdev_dbg(dev, "Interrupt, status %08x\n", status); 1587 + 1588 + if (status & RHINE_EVENT) { 1718 1589 handled = 1; 1719 1590 1720 - /* Acknowledge all of the current interrupt sources ASAP. 
*/ 1721 - rhine_ack_events(rp, intr_status); 1722 - 1723 - if (debug > 4) 1724 - netdev_dbg(dev, "Interrupt, status %08x\n", 1725 - intr_status); 1726 - 1727 - if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped | 1728 - IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) { 1729 - iowrite16(IntrTxAborted | 1730 - IntrTxDone | IntrTxError | IntrTxUnderrun | 1731 - IntrPCIErr | IntrStatsMax | IntrLinkChange, 1732 - ioaddr + IntrEnable); 1733 - 1734 - napi_schedule(&rp->napi); 1735 - } 1736 - 1737 - if (intr_status & (IntrTxErrSummary | IntrTxDone)) { 1738 - if (intr_status & IntrTxErrSummary) { 1739 - /* Avoid scavenging before Tx engine turned off */ 1740 - RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn)); 1741 - if (debug > 2 && 1742 - ioread8(ioaddr+ChipCmd) & CmdTxOn) 1743 - netdev_warn(dev, 1744 - "%s: Tx engine still on\n", 1745 - __func__); 1746 - } 1747 - rhine_tx(dev); 1748 - } 1749 - 1750 - /* Abnormal error summary/uncommon events handlers. */ 1751 - if (intr_status & (IntrPCIErr | IntrLinkChange | 1752 - IntrStatsMax | IntrTxError | IntrTxAborted | 1753 - IntrTxUnderrun | IntrTxDescRace)) 1754 - rhine_error(dev, intr_status); 1755 - 1756 - if (--boguscnt < 0) { 1757 - netdev_warn(dev, "Too much work at interrupt, status=%#08x\n", 1758 - intr_status); 1759 - break; 1760 - } 1591 + rhine_irq_disable(rp); 1592 + napi_schedule(&rp->napi); 1761 1593 } 1762 1594 1763 - if (debug > 3) 1764 - netdev_dbg(dev, "exiting interrupt, status=%08x\n", 1765 - ioread16(ioaddr + IntrStatus)); 1595 + if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) { 1596 + if (debug > 1) 1597 + netdev_err(dev, "Something Wicked happened! 
%08x\n", 1598 + status); 1599 + } 1600 + 1766 1601 return IRQ_RETVAL(handled); 1767 1602 } 1768 1603 ··· 1739 1640 { 1740 1641 struct rhine_private *rp = netdev_priv(dev); 1741 1642 int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE; 1742 - 1743 - spin_lock(&rp->lock); 1744 1643 1745 1644 /* find and cleanup dirty tx descriptors */ 1746 1645 while (rp->dirty_tx != rp->cur_tx) { ··· 1793 1696 } 1794 1697 if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4) 1795 1698 netif_wake_queue(dev); 1796 - 1797 - spin_unlock(&rp->lock); 1798 1699 } 1799 1700 1800 1701 /** ··· 1943 1848 return count; 1944 1849 } 1945 1850 1946 - /* 1947 - * Clears the "tally counters" for CRC errors and missed frames(?). 1948 - * It has been reported that some chips need a write of 0 to clear 1949 - * these, for others the counters are set to 1 when written to and 1950 - * instead cleared when read. So we clear them both ways ... 1951 - */ 1952 - static inline void clear_tally_counters(void __iomem *ioaddr) 1953 - { 1954 - iowrite32(0, ioaddr + RxMissed); 1955 - ioread16(ioaddr + RxCRCErrs); 1956 - ioread16(ioaddr + RxMissed); 1957 - } 1958 - 1959 1851 static void rhine_restart_tx(struct net_device *dev) { 1960 1852 struct rhine_private *rp = netdev_priv(dev); 1961 1853 void __iomem *ioaddr = rp->base; ··· 1981 1899 1982 1900 } 1983 1901 1984 - static void rhine_error(struct net_device *dev, int intr_status) 1902 + static void rhine_slow_event_task(struct work_struct *work) 1985 1903 { 1986 - struct rhine_private *rp = netdev_priv(dev); 1987 - void __iomem *ioaddr = rp->base; 1904 + struct rhine_private *rp = 1905 + container_of(work, struct rhine_private, slow_event_task); 1906 + struct net_device *dev = rp->dev; 1907 + u32 intr_status; 1988 1908 1989 - spin_lock(&rp->lock); 1909 + mutex_lock(&rp->task_lock); 1910 + 1911 + if (!rp->task_enable) 1912 + goto out_unlock; 1913 + 1914 + intr_status = rhine_get_events(rp); 1915 + rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW); 1990 1916 1991 
1917 if (intr_status & IntrLinkChange) 1992 1918 rhine_check_media(dev, 0); 1993 - if (intr_status & IntrStatsMax) { 1994 - dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs); 1995 - dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed); 1996 - clear_tally_counters(ioaddr); 1997 - } 1998 - if (intr_status & IntrTxAborted) { 1999 - if (debug > 1) 2000 - netdev_info(dev, "Abort %08x, frame dropped\n", 2001 - intr_status); 2002 - } 2003 - if (intr_status & IntrTxUnderrun) { 2004 - rhine_kick_tx_threshold(rp); 2005 - if (debug > 1) 2006 - netdev_info(dev, "Transmitter underrun, Tx threshold now %02x\n", 2007 - rp->tx_thresh); 2008 - } 2009 - if (intr_status & IntrTxDescRace) { 2010 - if (debug > 2) 2011 - netdev_info(dev, "Tx descriptor write-back race\n"); 2012 - } 2013 - if ((intr_status & IntrTxError) && 2014 - (intr_status & (IntrTxAborted | 2015 - IntrTxUnderrun | IntrTxDescRace)) == 0) { 2016 - rhine_kick_tx_threshold(rp); 2017 - if (debug > 1) 2018 - netdev_info(dev, "Unspecified error. Tx threshold now %02x\n", 2019 - rp->tx_thresh); 2020 - } 2021 - if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace | 2022 - IntrTxError)) 2023 - rhine_restart_tx(dev); 2024 1919 2025 - if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun | 2026 - IntrTxError | IntrTxAborted | IntrNormalSummary | 2027 - IntrTxDescRace)) { 2028 - if (debug > 1) 2029 - netdev_err(dev, "Something Wicked happened! %08x\n", 2030 - intr_status); 2031 - } 1920 + napi_disable(&rp->napi); 1921 + rhine_irq_disable(rp); 1922 + /* Slow and safe. Consider __napi_schedule as a replacement ? 
*/ 1923 + napi_enable(&rp->napi); 1924 + napi_schedule(&rp->napi); 2032 1925 2033 - spin_unlock(&rp->lock); 1926 + out_unlock: 1927 + mutex_unlock(&rp->task_lock); 2034 1928 } 2035 1929 2036 1930 static struct net_device_stats *rhine_get_stats(struct net_device *dev) 2037 1931 { 2038 1932 struct rhine_private *rp = netdev_priv(dev); 2039 - void __iomem *ioaddr = rp->base; 2040 - unsigned long flags; 2041 1933 2042 - spin_lock_irqsave(&rp->lock, flags); 2043 - dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs); 2044 - dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed); 2045 - clear_tally_counters(ioaddr); 2046 - spin_unlock_irqrestore(&rp->lock, flags); 1934 + spin_lock_bh(&rp->lock); 1935 + rhine_update_rx_crc_and_missed_errord(rp); 1936 + spin_unlock_bh(&rp->lock); 2047 1937 2048 1938 return &dev->stats; 2049 1939 } ··· 2082 2028 struct rhine_private *rp = netdev_priv(dev); 2083 2029 int rc; 2084 2030 2085 - spin_lock_irq(&rp->lock); 2031 + mutex_lock(&rp->task_lock); 2086 2032 rc = mii_ethtool_gset(&rp->mii_if, cmd); 2087 - spin_unlock_irq(&rp->lock); 2033 + mutex_unlock(&rp->task_lock); 2088 2034 2089 2035 return rc; 2090 2036 } ··· 2094 2040 struct rhine_private *rp = netdev_priv(dev); 2095 2041 int rc; 2096 2042 2097 - spin_lock_irq(&rp->lock); 2043 + mutex_lock(&rp->task_lock); 2098 2044 rc = mii_ethtool_sset(&rp->mii_if, cmd); 2099 - spin_unlock_irq(&rp->lock); 2100 2045 rhine_set_carrier(&rp->mii_if); 2046 + mutex_unlock(&rp->task_lock); 2101 2047 2102 2048 return rc; 2103 2049 } ··· 2179 2125 if (!netif_running(dev)) 2180 2126 return -EINVAL; 2181 2127 2182 - spin_lock_irq(&rp->lock); 2128 + mutex_lock(&rp->task_lock); 2183 2129 rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL); 2184 - spin_unlock_irq(&rp->lock); 2185 2130 rhine_set_carrier(&rp->mii_if); 2131 + mutex_unlock(&rp->task_lock); 2186 2132 2187 2133 return rc; 2188 2134 } ··· 2192 2138 struct rhine_private *rp = netdev_priv(dev); 2193 2139 void __iomem *ioaddr = rp->base; 
2194 2140 2141 + rhine_task_disable(rp); 2195 2142 napi_disable(&rp->napi); 2196 - cancel_work_sync(&rp->reset_task); 2197 2143 netif_stop_queue(dev); 2198 - 2199 - spin_lock_irq(&rp->lock); 2200 2144 2201 2145 if (debug > 1) 2202 2146 netdev_dbg(dev, "Shutting down ethercard, status was %04x\n", ··· 2203 2151 /* Switch to loopback mode to avoid hardware races. */ 2204 2152 iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig); 2205 2153 2206 - /* Disable interrupts by clearing the interrupt mask. */ 2207 - iowrite16(0x0000, ioaddr + IntrEnable); 2154 + rhine_irq_disable(rp); 2208 2155 2209 2156 /* Stop the chip's Tx and Rx processes. */ 2210 2157 iowrite16(CmdStop, ioaddr + ChipCmd); 2211 - 2212 - spin_unlock_irq(&rp->lock); 2213 2158 2214 2159 free_irq(rp->pdev->irq, dev); 2215 2160 free_rbufs(dev); ··· 2247 2198 if (rp->quirks & rq6patterns) 2248 2199 iowrite8(0x04, ioaddr + WOLcgClr); 2249 2200 2201 + spin_lock(&rp->lock); 2202 + 2250 2203 if (rp->wolopts & WAKE_MAGIC) { 2251 2204 iowrite8(WOLmagic, ioaddr + WOLcrSet); 2252 2205 /* ··· 2273 2222 iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW); 2274 2223 } 2275 2224 2225 + spin_unlock(&rp->lock); 2226 + 2276 2227 /* Hit power state D3 (sleep) */ 2277 2228 if (!avoid_D3) 2278 2229 iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW); ··· 2288 2235 { 2289 2236 struct net_device *dev = pci_get_drvdata(pdev); 2290 2237 struct rhine_private *rp = netdev_priv(dev); 2291 - unsigned long flags; 2292 2238 2293 2239 if (!netif_running(dev)) 2294 2240 return 0; 2295 2241 2242 + rhine_task_disable(rp); 2243 + rhine_irq_disable(rp); 2296 2244 napi_disable(&rp->napi); 2297 2245 2298 2246 netif_device_detach(dev); 2299 2247 pci_save_state(pdev); 2300 2248 2301 - spin_lock_irqsave(&rp->lock, flags); 2302 2249 rhine_shutdown(pdev); 2303 - spin_unlock_irqrestore(&rp->lock, flags); 2304 2250 2305 - free_irq(dev->irq, dev); 2306 2251 return 0; 2307 2252 } 2308 2253 ··· 2308 2257 { 2309 2258 struct net_device 
*dev = pci_get_drvdata(pdev); 2310 2259 struct rhine_private *rp = netdev_priv(dev); 2311 - unsigned long flags; 2312 2260 int ret; 2313 2261 2314 2262 if (!netif_running(dev)) 2315 2263 return 0; 2316 - 2317 - if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev)) 2318 - netdev_err(dev, "request_irq failed\n"); 2319 2264 2320 2265 ret = pci_set_power_state(pdev, PCI_D0); 2321 2266 if (debug > 1) ··· 2320 2273 2321 2274 pci_restore_state(pdev); 2322 2275 2323 - spin_lock_irqsave(&rp->lock, flags); 2324 2276 #ifdef USE_MMIO 2325 2277 enable_mmio(rp->pioaddr, rp->quirks); 2326 2278 #endif ··· 2328 2282 free_rbufs(dev); 2329 2283 alloc_tbufs(dev); 2330 2284 alloc_rbufs(dev); 2285 + rhine_task_enable(rp); 2286 + spin_lock_bh(&rp->lock); 2331 2287 init_registers(dev); 2332 - spin_unlock_irqrestore(&rp->lock, flags); 2288 + spin_unlock_bh(&rp->lock); 2333 2289 2334 2290 netif_device_attach(dev); 2335 2291